/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

			   July 30, 2001 First version being submitted
			   for inclusion in the kernel.  V2.4

    See Documentation/scsi/dpti.txt for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 *                                                                         *
 *   This program is free software; you can redistribute it and/or modify  *
 *   it under the terms of the GNU General Public License as published by  *
 *   the Free Software Foundation; either version 2 of the License, or     *
 *   (at your option) any later version.                                   *
 *                                                                         *
 ***************************************************************************/
/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long loops
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <asm/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/pgtable.h>
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1), (-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};




/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for description
 * */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);
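/*
 * A rough sketch of the synchronous posting flow (my reading of the code
 * below, not authoritative documentation):
 *
 *   adpt_i2o_post_wait()
 *     - allocates an adpt_i2o_post_wait_data, links it into
 *       adpt_post_wait_queue and stamps the outgoing message with its id
 *     - posts the frame with adpt_i2o_post_this() and sleeps on a wait queue
 *   adpt_i2o_post_wait_complete() (from the interrupt path)
 *     - matches the reply context id against the queue, records the status
 *       and wakes the sleeper
 */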


/*============================================================================
 * 				Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

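/*
 * Read the firmware "blink LED" code.  The 0xbc check appears to verify
 * that the firmware debug flag area is valid before the value register is
 * trusted; 0 means the LED is not blinking (an inference from the code,
 * not from documentation).
 */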
static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes cannot be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}


/*
 * scsi_unregister will be called AFTER we return.
 */
static int adpt_release(struct Scsi_Host *host)
{
	adpt_hba* pHba = (adpt_hba*) host->hostdata[0];
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_unregister(host);
	return 0;
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;		// SINGLE SGE, 64 bit
	else
		reqlen = 14;		// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return ;
}


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
				host->can_queue - 1);
	} else {
		scsi_adjust_queue_depth(device, 0, 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	/*
	 * TODO: I need to block here if I am processing ioctl cmds
	 * but if the outstanding cmds all finish before the ioctl,
	 * the scsi-core will not know to start sending cmds to me again.
	 * I need a way to restart the scsi-core's queues or should I block
	 * calling scsi_done on the outstanding cmds instead
	 * for now we don't set the IOCTL state
	 */
	if(((pHba->state) & DPTI_STATE_IOCTL) || ((pHba->state) & DPTI_STATE_RESET)) {
		pHba->host->last_reset = jiffies;
		pHba->host->resetting = 1;
		return 1;
	}

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

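/*
 * DEF_SCSI_QCMD() generates the adpt_queue() wrapper that takes the host
 * lock and then calls adpt_queue_lck() above.
 */
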
static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First lets set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	/* sector_div() divides capacity in place and returns the remainder,
	   so take the quotient as the cylinder count */
	sector_div(capacity, heads * sectors);
	cylinders = (int)capacity;

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
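
/*
 * Worked example for the geometry above (a sanity check, not from the
 * original source): a 1 GiB disk has 0x200000 512-byte sectors, so
 * heads = 255, sectors = 63 and cylinders = 0x200000 / (255 * 63) = 130.
 */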


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
		  int length, int inout)
{
	struct adpt_device* d;
	int id;
	int chan;
	int len = 0;
	int begin = 0;
	int pos = 0;
	adpt_hba* pHba;
	int unit;

	*start = buffer;
	if (inout == TRUE) {
		/*
		 * The user has done a write and wants us to take the
		 * data in the buffer and do something with it.
		 * proc_scsiwrite calls us with inout = 1
		 *
		 * Read data from buffer (writing to us) - NOT SUPPORTED
		 */
		return -EINVAL;
	}

	/*
	 * inout = 0 means the user has done a read and wants information
	 * returned, so we write information about the cards into the buffer
	 * proc_scsiread() calls us with inout = 0
	 */

	// Find HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	len = sprintf(buffer, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	len += sprintf(buffer+len, "%s\n", pHba->detail);
	len += sprintf(buffer+len, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	len += sprintf(buffer+len, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	pos = begin + len;

	/* CHECKPOINT */
	if(pos > offset + length) {
		goto stop_output;
	}
	if(pos <= offset) {
		/*
		 * If we haven't even written to where we last left
		 * off (the last time we were called), reset the
		 * beginning pointer.
		 */
		len = 0;
		begin = pos;
	}
	len += sprintf(buffer+len, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d){
				len += sprintf(buffer+len,"\t%-24.24s", d->pScsi_dev->vendor);
				len += sprintf(buffer+len," Rev: %-8.8s\n", d->pScsi_dev->rev);
				pos = begin + len;


				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				unit = d->pI2o_dev->lct_data.tid;
				len += sprintf(buffer+len, "\tTID=%d, (Channel=%d, Target=%d, Lun=%d)  (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, (int)d->scsi_lun,
					scsi_device_online(d->pScsi_dev)? "online":"offline");
				pos = begin + len;

				/* CHECKPOINT */
				if(pos > offset + length) {
					goto stop_output;
				}
				if(pos <= offset) {
					len = 0;
					begin = pos;
				}

				d = d->next_lun;
			}
		}
	}

	/*
	 * begin is where we last checked our position with regard to offset.
	 * begin is always less than offset.  len is relative to begin.  It
	 * is the number of bytes written past begin
	 *
	 */
stop_output:
	/* stop the output and calculate the correct length */
	*(buffer + len) = '\0';

	*start = buffer + (offset - begin);	/* Start of wanted data */
	len -= (offset - begin);
	if(len > length) {
		len = length;
	} else if(len < 0){
		len = 0;
		**start = '\0';
	}
	return len;
}

/*
 *	Turn a struct scsi_cmnd * into a unique 32 bit 'context'.
 */
static u32 adpt_cmd_to_context(struct scsi_cmnd *cmd)
{
	return (u32)cmd->serial_number;
}
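
/*
 * Note: on 64-bit kernels cmd->serial_number is wider than u32, so the
 * cast above keeps only the low 32 bits.  The driver appears to assume
 * these are unique among outstanding commands.
 */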

/*
 *	Go from a u32 'context' to a struct scsi_cmnd * .
 *	This could probably be made more efficient.
 */
static struct scsi_cmnd *
	adpt_cmd_from_context(adpt_hba * pHba, u32 context)
{
	struct scsi_cmnd * cmd;
	struct scsi_device * d;

	if (context == 0)
		return NULL;

	spin_unlock(pHba->host->host_lock);
	shost_for_each_device(d, pHba->host) {
		unsigned long flags;
		spin_lock_irqsave(&d->list_lock, flags);
		list_for_each_entry(cmd, &d->cmd_list, list) {
			if (((u32)cmd->serial_number == context)) {
				spin_unlock_irqrestore(&d->list_lock, flags);
				scsi_device_put(d);
				spin_lock(pHba->host->host_lock);
				return cmd;
			}
		}
		spin_unlock_irqrestore(&d->list_lock, flags);
	}
	spin_lock(pHba->host->host_lock);

	return NULL;
}

/*
 *	Turn a pointer to ioctl reply data into an u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		kfree (reply);
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 *	Go from an u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
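
/*
 * The two context schemes above exist because a kernel pointer no longer
 * fits in the 32-bit I2O context field on 64-bit platforms; there the
 * driver hands out an index into pHba->ioctl_reply_context[] instead of
 * the raw reply pointer.
 */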

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	if(cmd->serial_number == 0){
		return FAILED;
	}
	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = adpt_cmd_to_context(cmd);
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n",pHba->name,cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n",pHba->name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n",pHba->name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	adpt_fail_posted_scbs(pHba);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO"Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO"   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 *	See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    pci_set_dma_mask(pDev, DMA_BIT_MASK(64)) == 0) {
		if (dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32))
			dma64 = 1;
	}
	if (!dma64 && pci_set_dma_mask(pDev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	pci_set_consistent_dma_mask(pDev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else { // Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 *	The original Adaptec 64 bit driver has this comment here:
	 *	"x86_64 machines need more optimal mappings"
	 *
	 *	I assume some HBAs report ridiculously large mappings
	 *	and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	// scsi_unregister calls our adpt_release which
	// does a quiesce
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u32 lun)
{
	struct adpt_device* d;

	if(chan < 0 || chan >= MAX_CHANNEL)
		return NULL;

	if( pHba->channel[chan].device == NULL){
		printk(KERN_DEBUG"Adaptec I2O RAID: Trying to find a device before devices are allocated\n");
		return NULL;
	}

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match*/
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else{
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
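
/*
 * Context encoding used for post-wait messages (as far as I can tell from
 * this file): the top bit (0x80000000) marks a post-wait context whose low
 * 15 bits are the wait id set above, while ioctl passthru replies use the
 * 0x40000000 marker set in adpt_i2o_passthru().
 */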


static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{

	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	//post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
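
/*
 * The inbound post protocol above: reading the post port returns the
 * offset of a free message frame (or EMPTY_QUEUE when none is available),
 * the request is copied into that frame, and the same offset is written
 * back to the post port to hand the frame to the IOP.
 */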


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n",p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}
	memset(status,0,4);

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because controller may awake and corrupt
			   those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for message frame to become available
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	s16 scsi_lun;
	u32 buf[10]; // larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;
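	/* The arithmetic above assumes a 3-dword LCT header followed by
	   9-dword entries - an observation from this driver's i2o_lct
	   layout, not a statement from the I2O spec. */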

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = (buf[2]>>8 )&0xff;
			if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt-but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = (buf[2]>>8 )&0xff;
				if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
							pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 *	Each I2O controller has a chain of devices on it - these match
 *	the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
	//	mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}


static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void *sg_list[pHba->sg_tablesize];
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4; // Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000; // IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1)
		return -EBUSY;

	memset(sg_list,0, sizeof(sg_list[0])*pHba->sg_tablesize);
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			kfree (reply);
			return -EINVAL;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p; // sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}
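	/*
	 * Layout assumed above for struct sg_simple_element: flag_count
	 * packs the SGL flags into the high bits and the byte count into
	 * the low 24 bits (hence the & 0xffffff), and addr_bus is a 32-bit
	 * bus address - which is why this passthru path is marked as a
	 * 32-bit-only API in the TODOs.
	 */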
1890
1891 do {
1892 if(pHba->host)
1893 spin_lock_irqsave(pHba->host->host_lock, flags);
1894 // This state stops any new commands from enterring the
1895 // controller while processing the ioctl
1896// pHba->state |= DPTI_STATE_IOCTL;
1897// We can't set this now - The scsi subsystem sets host_blocked and
1898// the queue empties and stops. We need a way to restart the queue
1899 rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
1900 if (rcode != 0)
1901 printk("adpt_i2o_passthru: post wait failed %d %p\n",
1902 rcode, reply);
1903// pHba->state &= ~DPTI_STATE_IOCTL;
1904 if(pHba->host)
1905 spin_unlock_irqrestore(pHba->host->host_lock, flags);
1906 } while(rcode == -ETIMEDOUT);
1907
1908 if(rcode){
1909 goto cleanup;
1910 }
1911
1912 if(sg_offset) {
1913 /* Copy back the Scatter Gather buffers back to user space */
1914 u32 j;
1915 // TODO add 64 bit API
1916 struct sg_simple_element* sg;
1917 int sg_size;
1918
1919 // re-acquire the original message to handle correctly the sg copy operation
1920 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
1921 // get user msg size in u32s
1922 if(get_user(size, &user_msg[0])){
1923 rcode = -EFAULT;
1924 goto cleanup;
1925 }
1926 size = size>>16;
1927 size *= 4;
1928 if (size > MAX_MESSAGE_SIZE) {
1929 rcode = -EINVAL;
1930 goto cleanup;
1931 }
1932 /* Copy in the user's I2O command */
1933 if (copy_from_user (msg, user_msg, size)) {
1934 rcode = -EFAULT;
1935 goto cleanup;
1936 }
1937 sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
1938
1939 // TODO add 64 bit API
1940 sg = (struct sg_simple_element*)(msg + sg_offset);
1941 for (j = 0; j < sg_count; j++) {
1942 /* Copy out the SG list to user's buffer if necessary */
1943 if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
1944 sg_size = sg[j].flag_count & 0xffffff;
1945 // sg_simple_element API is 32 bit
1946 if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
1947 printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
1948 rcode = -EFAULT;
1949 goto cleanup;
1950 }
1951 }
1952 }
1953 }
1954
1955 /* Copy back the reply to user space */
1956 if (reply_size) {
1957 // we wrote our own values for context - now restore the user supplied ones
1958 if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
1959 printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
1960 rcode = -EFAULT;
1961 }
1962 if(copy_to_user(user_reply, reply, reply_size)) {
1963 printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
1964 rcode = -EFAULT;
1965 }
1966 }
1967
1968
1969cleanup:
1970 if (rcode != -ETIME && rcode != -EINTR) {
1971 struct sg_simple_element *sg =
1972 (struct sg_simple_element*) (msg +sg_offset);
1973 kfree (reply);
1974 while(sg_index) {
1975 if(sg_list[--sg_index]) {
1976 dma_free_coherent(&pHba->pDev->dev,
1977 sg[sg_index].flag_count & 0xffffff,
1978 sg_list[sg_index],
1979 sg[sg_index].addr_bus);
1980 }
1981 }
1982 }
1983 return rcode;
1984}
1985
1986#if defined __ia64__
1987static void adpt_ia64_info(sysInfo_S* si)
1988{
1989 // This is all the info we need for now
1990 // We will add more info as our new
1991	// management utility requires it
1992 si->processorType = PROC_IA64;
1993}
1994#endif
1995
1996#if defined __sparc__
1997static void adpt_sparc_info(sysInfo_S* si)
1998{
1999 // This is all the info we need for now
2000 // We will add more info as our new
2001	// management utility requires it
2002 si->processorType = PROC_ULTRASPARC;
2003}
2004#endif
2005#if defined __alpha__
2006static void adpt_alpha_info(sysInfo_S* si)
2007{
2008 // This is all the info we need for now
2009 // We will add more info as our new
2010	// management utility requires it
2011 si->processorType = PROC_ALPHA;
2012}
2013#endif
2014
2015#if defined __i386__
2016static void adpt_i386_info(sysInfo_S* si)
2017{
2018 // This is all the info we need for now
2019 // We will add more info as our new
2020	// management utility requires it
2021 switch (boot_cpu_data.x86) {
2022 case CPU_386:
2023 si->processorType = PROC_386;
2024 break;
2025 case CPU_486:
2026 si->processorType = PROC_486;
2027 break;
2028 case CPU_586:
2029 si->processorType = PROC_PENTIUM;
2030 break;
2031 default: // Just in case
2032 si->processorType = PROC_PENTIUM;
2033 break;
2034 }
2035}
2036#endif
2037
2038/*
2039 * This routine returns information about the system. It does not affect
2040 * any logic and if the info is wrong - it doesn't matter.
2041 */
2042
2043/* Get all the info we can not get from kernel services */
2044static int adpt_system_info(void __user *buffer)
2045{
2046 sysInfo_S si;
2047
2048 memset(&si, 0, sizeof(si));
2049
2050 si.osType = OS_LINUX;
2051 si.osMajorVersion = 0;
2052 si.osMinorVersion = 0;
2053 si.osRevision = 0;
2054 si.busType = SI_PCI_BUS;
2055 si.processorFamily = DPTI_sig.dsProcessorFamily;
2056
2057#if defined __i386__
2058 adpt_i386_info(&si);
2059#elif defined (__ia64__)
2060 adpt_ia64_info(&si);
2061#elif defined(__sparc__)
2062 adpt_sparc_info(&si);
2063#elif defined (__alpha__)
2064 adpt_alpha_info(&si);
2065#else
2066 si.processorType = 0xff ;
2067#endif
2068 if (copy_to_user(buffer, &si, sizeof(si))){
2069 printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
2070 return -EFAULT;
2071 }
2072
2073 return 0;
2074}
2075
2076static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
2077{
2078 int minor;
2079 int error = 0;
2080 adpt_hba* pHba;
2081 ulong flags = 0;
2082 void __user *argp = (void __user *)arg;
2083
2084 minor = iminor(inode);
2085 if (minor >= DPTI_MAX_HBA){
2086 return -ENXIO;
2087 }
2088 mutex_lock(&adpt_configuration_lock);
2089 for (pHba = hba_chain; pHba; pHba = pHba->next) {
2090 if (pHba->unit == minor) {
2091 break; /* found adapter */
2092 }
2093 }
2094 mutex_unlock(&adpt_configuration_lock);
2095 if(pHba == NULL){
2096 return -ENXIO;
2097 }
2098
2099 while((volatile u32) pHba->state & DPTI_STATE_RESET )
2100 schedule_timeout_uninterruptible(2);
2101
2102 switch (cmd) {
2103 // TODO: handle 3 cases
2104 case DPT_SIGNATURE:
2105 if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
2106 return -EFAULT;
2107 }
2108 break;
2109 case I2OUSRCMD:
2110 return adpt_i2o_passthru(pHba, argp);
2111
2112 case DPT_CTRLINFO:{
2113 drvrHBAinfo_S HbaInfo;
2114
2115#define FLG_OSD_PCI_VALID 0x0001
2116#define FLG_OSD_DMA 0x0002
2117#define FLG_OSD_I2O 0x0004
2118 memset(&HbaInfo, 0, sizeof(HbaInfo));
2119 HbaInfo.drvrHBAnum = pHba->unit;
2120 HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
2121 HbaInfo.blinkState = adpt_read_blink_led(pHba);
2122 HbaInfo.pciBusNum = pHba->pDev->bus->number;
2123 HbaInfo.pciDeviceNum=PCI_SLOT(pHba->pDev->devfn);
2124 HbaInfo.Interrupt = pHba->pDev->irq;
2125 HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
2126 if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
2127 printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
2128 return -EFAULT;
2129 }
2130 break;
2131 }
2132 case DPT_SYSINFO:
2133 return adpt_system_info(argp);
2134 case DPT_BLINKLED:{
2135 u32 value;
2136 value = (u32)adpt_read_blink_led(pHba);
2137 if (copy_to_user(argp, &value, sizeof(value))) {
2138 return -EFAULT;
2139 }
2140 break;
2141 }
2142 case I2ORESETCMD:
2143 if(pHba->host)
2144 spin_lock_irqsave(pHba->host->host_lock, flags);
2145 adpt_hba_reset(pHba);
2146 if(pHba->host)
2147 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2148 break;
2149 case I2ORESCANCMD:
2150 adpt_rescan(pHba);
2151 break;
2152 default:
2153 return -EINVAL;
2154 }
2155
2156 return error;
2157}
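
/*
 * Illustrative user-space sketch (not part of this driver): assuming the
 * /dev/dpti<n> node has been created and the ioctl definitions from
 * dpti.h are visible to the application, a management tool could read
 * the blink LED code roughly like this. The device name and error
 * handling here are hypothetical.
 *
 *	int fd = open("/dev/dpti0", O_RDWR);
 *	uint32_t led;
 *
 *	if (fd >= 0 && ioctl(fd, DPT_BLINKLED, &led) == 0)
 *		printf("blink LED code: 0x%x\n", led);
 */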
2158
2159static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
2160{
2161 struct inode *inode;
2162 long ret;
2163
2164 inode = file->f_dentry->d_inode;
2165
2166 mutex_lock(&adpt_mutex);
2167 ret = adpt_ioctl(inode, file, cmd, arg);
2168 mutex_unlock(&adpt_mutex);
2169
2170 return ret;
2171}
2172
2173#ifdef CONFIG_COMPAT
2174static long compat_adpt_ioctl(struct file *file,
2175 unsigned int cmd, unsigned long arg)
2176{
2177 struct inode *inode;
2178 long ret;
2179
2180 inode = file->f_dentry->d_inode;
2181
2182 mutex_lock(&adpt_mutex);
2183
2184 switch(cmd) {
2185 case DPT_SIGNATURE:
2186 case I2OUSRCMD:
2187 case DPT_CTRLINFO:
2188 case DPT_SYSINFO:
2189 case DPT_BLINKLED:
2190 case I2ORESETCMD:
2191 case I2ORESCANCMD:
2192 case (DPT_TARGET_BUSY & 0xFFFF):
2193 case DPT_TARGET_BUSY:
2194 ret = adpt_ioctl(inode, file, cmd, arg);
2195 break;
2196 default:
2197 ret = -ENOIOCTLCMD;
2198 }
2199
2200 mutex_unlock(&adpt_mutex);
2201
2202 return ret;
2203}
2204#endif
2205
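/*
 * Interrupt handler. While the pending bit is set we pop reply MFAs from
 * the outbound FIFO, map each back to a virtual address in the reply
 * pool, patch up failed messages (recovering the transaction context and
 * returning the old frame with a NOP), then dispatch the completion to
 * the ioctl/post-wait logic or to the owning SCSI command before writing
 * the MFA back to the reply port.
 */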
2206static irqreturn_t adpt_isr(int irq, void *dev_id)
2207{
2208 struct scsi_cmnd* cmd;
2209 adpt_hba* pHba = dev_id;
2210 u32 m;
2211 void __iomem *reply;
2212 u32 status=0;
2213 u32 context;
2214 ulong flags = 0;
2215 int handled = 0;
2216
2217 if (pHba == NULL){
2218 printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
2219 return IRQ_NONE;
2220 }
2221 if(pHba->host)
2222 spin_lock_irqsave(pHba->host->host_lock, flags);
2223
2224 while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
2225 m = readl(pHba->reply_port);
2226 if(m == EMPTY_QUEUE){
2227 // Try twice then give up
2228 rmb();
2229 m = readl(pHba->reply_port);
2230 if(m == EMPTY_QUEUE){
2231 // This really should not happen
2232 printk(KERN_ERR"dpti: Could not get reply frame\n");
2233 goto out;
2234 }
2235 }
2236 if (pHba->reply_pool_pa <= m &&
2237 m < pHba->reply_pool_pa +
2238 (pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
2239 reply = (u8 *)pHba->reply_pool +
2240 (m - pHba->reply_pool_pa);
2241 } else {
2242 /* Ick, we should *never* be here */
2243 printk(KERN_ERR "dpti: reply frame not from pool\n");
2244 reply = (u8 *)bus_to_virt(m);
2245 }
2246
2247 if (readl(reply) & MSG_FAIL) {
2248 u32 old_m = readl(reply+28);
2249 void __iomem *msg;
2250 u32 old_context;
2251 PDEBUG("%s: Failed message\n",pHba->name);
2252 if(old_m >= 0x100000){
2253 printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
2254 writel(m,pHba->reply_port);
2255 continue;
2256 }
2257 // Transaction context is 0 in failed reply frame
2258 msg = pHba->msg_addr_virt + old_m;
2259 old_context = readl(msg+12);
2260 writel(old_context, reply+12);
2261 adpt_send_nop(pHba, old_m);
2262 }
2263 context = readl(reply+8);
2264 if(context & 0x40000000){ // IOCTL
2265 void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
2266 if( p != NULL) {
2267 memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
2268 }
2269 // All IOCTLs will also be post wait
2270 }
2271 if(context & 0x80000000){ // Post wait message
2272 status = readl(reply+16);
2273 if(status >> 24){
2274 status &= 0xffff; /* Get detail status */
2275 } else {
2276 status = I2O_POST_WAIT_OK;
2277 }
2278 if(!(context & 0x40000000)) {
2279 cmd = adpt_cmd_from_context(pHba,
2280 readl(reply+12));
2281 if(cmd != NULL) {
2282 printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
2283 }
2284 }
2285 adpt_i2o_post_wait_complete(context, status);
2286 } else { // SCSI message
2287 cmd = adpt_cmd_from_context (pHba, readl(reply+12));
2288 if(cmd != NULL){
2289 scsi_dma_unmap(cmd);
2290				if(cmd->serial_number != 0) { // If not timed out
2291 adpt_i2o_to_scsi(reply, cmd);
2292 }
2293 }
2294 }
2295 writel(m, pHba->reply_port);
2296 wmb();
2297 rmb();
2298 }
2299 handled = 1;
2300out: if(pHba->host)
2301 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2302 return IRQ_RETVAL(handled);
2303}
2304
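/*
 * Build the Adaptec/DPT private I2O_CMD_SCSI_EXEC request for a SCSI
 * command: translate the data direction into SCB and SGL flag bits, copy
 * the CDB into a fixed 16-byte block, then append one simple SG element
 * per mapped DMA segment, flagging the final element end-of-buffer and
 * end-of-list before posting the frame.
 */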
2305static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2306{
2307 int i;
2308 u32 msg[MAX_MESSAGE_SIZE];
2309 u32* mptr;
2310 u32* lptr;
2311 u32 *lenptr;
2312 int direction;
2313 int scsidir;
2314 int nseg;
2315 u32 len;
2316 u32 reqlen;
2317 s32 rcode;
2318 dma_addr_t addr;
2319
2320 memset(msg, 0 , sizeof(msg));
2321 len = scsi_bufflen(cmd);
2322 direction = 0x00000000;
2323
2324 scsidir = 0x00000000; // DATA NO XFER
2325 if(len) {
2326 /*
2327 * Set SCBFlags to indicate if data is being transferred
2328 * in or out, or no data transfer
2329 * Note: Do not have to verify index is less than 0 since
2330 * cmd->cmnd[0] is an unsigned char
2331 */
2332 switch(cmd->sc_data_direction){
2333 case DMA_FROM_DEVICE:
2334 scsidir =0x40000000; // DATA IN (iop<--dev)
2335 break;
2336 case DMA_TO_DEVICE:
2337 direction=0x04000000; // SGL OUT
2338 scsidir =0x80000000; // DATA OUT (iop-->dev)
2339 break;
2340 case DMA_NONE:
2341 break;
2342 case DMA_BIDIRECTIONAL:
2343 scsidir =0x40000000; // DATA IN (iop<--dev)
2344 // Assume In - and continue;
2345 break;
2346 default:
2347			printk(KERN_WARNING"%s: unknown data direction for scsi opcode 0x%x - not supported.\n",
2348 pHba->name, cmd->cmnd[0]);
2349 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2350 cmd->scsi_done(cmd);
2351 return 0;
2352 }
2353 }
2354 // msg[0] is set later
2355 // I2O_CMD_SCSI_EXEC
2356 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2357 msg[2] = 0;
2358 msg[3] = adpt_cmd_to_context(cmd); /* Want SCSI control block back */
2359 // Our cards use the transaction context as the tag for queueing
2360 // Adaptec/DPT Private stuff
2361 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2362 msg[5] = d->tid;
2363 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2364 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2365 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2366 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2367 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2368
2369 mptr=msg+7;
2370
2371 // Write SCSI command into the message - always 16 byte block
2372 memset(mptr, 0, 16);
2373 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2374 mptr+=4;
2375 lenptr=mptr++; /* Remember me - fill in when we know */
2376 if (dpt_dma64(pHba)) {
2377 reqlen = 16; // SINGLE SGE
2378 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2379 *mptr++ = 1 << PAGE_SHIFT;
2380 } else {
2381 reqlen = 14; // SINGLE SGE
2382 }
2383 /* Now fill in the SGList and command */
2384
2385 nseg = scsi_dma_map(cmd);
2386 BUG_ON(nseg < 0);
2387 if (nseg) {
2388 struct scatterlist *sg;
2389
2390 len = 0;
2391 scsi_for_each_sg(cmd, sg, nseg, i) {
2392 lptr = mptr;
2393 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2394 len+=sg_dma_len(sg);
2395 addr = sg_dma_address(sg);
2396 *mptr++ = dma_low(addr);
2397 if (dpt_dma64(pHba))
2398 *mptr++ = dma_high(addr);
2399 /* Make this an end of list */
2400 if (i == nseg - 1)
2401 *lptr = direction|0xD0000000|sg_dma_len(sg);
2402 }
2403 reqlen = mptr - msg;
2404 *lenptr = len;
2405
2406 if(cmd->underflow && len != cmd->underflow){
2407 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2408 len, cmd->underflow);
2409 }
2410 } else {
2411 *lenptr = len = 0;
2412 reqlen = 12;
2413 }
2414
2415 /* Stick the headers on */
2416 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2417
2418	// Send it on its way
2419 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2420 if (rcode == 0) {
2421 return 0;
2422 }
2423 return rcode;
2424}
2425
2426
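/*
 * Allocate the mid-layer Scsi_Host for this adapter and size it from the
 * values read out of the IOP status block (queue depth, SG table size).
 */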
2427static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2428{
2429 struct Scsi_Host *host;
2430
2431 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2432 if (host == NULL) {
2433		printk(KERN_WARNING"%s: scsi_host_alloc returned NULL\n", pHba->name);
2434 return -1;
2435 }
2436 host->hostdata[0] = (unsigned long)pHba;
2437 pHba->host = host;
2438
2439 host->irq = pHba->pDev->irq;
2440 /* no IO ports, so don't have to set host->io_port and
2441 * host->n_io_port
2442 */
2443 host->io_port = 0;
2444 host->n_io_port = 0;
2445 /* see comments in scsi_host.h */
2446 host->max_id = 16;
2447 host->max_lun = 256;
2448 host->max_channel = pHba->top_scsi_channel + 1;
2449 host->cmd_per_lun = 1;
2450 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2451 host->sg_tablesize = pHba->sg_tablesize;
2452 host->can_queue = pHba->post_fifo_size;
2453
2454 return 0;
2455}
2456
2457
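/*
 * Translate an I2O reply frame into a SCSI completion: map the detailed
 * status onto a DID_* host byte, set the residual from the transfer
 * count, and copy back sense data on a check condition before invoking
 * scsi_done.
 */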
2458static s32 adpt_i2o_to_scsi(void __iomem *reply, struct scsi_cmnd* cmd)
2459{
2460 adpt_hba* pHba;
2461 u32 hba_status;
2462 u32 dev_status;
2463 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2464 // I know this would look cleaner if I just read bytes
2465 // but the model I have been using for all the rest of the
2466 // io is in 4 byte words - so I keep that model
2467 u16 detailed_status = readl(reply+16) &0xffff;
2468 dev_status = (detailed_status & 0xff);
2469 hba_status = detailed_status >> 8;
2470
2471 // calculate resid for sg
2472 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2473
2474 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2475
2476 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2477
2478 if(!(reply_flags & MSG_FAIL)) {
2479 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2480 case I2O_SCSI_DSC_SUCCESS:
2481 cmd->result = (DID_OK << 16);
2482 // handle underflow
2483 if (readl(reply+20) < cmd->underflow) {
2484 cmd->result = (DID_ERROR <<16);
2485 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2486 }
2487 break;
2488 case I2O_SCSI_DSC_REQUEST_ABORTED:
2489 cmd->result = (DID_ABORT << 16);
2490 break;
2491 case I2O_SCSI_DSC_PATH_INVALID:
2492 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2493 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2494 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2495 case I2O_SCSI_DSC_NO_ADAPTER:
2496 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2497 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%d) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2498 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2499 cmd->result = (DID_TIME_OUT << 16);
2500 break;
2501 case I2O_SCSI_DSC_ADAPTER_BUSY:
2502 case I2O_SCSI_DSC_BUS_BUSY:
2503 cmd->result = (DID_BUS_BUSY << 16);
2504 break;
2505 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2506 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2507 cmd->result = (DID_RESET << 16);
2508 break;
2509 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2510 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2511 cmd->result = (DID_PARITY << 16);
2512 break;
2513 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2514 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2515 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2516 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2517 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2518 case I2O_SCSI_DSC_DATA_OVERRUN:
2519 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2520 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2521 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2522 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2523 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2524 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2525 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2526 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2527 case I2O_SCSI_DSC_INVALID_CDB:
2528 case I2O_SCSI_DSC_LUN_INVALID:
2529 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2530 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2531 case I2O_SCSI_DSC_NO_NEXUS:
2532 case I2O_SCSI_DSC_CDB_RECEIVED:
2533 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2534 case I2O_SCSI_DSC_QUEUE_FROZEN:
2535 case I2O_SCSI_DSC_REQUEST_INVALID:
2536 default:
2537 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2538 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2539 hba_status, dev_status, cmd->cmnd[0]);
2540 cmd->result = (DID_ERROR << 16);
2541 break;
2542 }
2543
2544 // copy over the request sense data if it was a check
2545 // condition status
2546 if (dev_status == SAM_STAT_CHECK_CONDITION) {
2547 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2548 // Copy over the sense data
2549 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2550 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2551 cmd->sense_buffer[2] == DATA_PROTECT ){
2552 /* This is to handle an array failed */
2553 cmd->result = (DID_TIME_OUT << 16);
2554 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%d) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2555 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2556 hba_status, dev_status, cmd->cmnd[0]);
2557
2558 }
2559 }
2560 } else {
2561		/* In this condition we could not talk to the tid -
2562		 * the card rejected it. We should signal a retry
2563		 * for a limited number of retries.
2564 */
2565 cmd->result = (DID_TIME_OUT << 16);
2566 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%d) tid=%d, cmd=0x%x\n",
2567 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, (u32)cmd->device->lun,
2568 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2569 }
2570
2571 cmd->result |= (dev_status);
2572
2573 if(cmd->scsi_done != NULL){
2574 cmd->scsi_done(cmd);
2575 }
2576 return cmd->result;
2577}
2578
2579
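/*
 * Re-read and re-parse the logical configuration table under the host
 * lock so devices that appeared, moved or vanished are picked up.
 */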
2580static s32 adpt_rescan(adpt_hba* pHba)
2581{
2582 s32 rcode;
2583 ulong flags = 0;
2584
2585 if(pHba->host)
2586 spin_lock_irqsave(pHba->host->host_lock, flags);
2587 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2588 goto out;
2589 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2590 goto out;
2591 rcode = 0;
2592out: if(pHba->host)
2593 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2594 return rcode;
2595}
2596
2597
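/*
 * Walk a freshly fetched LCT: mark all known devices unscanned, resolve
 * each usable LCT entry to (channel, id, lun) with a scalar query, add
 * structures for new devices, refresh TIDs that changed, and finally
 * offline anything that no longer appears in the table.
 */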
2598static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2599{
2600 int i;
2601 int max;
2602 int tid;
2603 struct i2o_device *d;
2604 i2o_lct *lct = pHba->lct;
2605 u8 bus_no = 0;
2606 s16 scsi_id;
2607 s16 scsi_lun;
2608 u32 buf[10]; // at least 8 u32's
2609 struct adpt_device* pDev = NULL;
2610 struct i2o_device* pI2o_dev = NULL;
2611
2612 if (lct == NULL) {
2613 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2614 return -1;
2615 }
2616
2617 max = lct->table_size;
2618 max -= 3;
2619 max /= 9;
2620
2621 // Mark each drive as unscanned
2622 for (d = pHba->devices; d; d = d->next) {
2623 pDev =(struct adpt_device*) d->owner;
2624 if(!pDev){
2625 continue;
2626 }
2627 pDev->state |= DPTI_DEV_UNSCANNED;
2628 }
2629
2630 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2631
2632 for(i=0;i<max;i++) {
2633 if( lct->lct_entry[i].user_tid != 0xfff){
2634 continue;
2635 }
2636
2637 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2638 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2639 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2640 tid = lct->lct_entry[i].tid;
2641 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2642 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2643 continue;
2644 }
2645 bus_no = buf[0]>>16;
2646 if (bus_no >= MAX_CHANNEL) { /* Something wrong skip it */
2647 printk(KERN_WARNING
2648 "%s: Channel number %d out of range\n",
2649 pHba->name, bus_no);
2650 continue;
2651 }
2652
2653 scsi_id = buf[1];
2654 scsi_lun = (buf[2]>>8 )&0xff;
2655 pDev = pHba->channel[bus_no].device[scsi_id];
2656			/* walk the LUN chain to find this lun */
2657 while(pDev) {
2658 if(pDev->scsi_lun == scsi_lun) {
2659 break;
2660 }
2661 pDev = pDev->next_lun;
2662 }
2663 if(!pDev ) { // Something new add it
2664 d = kmalloc(sizeof(struct i2o_device),
2665 GFP_ATOMIC);
2666 if(d==NULL)
2667 {
2668 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2669 return -ENOMEM;
2670 }
2671
2672 d->controller = pHba;
2673 d->next = NULL;
2674
2675 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2676
2677 d->flags = 0;
2678 adpt_i2o_report_hba_unit(pHba, d);
2679 adpt_i2o_install_device(pHba, d);
2680
2681 pDev = pHba->channel[bus_no].device[scsi_id];
2682 if( pDev == NULL){
2683 pDev =
2684 kzalloc(sizeof(struct adpt_device),
2685 GFP_ATOMIC);
2686 if(pDev == NULL) {
2687 return -ENOMEM;
2688 }
2689 pHba->channel[bus_no].device[scsi_id] = pDev;
2690 } else {
2691 while (pDev->next_lun) {
2692 pDev = pDev->next_lun;
2693 }
2694 pDev = pDev->next_lun =
2695 kzalloc(sizeof(struct adpt_device),
2696 GFP_ATOMIC);
2697 if(pDev == NULL) {
2698 return -ENOMEM;
2699 }
2700 }
2701 pDev->tid = d->lct_data.tid;
2702 pDev->scsi_channel = bus_no;
2703 pDev->scsi_id = scsi_id;
2704 pDev->scsi_lun = scsi_lun;
2705 pDev->pI2o_dev = d;
2706 d->owner = pDev;
2707 pDev->type = (buf[0])&0xff;
2708 pDev->flags = (buf[0]>>8)&0xff;
2709				// Too late, SCSI system has made up its mind, but what the hey ...
2710 if(scsi_id > pHba->top_scsi_id){
2711 pHba->top_scsi_id = scsi_id;
2712 }
2713 if(scsi_lun > pHba->top_scsi_lun){
2714 pHba->top_scsi_lun = scsi_lun;
2715 }
2716 continue;
2717 } // end of new i2o device
2718
2719 // We found an old device - check it
2720 while(pDev) {
2721 if(pDev->scsi_lun == scsi_lun) {
2722 if(!scsi_device_online(pDev->pScsi_dev)) {
2723 printk(KERN_WARNING"%s: Setting device (%d,%d,%d) back online\n",
2724 pHba->name,bus_no,scsi_id,scsi_lun);
2725 if (pDev->pScsi_dev) {
2726 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2727 }
2728 }
2729 d = pDev->pI2o_dev;
2730 if(d->lct_data.tid != tid) { // something changed
2731 pDev->tid = tid;
2732 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2733 if (pDev->pScsi_dev) {
2734 pDev->pScsi_dev->changed = TRUE;
2735 pDev->pScsi_dev->removable = TRUE;
2736 }
2737 }
2738 // Found it - mark it scanned
2739 pDev->state = DPTI_DEV_ONLINE;
2740 break;
2741 }
2742 pDev = pDev->next_lun;
2743 }
2744 }
2745 }
2746 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2747 pDev =(struct adpt_device*) pI2o_dev->owner;
2748 if(!pDev){
2749 continue;
2750 }
2751		// Take drives offline that previously existed but could not be found
2752		// in the LCT table
2753 if (pDev->state & DPTI_DEV_UNSCANNED){
2754 pDev->state = DPTI_DEV_OFFLINE;
2755 printk(KERN_WARNING"%s: Device (%d,%d,%d) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2756 if (pDev->pScsi_dev) {
2757 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2758 }
2759 }
2760 }
2761 return 0;
2762}
2763
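/*
 * Fail back every command still outstanding on this adapter with a
 * QUEUE_FULL style status so the mid-layer will requeue it.
 */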
2764static void adpt_fail_posted_scbs(adpt_hba* pHba)
2765{
2766 struct scsi_cmnd* cmd = NULL;
2767 struct scsi_device* d = NULL;
2768
2769 shost_for_each_device(d, pHba->host) {
2770 unsigned long flags;
2771 spin_lock_irqsave(&d->list_lock, flags);
2772 list_for_each_entry(cmd, &d->cmd_list, list) {
2773 if(cmd->serial_number == 0){
2774 continue;
2775 }
2776 cmd->result = (DID_OK << 16) | (QUEUE_FULL <<1);
2777 cmd->scsi_done(cmd);
2778 }
2779 spin_unlock_irqrestore(&d->list_lock, flags);
2780 }
2781}
2782
2783
2784/*============================================================================
2785 * Routines from i2o subsystem
2786 *============================================================================
2787 */
2788
2789
2790
2791/*
2792 * Bring an I2O controller into HOLD state. See the spec.
2793 */
2794static int adpt_i2o_activate_hba(adpt_hba* pHba)
2795{
2796 int rcode;
2797
2798 if(pHba->initialized ) {
2799 if (adpt_i2o_status_get(pHba) < 0) {
2800 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2801 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2802 return rcode;
2803 }
2804 if (adpt_i2o_status_get(pHba) < 0) {
2805 printk(KERN_INFO "HBA not responding.\n");
2806 return -1;
2807 }
2808 }
2809
2810 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2811 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2812 return -1;
2813 }
2814
2815 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2816 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2817 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2818 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2819 adpt_i2o_reset_hba(pHba);
2820 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2821 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2822 return -1;
2823 }
2824 }
2825 } else {
2826 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2827 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2828 return rcode;
2829 }
2830
2831 }
2832
2833 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2834 return -1;
2835 }
2836
2837 /* In HOLD state */
2838
2839 if (adpt_i2o_hrt_get(pHba) < 0) {
2840 return -1;
2841 }
2842
2843 return 0;
2844}
2845
2846/*
2847 * Bring a controller online into OPERATIONAL state.
2848 */
2849
2850static int adpt_i2o_online_hba(adpt_hba* pHba)
2851{
2852 if (adpt_i2o_systab_send(pHba) < 0) {
2853 adpt_i2o_delete_hba(pHba);
2854 return -1;
2855 }
2856 /* In READY state */
2857
2858 if (adpt_i2o_enable_hba(pHba) < 0) {
2859 adpt_i2o_delete_hba(pHba);
2860 return -1;
2861 }
2862
2863 /* In OPERATIONAL state */
2864 return 0;
2865}
2866
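/*
 * Hand a message frame back to the IOP by filling it with a UTIL NOP and
 * posting it; if m is EMPTY_QUEUE we first wait (up to 5 seconds) to
 * claim a frame from the post port.
 */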
2867static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2868{
2869 u32 __iomem *msg;
2870 ulong timeout = jiffies + 5*HZ;
2871
2872 while(m == EMPTY_QUEUE){
2873 rmb();
2874 m = readl(pHba->post_port);
2875 if(m != EMPTY_QUEUE){
2876 break;
2877 }
2878 if(time_after(jiffies,timeout)){
2879 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2880 return 2;
2881 }
2882 schedule_timeout_uninterruptible(1);
2883 }
2884 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2885 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2886 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2887 writel( 0,&msg[2]);
2888 wmb();
2889
2890 writel(m, pHba->post_port);
2891 wmb();
2892 return 0;
2893}
2894
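/*
 * Initialize the outbound (reply) queue: post I2O_CMD_OUTBOUND_INIT with
 * a 4-byte DMA status the IOP updates, poll until initialization
 * completes, then allocate the reply frame pool and prime the reply FIFO
 * with the physical address of each frame.
 */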
2895static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2896{
2897 u8 *status;
2898 dma_addr_t addr;
2899 u32 __iomem *msg = NULL;
2900 int i;
2901 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2902 u32 m;
2903
2904 do {
2905 rmb();
2906 m = readl(pHba->post_port);
2907 if (m != EMPTY_QUEUE) {
2908 break;
2909 }
2910
2911 if(time_after(jiffies,timeout)){
2912 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2913 return -ETIMEDOUT;
2914 }
2915 schedule_timeout_uninterruptible(1);
2916 } while(m == EMPTY_QUEUE);
2917
2918 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2919
2920 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2921 if (!status) {
2922 adpt_send_nop(pHba, m);
2923 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2924 pHba->name);
2925 return -ENOMEM;
2926 }
2927 memset(status, 0, 4);
2928
2929 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2930 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2931 writel(0, &msg[2]);
2932 writel(0x0106, &msg[3]); /* Transaction context */
2933 writel(4096, &msg[4]); /* Host page frame size */
2934 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2935 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2936 writel((u32)addr, &msg[7]);
2937
2938 writel(m, pHba->post_port);
2939 wmb();
2940
2941 // Wait for the reply status to come back
2942 do {
2943 if (*status) {
2944 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2945 break;
2946 }
2947 }
2948 rmb();
2949 if(time_after(jiffies,timeout)){
2950 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2951			/* We lose 4 bytes of "status" here, but we
2952			   cannot free it because the controller may
2953			   wake up and corrupt those bytes at any time */
2954 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2955 return -ETIMEDOUT;
2956 }
2957 schedule_timeout_uninterruptible(1);
2958 } while (1);
2959
2960 // If the command was successful, fill the fifo with our reply
2961 // message packets
2962 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2963 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2964 return -2;
2965 }
2966 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2967
2968 if(pHba->reply_pool != NULL) {
2969 dma_free_coherent(&pHba->pDev->dev,
2970 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2971 pHba->reply_pool, pHba->reply_pool_pa);
2972 }
2973
2974 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2975 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2976 &pHba->reply_pool_pa, GFP_KERNEL);
2977 if (!pHba->reply_pool) {
2978 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2979 return -ENOMEM;
2980 }
2981 memset(pHba->reply_pool, 0 , pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4);
2982
2983 for(i = 0; i < pHba->reply_fifo_size; i++) {
2984 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2985 pHba->reply_port);
2986 wmb();
2987 }
2988 adpt_i2o_status_get(pHba);
2989 return 0;
2990}
2991
2992
2993/*
2994 * I2O System Table. Contains information about
2995 * all the IOPs in the system. Used to inform IOPs
2996 * about each other's existence.
2997 *
2998 * sys_tbl_ver is the CurrentChangeIndicator that is
2999 * used by IOPs to track changes.
3000 */
3001
3002
3003
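/*
 * Fetch the IOP status block with I2O_CMD_STATUS_GET and poll its last
 * byte, which the IOP writes as a completion flag. The returned values
 * size the inbound/outbound FIFOs and the per-command SG table.
 */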
3004static s32 adpt_i2o_status_get(adpt_hba* pHba)
3005{
3006 ulong timeout;
3007 u32 m;
3008 u32 __iomem *msg;
3009 u8 *status_block=NULL;
3010
3011 if(pHba->status_block == NULL) {
3012 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
3013 sizeof(i2o_status_block),
3014 &pHba->status_block_pa, GFP_KERNEL);
3015 if(pHba->status_block == NULL) {
3016 printk(KERN_ERR
3017			"dpti%d: Get Status Block failed; Out of memory.\n",
3018 pHba->unit);
3019 return -ENOMEM;
3020 }
3021 }
3022 memset(pHba->status_block, 0, sizeof(i2o_status_block));
3023 status_block = (u8*)(pHba->status_block);
3024 timeout = jiffies+TMOUT_GETSTATUS*HZ;
3025 do {
3026 rmb();
3027 m = readl(pHba->post_port);
3028 if (m != EMPTY_QUEUE) {
3029 break;
3030 }
3031 if(time_after(jiffies,timeout)){
3032 printk(KERN_ERR "%s: Timeout waiting for message !\n",
3033 pHba->name);
3034 return -ETIMEDOUT;
3035 }
3036 schedule_timeout_uninterruptible(1);
3037 } while(m==EMPTY_QUEUE);
3038
3039
3040 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
3041
3042 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
3043 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
3044 writel(1, &msg[2]);
3045 writel(0, &msg[3]);
3046 writel(0, &msg[4]);
3047 writel(0, &msg[5]);
3048 writel( dma_low(pHba->status_block_pa), &msg[6]);
3049 writel( dma_high(pHba->status_block_pa), &msg[7]);
3050 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
3051
3052 //post message
3053 writel(m, pHba->post_port);
3054 wmb();
3055
3056 while(status_block[87]!=0xff){
3057 if(time_after(jiffies,timeout)){
3058 printk(KERN_ERR"dpti%d: Get status timeout.\n",
3059 pHba->unit);
3060 return -ETIMEDOUT;
3061 }
3062 rmb();
3063 schedule_timeout_uninterruptible(1);
3064 }
3065
3066 // Set up our number of outbound and inbound messages
3067 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
3068 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
3069 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
3070 }
3071
3072 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
3073 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
3074 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
3075 }
3076
3077 // Calculate the Scatter Gather list size
3078 if (dpt_dma64(pHba)) {
3079 pHba->sg_tablesize
3080 = ((pHba->status_block->inbound_frame_size * 4
3081 - 14 * sizeof(u32))
3082 / (sizeof(struct sg_simple_element) + sizeof(u32)));
3083 } else {
3084 pHba->sg_tablesize
3085 = ((pHba->status_block->inbound_frame_size * 4
3086 - 12 * sizeof(u32))
3087 / sizeof(struct sg_simple_element));
3088 }
3089 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
3090 pHba->sg_tablesize = SG_LIST_ELEMENTS;
3091 }
3092
3093
3094#ifdef DEBUG
3095 printk("dpti%d: State = ",pHba->unit);
3096 switch(pHba->status_block->iop_state) {
3097 case 0x01:
3098 printk("INIT\n");
3099 break;
3100 case 0x02:
3101 printk("RESET\n");
3102 break;
3103 case 0x04:
3104 printk("HOLD\n");
3105 break;
3106 case 0x05:
3107 printk("READY\n");
3108 break;
3109 case 0x08:
3110 printk("OPERATIONAL\n");
3111 break;
3112 case 0x10:
3113 printk("FAILED\n");
3114 break;
3115 case 0x11:
3116 printk("FAULTED\n");
3117 break;
3118 default:
3119 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
3120 }
3121#endif
3122 return 0;
3123}
3124
3125/*
3126 * Get the IOP's Logical Configuration Table
3127 */
3128static int adpt_i2o_lct_get(adpt_hba* pHba)
3129{
3130 u32 msg[8];
3131 int ret;
3132 u32 buf[16];
3133
3134 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
3135 pHba->lct_size = pHba->status_block->expected_lct_size;
3136 }
3137 do {
3138 if (pHba->lct == NULL) {
3139 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
3140 pHba->lct_size, &pHba->lct_pa,
3141 GFP_ATOMIC);
3142 if(pHba->lct == NULL) {
3143 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3144 pHba->name);
3145 return -ENOMEM;
3146 }
3147 }
3148 memset(pHba->lct, 0, pHba->lct_size);
3149
3150 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3151 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3152 msg[2] = 0;
3153 msg[3] = 0;
3154 msg[4] = 0xFFFFFFFF; /* All devices */
3155 msg[5] = 0x00000000; /* Report now */
3156 msg[6] = 0xD0000000|pHba->lct_size;
3157 msg[7] = (u32)pHba->lct_pa;
3158
3159 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3160			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3161 pHba->name, ret);
3162 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3163 return ret;
3164 }
3165
3166 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3167 pHba->lct_size = pHba->lct->table_size << 2;
3168 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3169 pHba->lct, pHba->lct_pa);
3170 pHba->lct = NULL;
3171 }
3172 } while (pHba->lct == NULL);
3173
3174 PDEBUG("%s: Hardware resource table read.\n", pHba->name);
3175
3176
3177 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3178 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3179 pHba->FwDebugBufferSize = buf[1];
3180 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3181 pHba->FwDebugBufferSize);
3182 if (pHba->FwDebugBuffer_P) {
3183 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3184 FW_DEBUG_FLAGS_OFFSET;
3185 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3186 FW_DEBUG_BLED_OFFSET;
3187 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3188 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3189 FW_DEBUG_STR_LENGTH_OFFSET;
3190 pHba->FwDebugBuffer_P += buf[2];
3191 pHba->FwDebugFlags = 0;
3192 }
3193 }
3194
3195 return 0;
3196}
3197
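/*
 * (Re)build the I2O system table describing every IOP on the chain,
 * including each one's inbound message port address; it is pushed to
 * the IOPs by adpt_i2o_systab_send().
 */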
3198static int adpt_i2o_build_sys_table(void)
3199{
3200 adpt_hba* pHba = hba_chain;
3201 int count = 0;
3202
3203 if (sys_tbl)
3204 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3205 sys_tbl, sys_tbl_pa);
3206
3207 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3208 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3209
3210 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3211 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3212 if (!sys_tbl) {
3213 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3214 return -ENOMEM;
3215 }
3216 memset(sys_tbl, 0, sys_tbl_len);
3217
3218 sys_tbl->num_entries = hba_count;
3219 sys_tbl->version = I2OVERSION;
3220 sys_tbl->change_ind = sys_tbl_ind++;
3221
3222 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3223 u64 addr;
3224 // Get updated Status Block so we have the latest information
3225 if (adpt_i2o_status_get(pHba)) {
3226 sys_tbl->num_entries--;
3227 continue; // try next one
3228 }
3229
3230 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3231 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3232 sys_tbl->iops[count].seg_num = 0;
3233 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3234 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3235 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3236 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3237 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3238 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3239 addr = pHba->base_addr_phys + 0x40;
3240 sys_tbl->iops[count].inbound_low = dma_low(addr);
3241 sys_tbl->iops[count].inbound_high = dma_high(addr);
3242
3243 count++;
3244 }
3245
3246#ifdef DEBUG
3247{
3248 u32 *table = (u32*)sys_tbl;
3249 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3250 for(count = 0; count < (sys_tbl_len >>2); count++) {
3251 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3252 count, table[count]);
3253 }
3254}
3255#endif
3256
3257 return 0;
3258}
3259
3260
3261/*
3262 * Dump the information block associated with a given unit (TID)
3263 */
3264
3265static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3266{
3267 char buf[64];
3268 int unit = d->lct_data.tid;
3269
3270 printk(KERN_INFO "TID %3.3d ", unit);
3271
3272 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3273 {
3274 buf[16]=0;
3275 printk(" Vendor: %-12.12s", buf);
3276 }
3277 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3278 {
3279 buf[16]=0;
3280 printk(" Device: %-12.12s", buf);
3281 }
3282 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3283 {
3284 buf[8]=0;
3285 printk(" Rev: %-12.12s\n", buf);
3286 }
3287#ifdef DEBUG
3288 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3289 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3290 printk(KERN_INFO "\tFlags: ");
3291
3292 if(d->lct_data.device_flags&(1<<0))
3293 printk("C"); // ConfigDialog requested
3294 if(d->lct_data.device_flags&(1<<1))
3295 printk("U"); // Multi-user capable
3296 if(!(d->lct_data.device_flags&(1<<4)))
3297 printk("P"); // Peer service enabled!
3298 if(!(d->lct_data.device_flags&(1<<5)))
3299 printk("M"); // Mgmt service enabled!
3300 printk("\n");
3301#endif
3302}
3303
3304#ifdef DEBUG
3305/*
3306 * Do i2o class name lookup
3307 */
3308static const char *adpt_i2o_get_class_name(int class)
3309{
3310 int idx = 16;
3311 static char *i2o_class_name[] = {
3312 "Executive",
3313 "Device Driver Module",
3314 "Block Device",
3315 "Tape Device",
3316 "LAN Interface",
3317 "WAN Interface",
3318 "Fibre Channel Port",
3319 "Fibre Channel Device",
3320 "SCSI Device",
3321 "ATE Port",
3322 "ATE Device",
3323 "Floppy Controller",
3324 "Floppy Device",
3325 "Secondary Bus Port",
3326 "Peer Transport Agent",
3327 "Peer Transport",
3328 "Unknown"
3329 };
3330
3331 switch(class&0xFFF) {
3332 case I2O_CLASS_EXECUTIVE:
3333 idx = 0; break;
3334 case I2O_CLASS_DDM:
3335 idx = 1; break;
3336 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3337 idx = 2; break;
3338 case I2O_CLASS_SEQUENTIAL_STORAGE:
3339 idx = 3; break;
3340 case I2O_CLASS_LAN:
3341 idx = 4; break;
3342 case I2O_CLASS_WAN:
3343 idx = 5; break;
3344 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3345 idx = 6; break;
3346 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3347 idx = 7; break;
3348 case I2O_CLASS_SCSI_PERIPHERAL:
3349 idx = 8; break;
3350 case I2O_CLASS_ATE_PORT:
3351 idx = 9; break;
3352 case I2O_CLASS_ATE_PERIPHERAL:
3353 idx = 10; break;
3354 case I2O_CLASS_FLOPPY_CONTROLLER:
3355 idx = 11; break;
3356 case I2O_CLASS_FLOPPY_DEVICE:
3357 idx = 12; break;
3358 case I2O_CLASS_BUS_ADAPTER_PORT:
3359 idx = 13; break;
3360 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3361 idx = 14; break;
3362 case I2O_CLASS_PEER_TRANSPORT:
3363 idx = 15; break;
3364 }
3365 return i2o_class_name[idx];
3366}
3367#endif
3368
3369
3370static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3371{
3372 u32 msg[6];
3373 int ret, size = sizeof(i2o_hrt);
3374
3375 do {
3376 if (pHba->hrt == NULL) {
3377 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3378 size, &pHba->hrt_pa, GFP_KERNEL);
3379 if (pHba->hrt == NULL) {
3380 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3381 return -ENOMEM;
3382 }
3383 }
3384
3385 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3386 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3387 msg[2]= 0;
3388 msg[3]= 0;
3389 msg[4]= (0xD0000000 | size); /* Simple transaction */
3390 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
3391
3392 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3393 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3394 return ret;
3395 }
3396
3397 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3398 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3399 dma_free_coherent(&pHba->pDev->dev, size,
3400 pHba->hrt, pHba->hrt_pa);
3401 size = newsize;
3402 pHba->hrt = NULL;
3403 }
3404 } while(pHba->hrt == NULL);
3405 return 0;
3406}
3407
3408/*
3409 * Query one scalar group value or a whole scalar group.
3410 */
3411static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3412 int group, int field, void *buf, int buflen)
3413{
3414 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3415 u8 *opblk_va;
3416 dma_addr_t opblk_pa;
3417 u8 *resblk_va;
3418 dma_addr_t resblk_pa;
3419
3420 int size;
3421
3422 /* 8 bytes for header */
3423 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3424 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3425 if (resblk_va == NULL) {
3426 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3427 return -ENOMEM;
3428 }
3429
3430 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3431 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3432 if (opblk_va == NULL) {
3433 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3434 resblk_va, resblk_pa);
3435		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3436 pHba->name);
3437 return -ENOMEM;
3438 }
3439 if (field == -1) /* whole group */
3440 opblk[4] = -1;
3441
3442 memcpy(opblk_va, opblk, sizeof(opblk));
3443 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3444 opblk_va, opblk_pa, sizeof(opblk),
3445 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3446 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3447 if (size == -ETIME) {
3448 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3449 resblk_va, resblk_pa);
3450 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3451 return -ETIME;
3452 } else if (size == -EINTR) {
3453 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3454 resblk_va, resblk_pa);
3455 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3456 return -EINTR;
3457 }
3458
3459 memcpy(buf, resblk_va+8, buflen); /* cut off header */
3460
3461 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3462 resblk_va, resblk_pa);
3463 if (size < 0)
3464 return size;
3465
3466 return buflen;
3467}
3468
3469
3470/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3471 *
3472 * This function can be used for all UtilParamsGet/Set operations.
3473 * The OperationBlock is given in opblk-buffer,
3474 * and results are returned in resblk-buffer.
3475 * Note that the minimum sized resblk is 8 bytes and contains
3476 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3477 */
3478static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3479 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3480 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3481{
3482 u32 msg[9];
3483 u32 *res = (u32 *)resblk_va;
3484 int wait_status;
3485
3486 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3487 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3488 msg[2] = 0;
3489 msg[3] = 0;
3490 msg[4] = 0;
3491 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3492 msg[6] = (u32)opblk_pa;
3493 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3494 msg[8] = (u32)resblk_pa;
3495
3496 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3497		printk(KERN_WARNING"adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3498 return wait_status; /* -DetailedStatus */
3499 }
3500
3501 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3502 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3503 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3504 pHba->name,
3505 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3506 : "PARAMS_GET",
3507 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3508 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3509 }
3510
3511 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3512}
3513
3514
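/*
 * Ask the IOP to stop external operation (SysQuiesce). Only meaningful
 * when the IOP is in the READY or OPERATIONAL state, so anything else
 * is treated as a no-op.
 */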
3515static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3516{
3517 u32 msg[4];
3518 int ret;
3519
3520 adpt_i2o_status_get(pHba);
3521
3522 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3523
3524 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3525 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3526 return 0;
3527 }
3528
3529 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3530 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3531 msg[2] = 0;
3532 msg[3] = 0;
3533
3534 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3535 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3536 pHba->unit, -ret);
3537 } else {
3538 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3539 }
3540
3541 adpt_i2o_status_get(pHba);
3542 return ret;
3543}
3544
3545
3546/*
3547 * Enable IOP. Allows the IOP to resume external operations.
3548 */
3549static int adpt_i2o_enable_hba(adpt_hba* pHba)
3550{
3551 u32 msg[4];
3552 int ret;
3553
3554 adpt_i2o_status_get(pHba);
3555 if(!pHba->status_block){
3556 return -ENOMEM;
3557 }
3558 /* Enable only allowed on READY state */
3559 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3560 return 0;
3561
3562 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3563 return -EINVAL;
3564
3565 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3566 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3567 msg[2]= 0;
3568 msg[3]= 0;
3569
3570 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3571 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3572 pHba->name, ret);
3573 } else {
3574 PDEBUG("%s: Enabled.\n", pHba->name);
3575 }
3576
3577 adpt_i2o_status_get(pHba);
3578 return ret;
3579}
3580
3581
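/*
 * Push the system table to the IOP with I2O_CMD_SYS_TAB_SET; this is
 * what moves an IOP in HOLD toward READY so adpt_i2o_enable_hba() can
 * then bring it OPERATIONAL.
 */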
3582static int adpt_i2o_systab_send(adpt_hba* pHba)
3583{
3584 u32 msg[12];
3585 int ret;
3586
3587 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3588 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3589 msg[2] = 0;
3590 msg[3] = 0;
3591 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3592 msg[5] = 0; /* Segment 0 */
3593
3594 /*
3595 * Provide three SGL-elements:
3596 * System table (SysTab), Private memory space declaration and
3597 * Private i/o space declaration
3598 */
3599 msg[6] = 0x54000000 | sys_tbl_len;
3600 msg[7] = (u32)sys_tbl_pa;
3601 msg[8] = 0x54000000 | 0;
3602 msg[9] = 0;
3603 msg[10] = 0xD4000000 | 0;
3604 msg[11] = 0;
3605
3606 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3607 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3608 pHba->name, ret);
3609 }
3610#ifdef DEBUG
3611 else {
3612 PINFO("%s: SysTab set.\n", pHba->name);
3613 }
3614#endif
3615
3616 return ret;
3617}
3618
3619
3620/*============================================================================
3621 *
3622 *============================================================================
3623 */
3624
3625
3626#ifdef UARTDELAY
3627
3628static void adpt_delay(int millisec)
3629{
3630 int i;
3631 for (i = 0; i < millisec; i++) {
3632 udelay(1000); /* delay for one millisecond */
3633 }
3634}
3635
3636#endif
3637
3638static struct scsi_host_template driver_template = {
3639 .module = THIS_MODULE,
3640 .name = "dpt_i2o",
3641 .proc_name = "dpt_i2o",
3642 .proc_info = adpt_proc_info,
3643 .info = adpt_info,
3644 .queuecommand = adpt_queue,
3645 .eh_abort_handler = adpt_abort,
3646 .eh_device_reset_handler = adpt_device_reset,
3647 .eh_bus_reset_handler = adpt_bus_reset,
3648 .eh_host_reset_handler = adpt_reset,
3649 .bios_param = adpt_bios_param,
3650 .slave_configure = adpt_slave_configure,
3651 .can_queue = MAX_TO_IOP_MESSAGES,
3652 .this_id = 7,
3653 .cmd_per_lun = 1,
3654 .use_clustering = ENABLE_CLUSTERING,
3655};
3656
3657static int __init adpt_init(void)
3658{
3659 int error;
3660 adpt_hba *pHba, *next;
3661
3662	printk(KERN_INFO "Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3663
3664 error = adpt_detect(&driver_template);
3665 if (error < 0)
3666 return error;
3667 if (hba_chain == NULL)
3668 return -ENODEV;
3669
3670 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3671 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3672 if (error)
3673 goto fail;
3674 scsi_scan_host(pHba->host);
3675 }
3676 return 0;
3677fail:
3678 for (pHba = hba_chain; pHba; pHba = next) {
3679 next = pHba->next;
3680 scsi_remove_host(pHba->host);
3681 }
3682 return error;
3683}
3684
3685static void __exit adpt_exit(void)
3686{
3687 adpt_hba *pHba, *next;
3688
3689 for (pHba = hba_chain; pHba; pHba = pHba->next)
3690 scsi_remove_host(pHba->host);
3691 for (pHba = hba_chain; pHba; pHba = next) {
3692 next = pHba->next;
3693 adpt_release(pHba->host);
3694 }
3695}
3696
3697module_init(adpt_init);
3698module_exit(adpt_exit);
3699
3700MODULE_LICENSE("GPL");
1// SPDX-License-Identifier: GPL-2.0-or-later
2/***************************************************************************
3 dpti.c - description
4 -------------------
5 begin : Thu Sep 7 2000
6 copyright : (C) 2000 by Adaptec
7
8 July 30, 2001 First version being submitted
9 for inclusion in the kernel. V2.4
10
11 See Documentation/scsi/dpti.rst for history, notes, license info
12 and credits
13 ***************************************************************************/
14
15/***************************************************************************
16 * *
17 * *
18 ***************************************************************************/
19/***************************************************************************
20 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
21 - Support 2.6 kernel and DMA-mapping
22 - ioctl fix for raid tools
23 - use schedule_timeout in long long loop
24 **************************************************************************/
25
26/*#define DEBUG 1 */
27/*#define UARTDELAY 1 */
28
29#include <linux/module.h>
30#include <linux/pgtable.h>
31
32MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
33MODULE_DESCRIPTION("Adaptec I2O RAID Driver");
34
35////////////////////////////////////////////////////////////////
36
37#include <linux/ioctl.h> /* For SCSI-Passthrough */
38#include <linux/uaccess.h>
39
40#include <linux/stat.h>
41#include <linux/slab.h> /* for kmalloc() */
42#include <linux/pci.h> /* for PCI support */
43#include <linux/proc_fs.h>
44#include <linux/blkdev.h>
45#include <linux/delay.h> /* for udelay */
46#include <linux/interrupt.h>
47#include <linux/kernel.h> /* for printk */
48#include <linux/sched.h>
49#include <linux/reboot.h>
50#include <linux/spinlock.h>
51#include <linux/dma-mapping.h>
52
53#include <linux/timer.h>
54#include <linux/string.h>
55#include <linux/ioport.h>
56#include <linux/mutex.h>
57
58#include <asm/processor.h> /* for boot_cpu_data */
59#include <asm/io.h> /* for virt_to_bus, etc. */
60
61#include <scsi/scsi.h>
62#include <scsi/scsi_cmnd.h>
63#include <scsi/scsi_device.h>
64#include <scsi/scsi_host.h>
65#include <scsi/scsi_tcq.h>
66
67#include "dpt/dptsig.h"
68#include "dpti.h"
69
70/*============================================================================
71 * Create a binary signature - this is read by dptsig
72 * Needed for our management apps
73 *============================================================================
74 */
75static DEFINE_MUTEX(adpt_mutex);
76static dpt_sig_S DPTI_sig = {
77 {'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
78#ifdef __i386__
79 PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
80#elif defined(__ia64__)
81 PROC_INTEL, PROC_IA64,
82#elif defined(__sparc__)
83 PROC_ULTRASPARC, PROC_ULTRASPARC,
84#elif defined(__alpha__)
85 PROC_ALPHA, PROC_ALPHA,
86#else
87 (-1),(-1),
88#endif
89 FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
90 ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
91 DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
92};
93
94
95
96
97/*============================================================================
98 * Globals
99 *============================================================================
100 */
101
102static DEFINE_MUTEX(adpt_configuration_lock);
103
104static struct i2o_sys_tbl *sys_tbl;
105static dma_addr_t sys_tbl_pa;
106static int sys_tbl_ind;
107static int sys_tbl_len;
108
109static adpt_hba* hba_chain = NULL;
110static int hba_count = 0;
111
112static struct class *adpt_sysfs_class;
113
114static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
115#ifdef CONFIG_COMPAT
116static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
117#endif
118
119static const struct file_operations adpt_fops = {
120 .unlocked_ioctl = adpt_unlocked_ioctl,
121 .open = adpt_open,
122 .release = adpt_close,
123#ifdef CONFIG_COMPAT
124 .compat_ioctl = compat_adpt_ioctl,
125#endif
126 .llseek = noop_llseek,
127};
128
129/* Structures and definitions for synchronous message posting.
130 * See adpt_i2o_post_wait() for description
131 * */
132struct adpt_i2o_post_wait_data
133{
134 int status;
135 u32 id;
136 adpt_wait_queue_head_t *wq;
137 struct adpt_i2o_post_wait_data *next;
138};
139
140static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
141static u32 adpt_post_wait_id = 0;
142static DEFINE_SPINLOCK(adpt_post_wait_lock);
143
144
145/*============================================================================
146 * Functions
147 *============================================================================
148 */
149
150static inline int dpt_dma64(adpt_hba *pHba)
151{
152 return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
153}
154
155static inline u32 dma_high(dma_addr_t addr)
156{
157 return upper_32_bits(addr);
158}
159
160static inline u32 dma_low(dma_addr_t addr)
161{
162 return (u32)addr;
163}

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if( readb(host->FwDebugBLEDflag_P) == 0xbc ){
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci,dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device( PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if(pDev->device == PCI_DPT_DEVICE_ID ||
		   pDev->device == PCI_DPT_RAPTOR_DEVICE_ID){
			if(adpt_install_hba(sht, pDev) ){
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count-1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get_status, init_outbound, and get_hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}


	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBAs in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the system table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBAs in OPERATIONAL state\n");

	printk(KERN_INFO "dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0){
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node.
	// Nodes will need to be created in /dev to access this;
	// they cannot be created from within the driver.
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}
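
/*
 * Example (illustrative, not part of the original source): the control node
 * registered above can be created by hand with something like
 *
 *	mknod /dev/dpti0 c <DPTI_I2O_MAJOR> 0
 *
 * where <DPTI_I2O_MAJOR> is the character major this driver registered and
 * the minor selects the adapter unit (dpti1 -> minor 1, and so on).
 */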


static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if(!buf){
		printk(KERN_ERR"%s: Could not allocate buffer\n",pHba->name);
		return;
	}
	memset(buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN  (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;	// SINGLE SGE, 64 bit
	else
		reqlen = 14;	// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret*/;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000| 6 /* cmd len*/;

	mptr=msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n",pHba->name,rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
}
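
/*
 * Illustrative note: on success pHba->detail ends up as a string of the
 * form "Vendor: Adaptec  Model: <product> FW: <rev>", assembled above from
 * a fixed vendor prefix plus the standard INQUIRY response (product
 * identification at bytes 16-31, revision level at bytes 32-35).
 */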


static int adpt_slave_configure(struct scsi_device * device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd * cmd, void (*done) (struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device.  Set up a pointer
		 * to the device structure.  This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET ) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads=-1;
	int sectors=-1;
	int cylinders=-1;

	// *** First let's set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000 ) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than or equal to 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	cylinders = sector_div(capacity, heads * sectors);

	// Special case if CDROM
	if(sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
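
/*
 * Worked example (illustrative): a 1 GiB disk has 0x200000 512-byte
 * sectors, which lands in the final bucket above, so it is reported as
 * 255 heads x 63 sectors.  Note that sector_div() divides 'capacity' in
 * place and returns the *remainder*, so the quotient (the cylinder count,
 * 0x200000 / (255 * 63) ~= 130 here) is left in 'capacity' while
 * 'cylinders' receives the remainder - a long-standing quirk of this
 * function.
 */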


static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find the HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d  Control Node=/dev/%s  irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size  = %d\n\treply fifo size = %d\n\tsg table size   = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size , host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for(chan = 0; chan < MAX_CHANNEL; chan++) {
		for(id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while(d) {
				seq_printf(m,"\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m," Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu)  (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					scsi_device_online(d->pScsi_dev)? "online":"offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

/*
 * Turn a pointer to ioctl reply data into a u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba * pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 * Go from a u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
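
/*
 * Illustrative round trip (64-bit kernels): adpt_ioctl_to_context() stores
 * the reply pointer in the first free ioctl_reply_context[] slot and hands
 * the slot index to the firmware as the transaction context; the ISR later
 * calls adpt_ioctl_from_context() with that index to recover (and clear)
 * the pointer.  On 32-bit kernels the pointer itself fits in the u32.
 */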

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd * cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n",pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n",pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[4] = cmd->request->tag + 1;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Abort cmd not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n",pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n",pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n",pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n",pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if(rcode == -EOPNOTSUPP ){
			printk(KERN_INFO"%s: Device reset not supported\n",pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n",pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error_handler
static int adpt_bus_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n",pHba->name, cmd->device->channel,pHba->channel[cmd->device->channel].tid );
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n",pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n",pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd* cmd)
{
	adpt_hba* pHba;
	int rcode;
	char name[32];

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	strncpy(name, pHba->name, sizeof(name));
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if(rcode == 0){
		printk(KERN_WARNING"%s: HBA reset complete\n", name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd* cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from the eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get_status, init_outbound, and get_hrt
	if ((rcode=adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n",pHba->name);

	if ((rcode=adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n",pHba->name);

	if ((rcode=adpt_i2o_lct_get(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0){
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	scsi_host_complete_all_commands(pHba->host, DID_RESET);
	return 0;	/* return success */
}
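
/*
 * Note (illustrative): the sequence above walks the IOP back through its
 * standard I2O state machine - activate (INIT -> HOLD), rebuild the system
 * table, bring it online (HOLD -> OPERATIONAL), then refetch and reparse
 * the LCT - before clearing DPTI_STATE_RESET and completing any commands
 * that were in flight with DID_RESET.
 */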

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO "  This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for(p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{

	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if(pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 * See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
		dma64 = 1;

	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev,0);
	hba_map0_area_size = pci_resource_len(pDev,0);

	// Check if standard PCI card or single BAR Raptor
	if(pDev->device == PCI_DPT_DEVICE_ID){
		if(pDev->subsystem_device >=0xc032 && pDev->subsystem_device <= 0xc03b){
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else {	// Not Raptor - it is a PCI card
			if(hba_map0_area_size > 0x100000 ){
				hba_map0_area_size = 0x100000;
			}
		}
	} else {	// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev,1);
		hba_map1_area_size = pci_resource_len(pDev,1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 * The original Adaptec 64 bit driver has this comment here:
	 *   "x86_64 machines need more optimal mappings"
	 *
	 * I assume some HBAs report ridiculously large mappings
	 * and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys,hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if(raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size );
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if(hba_chain != NULL){
		for(p = hba_chain; p->next; p = p->next);
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if(raptorFlag == 0){
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n",base_addr_virt,hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n",msg_addr_virt,hba_map1_area_size);
	}

	if (request_irq (pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	if(pHba->host){
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for( p1 = hba_chain; p1; p2 = p1,p1=p1->next){
		if(p1 == pHba) {
			if(p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if(pHba->msg_addr_virt != pHba->base_addr_virt){
		iounmap(pHba->msg_addr_virt);
	}
	if(pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if(pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if(pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if(pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if(pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for(d = pHba->devices; d ; d = next){
		next = d->next;
		kfree(d);
	}
	for(i = 0 ; i < pHba->top_scsi_channel ; i++){
		for(j = 0; j < MAX_ID; j++){
			if(pHba->channel[i].device[j] != NULL){
				for(pDev = pHba->channel[i].device[j]; pDev; pDev = pNext){
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
				MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if(hba_count <= 0){
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if (chan >= MAX_CHANNEL)
		return NULL;

	d = pHba->channel[chan].device[id];
	if(!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match */
	if(d->scsi_lun == lun){
		return d;
	}

	/* else we need to look through all the luns */
	for(d=d->next_lun ; d ; d = d->next_lun){
		if(d->scsi_lun == lun){
			return d;
		}
	}
	return NULL;
}


static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences.
	// When embedded in the kernel this could go back to the vanilla one.
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if((status = adpt_i2o_post_this(pHba, msg, len)) == 0){
		set_current_state(TASK_INTERRUPTIBLE);
		if(pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else {
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time.  Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if(pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if(status == -ETIMEDOUT){
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n",pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for(p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if(p1 == wait_data) {
			if(p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION ) {
				status = -EOPNOTSUPP;
			}
			if(p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}
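
/*
 * Context word layout used by the post-wait path (derived from the code
 * above and from adpt_isr()): bit 31 marks a post-wait message, bit 30 an
 * ioctl passthrough (set by adpt_i2o_passthru() before it calls here), and
 * the low 15 bits carry the wait id that adpt_i2o_post_wait_complete()
 * matches against the queue.
 */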


static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	// post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
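
/*
 * Note (illustrative): this implements the standard I2O inbound post
 * protocol - reading the post port returns a free message frame offset
 * (MFA) or EMPTY_QUEUE, the request is copied into that frame, and
 * writing the MFA back to the post port hands the frame to the IOP.
 */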


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding.  If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase the timeout, and recompile.
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if(p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could not find task %d in wait queue\n",context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for(p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"         %d\n",p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if(pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if(status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}

	msg[0]=EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]=0;
	msg[3]=0;
	msg[4]=0;
	msg[5]=0;
	msg[6]=dma_low(addr);
	msg[7]=dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while(*status == 0){
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: IOP Reset Timeout\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because the controller may wake up and
			   corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if(*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for a message frame to become available,
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if(time_after(jiffies,timeout)){
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n",pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because the controller may
				   wake up and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if(*status == 0x02 ||
			pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10];	// scratch buffer; must hold at least 8 dwords of query data
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty?\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;
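	/*
	 * Note (illustrative): the arithmetic above assumes the LCT is sized
	 * in 32-bit words, with a 3-word header followed by 9-word entries,
	 * hence max = (table_size - 3) / 9 entries.
	 */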

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled.  This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if( lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID){
				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
				continue;
			}
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			if(scsi_id > pHba->top_scsi_id){
				pHba->top_scsi_id = scsi_id;
			}
			if(scsi_lun > pHba->top_scsi_lun){
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if(d==NULL)
		{
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n",pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT){
			tid = d->lct_data.tid;
			// TODO get the bus_no from the hrt - but for now they are in order
			//bus_no =
			if(bus_no > pHba->top_scsi_channel){
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28)>=0)
			{
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get it from the hrt
			bus_no++;
			if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for(d = pHba->devices; d ; d = d->next) {
		if(d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		   d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		   d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)>=0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if(bus_no >= MAX_CHANNEL) {	// Something is wrong; skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if( pHba->channel[bus_no].device[scsi_id] == NULL){
					pDev = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for( pDev = pHba->channel[bus_no].device[scsi_id];
					     pDev->next_lun; pDev = pDev->next_lun){
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device),GFP_KERNEL);
					if(pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if(scsi_id == -1){
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
						d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 * Each I2O controller has a chain of devices on it - these match
 * the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller=pHba;
	d->owner=NULL;
	d->next=pHba->devices;
	d->prev=NULL;
	if (pHba->devices != NULL){
		pHba->devices->prev=d;
	}
	pHba->devices=d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}
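
/*
 * Note (illustrative): pHba->devices is a doubly-linked list with new
 * devices pushed at the head, so later walks of the chain (as in
 * adpt_i2o_parse_lct() above) see devices in reverse discovery order.
 */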

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if(pHba->in_use){
//		mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}


static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user * user_reply = NULL;
	void **sg_list = NULL;
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if(get_user(size, &user_msg[0])){
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if(size > MAX_MESSAGE_SIZE){
		return -EFAULT;
	}
	size *= 4;	// Convert to bytes

	/* Copy in the user's I2O command */
	if(copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if(reply_size > REPLY_FRAME_SIZE){
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if(reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n",pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000;	// IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if(sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize){
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name,sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for(i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n",pHba->name,i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if(!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
						pHba->name,sg_size,i,sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p;	// sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if(sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p,(void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n",pHba->name,i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from entering the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk(KERN_WARNING"adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if(rcode){
		goto cleanup;
	}

	if(sg_offset) {
		/* Copy the scatter/gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// reacquire the original message to handle the sg copy operation correctly
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if(get_user(size, &user_msg[0])){
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user (msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if(! (sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus,sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n",pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if(copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n",pHba->name);
			rcode = -EFAULT;
		}
		if(copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n",pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg +sg_offset);
		while(sg_index) {
			if(sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif
#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:	// Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

/*
 * This routine returns information about the system.  This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we cannot get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))){
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}

static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA){
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if(pHba == NULL){
		return -ENXIO;
	}

	while((volatile u32) pHba->state & DPTI_STATE_RESET )
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO:{
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum = PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if(copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))){
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n",pHba->name);
			return -EFAULT;
		}
		break;
	}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED:{
		u32 value;
		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
	}
	case I2ORESETCMD: {
		struct Scsi_Host *shost = pHba->host;

		if (shost)
			spin_lock_irqsave(shost->host_lock, flags);
		adpt_hba_reset(pHba);
		if (shost)
			spin_unlock_irqrestore(shost->host_lock, flags);
		break;
	}
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}

static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch(cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif

static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
		if(context & 0x40000000){	// IOCTL
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){	// Post wait message
			status = readl(reply+16);
			if(status >> 24){
				status &= 0xffff;	/* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				/*
				 * The request tag is one less than the command tag
				 * as the firmware might treat a 0 tag as invalid
				 */
				cmd = scsi_host_find_tag(pHba->host,
							 readl(reply + 12) - 1);
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else {	// SCSI message
			/*
			 * The request tag is one less than the command tag
			 * as the firmware might treat a 0 tag as invalid
			 */
			cmd = scsi_host_find_tag(pHba->host,
						 readl(reply + 12) - 1);
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				adpt_i2o_scsi_complete(reply, cmd);
			}
		}
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}
2187
2188static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
2189{
2190 int i;
2191 u32 msg[MAX_MESSAGE_SIZE];
2192 u32* mptr;
2193 u32* lptr;
2194 u32 *lenptr;
2195 int direction;
2196 int scsidir;
2197 int nseg;
2198 u32 len;
2199 u32 reqlen;
2200 s32 rcode;
2201 dma_addr_t addr;
2202
2203 memset(msg, 0 , sizeof(msg));
2204 len = scsi_bufflen(cmd);
2205 direction = 0x00000000;
2206
2207 scsidir = 0x00000000; // DATA NO XFER
2208 if(len) {
2209 /*
2210 * Set SCBFlags to indicate if data is being transferred
2211 * in or out, or no data transfer
2212 * Note: Do not have to verify index is less than 0 since
2213 * cmd->cmnd[0] is an unsigned char
2214 */
2215 switch(cmd->sc_data_direction){
2216 case DMA_FROM_DEVICE:
2217 scsidir =0x40000000; // DATA IN (iop<--dev)
2218 break;
2219 case DMA_TO_DEVICE:
2220 direction=0x04000000; // SGL OUT
2221 scsidir =0x80000000; // DATA OUT (iop-->dev)
2222 break;
2223 case DMA_NONE:
2224 break;
2225 case DMA_BIDIRECTIONAL:
2226 scsidir =0x40000000; // DATA IN (iop<--dev)
2227 // Assume In - and continue;
2228 break;
2229 default:
2230 printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
2231 pHba->name, cmd->cmnd[0]);
2232 cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
2233 cmd->scsi_done(cmd);
2234 return 0;
2235 }
2236 }
2237 // msg[0] is set later
2238 // I2O_CMD_SCSI_EXEC
2239 msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
2240 msg[2] = 0;
2241 /* Add 1 to avoid firmware treating it as invalid command */
2242 msg[3] = cmd->request->tag + 1;
2243 // Our cards use the transaction context as the tag for queueing
2244 // Adaptec/DPT Private stuff
2245 msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
2246 msg[5] = d->tid;
2247 /* Direction, disconnect ok | sense data | simple queue , CDBLen */
2248 // I2O_SCB_FLAG_ENABLE_DISCONNECT |
2249 // I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
2250 // I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
2251 msg[6] = scsidir|0x20a00000|cmd->cmd_len;
2252
2253 mptr=msg+7;
2254
2255 // Write SCSI command into the message - always 16 byte block
2256 memset(mptr, 0, 16);
2257 memcpy(mptr, cmd->cmnd, cmd->cmd_len);
2258 mptr+=4;
2259 lenptr=mptr++; /* Remember me - fill in when we know */
2260 if (dpt_dma64(pHba)) {
2261 reqlen = 16; // SINGLE SGE
2262 *mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
2263 *mptr++ = 1 << PAGE_SHIFT;
2264 } else {
2265 reqlen = 14; // SINGLE SGE
2266 }
2267 /* Now fill in the SGList and command */
2268
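	/*
	 * Each scatter-gather entry built below is an I2O "simple" SGL
	 * element: a flags/length word (0x10000000 = simple element,
	 * OR'd with 0x04000000 for data-out) followed by the DMA address
	 * (low dword, plus the high dword on 64-bit capable HBAs).  The
	 * last element is rewritten with 0xD0000000 to set the
	 * end-of-buffer and end-of-list bits.  The 0x7C element written
	 * above for 64-bit HBAs appears to be a DPT-private SG attribute
	 * element advertising the host page size.
	 */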
2269 nseg = scsi_dma_map(cmd);
2270 BUG_ON(nseg < 0);
2271 if (nseg) {
2272 struct scatterlist *sg;
2273
2274 len = 0;
2275 scsi_for_each_sg(cmd, sg, nseg, i) {
2276 lptr = mptr;
2277 *mptr++ = direction|0x10000000|sg_dma_len(sg);
2278 len+=sg_dma_len(sg);
2279 addr = sg_dma_address(sg);
2280 *mptr++ = dma_low(addr);
2281 if (dpt_dma64(pHba))
2282 *mptr++ = dma_high(addr);
2283 /* Make this an end of list */
2284 if (i == nseg - 1)
2285 *lptr = direction|0xD0000000|sg_dma_len(sg);
2286 }
2287 reqlen = mptr - msg;
2288 *lenptr = len;
2289
2290 if(cmd->underflow && len != cmd->underflow){
2291 printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
2292 len, cmd->underflow);
2293 }
2294 } else {
2295 *lenptr = len = 0;
2296 reqlen = 12;
2297 }
2298
2299 /* Stick the headers on */
2300 msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);
2301
2302	// Send it on its way
2303 rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
2304 if (rcode == 0) {
2305 return 0;
2306 }
2307 return rcode;
2308}
2309
2310
2311static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
2312{
2313 struct Scsi_Host *host;
2314
2315 host = scsi_host_alloc(sht, sizeof(adpt_hba*));
2316 if (host == NULL) {
2317 printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
2318 return -1;
2319 }
2320 host->hostdata[0] = (unsigned long)pHba;
2321 pHba->host = host;
2322
2323 host->irq = pHba->pDev->irq;
2324 /* no IO ports, so don't have to set host->io_port and
2325 * host->n_io_port
2326 */
2327 host->io_port = 0;
2328 host->n_io_port = 0;
2329 /* see comments in scsi_host.h */
2330 host->max_id = 16;
2331 host->max_lun = 256;
2332 host->max_channel = pHba->top_scsi_channel + 1;
2333 host->cmd_per_lun = 1;
2334 host->unique_id = (u32)sys_tbl_pa + pHba->unit;
2335 host->sg_tablesize = pHba->sg_tablesize;
2336 host->can_queue = pHba->post_fifo_size;
2337
2338 return 0;
2339}
2340
2341
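/*
 * Completion path for SCSI messages.  The detailed status word at
 * reply+16 packs the SCSI device status in its low byte and the HBA
 * adapter status in the next byte; reply+20 carries the byte count
 * actually transferred (used to compute the resid), and autosense
 * data, if any, begins at reply+28.
 */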
2342static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
2343{
2344 adpt_hba* pHba;
2345 u32 hba_status;
2346 u32 dev_status;
2347 u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
2348	// I know this would look cleaner if I just read bytes,
2349	// but the model I have been using for all the rest of the
2350	// I/O is in 4-byte words - so I keep that model
2351 u16 detailed_status = readl(reply+16) &0xffff;
2352 dev_status = (detailed_status & 0xff);
2353 hba_status = detailed_status >> 8;
2354
2355 // calculate resid for sg
2356 scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));
2357
2358 pHba = (adpt_hba*) cmd->device->host->hostdata[0];
2359
2360 cmd->sense_buffer[0] = '\0'; // initialize sense valid flag to false
2361
2362 if(!(reply_flags & MSG_FAIL)) {
2363 switch(detailed_status & I2O_SCSI_DSC_MASK) {
2364 case I2O_SCSI_DSC_SUCCESS:
2365 cmd->result = (DID_OK << 16);
2366 // handle underflow
2367 if (readl(reply+20) < cmd->underflow) {
2368 cmd->result = (DID_ERROR <<16);
2369 printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
2370 }
2371 break;
2372 case I2O_SCSI_DSC_REQUEST_ABORTED:
2373 cmd->result = (DID_ABORT << 16);
2374 break;
2375 case I2O_SCSI_DSC_PATH_INVALID:
2376 case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
2377 case I2O_SCSI_DSC_SELECTION_TIMEOUT:
2378 case I2O_SCSI_DSC_COMMAND_TIMEOUT:
2379 case I2O_SCSI_DSC_NO_ADAPTER:
2380 case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
2381 printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
2382 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
2383 cmd->result = (DID_TIME_OUT << 16);
2384 break;
2385 case I2O_SCSI_DSC_ADAPTER_BUSY:
2386 case I2O_SCSI_DSC_BUS_BUSY:
2387 cmd->result = (DID_BUS_BUSY << 16);
2388 break;
2389 case I2O_SCSI_DSC_SCSI_BUS_RESET:
2390 case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
2391 cmd->result = (DID_RESET << 16);
2392 break;
2393 case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
2394 printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
2395 cmd->result = (DID_PARITY << 16);
2396 break;
2397 case I2O_SCSI_DSC_UNABLE_TO_ABORT:
2398 case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
2399 case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
2400 case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
2401 case I2O_SCSI_DSC_AUTOSENSE_FAILED:
2402 case I2O_SCSI_DSC_DATA_OVERRUN:
2403 case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
2404 case I2O_SCSI_DSC_SEQUENCE_FAILURE:
2405 case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
2406 case I2O_SCSI_DSC_PROVIDE_FAILURE:
2407 case I2O_SCSI_DSC_REQUEST_TERMINATED:
2408 case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
2409 case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
2410 case I2O_SCSI_DSC_MESSAGE_RECEIVED:
2411 case I2O_SCSI_DSC_INVALID_CDB:
2412 case I2O_SCSI_DSC_LUN_INVALID:
2413 case I2O_SCSI_DSC_SCSI_TID_INVALID:
2414 case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
2415 case I2O_SCSI_DSC_NO_NEXUS:
2416 case I2O_SCSI_DSC_CDB_RECEIVED:
2417 case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
2418 case I2O_SCSI_DSC_QUEUE_FROZEN:
2419 case I2O_SCSI_DSC_REQUEST_INVALID:
2420 default:
2421 printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2422 pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2423 hba_status, dev_status, cmd->cmnd[0]);
2424 cmd->result = (DID_ERROR << 16);
2425 break;
2426 }
2427
2428 // copy over the request sense data if it was a check
2429 // condition status
2430 if (dev_status == SAM_STAT_CHECK_CONDITION) {
2431 u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
2432 // Copy over the sense data
2433 memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
2434 if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
2435 cmd->sense_buffer[2] == DATA_PROTECT ){
2436				/* This is to handle a failed array */
2437 cmd->result = (DID_TIME_OUT << 16);
2438 printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
2439 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2440 hba_status, dev_status, cmd->cmnd[0]);
2441
2442 }
2443 }
2444 } else {
2445	/* In this condition we could not talk to the tid;
2446	 * the card rejected it. We should signal a retry
2447	 * for a limited number of retries.
2448	 */
2449 cmd->result = (DID_TIME_OUT << 16);
2450 printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
2451 pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
2452 ((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
2453 }
2454
2455 cmd->result |= (dev_status);
2456
2457 if(cmd->scsi_done != NULL){
2458 cmd->scsi_done(cmd);
2459 }
2460}
2461
2462
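/*
 * Re-read the logical configuration table and reconcile it with the
 * driver's device list.  Runs under the host lock, so it cannot race
 * with the interrupt handler.
 */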
2463static s32 adpt_rescan(adpt_hba* pHba)
2464{
2465 s32 rcode;
2466 ulong flags = 0;
2467
2468 if(pHba->host)
2469 spin_lock_irqsave(pHba->host->host_lock, flags);
2470 if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
2471 goto out;
2472 if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
2473 goto out;
2474 rcode = 0;
2475out: if(pHba->host)
2476 spin_unlock_irqrestore(pHba->host->host_lock, flags);
2477 return rcode;
2478}
2479
2480
2481static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
2482{
2483 int i;
2484 int max;
2485 int tid;
2486 struct i2o_device *d;
2487 i2o_lct *lct = pHba->lct;
2488 u8 bus_no = 0;
2489 s16 scsi_id;
2490 u64 scsi_lun;
2491 u32 buf[10]; // at least 8 u32's
2492 struct adpt_device* pDev = NULL;
2493 struct i2o_device* pI2o_dev = NULL;
2494
2495 if (lct == NULL) {
2496 printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
2497 return -1;
2498 }
2499
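	/*
	 * The LCT is sized in 32-bit words: a 3-dword header followed by
	 * 9-dword entries, hence the (table_size - 3) / 9 entry count.
	 */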
2500 max = lct->table_size;
2501 max -= 3;
2502 max /= 9;
2503
2504 // Mark each drive as unscanned
2505 for (d = pHba->devices; d; d = d->next) {
2506 pDev =(struct adpt_device*) d->owner;
2507 if(!pDev){
2508 continue;
2509 }
2510 pDev->state |= DPTI_DEV_UNSCANNED;
2511 }
2512
2513 printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);
2514
2515 for(i=0;i<max;i++) {
2516 if( lct->lct_entry[i].user_tid != 0xfff){
2517 continue;
2518 }
2519
2520 if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
2521 lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
2522 lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
2523 tid = lct->lct_entry[i].tid;
2524 if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
2525 printk(KERN_ERR"%s: Could not query device\n",pHba->name);
2526 continue;
2527 }
2528 bus_no = buf[0]>>16;
2529			if (bus_no >= MAX_CHANNEL) {	/* Something is wrong; skip it */
2530 printk(KERN_WARNING
2531 "%s: Channel number %d out of range\n",
2532 pHba->name, bus_no);
2533 continue;
2534 }
2535
2536 scsi_id = buf[1];
2537 scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
2538 pDev = pHba->channel[bus_no].device[scsi_id];
2539			/* find this LUN in the chain */
2540 while(pDev) {
2541 if(pDev->scsi_lun == scsi_lun) {
2542 break;
2543 }
2544 pDev = pDev->next_lun;
2545 }
2546			if(!pDev ) { // Something new; add it
2547 d = kmalloc(sizeof(struct i2o_device),
2548 GFP_ATOMIC);
2549 if(d==NULL)
2550 {
2551 printk(KERN_CRIT "Out of memory for I2O device data.\n");
2552 return -ENOMEM;
2553 }
2554
2555 d->controller = pHba;
2556 d->next = NULL;
2557
2558 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2559
2560 d->flags = 0;
2561 adpt_i2o_report_hba_unit(pHba, d);
2562 adpt_i2o_install_device(pHba, d);
2563
2564 pDev = pHba->channel[bus_no].device[scsi_id];
2565 if( pDev == NULL){
2566 pDev =
2567 kzalloc(sizeof(struct adpt_device),
2568 GFP_ATOMIC);
2569 if(pDev == NULL) {
2570 return -ENOMEM;
2571 }
2572 pHba->channel[bus_no].device[scsi_id] = pDev;
2573 } else {
2574 while (pDev->next_lun) {
2575 pDev = pDev->next_lun;
2576 }
2577 pDev = pDev->next_lun =
2578 kzalloc(sizeof(struct adpt_device),
2579 GFP_ATOMIC);
2580 if(pDev == NULL) {
2581 return -ENOMEM;
2582 }
2583 }
2584 pDev->tid = d->lct_data.tid;
2585 pDev->scsi_channel = bus_no;
2586 pDev->scsi_id = scsi_id;
2587 pDev->scsi_lun = scsi_lun;
2588 pDev->pI2o_dev = d;
2589 d->owner = pDev;
2590 pDev->type = (buf[0])&0xff;
2591 pDev->flags = (buf[0]>>8)&0xff;
2592				// Too late, SCSI system has made up its mind, but what the hey ...
2593 if(scsi_id > pHba->top_scsi_id){
2594 pHba->top_scsi_id = scsi_id;
2595 }
2596 if(scsi_lun > pHba->top_scsi_lun){
2597 pHba->top_scsi_lun = scsi_lun;
2598 }
2599 continue;
2600 } // end of new i2o device
2601
2602 // We found an old device - check it
2603 while(pDev) {
2604 if(pDev->scsi_lun == scsi_lun) {
2605 if(!scsi_device_online(pDev->pScsi_dev)) {
2606 printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
2607 pHba->name,bus_no,scsi_id,scsi_lun);
2608 if (pDev->pScsi_dev) {
2609 scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
2610 }
2611 }
2612 d = pDev->pI2o_dev;
2613 if(d->lct_data.tid != tid) { // something changed
2614 pDev->tid = tid;
2615 memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
2616 if (pDev->pScsi_dev) {
2617 pDev->pScsi_dev->changed = TRUE;
2618 pDev->pScsi_dev->removable = TRUE;
2619 }
2620 }
2621 // Found it - mark it scanned
2622 pDev->state = DPTI_DEV_ONLINE;
2623 break;
2624 }
2625 pDev = pDev->next_lun;
2626 }
2627 }
2628 }
2629 for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
2630 pDev =(struct adpt_device*) pI2o_dev->owner;
2631 if(!pDev){
2632 continue;
2633 }
2634		// Mark offline any drives that previously existed but could not be
2635		// found in the LCT table
2636 if (pDev->state & DPTI_DEV_UNSCANNED){
2637 pDev->state = DPTI_DEV_OFFLINE;
2638 printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
2639 if (pDev->pScsi_dev) {
2640 scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
2641 }
2642 }
2643 }
2644 return 0;
2645}
2646
2647/*============================================================================
2648 * Routines from i2o subsystem
2649 *============================================================================
2650 */
2651
2652
2653
2654/*
2655 * Bring an I2O controller into HOLD state. See the spec.
2656 */
2657static int adpt_i2o_activate_hba(adpt_hba* pHba)
2658{
2659 int rcode;
2660
2661 if(pHba->initialized ) {
2662 if (adpt_i2o_status_get(pHba) < 0) {
2663 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2664 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2665 return rcode;
2666 }
2667 if (adpt_i2o_status_get(pHba) < 0) {
2668 printk(KERN_INFO "HBA not responding.\n");
2669 return -1;
2670 }
2671 }
2672
2673 if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
2674 printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
2675 return -1;
2676 }
2677
2678 if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
2679 pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
2680 pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
2681 pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
2682 adpt_i2o_reset_hba(pHba);
2683 if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
2684 printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
2685 return -1;
2686 }
2687 }
2688 } else {
2689 if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
2690 printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
2691 return rcode;
2692 }
2693
2694 }
2695
2696 if (adpt_i2o_init_outbound_q(pHba) < 0) {
2697 return -1;
2698 }
2699
2700 /* In HOLD state */
2701
2702 if (adpt_i2o_hrt_get(pHba) < 0) {
2703 return -1;
2704 }
2705
2706 return 0;
2707}
2708
2709/*
2710 * Bring a controller online into OPERATIONAL state.
2711 */
2712
2713static int adpt_i2o_online_hba(adpt_hba* pHba)
2714{
2715 if (adpt_i2o_systab_send(pHba) < 0)
2716 return -1;
2717 /* In READY state */
2718
2719 if (adpt_i2o_enable_hba(pHba) < 0)
2720 return -1;
2721
2722 /* In OPERATIONAL state */
2723 return 0;
2724}
2725
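/*
 * Hand a message frame back to the IOP by filling it with a three-word
 * UtilNOP and posting it.  Used to return the preserved MFA of a failed
 * message and to release frames we cannot use.
 */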
2726static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
2727{
2728 u32 __iomem *msg;
2729 ulong timeout = jiffies + 5*HZ;
2730
2731 while(m == EMPTY_QUEUE){
2732 rmb();
2733 m = readl(pHba->post_port);
2734 if(m != EMPTY_QUEUE){
2735 break;
2736 }
2737 if(time_after(jiffies,timeout)){
2738 printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
2739 return 2;
2740 }
2741 schedule_timeout_uninterruptible(1);
2742 }
2743 msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
2744 writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
2745 writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
2746 writel( 0,&msg[2]);
2747 wmb();
2748
2749 writel(m, pHba->post_port);
2750 wmb();
2751 return 0;
2752}
2753
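/*
 * ExecOutboundInit handshake: post the init message with a small DMA
 * status buffer described by an SGL element, poll that buffer until the
 * IOP reports completion, then prime the outbound FIFO by writing the
 * bus address of every reply frame to the reply port.
 */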
2754static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
2755{
2756 u8 *status;
2757 dma_addr_t addr;
2758 u32 __iomem *msg = NULL;
2759 int i;
2760 ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
2761 u32 m;
2762
2763 do {
2764 rmb();
2765 m = readl(pHba->post_port);
2766 if (m != EMPTY_QUEUE) {
2767 break;
2768 }
2769
2770 if(time_after(jiffies,timeout)){
2771 printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
2772 return -ETIMEDOUT;
2773 }
2774 schedule_timeout_uninterruptible(1);
2775 } while(m == EMPTY_QUEUE);
2776
2777 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2778
2779 status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
2780 if (!status) {
2781 adpt_send_nop(pHba, m);
2782 printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
2783 pHba->name);
2784 return -ENOMEM;
2785 }
2786
2787 writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
2788 writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
2789 writel(0, &msg[2]);
2790 writel(0x0106, &msg[3]); /* Transaction context */
2791 writel(4096, &msg[4]); /* Host page frame size */
2792 writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]); /* Outbound msg frame size and Initcode */
2793 writel(0xD0000004, &msg[6]); /* Simple SG LE, EOB */
2794 writel((u32)addr, &msg[7]);
2795
2796 writel(m, pHba->post_port);
2797 wmb();
2798
2799 // Wait for the reply status to come back
2800 do {
2801 if (*status) {
2802 if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
2803 break;
2804 }
2805 }
2806 rmb();
2807 if(time_after(jiffies,timeout)){
2808 printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
2809			/* We lose 4 bytes of "status" here, but we
2810			   cannot free them because the controller may
2811			   wake up and write to those bytes at any time */
2812 /* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
2813 return -ETIMEDOUT;
2814 }
2815 schedule_timeout_uninterruptible(1);
2816 } while (1);
2817
2818 // If the command was successful, fill the fifo with our reply
2819 // message packets
2820 if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
2821 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2822 return -2;
2823 }
2824 dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
2825
2826 if(pHba->reply_pool != NULL) {
2827 dma_free_coherent(&pHba->pDev->dev,
2828 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2829 pHba->reply_pool, pHba->reply_pool_pa);
2830 }
2831
2832 pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
2833 pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
2834 &pHba->reply_pool_pa, GFP_KERNEL);
2835 if (!pHba->reply_pool) {
2836 printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
2837 return -ENOMEM;
2838 }
2839
2840 for(i = 0; i < pHba->reply_fifo_size; i++) {
2841 writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
2842 pHba->reply_port);
2843 wmb();
2844 }
2845 adpt_i2o_status_get(pHba);
2846 return 0;
2847}
2848
2849
2850/*
2851 * I2O System Table. Contains information about
2852 * all the IOPs in the system. Used to inform IOPs
2853 * about each other's existence.
2854 *
2855  * change_ind is the CurrentChangeIndicator that is
2856  * used by IOPs to track changes.
2857 */
2858
2859
2860
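/*
 * ExecStatusGet: the IOP DMAs an 88-byte status block into host memory.
 * Completion is detected by polling the block's last byte (the sync
 * byte), which the IOP sets to 0xff once the whole block is written.
 */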
2861static s32 adpt_i2o_status_get(adpt_hba* pHba)
2862{
2863 ulong timeout;
2864 u32 m;
2865 u32 __iomem *msg;
2866 u8 *status_block=NULL;
2867
2868 if(pHba->status_block == NULL) {
2869 pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
2870 sizeof(i2o_status_block),
2871 &pHba->status_block_pa, GFP_KERNEL);
2872 if(pHba->status_block == NULL) {
2873 printk(KERN_ERR
2874			    "dpti%d: Get Status Block failed; Out of memory.\n",
2875 pHba->unit);
2876 return -ENOMEM;
2877 }
2878 }
2879 memset(pHba->status_block, 0, sizeof(i2o_status_block));
2880 status_block = (u8*)(pHba->status_block);
2881 timeout = jiffies+TMOUT_GETSTATUS*HZ;
2882 do {
2883 rmb();
2884 m = readl(pHba->post_port);
2885 if (m != EMPTY_QUEUE) {
2886 break;
2887 }
2888 if(time_after(jiffies,timeout)){
2889 printk(KERN_ERR "%s: Timeout waiting for message !\n",
2890 pHba->name);
2891 return -ETIMEDOUT;
2892 }
2893 schedule_timeout_uninterruptible(1);
2894 } while(m==EMPTY_QUEUE);
2895
2896
2897 msg=(u32 __iomem *)(pHba->msg_addr_virt+m);
2898
2899 writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
2900 writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
2901 writel(1, &msg[2]);
2902 writel(0, &msg[3]);
2903 writel(0, &msg[4]);
2904 writel(0, &msg[5]);
2905 writel( dma_low(pHba->status_block_pa), &msg[6]);
2906 writel( dma_high(pHba->status_block_pa), &msg[7]);
2907 writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes
2908
2909 //post message
2910 writel(m, pHba->post_port);
2911 wmb();
2912
2913 while(status_block[87]!=0xff){
2914 if(time_after(jiffies,timeout)){
2915 printk(KERN_ERR"dpti%d: Get status timeout.\n",
2916 pHba->unit);
2917 return -ETIMEDOUT;
2918 }
2919 rmb();
2920 schedule_timeout_uninterruptible(1);
2921 }
2922
2923 // Set up our number of outbound and inbound messages
2924 pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
2925 if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
2926 pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
2927 }
2928
2929 pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
2930 if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
2931 pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
2932 }
2933
2934 // Calculate the Scatter Gather list size
2935 if (dpt_dma64(pHba)) {
2936 pHba->sg_tablesize
2937 = ((pHba->status_block->inbound_frame_size * 4
2938 - 14 * sizeof(u32))
2939 / (sizeof(struct sg_simple_element) + sizeof(u32)));
2940 } else {
2941 pHba->sg_tablesize
2942 = ((pHba->status_block->inbound_frame_size * 4
2943 - 12 * sizeof(u32))
2944 / sizeof(struct sg_simple_element));
2945 }
2946 if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
2947 pHba->sg_tablesize = SG_LIST_ELEMENTS;
2948 }
2949
2950
2951#ifdef DEBUG
2952 printk("dpti%d: State = ",pHba->unit);
2953 switch(pHba->status_block->iop_state) {
2954 case 0x01:
2955 printk("INIT\n");
2956 break;
2957 case 0x02:
2958 printk("RESET\n");
2959 break;
2960 case 0x04:
2961 printk("HOLD\n");
2962 break;
2963 case 0x05:
2964 printk("READY\n");
2965 break;
2966 case 0x08:
2967 printk("OPERATIONAL\n");
2968 break;
2969 case 0x10:
2970 printk("FAILED\n");
2971 break;
2972 case 0x11:
2973 printk("FAULTED\n");
2974 break;
2975 default:
2976 printk("%x (unknown!!)\n",pHba->status_block->iop_state);
2977 }
2978#endif
2979 return 0;
2980}
2981
2982/*
2983 * Get the IOP's Logical Configuration Table
2984 */
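/*
 * The IOP reports the LCT's real size in the table header; if the
 * returned table would not fit, the buffer is freed and the request is
 * reissued with the larger size until the whole table is captured.
 */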
2985static int adpt_i2o_lct_get(adpt_hba* pHba)
2986{
2987 u32 msg[8];
2988 int ret;
2989 u32 buf[16];
2990
2991 if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
2992 pHba->lct_size = pHba->status_block->expected_lct_size;
2993 }
2994 do {
2995 if (pHba->lct == NULL) {
2996 pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
2997 pHba->lct_size, &pHba->lct_pa,
2998 GFP_ATOMIC);
2999 if(pHba->lct == NULL) {
3000 printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
3001 pHba->name);
3002 return -ENOMEM;
3003 }
3004 }
3005 memset(pHba->lct, 0, pHba->lct_size);
3006
3007 msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
3008 msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
3009 msg[2] = 0;
3010 msg[3] = 0;
3011 msg[4] = 0xFFFFFFFF; /* All devices */
3012 msg[5] = 0x00000000; /* Report now */
3013 msg[6] = 0xD0000000|pHba->lct_size;
3014 msg[7] = (u32)pHba->lct_pa;
3015
3016 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
3017			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
3018 pHba->name, ret);
3019 printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
3020 return ret;
3021 }
3022
3023 if ((pHba->lct->table_size << 2) > pHba->lct_size) {
3024 pHba->lct_size = pHba->lct->table_size << 2;
3025 dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
3026 pHba->lct, pHba->lct_pa);
3027 pHba->lct = NULL;
3028 }
3029 } while (pHba->lct == NULL);
3030
3031	PDEBUG("%s: Logical configuration table read.\n", pHba->name);
3032
3033
3034 // I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
3035 if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
3036 pHba->FwDebugBufferSize = buf[1];
3037 pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
3038 pHba->FwDebugBufferSize);
3039 if (pHba->FwDebugBuffer_P) {
3040 pHba->FwDebugFlags_P = pHba->FwDebugBuffer_P +
3041 FW_DEBUG_FLAGS_OFFSET;
3042 pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
3043 FW_DEBUG_BLED_OFFSET;
3044 pHba->FwDebugBLEDflag_P = pHba->FwDebugBLEDvalue_P + 1;
3045 pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
3046 FW_DEBUG_STR_LENGTH_OFFSET;
3047 pHba->FwDebugBuffer_P += buf[2];
3048 pHba->FwDebugFlags = 0;
3049 }
3050 }
3051
3052 return 0;
3053}
3054
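/*
 * Build the I2O system table that is advertised to every IOP: a header
 * plus one i2o_sys_tbl_entry per registered HBA, each entry pointing at
 * that HBA's inbound message FIFO (base address + 0x40) so the IOPs can
 * address one another.
 */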
3055static int adpt_i2o_build_sys_table(void)
3056{
3057 adpt_hba* pHba = hba_chain;
3058 int count = 0;
3059
3060 if (sys_tbl)
3061 dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
3062 sys_tbl, sys_tbl_pa);
3063
3064 sys_tbl_len = sizeof(struct i2o_sys_tbl) + // Header + IOPs
3065 (hba_count) * sizeof(struct i2o_sys_tbl_entry);
3066
3067 sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
3068 sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
3069 if (!sys_tbl) {
3070 printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
3071 return -ENOMEM;
3072 }
3073
3074 sys_tbl->num_entries = hba_count;
3075 sys_tbl->version = I2OVERSION;
3076 sys_tbl->change_ind = sys_tbl_ind++;
3077
3078 for(pHba = hba_chain; pHba; pHba = pHba->next) {
3079 u64 addr;
3080 // Get updated Status Block so we have the latest information
3081 if (adpt_i2o_status_get(pHba)) {
3082 sys_tbl->num_entries--;
3083 continue; // try next one
3084 }
3085
3086 sys_tbl->iops[count].org_id = pHba->status_block->org_id;
3087 sys_tbl->iops[count].iop_id = pHba->unit + 2;
3088 sys_tbl->iops[count].seg_num = 0;
3089 sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
3090 sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
3091 sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
3092 sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
3093 sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
3094 sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
3095 addr = pHba->base_addr_phys + 0x40;
3096 sys_tbl->iops[count].inbound_low = dma_low(addr);
3097 sys_tbl->iops[count].inbound_high = dma_high(addr);
3098
3099 count++;
3100 }
3101
3102#ifdef DEBUG
3103{
3104 u32 *table = (u32*)sys_tbl;
3105 printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
3106 for(count = 0; count < (sys_tbl_len >>2); count++) {
3107 printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
3108 count, table[count]);
3109 }
3110}
3111#endif
3112
3113 return 0;
3114}
3115
3116
3117/*
3118 * Dump the information block associated with a given unit (TID)
3119 */
3120
3121static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
3122{
3123 char buf[64];
3124 int unit = d->lct_data.tid;
3125
3126 printk(KERN_INFO "TID %3.3d ", unit);
3127
3128 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
3129 {
3130 buf[16]=0;
3131 printk(" Vendor: %-12.12s", buf);
3132 }
3133 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
3134 {
3135 buf[16]=0;
3136 printk(" Device: %-12.12s", buf);
3137 }
3138 if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
3139 {
3140 buf[8]=0;
3141 printk(" Rev: %-12.12s\n", buf);
3142 }
3143#ifdef DEBUG
3144 printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
3145 printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
3146 printk(KERN_INFO "\tFlags: ");
3147
3148 if(d->lct_data.device_flags&(1<<0))
3149 printk("C"); // ConfigDialog requested
3150 if(d->lct_data.device_flags&(1<<1))
3151 printk("U"); // Multi-user capable
3152 if(!(d->lct_data.device_flags&(1<<4)))
3153 printk("P"); // Peer service enabled!
3154 if(!(d->lct_data.device_flags&(1<<5)))
3155 printk("M"); // Mgmt service enabled!
3156 printk("\n");
3157#endif
3158}
3159
3160#ifdef DEBUG
3161/*
3162 * Do i2o class name lookup
3163 */
3164static const char *adpt_i2o_get_class_name(int class)
3165{
3166 int idx = 16;
3167 static char *i2o_class_name[] = {
3168 "Executive",
3169 "Device Driver Module",
3170 "Block Device",
3171 "Tape Device",
3172 "LAN Interface",
3173 "WAN Interface",
3174 "Fibre Channel Port",
3175 "Fibre Channel Device",
3176 "SCSI Device",
3177 "ATE Port",
3178 "ATE Device",
3179 "Floppy Controller",
3180 "Floppy Device",
3181 "Secondary Bus Port",
3182 "Peer Transport Agent",
3183 "Peer Transport",
3184 "Unknown"
3185 };
3186
3187 switch(class&0xFFF) {
3188 case I2O_CLASS_EXECUTIVE:
3189 idx = 0; break;
3190 case I2O_CLASS_DDM:
3191 idx = 1; break;
3192 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
3193 idx = 2; break;
3194 case I2O_CLASS_SEQUENTIAL_STORAGE:
3195 idx = 3; break;
3196 case I2O_CLASS_LAN:
3197 idx = 4; break;
3198 case I2O_CLASS_WAN:
3199 idx = 5; break;
3200 case I2O_CLASS_FIBRE_CHANNEL_PORT:
3201 idx = 6; break;
3202 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
3203 idx = 7; break;
3204 case I2O_CLASS_SCSI_PERIPHERAL:
3205 idx = 8; break;
3206 case I2O_CLASS_ATE_PORT:
3207 idx = 9; break;
3208 case I2O_CLASS_ATE_PERIPHERAL:
3209 idx = 10; break;
3210 case I2O_CLASS_FLOPPY_CONTROLLER:
3211 idx = 11; break;
3212 case I2O_CLASS_FLOPPY_DEVICE:
3213 idx = 12; break;
3214 case I2O_CLASS_BUS_ADAPTER_PORT:
3215 idx = 13; break;
3216 case I2O_CLASS_PEER_TRANSPORT_AGENT:
3217 idx = 14; break;
3218 case I2O_CLASS_PEER_TRANSPORT:
3219 idx = 15; break;
3220 }
3221 return i2o_class_name[idx];
3222}
3223#endif
3224
3225
3226static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
3227{
3228 u32 msg[6];
3229 int ret, size = sizeof(i2o_hrt);
3230
3231 do {
3232 if (pHba->hrt == NULL) {
3233 pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
3234 size, &pHba->hrt_pa, GFP_KERNEL);
3235 if (pHba->hrt == NULL) {
3236 printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
3237 return -ENOMEM;
3238 }
3239 }
3240
3241 msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
3242 msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
3243 msg[2]= 0;
3244 msg[3]= 0;
3245 msg[4]= (0xD0000000 | size); /* Simple transaction */
3246 msg[5]= (u32)pHba->hrt_pa; /* Dump it here */
3247
3248 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
3249 printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
3250 return ret;
3251 }
3252
3253 if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
3254 int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
3255 dma_free_coherent(&pHba->pDev->dev, size,
3256 pHba->hrt, pHba->hrt_pa);
3257 size = newsize;
3258 pHba->hrt = NULL;
3259 }
3260 } while(pHba->hrt == NULL);
3261 return 0;
3262}
3263
3264/*
3265 * Query one scalar group value or a whole scalar group.
3266 */
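/*
 * The operation block below follows the UtilParamsGet layout:
 * { operation count = 1, reserved, I2O_PARAMS_FIELD_GET, group number,
 * field count = 1, field number }, with the field count forced to -1
 * when the caller wants the whole group.
 */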
3267static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
3268 int group, int field, void *buf, int buflen)
3269{
3270 u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
3271 u8 *opblk_va;
3272 dma_addr_t opblk_pa;
3273 u8 *resblk_va;
3274 dma_addr_t resblk_pa;
3275
3276 int size;
3277
3278 /* 8 bytes for header */
3279 resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3280 sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
3281 if (resblk_va == NULL) {
3282 printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
3283 return -ENOMEM;
3284 }
3285
3286 opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
3287 sizeof(opblk), &opblk_pa, GFP_KERNEL);
3288 if (opblk_va == NULL) {
3289 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3290 resblk_va, resblk_pa);
3291 printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
3292 pHba->name);
3293 return -ENOMEM;
3294 }
3295 if (field == -1) /* whole group */
3296 opblk[4] = -1;
3297
3298 memcpy(opblk_va, opblk, sizeof(opblk));
3299 size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
3300 opblk_va, opblk_pa, sizeof(opblk),
3301 resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
3302 dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
3303 if (size == -ETIME) {
3304 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3305 resblk_va, resblk_pa);
3306 printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
3307 return -ETIME;
3308 } else if (size == -EINTR) {
3309 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3310 resblk_va, resblk_pa);
3311 printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
3312 return -EINTR;
3313 }
3314
3315 memcpy(buf, resblk_va+8, buflen); /* cut off header */
3316
3317 dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
3318 resblk_va, resblk_pa);
3319 if (size < 0)
3320 return size;
3321
3322 return buflen;
3323}
3324
3325
3326/* Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
3327 *
3328 * This function can be used for all UtilParamsGet/Set operations.
3329 * The OperationBlock is given in opblk-buffer,
3330 * and results are returned in resblk-buffer.
3331 * Note that the minimum sized resblk is 8 bytes and contains
3332 * ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
3333 */
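/*
 * The return value on success, 4 + (BlockSize << 2), is the number of
 * bytes used in the result block; the IOP reports BlockSize in 32-bit
 * words, hence the shift.
 */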
3334static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
3335 void *opblk_va, dma_addr_t opblk_pa, int oplen,
3336 void *resblk_va, dma_addr_t resblk_pa, int reslen)
3337{
3338 u32 msg[9];
3339 u32 *res = (u32 *)resblk_va;
3340 int wait_status;
3341
3342 msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
3343 msg[1] = cmd << 24 | HOST_TID << 12 | tid;
3344 msg[2] = 0;
3345 msg[3] = 0;
3346 msg[4] = 0;
3347 msg[5] = 0x54000000 | oplen; /* OperationBlock */
3348 msg[6] = (u32)opblk_pa;
3349 msg[7] = 0xD0000000 | reslen; /* ResultBlock */
3350 msg[8] = (u32)resblk_pa;
3351
3352 if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
3353 printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
3354 return wait_status; /* -DetailedStatus */
3355 }
3356
3357 if (res[1]&0x00FF0000) { /* BlockStatus != SUCCESS */
3358 printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
3359 "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
3360 pHba->name,
3361 (cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
3362 : "PARAMS_GET",
3363 res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
3364 return -((res[1] >> 16) & 0xFF); /* -BlockStatus */
3365 }
3366
3367 return 4 + ((res[1] & 0x0000FFFF) << 2); /* bytes used in resblk */
3368}
3369
3370
3371static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
3372{
3373 u32 msg[4];
3374 int ret;
3375
3376 adpt_i2o_status_get(pHba);
3377
3378 /* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
3379
3380 if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
3381 (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
3382 return 0;
3383 }
3384
3385 msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3386 msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
3387 msg[2] = 0;
3388 msg[3] = 0;
3389
3390 if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3391 printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
3392 pHba->unit, -ret);
3393 } else {
3394 printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
3395 }
3396
3397 adpt_i2o_status_get(pHba);
3398 return ret;
3399}
3400
3401
3402/*
3403 * Enable IOP. Allows the IOP to resume external operations.
3404 */
3405static int adpt_i2o_enable_hba(adpt_hba* pHba)
3406{
3407 u32 msg[4];
3408 int ret;
3409
3410 adpt_i2o_status_get(pHba);
3411 if(!pHba->status_block){
3412 return -ENOMEM;
3413 }
3414 /* Enable only allowed on READY state */
3415 if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
3416 return 0;
3417
3418 if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
3419 return -EINVAL;
3420
3421 msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
3422 msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
3423 msg[2]= 0;
3424 msg[3]= 0;
3425
3426 if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
3427 printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
3428 pHba->name, ret);
3429 } else {
3430 PDEBUG("%s: Enabled.\n", pHba->name);
3431 }
3432
3433 adpt_i2o_status_get(pHba);
3434 return ret;
3435}
3436
3437
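/*
 * ExecSysTabSet: hand the IOP the system table plus empty private
 * memory and private I/O space descriptors.  Going by the SGL flag
 * usage elsewhere in this driver, 0x54000000 is a simple data-out
 * element with the end-of-buffer bit set, and 0xD4000000 additionally
 * sets the end-of-list bit on the final, zero-length element.
 */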
3438static int adpt_i2o_systab_send(adpt_hba* pHba)
3439{
3440 u32 msg[12];
3441 int ret;
3442
3443 msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
3444 msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
3445 msg[2] = 0;
3446 msg[3] = 0;
3447 msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
3448 msg[5] = 0; /* Segment 0 */
3449
3450 /*
3451 * Provide three SGL-elements:
3452 * System table (SysTab), Private memory space declaration and
3453 * Private i/o space declaration
3454 */
3455 msg[6] = 0x54000000 | sys_tbl_len;
3456 msg[7] = (u32)sys_tbl_pa;
3457 msg[8] = 0x54000000 | 0;
3458 msg[9] = 0;
3459 msg[10] = 0xD4000000 | 0;
3460 msg[11] = 0;
3461
3462 if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
3463 printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
3464 pHba->name, ret);
3465 }
3466#ifdef DEBUG
3467 else {
3468 PINFO("%s: SysTab set.\n", pHba->name);
3469 }
3470#endif
3471
3472 return ret;
3473}
3474
3475
3476/*============================================================================
3477 *
3478 *============================================================================
3479 */
3480
3481
3482#ifdef UARTDELAY
3483
3484 static void adpt_delay(int millisec)
3485{
3486 int i;
3487 for (i = 0; i < millisec; i++) {
3488 udelay(1000); /* delay for one millisecond */
3489 }
3490}
3491
3492#endif
3493
3494static struct scsi_host_template driver_template = {
3495 .module = THIS_MODULE,
3496 .name = "dpt_i2o",
3497 .proc_name = "dpt_i2o",
3498 .show_info = adpt_show_info,
3499 .info = adpt_info,
3500 .queuecommand = adpt_queue,
3501 .eh_abort_handler = adpt_abort,
3502 .eh_device_reset_handler = adpt_device_reset,
3503 .eh_bus_reset_handler = adpt_bus_reset,
3504 .eh_host_reset_handler = adpt_reset,
3505 .bios_param = adpt_bios_param,
3506 .slave_configure = adpt_slave_configure,
3507 .can_queue = MAX_TO_IOP_MESSAGES,
3508 .this_id = 7,
3509};
3510
3511static int __init adpt_init(void)
3512{
3513 int error;
3514 adpt_hba *pHba, *next;
3515
3516 printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");
3517
3518 error = adpt_detect(&driver_template);
3519 if (error < 0)
3520 return error;
3521 if (hba_chain == NULL)
3522 return -ENODEV;
3523
3524 for (pHba = hba_chain; pHba; pHba = pHba->next) {
3525 error = scsi_add_host(pHba->host, &pHba->pDev->dev);
3526 if (error)
3527 goto fail;
3528 scsi_scan_host(pHba->host);
3529 }
3530 return 0;
3531fail:
3532 for (pHba = hba_chain; pHba; pHba = next) {
3533 next = pHba->next;
3534 scsi_remove_host(pHba->host);
3535 }
3536 return error;
3537}
3538
3539static void __exit adpt_exit(void)
3540{
3541 adpt_hba *pHba, *next;
3542
3543 for (pHba = hba_chain; pHba; pHba = next) {
3544 next = pHba->next;
3545 adpt_release(pHba);
3546 }
3547}
3548
3549module_init(adpt_init);
3550module_exit(adpt_exit);
3551
3552MODULE_LICENSE("GPL");