// SPDX-License-Identifier: GPL-2.0-or-later
/***************************************************************************
                          dpti.c  -  description
                             -------------------
    begin                : Thu Sep 7 2000
    copyright            : (C) 2000 by Adaptec

                           July 30, 2001 First version being submitted
                           for inclusion in the kernel. V2.4

    See Documentation/scsi/dpti.rst for history, notes, license info
    and credits
 ***************************************************************************/

/***************************************************************************
 * Sat Dec 20 2003 Go Taniguchi <go@turbolinux.co.jp>
 - Support 2.6 kernel and DMA-mapping
 - ioctl fix for raid tools
 - use schedule_timeout in long long loop
 **************************************************************************/

/*#define DEBUG 1 */
/*#define UARTDELAY 1 */

#include <linux/module.h>
#include <linux/pgtable.h>

MODULE_AUTHOR("Deanna Bonds, with _lots_ of help from Mark Salyzyn");
MODULE_DESCRIPTION("Adaptec I2O RAID Driver");

////////////////////////////////////////////////////////////////

#include <linux/ioctl.h>	/* For SCSI-Passthrough */
#include <linux/uaccess.h>

#include <linux/stat.h>
#include <linux/slab.h>		/* for kmalloc() */
#include <linux/pci.h>		/* for PCI support */
#include <linux/proc_fs.h>
#include <linux/blkdev.h>
#include <linux/delay.h>	/* for udelay */
#include <linux/interrupt.h>
#include <linux/kernel.h>	/* for printk */
#include <linux/sched.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/dma-mapping.h>

#include <linux/timer.h>
#include <linux/string.h>
#include <linux/ioport.h>
#include <linux/mutex.h>

#include <asm/processor.h>	/* for boot_cpu_data */
#include <asm/io.h>		/* for virt_to_bus, etc. */

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>

#include "dpt/dptsig.h"
#include "dpti.h"

/*============================================================================
 * Create a binary signature - this is read by dptsig
 * Needed for our management apps
 *============================================================================
 */
static DEFINE_MUTEX(adpt_mutex);
static dpt_sig_S DPTI_sig = {
	{'d', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION,
#ifdef __i386__
	PROC_INTEL, PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM,
#elif defined(__ia64__)
	PROC_INTEL, PROC_IA64,
#elif defined(__sparc__)
	PROC_ULTRASPARC, PROC_ULTRASPARC,
#elif defined(__alpha__)
	PROC_ALPHA, PROC_ALPHA,
#else
	(-1), (-1),
#endif
	FT_HBADRVR, 0, OEM_DPT, OS_LINUX, CAP_OVERLAP, DEV_ALL,
	ADF_ALL_SC5, 0, 0, DPT_VERSION, DPT_REVISION, DPT_SUBREVISION,
	DPT_MONTH, DPT_DAY, DPT_YEAR, "Adaptec Linux I2O RAID Driver"
};

/*============================================================================
 * Globals
 *============================================================================
 */

static DEFINE_MUTEX(adpt_configuration_lock);

static struct i2o_sys_tbl *sys_tbl;
static dma_addr_t sys_tbl_pa;
static int sys_tbl_ind;
static int sys_tbl_len;

static adpt_hba* hba_chain = NULL;
static int hba_count = 0;

static struct class *adpt_sysfs_class;

static long adpt_unlocked_ioctl(struct file *, unsigned int, unsigned long);
#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *, unsigned int, unsigned long);
#endif

static const struct file_operations adpt_fops = {
	.unlocked_ioctl	= adpt_unlocked_ioctl,
	.open		= adpt_open,
	.release	= adpt_close,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= compat_adpt_ioctl,
#endif
	.llseek		= noop_llseek,
};
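
/*
 * A minimal userspace sketch (hypothetical, not part of this file) of how a
 * management tool talks to the control node served by adpt_fops above; the
 * /dev/dpti0 node name matches the "dpti%d" devices created in adpt_detect(),
 * and DPT_SIGNATURE is one of the ioctls handled in adpt_ioctl():
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *
 *	int fd = open("/dev/dpti0", O_RDWR);
 *	if (fd >= 0) {
 *		dpt_sig_S sig;
 *		if (ioctl(fd, DPT_SIGNATURE, &sig) == 0) {
 *			// sig now holds a copy of DPTI_sig
 *		}
 *		close(fd);
 *	}
 */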

/* Structures and definitions for synchronous message posting.
 * See adpt_i2o_post_wait() for a description.
 */
struct adpt_i2o_post_wait_data
{
	int status;
	u32 id;
	adpt_wait_queue_head_t *wq;
	struct adpt_i2o_post_wait_data *next;
};
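
/*
 * Lifecycle sketch: adpt_i2o_post_wait() links one of these records onto
 * adpt_post_wait_queue, stamps the low 15 bits of its id into msg[2], posts
 * the message and sleeps on wq. When the reply arrives,
 * adpt_i2o_post_wait_complete() matches the id carried back in the reply
 * context, records the completion status and wakes the sleeper, which then
 * unlinks and frees the record.
 */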

static struct adpt_i2o_post_wait_data *adpt_post_wait_queue = NULL;
static u32 adpt_post_wait_id = 0;
static DEFINE_SPINLOCK(adpt_post_wait_lock);


/*============================================================================
 * Functions
 *============================================================================
 */

static inline int dpt_dma64(adpt_hba *pHba)
{
	return (sizeof(dma_addr_t) > 4 && (pHba)->dma64);
}

static inline u32 dma_high(dma_addr_t addr)
{
	return upper_32_bits(addr);
}

static inline u32 dma_low(dma_addr_t addr)
{
	return (u32)addr;
}

static u8 adpt_read_blink_led(adpt_hba* host)
{
	if (host->FwDebugBLEDflag_P) {
		if (readb(host->FwDebugBLEDflag_P) == 0xbc) {
			return readb(host->FwDebugBLEDvalue_P);
		}
	}
	return 0;
}

/*============================================================================
 * Scsi host template interface functions
 *============================================================================
 */

#ifdef MODULE
static struct pci_device_id dptids[] = {
	{ PCI_DPT_VENDOR_ID, PCI_DPT_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ PCI_DPT_VENDOR_ID, PCI_DPT_RAPTOR_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
	{ 0, }
};
#endif

MODULE_DEVICE_TABLE(pci, dptids);

static int adpt_detect(struct scsi_host_template* sht)
{
	struct pci_dev *pDev = NULL;
	adpt_hba *pHba;
	adpt_hba *next;

	PINFO("Detecting Adaptec I2O RAID controllers...\n");

	/* search for all Adaptec I2O RAID cards */
	while ((pDev = pci_get_device(PCI_DPT_VENDOR_ID, PCI_ANY_ID, pDev))) {
		if (pDev->device == PCI_DPT_DEVICE_ID ||
		    pDev->device == PCI_DPT_RAPTOR_DEVICE_ID) {
			if (adpt_install_hba(sht, pDev)) {
				PERROR("Could not Init an I2O RAID device\n");
				PERROR("Will not try to detect others.\n");
				return hba_count - 1;
			}
			pci_dev_get(pDev);
		}
	}

	/* In INIT state, Activate IOPs */
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		// Activate does get status, init outbound, and get hrt
		if (adpt_i2o_activate_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
		}
	}

	/* Active IOPs in HOLD state */

rebuild_sys_tab:
	if (hba_chain == NULL)
		return 0;

	/*
	 * If build_sys_table fails, we kill everything and bail
	 * as we can't init the IOPs w/o a system table
	 */
	if (adpt_i2o_build_sys_table() < 0) {
		adpt_i2o_sys_shutdown();
		return 0;
	}

	PDEBUG("HBA's in HOLD state\n");

	/* If an IOP doesn't come online, we need to rebuild the System table */
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (adpt_i2o_online_hba(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			goto rebuild_sys_tab;
		}
	}

	/* Active IOPs now in OPERATIONAL state */
	PDEBUG("HBA's in OPERATIONAL state\n");

	printk("dpti: If you have a lot of devices this could take a few minutes.\n");
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		printk(KERN_INFO"%s: Reading the hardware resource table.\n", pHba->name);
		if (adpt_i2o_lct_get(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}

		if (adpt_i2o_parse_lct(pHba) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		adpt_inquiry(pHba);
	}

	adpt_sysfs_class = class_create(THIS_MODULE, "dpt_i2o");
	if (IS_ERR(adpt_sysfs_class)) {
		printk(KERN_WARNING"dpti: unable to create dpt_i2o class\n");
		adpt_sysfs_class = NULL;
	}

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		if (adpt_scsi_host_alloc(pHba, sht) < 0) {
			adpt_i2o_delete_hba(pHba);
			continue;
		}
		pHba->initialized = TRUE;
		pHba->state &= ~DPTI_STATE_RESET;
		if (adpt_sysfs_class) {
			struct device *dev = device_create(adpt_sysfs_class,
				NULL, MKDEV(DPTI_I2O_MAJOR, pHba->unit), NULL,
				"dpti%d", pHba->unit);
			if (IS_ERR(dev)) {
				printk(KERN_WARNING"dpti%d: unable to "
					"create device in dpt_i2o class\n",
					pHba->unit);
			}
		}
	}

	// Register our control device node
	// nodes will need to be created in /dev to access this
	// the nodes cannot be created from within the driver
	if (hba_count && register_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER, &adpt_fops)) {
		adpt_i2o_sys_shutdown();
		return 0;
	}
	return hba_count;
}

static void adpt_release(adpt_hba *pHba)
{
	struct Scsi_Host *shost = pHba->host;

	scsi_remove_host(shost);
//	adpt_i2o_quiesce_hba(pHba);
	adpt_i2o_delete_hba(pHba);
	scsi_host_put(shost);
}


static void adpt_inquiry(adpt_hba* pHba)
{
	u32 msg[17];
	u32 *mptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	u32 len;
	u32 reqlen;
	u8* buf;
	dma_addr_t addr;
	u8 scb[16];
	s32 rcode;

	memset(msg, 0, sizeof(msg));
	buf = dma_alloc_coherent(&pHba->pDev->dev, 80, &addr, GFP_KERNEL);
	if (!buf) {
		printk(KERN_ERR"%s: Could not allocate buffer\n", pHba->name);
		return;
	}
	memset((void*)buf, 0, 36);

	len = 36;
	direction = 0x00000000;
	scsidir = 0x40000000;	// DATA IN (iop<--dev)

	if (dpt_dma64(pHba))
		reqlen = 17;	// SINGLE SGE, 64 bit
	else
		reqlen = 14;	// SINGLE SGE, 32 bit
	/* Stick the headers on */
	msg[0] = reqlen<<16 | SGL_OFFSET_12;
	msg[1] = (0xff<<24|HOST_TID<<12|ADAPTER_TID);
	msg[2] = 0;
	msg[3] = 0;
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|DPT_ORGANIZATION_ID<<16;
	msg[5] = ADAPTER_TID | 1<<16 /* Interpret */;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|6 /* cmd len */;

	mptr = msg+7;

	memset(scb, 0, sizeof(scb));
	// Write SCSI command into the message - always 16 byte block
	scb[0] = INQUIRY;
	scb[1] = 0;
	scb[2] = 0;
	scb[3] = 0;
	scb[4] = 36;
	scb[5] = 0;
	// Don't care about the rest of scb

	memcpy(mptr, scb, sizeof(scb));
	mptr += 4;
	lenptr = mptr++;	/* Remember me - fill in when we know */

	/* Now fill in the SGList and command */
	*lenptr = len;
	if (dpt_dma64(pHba)) {
		*mptr++ = (0x7C<<24)+(2<<16)+0x02;	/* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = dma_low(addr);
		*mptr++ = dma_high(addr);
	} else {
		*mptr++ = 0xD0000000|direction|len;
		*mptr++ = addr;
	}

	// Send it on its way
	rcode = adpt_i2o_post_wait(pHba, msg, reqlen<<2, 120);
	if (rcode != 0) {
		sprintf(pHba->detail, "Adaptec I2O RAID");
		printk(KERN_INFO "%s: Inquiry Error (%d)\n", pHba->name, rcode);
		if (rcode != -ETIME && rcode != -EINTR)
			dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	} else {
		memset(pHba->detail, 0, sizeof(pHba->detail));
		memcpy(&(pHba->detail), "Vendor: Adaptec ", 16);
		memcpy(&(pHba->detail[16]), " Model: ", 8);
		memcpy(&(pHba->detail[24]), (u8*) &buf[16], 16);
		memcpy(&(pHba->detail[40]), " FW: ", 4);
		memcpy(&(pHba->detail[44]), (u8*) &buf[32], 4);
		pHba->detail[48] = '\0';	/* precautionary */
		dma_free_coherent(&pHba->pDev->dev, 80, buf, addr);
	}
	adpt_i2o_status_get(pHba);
	return;
}

static int adpt_slave_configure(struct scsi_device *device)
{
	struct Scsi_Host *host = device->host;
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];

	if (host->can_queue && device->tagged_supported) {
		scsi_change_queue_depth(device,
				host->can_queue - 1);
	}
	return 0;
}

static int adpt_queue_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	adpt_hba* pHba = NULL;
	struct adpt_device* pDev = NULL;	/* dpt per device information */

	cmd->scsi_done = done;
	/*
	 * SCSI REQUEST_SENSE commands will be executed automatically by the
	 * Host Adapter for any errors, so they should not be executed
	 * explicitly unless the Sense Data is zero indicating that no error
	 * occurred.
	 */

	if ((cmd->cmnd[0] == REQUEST_SENSE) && (cmd->sense_buffer[0] != 0)) {
		cmd->result = (DID_OK << 16);
		cmd->scsi_done(cmd);
		return 0;
	}

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	if (!pHba) {
		return FAILED;
	}

	rmb();
	if ((pHba->state) & DPTI_STATE_RESET)
		return SCSI_MLQUEUE_HOST_BUSY;

	// TODO if the cmd->device is offline then I may need to issue a bus rescan
	// followed by a get_lct to see if the device is there anymore
	if ((pDev = (struct adpt_device*) (cmd->device->hostdata)) == NULL) {
		/*
		 * First command request for this device. Set up a pointer
		 * to the device structure. This should be a TEST_UNIT_READY
		 * command from scan_scsis_single.
		 */
		if ((pDev = adpt_find_device(pHba, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun)) == NULL) {
			// TODO: if any luns are at this bus, scsi id then fake a TEST_UNIT_READY and INQUIRY response
			// with type 7F (for all luns less than the max for this bus,id) so the lun scan will continue.
			cmd->result = (DID_NO_CONNECT << 16);
			cmd->scsi_done(cmd);
			return 0;
		}
		cmd->device->hostdata = pDev;
	}
	pDev->pScsi_dev = cmd->device;

	/*
	 * If we are being called from when the device is being reset,
	 * delay processing of the command until later.
	 */
	if (pDev->state & DPTI_DEV_RESET) {
		return FAILED;
	}
	return adpt_scsi_to_i2o(pHba, cmd, pDev);
}

static DEF_SCSI_QCMD(adpt_queue)

static int adpt_bios_param(struct scsi_device *sdev, struct block_device *dev,
		sector_t capacity, int geom[])
{
	int heads = -1;
	int sectors = -1;
	int cylinders = -1;

	// *** First let's set the default geometry ****

	// If the capacity is less than 0x2000
	if (capacity < 0x2000) {	// floppy
		heads = 18;
		sectors = 2;
	}
	// else if between 0x2000 and 0x20000
	else if (capacity < 0x20000) {
		heads = 64;
		sectors = 32;
	}
	// else if between 0x20000 and 0x40000
	else if (capacity < 0x40000) {
		heads = 65;
		sectors = 63;
	}
	// else if between 0x40000 and 0x80000
	else if (capacity < 0x80000) {
		heads = 128;
		sectors = 63;
	}
	// else if greater than 0x80000
	else {
		heads = 255;
		sectors = 63;
	}
	/* sector_div() divides capacity in place and returns the remainder,
	   so the cylinder count is the quotient left behind in capacity */
	sector_div(capacity, heads * sectors);
	cylinders = (int)capacity;

	// Special case if CDROM
	if (sdev->type == 5) {	// CDROM
		heads = 252;
		sectors = 63;
		cylinders = 1111;
	}

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;

	PDEBUG("adpt_bios_param: exit\n");
	return 0;
}
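
/*
 * Worked example for the geometry above: a 4 GiB disk reports a capacity of
 * 0x800000 512-byte sectors, which is >= 0x80000, so heads = 255 and
 * sectors = 63; the cylinder count is then 0x800000 / (255 * 63) ~= 522.
 */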

static const char *adpt_info(struct Scsi_Host *host)
{
	adpt_hba* pHba;

	pHba = (adpt_hba *) host->hostdata[0];
	return (char *) (pHba->detail);
}

static int adpt_show_info(struct seq_file *m, struct Scsi_Host *host)
{
	struct adpt_device* d;
	int id;
	int chan;
	adpt_hba* pHba;
	int unit;

	// Find the HBA (host bus adapter) we are looking for
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->host == host) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return 0;
	}
	host = pHba->host;

	seq_printf(m, "Adaptec I2O RAID Driver Version: %s\n\n", DPT_I2O_VERSION);
	seq_printf(m, "%s\n", pHba->detail);
	seq_printf(m, "SCSI Host=scsi%d Control Node=/dev/%s irq=%d\n",
			pHba->host->host_no, pHba->name, host->irq);
	seq_printf(m, "\tpost fifo size = %d\n\treply fifo size = %d\n\tsg table size = %d\n\n",
			host->can_queue, (int) pHba->reply_fifo_size, host->sg_tablesize);

	seq_puts(m, "Devices:\n");
	for (chan = 0; chan < MAX_CHANNEL; chan++) {
		for (id = 0; id < MAX_ID; id++) {
			d = pHba->channel[chan].device[id];
			while (d) {
				seq_printf(m, "\t%-24.24s", d->pScsi_dev->vendor);
				seq_printf(m, " Rev: %-8.8s\n", d->pScsi_dev->rev);

				unit = d->pI2o_dev->lct_data.tid;
				seq_printf(m, "\tTID=%d, (Channel=%d, Target=%d, Lun=%llu) (%s)\n\n",
					unit, (int)d->scsi_channel, (int)d->scsi_id, d->scsi_lun,
					scsi_device_online(d->pScsi_dev) ? "online" : "offline");
				d = d->next_lun;
			}
		}
	}
	return 0;
}

/*
 * Turn a pointer to ioctl reply data into a u32 'context'
 */
static u32 adpt_ioctl_to_context(adpt_hba *pHba, void *reply)
{
#if BITS_PER_LONG == 32
	return (u32)(unsigned long)reply;
#else
	ulong flags = 0;
	u32 nr, i;

	spin_lock_irqsave(pHba->host->host_lock, flags);
	nr = ARRAY_SIZE(pHba->ioctl_reply_context);
	for (i = 0; i < nr; i++) {
		if (pHba->ioctl_reply_context[i] == NULL) {
			pHba->ioctl_reply_context[i] = reply;
			break;
		}
	}
	spin_unlock_irqrestore(pHba->host->host_lock, flags);
	if (i >= nr) {
		printk(KERN_WARNING"%s: Too many outstanding "
				"ioctl commands\n", pHba->name);
		return (u32)-1;
	}

	return i;
#endif
}

/*
 * Go from a u32 'context' to a pointer to ioctl reply data.
 */
static void *adpt_ioctl_from_context(adpt_hba *pHba, u32 context)
{
#if BITS_PER_LONG == 32
	return (void *)(unsigned long)context;
#else
	void *p = pHba->ioctl_reply_context[context];
	pHba->ioctl_reply_context[context] = NULL;

	return p;
#endif
}
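
/*
 * Why the indirection above: the I2O transaction context is a 32-bit field,
 * so on 64-bit kernels a reply pointer cannot travel through it directly.
 * Instead the pointer is parked in pHba->ioctl_reply_context[] and only the
 * array index is handed to the controller; 32-bit kernels simply round-trip
 * the pointer itself.
 */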

/*===========================================================================
 * Error Handling routines
 *===========================================================================
 */

static int adpt_abort(struct scsi_cmnd *cmd)
{
	adpt_hba* pHba = NULL;	/* host bus adapter structure */
	struct adpt_device* dptdevice;	/* dpt per device information */
	u32 msg[5];
	int rcode;

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to Abort\n", pHba->name);
	if ((dptdevice = (void*) (cmd->device->hostdata)) == NULL) {
		printk(KERN_ERR "%s: Unable to abort: No device in cmnd\n", pHba->name);
		return FAILED;
	}

	memset(msg, 0, sizeof(msg));
	msg[0] = FIVE_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SCSI_ABORT<<24|HOST_TID<<12|dptdevice->tid;
	msg[2] = 0;
	msg[3] = 0;
	/* Add 1 to avoid firmware treating it as an invalid command */
	msg[4] = cmd->request->tag + 1;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if (rcode == -EOPNOTSUPP) {
			printk(KERN_INFO"%s: Abort cmd not supported\n", pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Abort failed.\n", pHba->name);
		return FAILED;
	}
	printk(KERN_INFO"%s: Abort complete.\n", pHba->name);
	return SUCCESS;
}


#define I2O_DEVICE_RESET 0x27
// This is the same for BLK and SCSI devices
// NOTE this is wrong in the i2o.h definitions
// This is not currently supported by our adapter but we issue it anyway
static int adpt_device_reset(struct scsi_cmnd *cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;
	int old_state;
	struct adpt_device* d = cmd->device->hostdata;

	pHba = (void*) cmd->device->host->hostdata[0];
	printk(KERN_INFO"%s: Trying to reset device\n", pHba->name);
	if (!d) {
		printk(KERN_INFO"%s: Reset Device: Device Not found\n", pHba->name);
		return FAILED;
	}
	memset(msg, 0, sizeof(msg));
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_DEVICE_RESET<<24|HOST_TID<<12|d->tid);
	msg[2] = 0;
	msg[3] = 0;

	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	old_state = d->state;
	d->state |= DPTI_DEV_RESET;
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	d->state = old_state;
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		if (rcode == -EOPNOTSUPP) {
			printk(KERN_INFO"%s: Device reset not supported\n", pHba->name);
			return FAILED;
		}
		printk(KERN_INFO"%s: Device reset failed\n", pHba->name);
		return FAILED;
	} else {
		printk(KERN_INFO"%s: Device reset successful\n", pHba->name);
		return SUCCESS;
	}
}


#define I2O_HBA_BUS_RESET 0x87
// This version of bus reset is called by the eh_error_handler
static int adpt_bus_reset(struct scsi_cmnd *cmd)
{
	adpt_hba* pHba;
	u32 msg[4];
	u32 rcode;

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	memset(msg, 0, sizeof(msg));
	printk(KERN_WARNING"%s: Bus reset: SCSI Bus %d: tid: %d\n", pHba->name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = (I2O_HBA_BUS_RESET<<24|HOST_TID<<12|pHba->channel[cmd->device->channel].tid);
	msg[2] = 0;
	msg[3] = 0;
	if (pHba->host)
		spin_lock_irq(pHba->host->host_lock);
	rcode = adpt_i2o_post_wait(pHba, msg, sizeof(msg), FOREVER);
	if (pHba->host)
		spin_unlock_irq(pHba->host->host_lock);
	if (rcode != 0) {
		printk(KERN_WARNING"%s: Bus reset failed.\n", pHba->name);
		return FAILED;
	} else {
		printk(KERN_WARNING"%s: Bus reset success.\n", pHba->name);
		return SUCCESS;
	}
}

// This version of reset is called by the eh_error_handler
static int __adpt_reset(struct scsi_cmnd *cmd)
{
	adpt_hba* pHba;
	int rcode;
	char name[32];

	pHba = (adpt_hba*)cmd->device->host->hostdata[0];
	strncpy(name, pHba->name, sizeof(name));
	printk(KERN_WARNING"%s: Hba Reset: scsi id %d: tid: %d\n", name, cmd->device->channel, pHba->channel[cmd->device->channel].tid);
	rcode = adpt_hba_reset(pHba);
	if (rcode == 0) {
		printk(KERN_WARNING"%s: HBA reset complete\n", name);
		return SUCCESS;
	} else {
		printk(KERN_WARNING"%s: HBA reset failed (%x)\n", name, rcode);
		return FAILED;
	}
}

static int adpt_reset(struct scsi_cmnd *cmd)
{
	int rc;

	spin_lock_irq(cmd->device->host->host_lock);
	rc = __adpt_reset(cmd);
	spin_unlock_irq(cmd->device->host->host_lock);

	return rc;
}

// This version of reset is called by the ioctls and indirectly from the eh_error_handler via adpt_reset
static int adpt_hba_reset(adpt_hba* pHba)
{
	int rcode;

	pHba->state |= DPTI_STATE_RESET;

	// Activate does get status, init outbound, and get hrt
	if ((rcode = adpt_i2o_activate_hba(pHba)) < 0) {
		printk(KERN_ERR "%s: Could not activate\n", pHba->name);
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_build_sys_table()) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in HOLD state\n", pHba->name);

	if ((rcode = adpt_i2o_online_hba(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	PDEBUG("%s: in OPERATIONAL state\n", pHba->name);

	if ((rcode = adpt_i2o_lct_get(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}

	if ((rcode = adpt_i2o_reparse_lct(pHba)) < 0) {
		adpt_i2o_delete_hba(pHba);
		return rcode;
	}
	pHba->state &= ~DPTI_STATE_RESET;

	scsi_host_complete_all_commands(pHba->host, DID_RESET);
	return 0;	/* return success */
}

/*===========================================================================
 *
 *===========================================================================
 */


static void adpt_i2o_sys_shutdown(void)
{
	adpt_hba *pHba, *pNext;
	struct adpt_i2o_post_wait_data *p1, *old;

	printk(KERN_INFO "Shutting down Adaptec I2O controllers.\n");
	printk(KERN_INFO "   This could take a few minutes if there are many devices attached\n");
	/* Delete all IOPs from the controller chain */
	/* They should have already been released by the
	 * scsi-core
	 */
	for (pHba = hba_chain; pHba; pHba = pNext) {
		pNext = pHba->next;
		adpt_i2o_delete_hba(pHba);
	}

	/* Remove any timed-out entries from the wait queue. */
//	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	/* Nothing should be outstanding at this point so just
	 * free them
	 */
	for (p1 = adpt_post_wait_queue; p1;) {
		old = p1;
		p1 = p1->next;
		kfree(old);
	}
//	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);
	adpt_post_wait_queue = NULL;

	printk(KERN_INFO "Adaptec I2O controllers down.\n");
}

static int adpt_install_hba(struct scsi_host_template* sht, struct pci_dev* pDev)
{
	adpt_hba* pHba = NULL;
	adpt_hba* p = NULL;
	ulong base_addr0_phys = 0;
	ulong base_addr1_phys = 0;
	u32 hba_map0_area_size = 0;
	u32 hba_map1_area_size = 0;
	void __iomem *base_addr_virt = NULL;
	void __iomem *msg_addr_virt = NULL;
	int dma64 = 0;

	int raptorFlag = FALSE;

	if (pci_enable_device(pDev)) {
		return -EINVAL;
	}

	if (pci_request_regions(pDev, "dpt_i2o")) {
		PERROR("dpti: adpt_config_hba: pci request region failed\n");
		return -EINVAL;
	}

	pci_set_master(pDev);

	/*
	 * See if we should enable dma64 mode.
	 */
	if (sizeof(dma_addr_t) > 4 &&
	    dma_get_required_mask(&pDev->dev) > DMA_BIT_MASK(32) &&
	    dma_set_mask(&pDev->dev, DMA_BIT_MASK(64)) == 0)
		dma64 = 1;

	if (!dma64 && dma_set_mask(&pDev->dev, DMA_BIT_MASK(32)) != 0)
		return -EINVAL;

	/* adapter only supports message blocks below 4GB */
	dma_set_coherent_mask(&pDev->dev, DMA_BIT_MASK(32));

	base_addr0_phys = pci_resource_start(pDev, 0);
	hba_map0_area_size = pci_resource_len(pDev, 0);

	// Check if standard PCI card or single BAR Raptor
	if (pDev->device == PCI_DPT_DEVICE_ID) {
		if (pDev->subsystem_device >= 0xc032 && pDev->subsystem_device <= 0xc03b) {
			// Raptor card with this device id needs 4M
			hba_map0_area_size = 0x400000;
		} else {	// Not Raptor - it is a PCI card
			if (hba_map0_area_size > 0x100000) {
				hba_map0_area_size = 0x100000;
			}
		}
	} else {	// Raptor split BAR config
		// Use BAR1 in this configuration
		base_addr1_phys = pci_resource_start(pDev, 1);
		hba_map1_area_size = pci_resource_len(pDev, 1);
		raptorFlag = TRUE;
	}

#if BITS_PER_LONG == 64
	/*
	 * The original Adaptec 64 bit driver has this comment here:
	 *   "x86_64 machines need more optimal mappings"
	 *
	 * I assume some HBAs report ridiculously large mappings
	 * and we need to limit them on platforms with IOMMUs.
	 */
	if (raptorFlag == TRUE) {
		if (hba_map0_area_size > 128)
			hba_map0_area_size = 128;
		if (hba_map1_area_size > 524288)
			hba_map1_area_size = 524288;
	} else {
		if (hba_map0_area_size > 524288)
			hba_map0_area_size = 524288;
	}
#endif

	base_addr_virt = ioremap(base_addr0_phys, hba_map0_area_size);
	if (!base_addr_virt) {
		pci_release_regions(pDev);
		PERROR("dpti: adpt_config_hba: io remap failed\n");
		return -EINVAL;
	}

	if (raptorFlag == TRUE) {
		msg_addr_virt = ioremap(base_addr1_phys, hba_map1_area_size);
		if (!msg_addr_virt) {
			PERROR("dpti: adpt_config_hba: io remap failed on BAR1\n");
			iounmap(base_addr_virt);
			pci_release_regions(pDev);
			return -EINVAL;
		}
	} else {
		msg_addr_virt = base_addr_virt;
	}

	// Allocate and zero the data structure
	pHba = kzalloc(sizeof(adpt_hba), GFP_KERNEL);
	if (!pHba) {
		if (msg_addr_virt != base_addr_virt)
			iounmap(msg_addr_virt);
		iounmap(base_addr_virt);
		pci_release_regions(pDev);
		return -ENOMEM;
	}

	mutex_lock(&adpt_configuration_lock);

	if (hba_chain != NULL) {
		for (p = hba_chain; p->next; p = p->next)
			;
		p->next = pHba;
	} else {
		hba_chain = pHba;
	}
	pHba->next = NULL;
	pHba->unit = hba_count;
	sprintf(pHba->name, "dpti%d", hba_count);
	hba_count++;

	mutex_unlock(&adpt_configuration_lock);

	pHba->pDev = pDev;
	pHba->base_addr_phys = base_addr0_phys;

	// Set up the Virtual Base Address of the I2O Device
	pHba->base_addr_virt = base_addr_virt;
	pHba->msg_addr_virt = msg_addr_virt;
	pHba->irq_mask = base_addr_virt+0x30;
	pHba->post_port = base_addr_virt+0x40;
	pHba->reply_port = base_addr_virt+0x44;

	pHba->hrt = NULL;
	pHba->lct = NULL;
	pHba->lct_size = 0;
	pHba->status_block = NULL;
	pHba->post_count = 0;
	pHba->state = DPTI_STATE_RESET;
	pHba->pDev = pDev;
	pHba->devices = NULL;
	pHba->dma64 = dma64;

	// Initializing the spinlocks
	spin_lock_init(&pHba->state_lock);
	spin_lock_init(&adpt_post_wait_lock);

	if (raptorFlag == 0) {
		printk(KERN_INFO "Adaptec I2O RAID controller"
				 " %d at %p size=%x irq=%d%s\n",
			hba_count-1, base_addr_virt,
			hba_map0_area_size, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
	} else {
		printk(KERN_INFO"Adaptec I2O RAID controller %d irq=%d%s\n",
			hba_count-1, pDev->irq,
			dma64 ? " (64-bit DMA)" : "");
		printk(KERN_INFO"     BAR0 %p - size= %x\n", base_addr_virt, hba_map0_area_size);
		printk(KERN_INFO"     BAR1 %p - size= %x\n", msg_addr_virt, hba_map1_area_size);
	}

	if (request_irq(pDev->irq, adpt_isr, IRQF_SHARED, pHba->name, pHba)) {
		printk(KERN_ERR"%s: Couldn't register IRQ %d\n", pHba->name, pDev->irq);
		adpt_i2o_delete_hba(pHba);
		return -EINVAL;
	}

	return 0;
}


static void adpt_i2o_delete_hba(adpt_hba* pHba)
{
	adpt_hba* p1;
	adpt_hba* p2;
	struct i2o_device* d;
	struct i2o_device* next;
	int i;
	int j;
	struct adpt_device* pDev;
	struct adpt_device* pNext;


	mutex_lock(&adpt_configuration_lock);
	if (pHba->host) {
		free_irq(pHba->host->irq, pHba);
	}
	p2 = NULL;
	for (p1 = hba_chain; p1; p2 = p1, p1 = p1->next) {
		if (p1 == pHba) {
			if (p2) {
				p2->next = p1->next;
			} else {
				hba_chain = p1->next;
			}
			break;
		}
	}

	hba_count--;
	mutex_unlock(&adpt_configuration_lock);

	iounmap(pHba->base_addr_virt);
	pci_release_regions(pHba->pDev);
	if (pHba->msg_addr_virt != pHba->base_addr_virt) {
		iounmap(pHba->msg_addr_virt);
	}
	if (pHba->FwDebugBuffer_P)
		iounmap(pHba->FwDebugBuffer_P);
	if (pHba->hrt) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->hrt->num_entries * pHba->hrt->entry_len << 2,
			pHba->hrt, pHba->hrt_pa);
	}
	if (pHba->lct) {
		dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
			pHba->lct, pHba->lct_pa);
	}
	if (pHba->status_block) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(i2o_status_block),
			pHba->status_block, pHba->status_block_pa);
	}
	if (pHba->reply_pool) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

	for (d = pHba->devices; d; d = next) {
		next = d->next;
		kfree(d);
	}
	for (i = 0; i < pHba->top_scsi_channel; i++) {
		for (j = 0; j < MAX_ID; j++) {
			if (pHba->channel[i].device[j] != NULL) {
				for (pDev = pHba->channel[i].device[j]; pDev; pDev = pNext) {
					pNext = pDev->next_lun;
					kfree(pDev);
				}
			}
		}
	}
	pci_dev_put(pHba->pDev);
	if (adpt_sysfs_class)
		device_destroy(adpt_sysfs_class,
			       MKDEV(DPTI_I2O_MAJOR, pHba->unit));
	kfree(pHba);

	if (hba_count <= 0) {
		unregister_chrdev(DPTI_I2O_MAJOR, DPT_DRIVER);
		if (adpt_sysfs_class) {
			class_destroy(adpt_sysfs_class);
			adpt_sysfs_class = NULL;
		}
	}
}

static struct adpt_device* adpt_find_device(adpt_hba* pHba, u32 chan, u32 id, u64 lun)
{
	struct adpt_device* d;

	if (chan >= MAX_CHANNEL)
		return NULL;

	d = pHba->channel[chan].device[id];
	if (!d || d->tid == 0) {
		return NULL;
	}

	/* If it is the only lun at that address then this should match */
	if (d->scsi_lun == lun) {
		return d;
	}

	/* else we need to look through all the luns */
	for (d = d->next_lun; d; d = d->next_lun) {
		if (d->scsi_lun == lun) {
			return d;
		}
	}
	return NULL;
}


static int adpt_i2o_post_wait(adpt_hba* pHba, u32* msg, int len, int timeout)
{
	// I used my own version of the WAIT_QUEUE_HEAD
	// to handle some version differences
	// When embedded in the kernel this could go back to the vanilla one
	ADPT_DECLARE_WAIT_QUEUE_HEAD(adpt_wq_i2o_post);
	int status = 0;
	ulong flags = 0;
	struct adpt_i2o_post_wait_data *p1, *p2;
	struct adpt_i2o_post_wait_data *wait_data =
		kmalloc(sizeof(struct adpt_i2o_post_wait_data), GFP_ATOMIC);
	DECLARE_WAITQUEUE(wait, current);

	if (!wait_data)
		return -ENOMEM;

	/*
	 * The spin locking is needed to keep anyone from playing
	 * with the queue pointers and id while we do the same
	 */
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	// TODO we need a MORE unique way of getting ids
	// to support async LCT get
	wait_data->next = adpt_post_wait_queue;
	adpt_post_wait_queue = wait_data;
	adpt_post_wait_id++;
	adpt_post_wait_id &= 0x7fff;
	wait_data->id = adpt_post_wait_id;
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	wait_data->wq = &adpt_wq_i2o_post;
	wait_data->status = -ETIMEDOUT;

	add_wait_queue(&adpt_wq_i2o_post, &wait);

	msg[2] |= 0x80000000 | ((u32)wait_data->id);
	timeout *= HZ;
	if ((status = adpt_i2o_post_this(pHba, msg, len)) == 0) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (pHba->host)
			spin_unlock_irq(pHba->host->host_lock);
		if (!timeout)
			schedule();
		else {
			timeout = schedule_timeout(timeout);
			if (timeout == 0) {
				// I/O issued, but cannot get result in
				// specified time. Freeing resources is
				// dangerous.
				status = -ETIME;
			}
		}
		if (pHba->host)
			spin_lock_irq(pHba->host->host_lock);
	}
	remove_wait_queue(&adpt_wq_i2o_post, &wait);

	if (status == -ETIMEDOUT) {
		printk(KERN_INFO"dpti%d: POST WAIT TIMEOUT\n", pHba->unit);
		// We will have to free the wait_data memory during shutdown
		return status;
	}

	/* Remove the entry from the queue. */
	p2 = NULL;
	spin_lock_irqsave(&adpt_post_wait_lock, flags);
	for (p1 = adpt_post_wait_queue; p1; p2 = p1, p1 = p1->next) {
		if (p1 == wait_data) {
			if (p1->status == I2O_DETAIL_STATUS_UNSUPPORTED_FUNCTION) {
				status = -EOPNOTSUPP;
			}
			if (p2) {
				p2->next = p1->next;
			} else {
				adpt_post_wait_queue = p1->next;
			}
			break;
		}
	}
	spin_unlock_irqrestore(&adpt_post_wait_lock, flags);

	kfree(wait_data);

	return status;
}


static s32 adpt_i2o_post_this(adpt_hba* pHba, u32* data, int len)
{
	u32 m = EMPTY_QUEUE;
	u32 __iomem *msg;
	ulong timeout = jiffies + 30*HZ;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING"dpti%d: Timeout waiting for message frame!\n", pHba->unit);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	msg = pHba->msg_addr_virt + m;
	memcpy_toio(msg, data, len);
	wmb();

	// post message
	writel(m, pHba->post_port);
	wmb();

	return 0;
}
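
/*
 * Message frame protocol in brief: reading the post port either returns the
 * offset of a free message frame in the IOP's memory window or EMPTY_QUEUE;
 * the frame is filled through msg_addr_virt with MMIO copies, and writing
 * the same offset back to the post port hands the frame to the IOP. The
 * reset path uses adpt_send_nop() with this same sequence to return a frame
 * it claimed but did not use.
 */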


static void adpt_i2o_post_wait_complete(u32 context, int status)
{
	struct adpt_i2o_post_wait_data *p1 = NULL;
	/*
	 * We need to search through the adpt_post_wait
	 * queue to see if the given message is still
	 * outstanding. If not, it means that the IOP
	 * took longer to respond to the message than we
	 * had allowed and the timer has already expired.
	 * Not much we can do about that except log
	 * it for debug purposes, increase timeout, and recompile.
	 *
	 * Lock needed to keep anyone from moving queue pointers
	 * around while we're looking through them.
	 */

	context &= 0x7fff;

	spin_lock(&adpt_post_wait_lock);
	for (p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		if (p1->id == context) {
			p1->status = status;
			spin_unlock(&adpt_post_wait_lock);
			wake_up_interruptible(p1->wq);
			return;
		}
	}
	spin_unlock(&adpt_post_wait_lock);
	// If this happens we lose commands that probably really completed
	printk(KERN_DEBUG"dpti: Could Not find task %d in wait queue\n", context);
	printk(KERN_DEBUG"      Tasks in wait queue:\n");
	for (p1 = adpt_post_wait_queue; p1; p1 = p1->next) {
		printk(KERN_DEBUG"           %d\n", p1->id);
	}
	return;
}

static s32 adpt_i2o_reset_hba(adpt_hba* pHba)
{
	u32 msg[8];
	u8* status;
	dma_addr_t addr;
	u32 m = EMPTY_QUEUE;
	ulong timeout = jiffies + (TMOUT_IOPRESET*HZ);

	if (pHba->initialized == FALSE) {	// First time reset should be quick
		timeout = jiffies + (25*HZ);
	} else {
		adpt_i2o_quiesce_hba(pHba);
	}

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING"Timeout waiting for message!\n");
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (m == EMPTY_QUEUE);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (status == NULL) {
		adpt_send_nop(pHba, m);
		printk(KERN_ERR"IOP reset failed - no free memory.\n");
		return -ENOMEM;
	}

	msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_ADAPTER_RESET<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0;
	msg[6] = dma_low(addr);
	msg[7] = dma_high(addr);

	memcpy_toio(pHba->msg_addr_virt+m, msg, sizeof(msg));
	wmb();
	writel(m, pHba->post_port);
	wmb();

	while (*status == 0) {
		if (time_after(jiffies, timeout)) {
			printk(KERN_WARNING"%s: IOP Reset Timeout\n", pHba->name);
			/* We lose 4 bytes of "status" here, but we cannot
			   free these because the controller may awake and
			   corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	if (*status == 0x01 /*I2O_EXEC_IOP_RESET_IN_PROGRESS*/) {
		PDEBUG("%s: Reset in progress...\n", pHba->name);
		// Here we wait for a message frame to become available,
		// indicating that the reset has finished
		do {
			rmb();
			m = readl(pHba->post_port);
			if (m != EMPTY_QUEUE) {
				break;
			}
			if (time_after(jiffies, timeout)) {
				printk(KERN_ERR "%s:Timeout waiting for IOP Reset.\n", pHba->name);
				/* We lose 4 bytes of "status" here, but we
				   cannot free these because the controller may
				   awake and corrupt those bytes at any time */
				/* dma_free_coherent(&pHba->pDev->dev, 4, buf, addr); */
				return -ETIMEDOUT;
			}
			schedule_timeout_uninterruptible(1);
		} while (m == EMPTY_QUEUE);
		// Flush the offset
		adpt_send_nop(pHba, m);
	}
	adpt_i2o_status_get(pHba);
	if (*status == 0x02 ||
	    pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
		printk(KERN_WARNING"%s: Reset reject, trying to clear\n",
				pHba->name);
	} else {
		PDEBUG("%s: Reset completed.\n", pHba->name);
	}

	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
#ifdef UARTDELAY
	// This delay is to allow someone attached to the card through the debug UART to
	// set up the dump levels that they want before the rest of the initialization sequence
	adpt_delay(20000);
#endif
	return 0;
}


static int adpt_i2o_parse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10];	// larger than 7, or 8 ...
	struct adpt_device* pDev;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n", pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;

	for (i = 0; i < max; i++) {
		if (lct->lct_entry[i].user_tid != 0xfff) {
			/*
			 * If we have hidden devices, we need to inform the upper layers about
			 * the possible maximum id reference to handle device access when
			 * an array is disassembled. This code has no other purpose but to
			 * allow us future access to devices that are currently hidden
			 * behind arrays, hotspares or have not been configured (JBOD mode).
			 */
			if (lct->lct_entry[i].class_id != I2O_CLASS_RANDOM_BLOCK_STORAGE &&
			    lct->lct_entry[i].class_id != I2O_CLASS_SCSI_PERIPHERAL &&
			    lct->lct_entry[i].class_id != I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {
				continue;
			}
			tid = lct->lct_entry[i].tid;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) < 0) {
				continue;
			}
			bus_no = buf[0]>>16;
			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			if (bus_no >= MAX_CHANNEL) {	// Something is wrong, skip it
				printk(KERN_WARNING"%s: Channel number %d out of range\n", pHba->name, bus_no);
				continue;
			}
			if (scsi_id >= MAX_ID) {
				/* report the offending id, not the channel */
				printk(KERN_WARNING"%s: SCSI ID %d out of range\n", pHba->name, scsi_id);
				continue;
			}
			if (bus_no > pHba->top_scsi_channel) {
				pHba->top_scsi_channel = bus_no;
			}
			if (scsi_id > pHba->top_scsi_id) {
				pHba->top_scsi_id = scsi_id;
			}
			if (scsi_lun > pHba->top_scsi_lun) {
				pHba->top_scsi_lun = scsi_lun;
			}
			continue;
		}
		d = kmalloc(sizeof(struct i2o_device), GFP_KERNEL);
		if (d == NULL) {
			printk(KERN_CRIT"%s: Out of memory for I2O device data.\n", pHba->name);
			return -ENOMEM;
		}

		d->controller = pHba;
		d->next = NULL;

		memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

		d->flags = 0;
		tid = d->lct_data.tid;
		adpt_i2o_report_hba_unit(pHba, d);
		adpt_i2o_install_device(pHba, d);
	}
	bus_no = 0;
	for (d = pHba->devices; d; d = d->next) {
		if (d->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT ||
		    d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PORT) {
			tid = d->lct_data.tid;
			// TODO get the bus_no from hrt - but for now they are in order
			//bus_no =
			if (bus_no > pHba->top_scsi_channel) {
				pHba->top_scsi_channel = bus_no;
			}
			pHba->channel[bus_no].type = d->lct_data.class_id;
			pHba->channel[bus_no].tid = tid;
			if (adpt_i2o_query_scalar(pHba, tid, 0x0200, -1, buf, 28) >= 0) {
				pHba->channel[bus_no].scsi_id = buf[1];
				PDEBUG("Bus %d - SCSI ID %d.\n", bus_no, buf[1]);
			}
			// TODO remove - this is just until we get from hrt
			bus_no++;
			if (bus_no >= MAX_CHANNEL) {	// Something is wrong, skip it
				printk(KERN_WARNING"%s: Channel number %d out of range - LCT\n", pHba->name, bus_no);
				break;
			}
		}
	}

	// Setup adpt_device table
	for (d = pHba->devices; d; d = d->next) {
		if (d->lct_data.class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    d->lct_data.class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    d->lct_data.class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL) {

			tid = d->lct_data.tid;
			scsi_id = -1;
			// I2O_DPT_DEVICE_INFO_GROUP_NO;
			if (adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32) >= 0) {
				bus_no = buf[0]>>16;
				scsi_id = buf[1];
				scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
				if (bus_no >= MAX_CHANNEL) {	// Something is wrong, skip it
					continue;
				}
				if (scsi_id >= MAX_ID) {
					continue;
				}
				if (pHba->channel[bus_no].device[scsi_id] == NULL) {
					pDev = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if (pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					for (pDev = pHba->channel[bus_no].device[scsi_id];
					     pDev->next_lun; pDev = pDev->next_lun) {
					}
					pDev->next_lun = kzalloc(sizeof(struct adpt_device), GFP_KERNEL);
					if (pDev->next_lun == NULL) {
						return -ENOMEM;
					}
					pDev = pDev->next_lun;
				}
				pDev->tid = tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				if (scsi_id > pHba->top_scsi_id) {
					pHba->top_scsi_id = scsi_id;
				}
				if (scsi_lun > pHba->top_scsi_lun) {
					pHba->top_scsi_lun = scsi_lun;
				}
			}
			if (scsi_id == -1) {
				printk(KERN_WARNING"Could not find SCSI ID for %s\n",
					d->lct_data.identity_tag);
			}
		}
	}
	return 0;
}


/*
 * Each I2O controller has a chain of devices on it - these match
 * the useful parts of the LCT of the board.
 */

static int adpt_i2o_install_device(adpt_hba* pHba, struct i2o_device *d)
{
	mutex_lock(&adpt_configuration_lock);
	d->controller = pHba;
	d->owner = NULL;
	d->next = pHba->devices;
	d->prev = NULL;
	if (pHba->devices != NULL) {
		pHba->devices->prev = d;
	}
	pHba->devices = d;
	*d->dev_name = 0;

	mutex_unlock(&adpt_configuration_lock);
	return 0;
}

static int adpt_open(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	mutex_lock(&adpt_mutex);
	//TODO check for root access
	//
	minor = iminor(inode);
	if (minor >= hba_count) {
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	if (pHba == NULL) {
		mutex_unlock(&adpt_configuration_lock);
		mutex_unlock(&adpt_mutex);
		return -ENXIO;
	}

//	if (pHba->in_use) {
//		mutex_unlock(&adpt_configuration_lock);
//		return -EBUSY;
//	}

	pHba->in_use = 1;
	mutex_unlock(&adpt_configuration_lock);
	mutex_unlock(&adpt_mutex);

	return 0;
}

static int adpt_close(struct inode *inode, struct file *file)
{
	int minor;
	adpt_hba* pHba;

	minor = iminor(inode);
	if (minor >= hba_count) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	pHba->in_use = 0;

	return 0;
}
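
/*
 * I2OUSRCMD buffer layout, as parsed below: the caller passes one buffer
 * holding an I2O message frame followed by a reply frame. Bits 16-31 of
 * msg[0] give the frame size in 32-bit words (so the reply frame starts at
 * user_msg[size]), and bits 4-7 give the offset of the scatter-gather list
 * within the frame; each SG element is redirected to a kernel bounce buffer
 * before the frame is posted, then copied back out afterwards.
 */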

static int adpt_i2o_passthru(adpt_hba* pHba, u32 __user *arg)
{
	u32 msg[MAX_MESSAGE_SIZE];
	u32* reply = NULL;
	u32 size = 0;
	u32 reply_size = 0;
	u32 __user *user_msg = arg;
	u32 __user *user_reply = NULL;
	void **sg_list = NULL;
	u32 sg_offset = 0;
	u32 sg_count = 0;
	int sg_index = 0;
	u32 i = 0;
	u32 rcode = 0;
	void *p = NULL;
	dma_addr_t addr;
	ulong flags = 0;

	memset(&msg, 0, MAX_MESSAGE_SIZE*4);
	// get user msg size in u32s
	if (get_user(size, &user_msg[0])) {
		return -EFAULT;
	}
	size = size>>16;

	user_reply = &user_msg[size];
	if (size > MAX_MESSAGE_SIZE) {
		return -EFAULT;
	}
	size *= 4;	// Convert to bytes

	/* Copy in the user's I2O command */
	if (copy_from_user(msg, user_msg, size)) {
		return -EFAULT;
	}
	get_user(reply_size, &user_reply[0]);
	reply_size = reply_size>>16;
	if (reply_size > REPLY_FRAME_SIZE) {
		reply_size = REPLY_FRAME_SIZE;
	}
	reply_size *= 4;
	reply = kzalloc(REPLY_FRAME_SIZE*4, GFP_KERNEL);
	if (reply == NULL) {
		printk(KERN_WARNING"%s: Could not allocate reply buffer\n", pHba->name);
		return -ENOMEM;
	}
	sg_offset = (msg[0]>>4)&0xf;
	msg[2] = 0x40000000;	// IOCTL context
	msg[3] = adpt_ioctl_to_context(pHba, reply);
	if (msg[3] == (u32)-1) {
		rcode = -EBUSY;
		goto free;
	}

	sg_list = kcalloc(pHba->sg_tablesize, sizeof(*sg_list), GFP_KERNEL);
	if (!sg_list) {
		rcode = -ENOMEM;
		goto free;
	}
	if (sg_offset) {
		// TODO add 64 bit API
		struct sg_simple_element *sg = (struct sg_simple_element*) (msg+sg_offset);
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);
		if (sg_count > pHba->sg_tablesize) {
			printk(KERN_DEBUG"%s:IOCTL SG List too large (%u)\n", pHba->name, sg_count);
			rcode = -EINVAL;
			goto free;
		}

		for (i = 0; i < sg_count; i++) {
			int sg_size;

			if (!(sg[i].flag_count & 0x10000000 /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT*/)) {
				printk(KERN_DEBUG"%s:Bad SG element %d - not simple (%x)\n", pHba->name, i, sg[i].flag_count);
				rcode = -EINVAL;
				goto cleanup;
			}
			sg_size = sg[i].flag_count & 0xffffff;
			/* Allocate memory for the transfer */
			p = dma_alloc_coherent(&pHba->pDev->dev, sg_size, &addr, GFP_KERNEL);
			if (!p) {
				printk(KERN_DEBUG"%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					pHba->name, sg_size, i, sg_count);
				rcode = -ENOMEM;
				goto cleanup;
			}
			sg_list[sg_index++] = p;	// sglist indexed with input frame, not our internal frame.
			/* Copy in the user's SG buffer if necessary */
			if (sg[i].flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR*/) {
				// sg_simple_element API is 32 bit
				if (copy_from_user(p, (void __user *)(ulong)sg[i].addr_bus, sg_size)) {
					printk(KERN_DEBUG"%s: Could not copy SG buf %d FROM user\n", pHba->name, i);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
			/* sg_simple_element API is 32 bit, but addr < 4GB */
			sg[i].addr_bus = addr;
		}
	}

	do {
		/*
		 * Stop any new commands from entering the
		 * controller while processing the ioctl
		 */
		if (pHba->host) {
			scsi_block_requests(pHba->host);
			spin_lock_irqsave(pHba->host->host_lock, flags);
		}
		rcode = adpt_i2o_post_wait(pHba, msg, size, FOREVER);
		if (rcode != 0)
			printk("adpt_i2o_passthru: post wait failed %d %p\n",
					rcode, reply);
		if (pHba->host) {
			spin_unlock_irqrestore(pHba->host->host_lock, flags);
			scsi_unblock_requests(pHba->host);
		}
	} while (rcode == -ETIMEDOUT);

	if (rcode) {
		goto cleanup;
	}

	if (sg_offset) {
		/* Copy the Scatter Gather buffers back to user space */
		u32 j;
		// TODO add 64 bit API
		struct sg_simple_element* sg;
		int sg_size;

		// re-acquire the original message to handle the sg copy operation correctly
		memset(&msg, 0, MAX_MESSAGE_SIZE*4);
		// get user msg size in u32s
		if (get_user(size, &user_msg[0])) {
			rcode = -EFAULT;
			goto cleanup;
		}
		size = size>>16;
		size *= 4;
		if (size > MAX_MESSAGE_SIZE) {
			rcode = -EINVAL;
			goto cleanup;
		}
		/* Copy in the user's I2O command */
		if (copy_from_user(msg, user_msg, size)) {
			rcode = -EFAULT;
			goto cleanup;
		}
		sg_count = (size - sg_offset*4) / sizeof(struct sg_simple_element);

		// TODO add 64 bit API
		sg = (struct sg_simple_element*)(msg + sg_offset);
		for (j = 0; j < sg_count; j++) {
			/* Copy out the SG list to user's buffer if necessary */
			if (!(sg[j].flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR*/)) {
				sg_size = sg[j].flag_count & 0xffffff;
				// sg_simple_element API is 32 bit
				if (copy_to_user((void __user *)(ulong)sg[j].addr_bus, sg_list[j], sg_size)) {
					printk(KERN_WARNING"%s: Could not copy %p TO user %x\n", pHba->name, sg_list[j], sg[j].addr_bus);
					rcode = -EFAULT;
					goto cleanup;
				}
			}
		}
	}

	/* Copy back the reply to user space */
	if (reply_size) {
		// we wrote our own values for context - now restore the user supplied ones
		if (copy_from_user(reply+2, user_msg+2, sizeof(u32)*2)) {
			printk(KERN_WARNING"%s: Could not copy message context FROM user\n", pHba->name);
			rcode = -EFAULT;
		}
		if (copy_to_user(user_reply, reply, reply_size)) {
			printk(KERN_WARNING"%s: Could not copy reply TO user\n", pHba->name);
			rcode = -EFAULT;
		}
	}


cleanup:
	if (rcode != -ETIME && rcode != -EINTR) {
		struct sg_simple_element *sg =
				(struct sg_simple_element*) (msg + sg_offset);
		while (sg_index) {
			if (sg_list[--sg_index]) {
				dma_free_coherent(&pHba->pDev->dev,
					sg[sg_index].flag_count & 0xffffff,
					sg_list[sg_index],
					sg[sg_index].addr_bus);
			}
		}
	}

free:
	kfree(sg_list);
	kfree(reply);
	return rcode;
}

#if defined __ia64__
static void adpt_ia64_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_IA64;
}
#endif

#if defined __sparc__
static void adpt_sparc_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ULTRASPARC;
}
#endif

#if defined __alpha__
static void adpt_alpha_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	si->processorType = PROC_ALPHA;
}
#endif

#if defined __i386__

#include <uapi/asm/vm86.h>

static void adpt_i386_info(sysInfo_S* si)
{
	// This is all the info we need for now
	// We will add more info as our new
	// management utility requires it
	switch (boot_cpu_data.x86) {
	case CPU_386:
		si->processorType = PROC_386;
		break;
	case CPU_486:
		si->processorType = PROC_486;
		break;
	case CPU_586:
		si->processorType = PROC_PENTIUM;
		break;
	default:	// Just in case
		si->processorType = PROC_PENTIUM;
		break;
	}
}
#endif

/*
 * This routine returns information about the system. This does not affect
 * any logic and if the info is wrong - it doesn't matter.
 */

/* Get all the info we cannot get from kernel services */
static int adpt_system_info(void __user *buffer)
{
	sysInfo_S si;

	memset(&si, 0, sizeof(si));

	si.osType = OS_LINUX;
	si.osMajorVersion = 0;
	si.osMinorVersion = 0;
	si.osRevision = 0;
	si.busType = SI_PCI_BUS;
	si.processorFamily = DPTI_sig.dsProcessorFamily;

#if defined __i386__
	adpt_i386_info(&si);
#elif defined (__ia64__)
	adpt_ia64_info(&si);
#elif defined(__sparc__)
	adpt_sparc_info(&si);
#elif defined (__alpha__)
	adpt_alpha_info(&si);
#else
	si.processorType = 0xff;
#endif
	if (copy_to_user(buffer, &si, sizeof(si))) {
		printk(KERN_WARNING"dpti: Could not copy buffer TO user\n");
		return -EFAULT;
	}

	return 0;
}

static int adpt_ioctl(struct inode *inode, struct file *file, uint cmd, ulong arg)
{
	int minor;
	int error = 0;
	adpt_hba* pHba;
	ulong flags = 0;
	void __user *argp = (void __user *)arg;

	minor = iminor(inode);
	if (minor >= DPTI_MAX_HBA) {
		return -ENXIO;
	}
	mutex_lock(&adpt_configuration_lock);
	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		if (pHba->unit == minor) {
			break;	/* found adapter */
		}
	}
	mutex_unlock(&adpt_configuration_lock);
	if (pHba == NULL) {
		return -ENXIO;
	}

	while ((volatile u32) pHba->state & DPTI_STATE_RESET)
		schedule_timeout_uninterruptible(2);

	switch (cmd) {
	// TODO: handle 3 cases
	case DPT_SIGNATURE:
		if (copy_to_user(argp, &DPTI_sig, sizeof(DPTI_sig))) {
			return -EFAULT;
		}
		break;
	case I2OUSRCMD:
		return adpt_i2o_passthru(pHba, argp);

	case DPT_CTRLINFO: {
		drvrHBAinfo_S HbaInfo;

#define FLG_OSD_PCI_VALID 0x0001
#define FLG_OSD_DMA	  0x0002
#define FLG_OSD_I2O	  0x0004
		memset(&HbaInfo, 0, sizeof(HbaInfo));
		HbaInfo.drvrHBAnum = pHba->unit;
		HbaInfo.baseAddr = (ulong) pHba->base_addr_phys;
		HbaInfo.blinkState = adpt_read_blink_led(pHba);
		HbaInfo.pciBusNum = pHba->pDev->bus->number;
		HbaInfo.pciDeviceNum = PCI_SLOT(pHba->pDev->devfn);
		HbaInfo.Interrupt = pHba->pDev->irq;
		HbaInfo.hbaFlags = FLG_OSD_PCI_VALID | FLG_OSD_DMA | FLG_OSD_I2O;
		if (copy_to_user(argp, &HbaInfo, sizeof(HbaInfo))) {
			printk(KERN_WARNING"%s: Could not copy HbaInfo TO user\n", pHba->name);
			return -EFAULT;
		}
		break;
	}
	case DPT_SYSINFO:
		return adpt_system_info(argp);
	case DPT_BLINKLED: {
		u32 value;

		value = (u32)adpt_read_blink_led(pHba);
		if (copy_to_user(argp, &value, sizeof(value))) {
			return -EFAULT;
		}
		break;
	}
	case I2ORESETCMD: {
		struct Scsi_Host *shost = pHba->host;

		if (shost)
			spin_lock_irqsave(shost->host_lock, flags);
		adpt_hba_reset(pHba);
		if (shost)
			spin_unlock_irqrestore(shost->host_lock, flags);
		break;
	}
	case I2ORESCANCMD:
		adpt_rescan(pHba);
		break;
	default:
		return -EINVAL;
	}

	return error;
}
2034
static long adpt_unlocked_ioctl(struct file *file, uint cmd, ulong arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);
	ret = adpt_ioctl(inode, file, cmd, arg);
	mutex_unlock(&adpt_mutex);

	return ret;
}

#ifdef CONFIG_COMPAT
static long compat_adpt_ioctl(struct file *file,
				unsigned int cmd, unsigned long arg)
{
	struct inode *inode;
	long ret;

	inode = file_inode(file);

	mutex_lock(&adpt_mutex);

	switch(cmd) {
	case DPT_SIGNATURE:
	case I2OUSRCMD:
	case DPT_CTRLINFO:
	case DPT_SYSINFO:
	case DPT_BLINKLED:
	case I2ORESETCMD:
	case I2ORESCANCMD:
	case (DPT_TARGET_BUSY & 0xFFFF):
	case DPT_TARGET_BUSY:
		ret = adpt_ioctl(inode, file, cmd, arg);
		break;
	default:
		ret = -ENOIOCTLCMD;
	}

	mutex_unlock(&adpt_mutex);

	return ret;
}
#endif

static irqreturn_t adpt_isr(int irq, void *dev_id)
{
	struct scsi_cmnd* cmd;
	adpt_hba* pHba = dev_id;
	u32 m;
	void __iomem *reply;
	u32 status=0;
	u32 context;
	ulong flags = 0;
	int handled = 0;

	if (pHba == NULL){
		printk(KERN_WARNING"adpt_isr: NULL dev_id\n");
		return IRQ_NONE;
	}
	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);

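	/*
	 * The loop below follows the usual I2O MFA cycle: pop a message
	 * frame address (MFA) from the outbound FIFO (reply_port), process
	 * the frame it points at, then write the same MFA back to
	 * reply_port to hand the frame back to the IOP's free list.
	 */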
	while( readl(pHba->irq_mask) & I2O_INTERRUPT_PENDING_B) {
		m = readl(pHba->reply_port);
		if(m == EMPTY_QUEUE){
			// Try twice then give up
			rmb();
			m = readl(pHba->reply_port);
			if(m == EMPTY_QUEUE){
				// This really should not happen
				printk(KERN_ERR"dpti: Could not get reply frame\n");
				goto out;
			}
		}
		if (pHba->reply_pool_pa <= m &&
		    m < pHba->reply_pool_pa +
			(pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4)) {
			reply = (u8 *)pHba->reply_pool +
						(m - pHba->reply_pool_pa);
		} else {
			/* Ick, we should *never* be here */
			printk(KERN_ERR "dpti: reply frame not from pool\n");
			reply = (u8 *)bus_to_virt(m);
		}

		if (readl(reply) & MSG_FAIL) {
			u32 old_m = readl(reply+28);
			void __iomem *msg;
			u32 old_context;
			PDEBUG("%s: Failed message\n",pHba->name);
			if(old_m >= 0x100000){
				printk(KERN_ERR"%s: Bad preserved MFA (%x)- dropping frame\n",pHba->name,old_m);
				writel(m,pHba->reply_port);
				continue;
			}
			// Transaction context is 0 in failed reply frame
			msg = pHba->msg_addr_virt + old_m;
			old_context = readl(msg+12);
			writel(old_context, reply+12);
			adpt_send_nop(pHba, old_m);
		}
		context = readl(reply+8);
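		/*
		 * The initiator context word encodes how the reply is to be
		 * routed: bit 30 marks an ioctl pass-through reply (copied
		 * out below) and bit 31 marks a post-wait request; ioctl
		 * replies carry both bits, plain SCSI replies neither.
		 */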
		if(context & 0x40000000){ // IOCTL
			void *p = adpt_ioctl_from_context(pHba, readl(reply+12));
			if( p != NULL) {
				memcpy_fromio(p, reply, REPLY_FRAME_SIZE * 4);
			}
			// All IOCTLs will also be post wait
		}
		if(context & 0x80000000){ // Post wait message
			status = readl(reply+16);
			if(status >> 24){
				status &= 0xffff; /* Get detail status */
			} else {
				status = I2O_POST_WAIT_OK;
			}
			if(!(context & 0x40000000)) {
				/*
				 * The request tag is one less than the command tag
				 * as the firmware might treat a 0 tag as invalid
				 */
				cmd = scsi_host_find_tag(pHba->host,
							 readl(reply + 12) - 1);
				if(cmd != NULL) {
					printk(KERN_WARNING"%s: Apparent SCSI cmd in Post Wait Context - cmd=%p context=%x\n", pHba->name, cmd, context);
				}
			}
			adpt_i2o_post_wait_complete(context, status);
		} else { // SCSI message
			/*
			 * The request tag is one less than the command tag
			 * as the firmware might treat a 0 tag as invalid
			 */
			cmd = scsi_host_find_tag(pHba->host,
						 readl(reply + 12) - 1);
			if(cmd != NULL){
				scsi_dma_unmap(cmd);
				adpt_i2o_scsi_complete(reply, cmd);
			}
		}
		writel(m, pHba->reply_port);
		wmb();
		rmb();
	}
	handled = 1;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return IRQ_RETVAL(handled);
}

static s32 adpt_scsi_to_i2o(adpt_hba* pHba, struct scsi_cmnd* cmd, struct adpt_device* d)
{
	int i;
	u32 msg[MAX_MESSAGE_SIZE];
	u32* mptr;
	u32* lptr;
	u32 *lenptr;
	int direction;
	int scsidir;
	int nseg;
	u32 len;
	u32 reqlen;
	s32 rcode;
	dma_addr_t addr;

	memset(msg, 0 , sizeof(msg));
	len = scsi_bufflen(cmd);
	direction = 0x00000000;

	scsidir = 0x00000000;			// DATA NO XFER
	if(len) {
		/*
		 * Set SCBFlags to indicate if data is being transferred
		 * in or out, or no data transfer
		 * Note:  Do not have to verify index is less than 0 since
		 * cmd->cmnd[0] is an unsigned char
		 */
		switch(cmd->sc_data_direction){
		case DMA_FROM_DEVICE:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			break;
		case DMA_TO_DEVICE:
			direction=0x04000000;	// SGL OUT
			scsidir  =0x80000000;	// DATA OUT (iop-->dev)
			break;
		case DMA_NONE:
			break;
		case DMA_BIDIRECTIONAL:
			scsidir  =0x40000000;	// DATA IN  (iop<--dev)
			// Assume In - and continue;
			break;
		default:
			printk(KERN_WARNING"%s: scsi opcode 0x%x not supported.\n",
			     pHba->name, cmd->cmnd[0]);
			cmd->result = (DID_OK <<16) | (INITIATOR_ERROR << 8);
			cmd->scsi_done(cmd);
			return 0;
		}
	}
	// msg[0] is set later
	// I2O_CMD_SCSI_EXEC
	msg[1] = ((0xff<<24)|(HOST_TID<<12)|d->tid);
	msg[2] = 0;
	/* Add 1 to avoid firmware treating it as invalid command */
	msg[3] = cmd->request->tag + 1;
	// Our cards use the transaction context as the tag for queueing
	// Adaptec/DPT Private stuff
	msg[4] = I2O_CMD_SCSI_EXEC|(DPT_ORGANIZATION_ID<<16);
	msg[5] = d->tid;
	/* Direction, disconnect ok | sense data | simple queue , CDBLen */
	// I2O_SCB_FLAG_ENABLE_DISCONNECT |
	// I2O_SCB_FLAG_SIMPLE_QUEUE_TAG |
	// I2O_SCB_FLAG_SENSE_DATA_IN_MESSAGE;
	msg[6] = scsidir|0x20a00000|cmd->cmd_len;

	mptr=msg+7;

	// Write SCSI command into the message - always 16 byte block
	memset(mptr, 0, 16);
	memcpy(mptr, cmd->cmnd, cmd->cmd_len);
	mptr+=4;
	lenptr=mptr++;		/* Remember me - fill in when we know */
	if (dpt_dma64(pHba)) {
		reqlen = 16;		// SINGLE SGE
		*mptr++ = (0x7C<<24)+(2<<16)+0x02; /* Enable 64 bit */
		*mptr++ = 1 << PAGE_SHIFT;
	} else {
		reqlen = 14;		// SINGLE SGE
	}
	/* Now fill in the SGList and command */

	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		struct scatterlist *sg;

		len = 0;
		scsi_for_each_sg(cmd, sg, nseg, i) {
			lptr = mptr;
			*mptr++ = direction|0x10000000|sg_dma_len(sg);
			len+=sg_dma_len(sg);
			addr = sg_dma_address(sg);
			*mptr++ = dma_low(addr);
			if (dpt_dma64(pHba))
				*mptr++ = dma_high(addr);
			/* Make this an end of list */
			if (i == nseg - 1)
				*lptr = direction|0xD0000000|sg_dma_len(sg);
		}
		reqlen = mptr - msg;
		*lenptr = len;

		if(cmd->underflow && len != cmd->underflow){
			printk(KERN_WARNING"Cmd len %08X Cmd underflow %08X\n",
				len, cmd->underflow);
		}
	} else {
		*lenptr = len = 0;
		reqlen = 12;
	}

	/* Stick the headers on */
	msg[0] = reqlen<<16 | ((reqlen > 12) ? SGL_OFFSET_12 : SGL_OFFSET_0);

	// Send it on its way
	rcode = adpt_i2o_post_this(pHba, msg, reqlen<<2);
	if (rcode == 0) {
		return 0;
	}
	return rcode;
}
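
/*
 * Layout of the request built above (a sketch, derived from the code):
 *
 *	msg[0]		message size (dwords) << 16 | SGL offset
 *	msg[1]		I2O header: function 0xff, HOST_TID, target TID
 *	msg[3]		initiator context = block-layer tag + 1 (0 is invalid)
 *	msg[4]		DPT private function: I2O_CMD_SCSI_EXEC | org id
 *	msg[6]		SCB flags | data direction | CDB length
 *	msg[7..10]	16-byte CDB block
 *	msg[11]		total transfer length, patched in through lenptr
 *
 * followed by one simple SG element per mapped segment (low dword, plus a
 * high dword when the HBA does 64-bit DMA); the last element carries the
 * 0xD0000000 end-of-buffer/end-of-list bits.
 */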


static s32 adpt_scsi_host_alloc(adpt_hba* pHba, struct scsi_host_template *sht)
{
	struct Scsi_Host *host;

	host = scsi_host_alloc(sht, sizeof(adpt_hba*));
	if (host == NULL) {
		printk("%s: scsi_host_alloc returned NULL\n", pHba->name);
		return -1;
	}
	host->hostdata[0] = (unsigned long)pHba;
	pHba->host = host;

	host->irq = pHba->pDev->irq;
	/* no IO ports, so don't have to set host->io_port and
	 * host->n_io_port
	 */
	host->io_port = 0;
	host->n_io_port = 0;
				/* see comments in scsi_host.h */
	host->max_id = 16;
	host->max_lun = 256;
	host->max_channel = pHba->top_scsi_channel + 1;
	host->cmd_per_lun = 1;
	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
	host->sg_tablesize = pHba->sg_tablesize;
	host->can_queue = pHba->post_fifo_size;

	return 0;
}


static void adpt_i2o_scsi_complete(void __iomem *reply, struct scsi_cmnd *cmd)
{
	adpt_hba* pHba;
	u32 hba_status;
	u32 dev_status;
	u32 reply_flags = readl(reply) & 0xff00; // Leave it shifted up 8 bits
	// I know this would look cleaner if I just read bytes
	// but the model I have been using for all the rest of the
	// io is in 4 byte words - so I keep that model
	u16 detailed_status = readl(reply+16) &0xffff;
	dev_status = (detailed_status & 0xff);
	hba_status = detailed_status >> 8;

	// calculate resid for sg
	scsi_set_resid(cmd, scsi_bufflen(cmd) - readl(reply+20));

	pHba = (adpt_hba*) cmd->device->host->hostdata[0];

	cmd->sense_buffer[0] = '\0';  // initialize sense valid flag to false

	if(!(reply_flags & MSG_FAIL)) {
		switch(detailed_status & I2O_SCSI_DSC_MASK) {
		case I2O_SCSI_DSC_SUCCESS:
			cmd->result = (DID_OK << 16);
			// handle underflow
			if (readl(reply+20) < cmd->underflow) {
				cmd->result = (DID_ERROR <<16);
				printk(KERN_WARNING"%s: SCSI CMD underflow\n",pHba->name);
			}
			break;
		case I2O_SCSI_DSC_REQUEST_ABORTED:
			cmd->result = (DID_ABORT << 16);
			break;
		case I2O_SCSI_DSC_PATH_INVALID:
		case I2O_SCSI_DSC_DEVICE_NOT_PRESENT:
		case I2O_SCSI_DSC_SELECTION_TIMEOUT:
		case I2O_SCSI_DSC_COMMAND_TIMEOUT:
		case I2O_SCSI_DSC_NO_ADAPTER:
		case I2O_SCSI_DSC_RESOURCE_UNAVAILABLE:
			printk(KERN_WARNING"%s: SCSI Timeout-Device (%d,%d,%llu) hba status=0x%x, dev status=0x%x, cmd=0x%x\n",
				pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun, hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_TIME_OUT << 16);
			break;
		case I2O_SCSI_DSC_ADAPTER_BUSY:
		case I2O_SCSI_DSC_BUS_BUSY:
			cmd->result = (DID_BUS_BUSY << 16);
			break;
		case I2O_SCSI_DSC_SCSI_BUS_RESET:
		case I2O_SCSI_DSC_BDR_MESSAGE_SENT:
			cmd->result = (DID_RESET << 16);
			break;
		case I2O_SCSI_DSC_PARITY_ERROR_FAILURE:
			printk(KERN_WARNING"%s: SCSI CMD parity error\n",pHba->name);
			cmd->result = (DID_PARITY << 16);
			break;
		case I2O_SCSI_DSC_UNABLE_TO_ABORT:
		case I2O_SCSI_DSC_COMPLETE_WITH_ERROR:
		case I2O_SCSI_DSC_UNABLE_TO_TERMINATE:
		case I2O_SCSI_DSC_MR_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_AUTOSENSE_FAILED:
		case I2O_SCSI_DSC_DATA_OVERRUN:
		case I2O_SCSI_DSC_UNEXPECTED_BUS_FREE:
		case I2O_SCSI_DSC_SEQUENCE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_LENGTH_ERROR:
		case I2O_SCSI_DSC_PROVIDE_FAILURE:
		case I2O_SCSI_DSC_REQUEST_TERMINATED:
		case I2O_SCSI_DSC_IDE_MESSAGE_SENT:
		case I2O_SCSI_DSC_UNACKNOWLEDGED_EVENT:
		case I2O_SCSI_DSC_MESSAGE_RECEIVED:
		case I2O_SCSI_DSC_INVALID_CDB:
		case I2O_SCSI_DSC_LUN_INVALID:
		case I2O_SCSI_DSC_SCSI_TID_INVALID:
		case I2O_SCSI_DSC_FUNCTION_UNAVAILABLE:
		case I2O_SCSI_DSC_NO_NEXUS:
		case I2O_SCSI_DSC_CDB_RECEIVED:
		case I2O_SCSI_DSC_LUN_ALREADY_ENABLED:
		case I2O_SCSI_DSC_QUEUE_FROZEN:
		case I2O_SCSI_DSC_REQUEST_INVALID:
		default:
			printk(KERN_WARNING"%s: SCSI error %0x-Device(%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
				pHba->name, detailed_status & I2O_SCSI_DSC_MASK, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
				hba_status, dev_status, cmd->cmnd[0]);
			cmd->result = (DID_ERROR << 16);
			break;
		}

		// copy over the request sense data if it was a check
		// condition status
		if (dev_status == SAM_STAT_CHECK_CONDITION) {
			u32 len = min(SCSI_SENSE_BUFFERSIZE, 40);
			// Copy over the sense data
			memcpy_fromio(cmd->sense_buffer, (reply+28) , len);
			if(cmd->sense_buffer[0] == 0x70 /* class 7 */ &&
			   cmd->sense_buffer[2] == DATA_PROTECT ){
				/* This is to handle an array failed */
				cmd->result = (DID_TIME_OUT << 16);
				printk(KERN_WARNING"%s: SCSI Data Protect-Device (%d,%d,%llu) hba_status=0x%x, dev_status=0x%x, cmd=0x%x\n",
					pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
					hba_status, dev_status, cmd->cmnd[0]);
			}
		}
	} else {
		/* In this condition we could not talk to the tid
		 * the card rejected it.  We should signal a retry
		 * for a limited number of retries.
		 */
		cmd->result = (DID_TIME_OUT << 16);
		printk(KERN_WARNING"%s: I2O MSG_FAIL - Device (%d,%d,%llu) tid=%d, cmd=0x%x\n",
			pHba->name, (u32)cmd->device->channel, (u32)cmd->device->id, cmd->device->lun,
			((struct adpt_device*)(cmd->device->hostdata))->tid, cmd->cmnd[0]);
	}

	cmd->result |= (dev_status);

	if(cmd->scsi_done != NULL){
		cmd->scsi_done(cmd);
	}
}


static s32 adpt_rescan(adpt_hba* pHba)
{
	s32 rcode;
	ulong flags = 0;

	if(pHba->host)
		spin_lock_irqsave(pHba->host->host_lock, flags);
	if ((rcode=adpt_i2o_lct_get(pHba)) < 0)
		goto out;
	if ((rcode=adpt_i2o_reparse_lct(pHba)) < 0)
		goto out;
	rcode = 0;
out:	if(pHba->host)
		spin_unlock_irqrestore(pHba->host->host_lock, flags);
	return rcode;
}


static s32 adpt_i2o_reparse_lct(adpt_hba* pHba)
{
	int i;
	int max;
	int tid;
	struct i2o_device *d;
	i2o_lct *lct = pHba->lct;
	u8 bus_no = 0;
	s16 scsi_id;
	u64 scsi_lun;
	u32 buf[10]; // at least 8 u32's
	struct adpt_device* pDev = NULL;
	struct i2o_device* pI2o_dev = NULL;

	if (lct == NULL) {
		printk(KERN_ERR "%s: LCT is empty???\n",pHba->name);
		return -1;
	}

	max = lct->table_size;
	max -= 3;
	max /= 9;
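	/*
	 * table_size is in 32-bit words: a 3-word LCT header followed by
	 * 9-word entries, so (table_size - 3) / 9 is the entry count.
	 */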

	// Mark each drive as unscanned
	for (d = pHba->devices; d; d = d->next) {
		pDev =(struct adpt_device*) d->owner;
		if(!pDev){
			continue;
		}
		pDev->state |= DPTI_DEV_UNSCANNED;
	}

	printk(KERN_INFO "%s: LCT has %d entries.\n", pHba->name,max);

	for(i=0;i<max;i++) {
		if( lct->lct_entry[i].user_tid != 0xfff){
			continue;
		}

		if( lct->lct_entry[i].class_id == I2O_CLASS_RANDOM_BLOCK_STORAGE ||
		    lct->lct_entry[i].class_id == I2O_CLASS_SCSI_PERIPHERAL ||
		    lct->lct_entry[i].class_id == I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL ){
			tid = lct->lct_entry[i].tid;
			if(adpt_i2o_query_scalar(pHba, tid, 0x8000, -1, buf, 32)<0) {
				printk(KERN_ERR"%s: Could not query device\n",pHba->name);
				continue;
			}
			bus_no = buf[0]>>16;
			if (bus_no >= MAX_CHANNEL) {	/* Something wrong skip it */
				printk(KERN_WARNING
					"%s: Channel number %d out of range\n",
					pHba->name, bus_no);
				continue;
			}

			scsi_id = buf[1];
			scsi_lun = scsilun_to_int((struct scsi_lun *)&buf[2]);
			pDev = pHba->channel[bus_no].device[scsi_id];
			/* walk the LUN chain for this target */
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					break;
				}
				pDev = pDev->next_lun;
			}
			if(!pDev ) { // Something new add it
				d = kmalloc(sizeof(struct i2o_device),
					    GFP_ATOMIC);
				if(d==NULL)
				{
					printk(KERN_CRIT "Out of memory for I2O device data.\n");
					return -ENOMEM;
				}

				d->controller = pHba;
				d->next = NULL;

				memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));

				d->flags = 0;
				adpt_i2o_report_hba_unit(pHba, d);
				adpt_i2o_install_device(pHba, d);

				pDev = pHba->channel[bus_no].device[scsi_id];
				if( pDev == NULL){
					pDev =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
					pHba->channel[bus_no].device[scsi_id] = pDev;
				} else {
					while (pDev->next_lun) {
						pDev = pDev->next_lun;
					}
					pDev = pDev->next_lun =
					  kzalloc(sizeof(struct adpt_device),
						  GFP_ATOMIC);
					if(pDev == NULL) {
						return -ENOMEM;
					}
				}
				pDev->tid = d->lct_data.tid;
				pDev->scsi_channel = bus_no;
				pDev->scsi_id = scsi_id;
				pDev->scsi_lun = scsi_lun;
				pDev->pI2o_dev = d;
				d->owner = pDev;
				pDev->type = (buf[0])&0xff;
				pDev->flags = (buf[0]>>8)&0xff;
				// Too late, SCSI system has made up its mind, but what the hey ...
				if(scsi_id > pHba->top_scsi_id){
					pHba->top_scsi_id = scsi_id;
				}
				if(scsi_lun > pHba->top_scsi_lun){
					pHba->top_scsi_lun = scsi_lun;
				}
				continue;
			} // end of new i2o device

			// We found an old device - check it
			while(pDev) {
				if(pDev->scsi_lun == scsi_lun) {
					if(!scsi_device_online(pDev->pScsi_dev)) {
						printk(KERN_WARNING"%s: Setting device (%d,%d,%llu) back online\n",
								pHba->name,bus_no,scsi_id,scsi_lun);
						if (pDev->pScsi_dev) {
							scsi_device_set_state(pDev->pScsi_dev, SDEV_RUNNING);
						}
					}
					d = pDev->pI2o_dev;
					if(d->lct_data.tid != tid) { // something changed
						pDev->tid = tid;
						memcpy(&d->lct_data, &lct->lct_entry[i], sizeof(i2o_lct_entry));
						if (pDev->pScsi_dev) {
							pDev->pScsi_dev->changed = TRUE;
							pDev->pScsi_dev->removable = TRUE;
						}
					}
					// Found it - mark it scanned
					pDev->state = DPTI_DEV_ONLINE;
					break;
				}
				pDev = pDev->next_lun;
			}
		}
	}
	for (pI2o_dev = pHba->devices; pI2o_dev; pI2o_dev = pI2o_dev->next) {
		pDev =(struct adpt_device*) pI2o_dev->owner;
		if(!pDev){
			continue;
		}
		// Take offline any drives that previously existed but could
		// not be found in the LCT table
		if (pDev->state & DPTI_DEV_UNSCANNED){
			pDev->state = DPTI_DEV_OFFLINE;
			printk(KERN_WARNING"%s: Device (%d,%d,%llu) offline\n",pHba->name,pDev->scsi_channel,pDev->scsi_id,pDev->scsi_lun);
			if (pDev->pScsi_dev) {
				scsi_device_set_state(pDev->pScsi_dev, SDEV_OFFLINE);
			}
		}
	}
	return 0;
}

/*============================================================================
 *  Routines from i2o subsystem
 *============================================================================
 */



/*
 *	Bring an I2O controller into HOLD state. See the spec.
 */
static int adpt_i2o_activate_hba(adpt_hba* pHba)
{
	int rcode;

	if(pHba->initialized ) {
		if (adpt_i2o_status_get(pHba) < 0) {
			if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
				printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
				return rcode;
			}
			if (adpt_i2o_status_get(pHba) < 0) {
				printk(KERN_INFO "HBA not responding.\n");
				return -1;
			}
		}

		if(pHba->status_block->iop_state == ADAPTER_STATE_FAULTED) {
			printk(KERN_CRIT "%s: hardware fault\n", pHba->name);
			return -1;
		}

		if (pHba->status_block->iop_state == ADAPTER_STATE_READY ||
		    pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL ||
		    pHba->status_block->iop_state == ADAPTER_STATE_HOLD ||
		    pHba->status_block->iop_state == ADAPTER_STATE_FAILED) {
			adpt_i2o_reset_hba(pHba);
			if (adpt_i2o_status_get(pHba) < 0 || pHba->status_block->iop_state != ADAPTER_STATE_RESET) {
				printk(KERN_ERR "%s: Failed to initialize.\n", pHba->name);
				return -1;
			}
		}
	} else {
		if((rcode = adpt_i2o_reset_hba(pHba)) != 0){
			printk(KERN_WARNING"%s: Could NOT reset.\n", pHba->name);
			return rcode;
		}

	}

	if (adpt_i2o_init_outbound_q(pHba) < 0) {
		return -1;
	}

	/* In HOLD state */

	if (adpt_i2o_hrt_get(pHba) < 0) {
		return -1;
	}

	return 0;
}

/*
 *	Bring a controller online into OPERATIONAL state.
 */

static int adpt_i2o_online_hba(adpt_hba* pHba)
{
	if (adpt_i2o_systab_send(pHba) < 0)
		return -1;
	/* In READY state */

	if (adpt_i2o_enable_hba(pHba) < 0)
		return -1;

	/* In OPERATIONAL state  */
	return 0;
}
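
/*
 * For reference, the IOP state machine these two helpers walk (per the
 * I2O spec, as used above): RESET -> (init outbound queue) HOLD ->
 * (SysTab set) READY -> (SYS_ENABLE) OPERATIONAL.  adpt_i2o_activate_hba()
 * ends in HOLD; adpt_i2o_online_hba() completes the last two transitions.
 */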

static s32 adpt_send_nop(adpt_hba*pHba,u32 m)
{
	u32 __iomem *msg;
	ulong timeout = jiffies + 5*HZ;

	while(m == EMPTY_QUEUE){
		rmb();
		m = readl(pHba->post_port);
		if(m != EMPTY_QUEUE){
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message frame!\n",pHba->name);
			return 2;
		}
		schedule_timeout_uninterruptible(1);
	}
	msg = (u32 __iomem *)(pHba->msg_addr_virt + m);
	writel( THREE_WORD_MSG_SIZE | SGL_OFFSET_0,&msg[0]);
	writel( I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | 0,&msg[1]);
	writel( 0,&msg[2]);
	wmb();

	writel(m, pHba->post_port);
	wmb();
	return 0;
}

static s32 adpt_i2o_init_outbound_q(adpt_hba* pHba)
{
	u8 *status;
	dma_addr_t addr;
	u32 __iomem *msg = NULL;
	int i;
	ulong timeout = jiffies + TMOUT_INITOUTBOUND*HZ;
	u32 m;

	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}

		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout waiting for message frame\n",pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m == EMPTY_QUEUE);

	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	status = dma_alloc_coherent(&pHba->pDev->dev, 4, &addr, GFP_KERNEL);
	if (!status) {
		adpt_send_nop(pHba, m);
		printk(KERN_WARNING"%s: IOP reset failed - no free memory.\n",
			pHba->name);
		return -ENOMEM;
	}

	writel(EIGHT_WORD_MSG_SIZE| SGL_OFFSET_6, &msg[0]);
	writel(I2O_CMD_OUTBOUND_INIT<<24 | HOST_TID<<12 | ADAPTER_TID, &msg[1]);
	writel(0, &msg[2]);
	writel(0x0106, &msg[3]);	/* Transaction context */
	writel(4096, &msg[4]);		/* Host page frame size */
	writel((REPLY_FRAME_SIZE)<<16|0x80, &msg[5]);	/* Outbound msg frame size and Initcode */
	writel(0xD0000004, &msg[6]);		/* Simple SG LE, EOB */
	writel((u32)addr, &msg[7]);

	writel(m, pHba->post_port);
	wmb();

	// Wait for the reply status to come back
	do {
		if (*status) {
			if (*status != 0x01 /*I2O_EXEC_OUTBOUND_INIT_IN_PROGRESS*/) {
				break;
			}
		}
		rmb();
		if(time_after(jiffies,timeout)){
			printk(KERN_WARNING"%s: Timeout Initializing\n",pHba->name);
			/* We lose 4 bytes of "status" here, but we
			   cannot free these because controller may
			   awake and corrupt those bytes at any time */
			/* dma_free_coherent(&pHba->pDev->dev, 4, status, addr); */
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while (1);

	// If the command was successful, fill the fifo with our reply
	// message packets
	if(*status != 0x04 /*I2O_EXEC_OUTBOUND_INIT_COMPLETE*/) {
		dma_free_coherent(&pHba->pDev->dev, 4, status, addr);
		return -2;
	}
	dma_free_coherent(&pHba->pDev->dev, 4, status, addr);

	if(pHba->reply_pool != NULL) {
		dma_free_coherent(&pHba->pDev->dev,
			pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
			pHba->reply_pool, pHba->reply_pool_pa);
	}

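	/*
	 * Allocate one contiguous DMA pool for all reply frames and hand
	 * every frame to the IOP by writing its bus address to reply_port;
	 * those writes are what stock the outbound (reply) free-list FIFO.
	 */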
	pHba->reply_pool = dma_alloc_coherent(&pHba->pDev->dev,
				pHba->reply_fifo_size * REPLY_FRAME_SIZE * 4,
				&pHba->reply_pool_pa, GFP_KERNEL);
	if (!pHba->reply_pool) {
		printk(KERN_ERR "%s: Could not allocate reply pool\n", pHba->name);
		return -ENOMEM;
	}

	for(i = 0; i < pHba->reply_fifo_size; i++) {
		writel(pHba->reply_pool_pa + (i * REPLY_FRAME_SIZE * 4),
			pHba->reply_port);
		wmb();
	}
	adpt_i2o_status_get(pHba);
	return 0;
}


/*
 * I2O System Table.  Contains information about
 * all the IOPs in the system.  Used to inform IOPs
 * about each other's existence.
 *
 * sys_tbl_ver is the CurrentChangeIndicator that is
 * used by IOPs to track changes.
 */
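
/*
 * Shape of the table as filled in by adpt_i2o_build_sys_table() below
 * (field names from the driver's headers): a header carrying num_entries,
 * version and change_ind, followed by one i2o_sys_tbl_entry per IOP with
 * its org_id, iop_id (unit + 2), I2O version, state, frame size and the
 * 64-bit bus address of its inbound post FIFO (base + 0x40).
 */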


static s32 adpt_i2o_status_get(adpt_hba* pHba)
{
	ulong timeout;
	u32 m;
	u32 __iomem *msg;
	u8 *status_block=NULL;

	if(pHba->status_block == NULL) {
		pHba->status_block = dma_alloc_coherent(&pHba->pDev->dev,
					sizeof(i2o_status_block),
					&pHba->status_block_pa, GFP_KERNEL);
		if(pHba->status_block == NULL) {
			printk(KERN_ERR
			"dpti%d: Get Status Block failed; Out of memory.\n",
			pHba->unit);
			return -ENOMEM;
		}
	}
	memset(pHba->status_block, 0, sizeof(i2o_status_block));
	status_block = (u8*)(pHba->status_block);
	timeout = jiffies+TMOUT_GETSTATUS*HZ;
	do {
		rmb();
		m = readl(pHba->post_port);
		if (m != EMPTY_QUEUE) {
			break;
		}
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR "%s: Timeout waiting for message !\n",
					pHba->name);
			return -ETIMEDOUT;
		}
		schedule_timeout_uninterruptible(1);
	} while(m==EMPTY_QUEUE);


	msg=(u32 __iomem *)(pHba->msg_addr_virt+m);

	writel(NINE_WORD_MSG_SIZE|SGL_OFFSET_0, &msg[0]);
	writel(I2O_CMD_STATUS_GET<<24|HOST_TID<<12|ADAPTER_TID, &msg[1]);
	writel(1, &msg[2]);
	writel(0, &msg[3]);
	writel(0, &msg[4]);
	writel(0, &msg[5]);
	writel( dma_low(pHba->status_block_pa), &msg[6]);
	writel( dma_high(pHba->status_block_pa), &msg[7]);
	writel(sizeof(i2o_status_block), &msg[8]); // 88 bytes

	//post message
	writel(m, pHba->post_port);
	wmb();

	while(status_block[87]!=0xff){
		if(time_after(jiffies,timeout)){
			printk(KERN_ERR"dpti%d: Get status timeout.\n",
				pHba->unit);
			return -ETIMEDOUT;
		}
		rmb();
		schedule_timeout_uninterruptible(1);
	}

	// Set up our number of outbound and inbound messages
	pHba->post_fifo_size = pHba->status_block->max_inbound_frames;
	if (pHba->post_fifo_size > MAX_TO_IOP_MESSAGES) {
		pHba->post_fifo_size = MAX_TO_IOP_MESSAGES;
	}

	pHba->reply_fifo_size = pHba->status_block->max_outbound_frames;
	if (pHba->reply_fifo_size > MAX_FROM_IOP_MESSAGES) {
		pHba->reply_fifo_size = MAX_FROM_IOP_MESSAGES;
	}

	// Calculate the Scatter Gather list size
	if (dpt_dma64(pHba)) {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
			    - 14 * sizeof(u32))
			   / (sizeof(struct sg_simple_element) + sizeof(u32)));
	} else {
		pHba->sg_tablesize
			= ((pHba->status_block->inbound_frame_size * 4
			    - 12 * sizeof(u32))
			   / sizeof(struct sg_simple_element));
	}
	if (pHba->sg_tablesize > SG_LIST_ELEMENTS) {
		pHba->sg_tablesize = SG_LIST_ELEMENTS;
	}
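	/*
	 * The arithmetic above mirrors the request layout in
	 * adpt_scsi_to_i2o(): a frame is inbound_frame_size dwords; the
	 * fixed part of a SCSI_EXEC request is 12 dwords (14 with the
	 * 64-bit SGL attributes), and each SG element then costs
	 * sizeof(struct sg_simple_element), plus one extra dword per
	 * element for the high half of a 64-bit address.
	 */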


#ifdef DEBUG
	printk("dpti%d: State = ",pHba->unit);
	switch(pHba->status_block->iop_state) {
	case 0x01:
		printk("INIT\n");
		break;
	case 0x02:
		printk("RESET\n");
		break;
	case 0x04:
		printk("HOLD\n");
		break;
	case 0x05:
		printk("READY\n");
		break;
	case 0x08:
		printk("OPERATIONAL\n");
		break;
	case 0x10:
		printk("FAILED\n");
		break;
	case 0x11:
		printk("FAULTED\n");
		break;
	default:
		printk("%x (unknown!!)\n",pHba->status_block->iop_state);
	}
#endif
	return 0;
}

/*
 * Get the IOP's Logical Configuration Table
 */
static int adpt_i2o_lct_get(adpt_hba* pHba)
{
	u32 msg[8];
	int ret;
	u32 buf[16];

	if ((pHba->lct_size == 0) || (pHba->lct == NULL)){
		pHba->lct_size = pHba->status_block->expected_lct_size;
	}
	do {
		if (pHba->lct == NULL) {
			pHba->lct = dma_alloc_coherent(&pHba->pDev->dev,
					pHba->lct_size, &pHba->lct_pa,
					GFP_ATOMIC);
			if(pHba->lct == NULL) {
				printk(KERN_CRIT "%s: Lct Get failed. Out of memory.\n",
					pHba->name);
				return -ENOMEM;
			}
		}
		memset(pHba->lct, 0, pHba->lct_size);

		msg[0] = EIGHT_WORD_MSG_SIZE|SGL_OFFSET_6;
		msg[1] = I2O_CMD_LCT_NOTIFY<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2] = 0;
		msg[3] = 0;
		msg[4] = 0xFFFFFFFF;	/* All devices */
		msg[5] = 0x00000000;	/* Report now */
		msg[6] = 0xD0000000|pHba->lct_size;
		msg[7] = (u32)pHba->lct_pa;

		if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 360))) {
			printk(KERN_ERR "%s: LCT Get failed (status=%#10x).\n",
				pHba->name, ret);
			printk(KERN_ERR"Adaptec: Error Reading Hardware.\n");
			return ret;
		}

		if ((pHba->lct->table_size << 2) > pHba->lct_size) {
			pHba->lct_size = pHba->lct->table_size << 2;
			dma_free_coherent(&pHba->pDev->dev, pHba->lct_size,
					pHba->lct, pHba->lct_pa);
			pHba->lct = NULL;
		}
	} while (pHba->lct == NULL);
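	/*
	 * The loop above sizes the buffer by trial: if the IOP reports a
	 * table_size (in dwords) larger than what we offered, the buffer
	 * is freed, lct_size is grown to match, and LCT_NOTIFY is reissued.
	 */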

	PDEBUG("%s: Hardware resource table read.\n", pHba->name);


	// I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO;
	if(adpt_i2o_query_scalar(pHba, 0 , 0x8000, -1, buf, sizeof(buf))>=0) {
		pHba->FwDebugBufferSize = buf[1];
		pHba->FwDebugBuffer_P = ioremap(pHba->base_addr_phys + buf[0],
						pHba->FwDebugBufferSize);
		if (pHba->FwDebugBuffer_P) {
			pHba->FwDebugFlags_P     = pHba->FwDebugBuffer_P +
							FW_DEBUG_FLAGS_OFFSET;
			pHba->FwDebugBLEDvalue_P = pHba->FwDebugBuffer_P +
							FW_DEBUG_BLED_OFFSET;
			pHba->FwDebugBLEDflag_P  = pHba->FwDebugBLEDvalue_P + 1;
			pHba->FwDebugStrLength_P = pHba->FwDebugBuffer_P +
						FW_DEBUG_STR_LENGTH_OFFSET;
			pHba->FwDebugBuffer_P += buf[2];
			pHba->FwDebugFlags = 0;
		}
	}

	return 0;
}

static int adpt_i2o_build_sys_table(void)
{
	adpt_hba* pHba = hba_chain;
	int count = 0;

	if (sys_tbl)
		dma_free_coherent(&pHba->pDev->dev, sys_tbl_len,
					sys_tbl, sys_tbl_pa);

	sys_tbl_len = sizeof(struct i2o_sys_tbl) +	// Header + IOPs
				(hba_count) * sizeof(struct i2o_sys_tbl_entry);

	sys_tbl = dma_alloc_coherent(&pHba->pDev->dev,
				sys_tbl_len, &sys_tbl_pa, GFP_KERNEL);
	if (!sys_tbl) {
		printk(KERN_WARNING "SysTab Set failed. Out of memory.\n");
		return -ENOMEM;
	}

	sys_tbl->num_entries = hba_count;
	sys_tbl->version = I2OVERSION;
	sys_tbl->change_ind = sys_tbl_ind++;

	for(pHba = hba_chain; pHba; pHba = pHba->next) {
		u64 addr;
		// Get updated Status Block so we have the latest information
		if (adpt_i2o_status_get(pHba)) {
			sys_tbl->num_entries--;
			continue; // try next one
		}

		sys_tbl->iops[count].org_id = pHba->status_block->org_id;
		sys_tbl->iops[count].iop_id = pHba->unit + 2;
		sys_tbl->iops[count].seg_num = 0;
		sys_tbl->iops[count].i2o_version = pHba->status_block->i2o_version;
		sys_tbl->iops[count].iop_state = pHba->status_block->iop_state;
		sys_tbl->iops[count].msg_type = pHba->status_block->msg_type;
		sys_tbl->iops[count].frame_size = pHba->status_block->inbound_frame_size;
		sys_tbl->iops[count].last_changed = sys_tbl_ind - 1; // ??
		sys_tbl->iops[count].iop_capabilities = pHba->status_block->iop_capabilities;
		addr = pHba->base_addr_phys + 0x40;
		sys_tbl->iops[count].inbound_low = dma_low(addr);
		sys_tbl->iops[count].inbound_high = dma_high(addr);

		count++;
	}

#ifdef DEBUG
{
	u32 *table = (u32*)sys_tbl;
	printk(KERN_DEBUG"sys_tbl_len=%d in 32bit words\n",(sys_tbl_len >>2));
	for(count = 0; count < (sys_tbl_len >>2); count++) {
		printk(KERN_INFO "sys_tbl[%d] = %0#10x\n",
			count, table[count]);
	}
}
#endif

	return 0;
}


/*
 *	 Dump the information block associated with a given unit (TID)
 */

static void adpt_i2o_report_hba_unit(adpt_hba* pHba, struct i2o_device *d)
{
	char buf[64];
	int unit = d->lct_data.tid;

	printk(KERN_INFO "TID %3.3d ", unit);

	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 3, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Vendor: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 4, buf, 16)>=0)
	{
		buf[16]=0;
		printk(" Device: %-12.12s", buf);
	}
	if(adpt_i2o_query_scalar(pHba, unit, 0xF100, 6, buf, 8)>=0)
	{
		buf[8]=0;
		printk(" Rev: %-12.12s\n", buf);
	}
#ifdef DEBUG
	printk(KERN_INFO "\tClass: %.21s\n", adpt_i2o_get_class_name(d->lct_data.class_id));
	printk(KERN_INFO "\tSubclass: 0x%04X\n", d->lct_data.sub_class);
	printk(KERN_INFO "\tFlags: ");

	if(d->lct_data.device_flags&(1<<0))
		printk("C");	// ConfigDialog requested
	if(d->lct_data.device_flags&(1<<1))
		printk("U");	// Multi-user capable
	if(!(d->lct_data.device_flags&(1<<4)))
		printk("P");	// Peer service enabled!
	if(!(d->lct_data.device_flags&(1<<5)))
		printk("M");	// Mgmt service enabled!
	printk("\n");
#endif
}

#ifdef DEBUG
/*
 * Do i2o class name lookup
 */
static const char *adpt_i2o_get_class_name(int class)
{
	int idx = 16;
	static char *i2o_class_name[] = {
		"Executive",
		"Device Driver Module",
		"Block Device",
		"Tape Device",
		"LAN Interface",
		"WAN Interface",
		"Fibre Channel Port",
		"Fibre Channel Device",
		"SCSI Device",
		"ATE Port",
		"ATE Device",
		"Floppy Controller",
		"Floppy Device",
		"Secondary Bus Port",
		"Peer Transport Agent",
		"Peer Transport",
		"Unknown"
	};

	switch(class&0xFFF) {
	case I2O_CLASS_EXECUTIVE:
		idx = 0; break;
	case I2O_CLASS_DDM:
		idx = 1; break;
	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
		idx = 2; break;
	case I2O_CLASS_SEQUENTIAL_STORAGE:
		idx = 3; break;
	case I2O_CLASS_LAN:
		idx = 4; break;
	case I2O_CLASS_WAN:
		idx = 5; break;
	case I2O_CLASS_FIBRE_CHANNEL_PORT:
		idx = 6; break;
	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
		idx = 7; break;
	case I2O_CLASS_SCSI_PERIPHERAL:
		idx = 8; break;
	case I2O_CLASS_ATE_PORT:
		idx = 9; break;
	case I2O_CLASS_ATE_PERIPHERAL:
		idx = 10; break;
	case I2O_CLASS_FLOPPY_CONTROLLER:
		idx = 11; break;
	case I2O_CLASS_FLOPPY_DEVICE:
		idx = 12; break;
	case I2O_CLASS_BUS_ADAPTER_PORT:
		idx = 13; break;
	case I2O_CLASS_PEER_TRANSPORT_AGENT:
		idx = 14; break;
	case I2O_CLASS_PEER_TRANSPORT:
		idx = 15; break;
	}
	return i2o_class_name[idx];
}
#endif


static s32 adpt_i2o_hrt_get(adpt_hba* pHba)
{
	u32 msg[6];
	int ret, size = sizeof(i2o_hrt);

	do {
		if (pHba->hrt == NULL) {
			pHba->hrt = dma_alloc_coherent(&pHba->pDev->dev,
					size, &pHba->hrt_pa, GFP_KERNEL);
			if (pHba->hrt == NULL) {
				printk(KERN_CRIT "%s: Hrt Get failed; Out of memory.\n", pHba->name);
				return -ENOMEM;
			}
		}

		msg[0]= SIX_WORD_MSG_SIZE| SGL_OFFSET_4;
		msg[1]= I2O_CMD_HRT_GET<<24 | HOST_TID<<12 | ADAPTER_TID;
		msg[2]= 0;
		msg[3]= 0;
		msg[4]= (0xD0000000 | size);	/* Simple transaction */
		msg[5]= (u32)pHba->hrt_pa;	/* Dump it here */

		if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg),20))) {
			printk(KERN_ERR "%s: Unable to get HRT (status=%#10x)\n", pHba->name, ret);
			return ret;
		}

		if (pHba->hrt->num_entries * pHba->hrt->entry_len << 2 > size) {
			int newsize = pHba->hrt->num_entries * pHba->hrt->entry_len << 2;
			dma_free_coherent(&pHba->pDev->dev, size,
				pHba->hrt, pHba->hrt_pa);
			size = newsize;
			pHba->hrt = NULL;
		}
	} while(pHba->hrt == NULL);
	return 0;
}

/*
 *	 Query one scalar group value or a whole scalar group.
 */
static int adpt_i2o_query_scalar(adpt_hba* pHba, int tid,
			int group, int field, void *buf, int buflen)
{
	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
	u8 *opblk_va;
	dma_addr_t opblk_pa;
	u8 *resblk_va;
	dma_addr_t resblk_pa;

	int size;

	/* 8 bytes for header */
	resblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(u8) * (8 + buflen), &resblk_pa, GFP_KERNEL);
	if (resblk_va == NULL) {
		printk(KERN_CRIT "%s: query scalar failed; Out of memory.\n", pHba->name);
		return -ENOMEM;
	}

	opblk_va = dma_alloc_coherent(&pHba->pDev->dev,
			sizeof(opblk), &opblk_pa, GFP_KERNEL);
	if (opblk_va == NULL) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_CRIT "%s: query operation failed; Out of memory.\n",
			pHba->name);
		return -ENOMEM;
	}
	if (field == -1)		/* whole group */
		opblk[4] = -1;

	memcpy(opblk_va, opblk, sizeof(opblk));
	size = adpt_i2o_issue_params(I2O_CMD_UTIL_PARAMS_GET, pHba, tid,
		opblk_va, opblk_pa, sizeof(opblk),
		resblk_va, resblk_pa, sizeof(u8)*(8+buflen));
	dma_free_coherent(&pHba->pDev->dev, sizeof(opblk), opblk_va, opblk_pa);
	if (size == -ETIME) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Timed out.\n", pHba->name);
		return -ETIME;
	} else if (size == -EINTR) {
		dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
		printk(KERN_WARNING "%s: issue params failed; Interrupted.\n", pHba->name);
		return -EINTR;
	}

	memcpy(buf, resblk_va+8, buflen);	/* cut off header */

	dma_free_coherent(&pHba->pDev->dev, sizeof(u8) * (8+buflen),
			resblk_va, resblk_pa);
	if (size < 0)
		return size;

	return buflen;
}
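
/*
 * Usage sketch: adpt_i2o_report_hba_unit() above reads identity strings
 * with single-field queries against group 0xF100, e.g.
 *
 *	char buf[64];
 *	if (adpt_i2o_query_scalar(pHba, tid, 0xF100, 3, buf, 16) >= 0)
 *		buf[16] = 0;	// vendor string, NUL-terminated by hand
 *
 * while field == -1 (as in adpt_i2o_reparse_lct()) fetches a whole group.
 */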


/*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
 *
 *	This function can be used for all UtilParamsGet/Set operations.
 *	The OperationBlock is given in opblk-buffer,
 *	and results are returned in resblk-buffer.
 *	Note that the minimum sized resblk is 8 bytes and contains
 *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
 */
static int adpt_i2o_issue_params(int cmd, adpt_hba* pHba, int tid,
		void *opblk_va, dma_addr_t opblk_pa, int oplen,
		void *resblk_va, dma_addr_t resblk_pa, int reslen)
{
	u32 msg[9];
	u32 *res = (u32 *)resblk_va;
	int wait_status;

	msg[0] = NINE_WORD_MSG_SIZE | SGL_OFFSET_5;
	msg[1] = cmd << 24 | HOST_TID << 12 | tid;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = 0;
	msg[5] = 0x54000000 | oplen;	/* OperationBlock */
	msg[6] = (u32)opblk_pa;
	msg[7] = 0xD0000000 | reslen;	/* ResultBlock */
	msg[8] = (u32)resblk_pa;

	if ((wait_status = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 20))) {
		printk("adpt_i2o_issue_params: post_wait failed (%p)\n", resblk_va);
		return wait_status;	/* -DetailedStatus */
	}

	if (res[1]&0x00FF0000) {	/* BlockStatus != SUCCESS */
		printk(KERN_WARNING "%s: %s - Error:\n ErrorInfoSize = 0x%02x, "
			"BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
			pHba->name,
			(cmd == I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET"
							 : "PARAMS_GET",
			res[1]>>24, (res[1]>>16)&0xFF, res[1]&0xFFFF);
		return -((res[1] >> 16) & 0xFF);	/* -BlockStatus */
	}

	return 4 + ((res[1] & 0x0000FFFF) << 2);	/* bytes used in resblk */
}


static s32 adpt_i2o_quiesce_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);

	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */

	if((pHba->status_block->iop_state != ADAPTER_STATE_READY) &&
	   (pHba->status_block->iop_state != ADAPTER_STATE_OPERATIONAL)){
		return 0;
	}

	msg[0] = FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1] = I2O_CMD_SYS_QUIESCE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;

	if((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_INFO"dpti%d: Unable to quiesce (status=%#x).\n",
				pHba->unit, -ret);
	} else {
		printk(KERN_INFO"dpti%d: Quiesced.\n",pHba->unit);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


/*
 * Enable IOP. Allows the IOP to resume external operations.
 */
static int adpt_i2o_enable_hba(adpt_hba* pHba)
{
	u32 msg[4];
	int ret;

	adpt_i2o_status_get(pHba);
	if(!pHba->status_block){
		return -ENOMEM;
	}
	/* Enable only allowed on READY state */
	if(pHba->status_block->iop_state == ADAPTER_STATE_OPERATIONAL)
		return 0;

	if(pHba->status_block->iop_state != ADAPTER_STATE_READY)
		return -EINVAL;

	msg[0]=FOUR_WORD_MSG_SIZE|SGL_OFFSET_0;
	msg[1]=I2O_CMD_SYS_ENABLE<<24|HOST_TID<<12|ADAPTER_TID;
	msg[2]= 0;
	msg[3]= 0;

	if ((ret = adpt_i2o_post_wait(pHba, msg, sizeof(msg), 240))) {
		printk(KERN_WARNING"%s: Could not enable (status=%#10x).\n",
			pHba->name, ret);
	} else {
		PDEBUG("%s: Enabled.\n", pHba->name);
	}

	adpt_i2o_status_get(pHba);
	return ret;
}


static int adpt_i2o_systab_send(adpt_hba* pHba)
{
	u32 msg[12];
	int ret;

	msg[0] = I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6;
	msg[1] = I2O_CMD_SYS_TAB_SET<<24 | HOST_TID<<12 | ADAPTER_TID;
	msg[2] = 0;
	msg[3] = 0;
	msg[4] = (0<<16) | ((pHba->unit+2) << 12); /* Host 0 IOP ID (unit + 2) */
	msg[5] = 0;				   /* Segment 0 */

	/*
	 * Provide three SGL-elements:
	 * System table (SysTab), Private memory space declaration and
	 * Private i/o space declaration
	 */
	msg[6] = 0x54000000 | sys_tbl_len;
	msg[7] = (u32)sys_tbl_pa;
	msg[8] = 0x54000000 | 0;
	msg[9] = 0;
	msg[10] = 0xD4000000 | 0;
	msg[11] = 0;

	if ((ret=adpt_i2o_post_wait(pHba, msg, sizeof(msg), 120))) {
		printk(KERN_INFO "%s: Unable to set SysTab (status=%#10x).\n",
			pHba->name, ret);
	}
#ifdef DEBUG
	else {
		PINFO("%s: SysTab set.\n", pHba->name);
	}
#endif

	return ret;
}


/*============================================================================
 *
 *============================================================================
 */


#ifdef UARTDELAY

static void adpt_delay(int millisec)
{
	int i;
	for (i = 0; i < millisec; i++) {
		udelay(1000);	/* delay for one millisecond */
	}
}

#endif

static struct scsi_host_template driver_template = {
	.module			= THIS_MODULE,
	.name			= "dpt_i2o",
	.proc_name		= "dpt_i2o",
	.show_info		= adpt_show_info,
	.info			= adpt_info,
	.queuecommand		= adpt_queue,
	.eh_abort_handler	= adpt_abort,
	.eh_device_reset_handler = adpt_device_reset,
	.eh_bus_reset_handler	= adpt_bus_reset,
	.eh_host_reset_handler	= adpt_reset,
	.bios_param		= adpt_bios_param,
	.slave_configure	= adpt_slave_configure,
	.can_queue		= MAX_TO_IOP_MESSAGES,
	.this_id		= 7,
};

static int __init adpt_init(void)
{
	int		error;
	adpt_hba	*pHba, *next;

	printk("Loading Adaptec I2O RAID: Version " DPT_I2O_VERSION "\n");

	error = adpt_detect(&driver_template);
	if (error < 0)
		return error;
	if (hba_chain == NULL)
		return -ENODEV;

	for (pHba = hba_chain; pHba; pHba = pHba->next) {
		error = scsi_add_host(pHba->host, &pHba->pDev->dev);
		if (error)
			goto fail;
		scsi_scan_host(pHba->host);
	}
	return 0;
fail:
	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		scsi_remove_host(pHba->host);
	}
	return error;
}

static void __exit adpt_exit(void)
{
	adpt_hba	*pHba, *next;

	for (pHba = hba_chain; pHba; pHba = next) {
		next = pHba->next;
		adpt_release(pHba);
	}
}

module_init(adpt_init);
module_exit(adpt_exit);

MODULE_LICENSE("GPL");