1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *
4 * Linux MegaRAID device driver
5 *
6 * Copyright (c) 2002 LSI Logic Corporation.
7 *
8 * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
9 * - fixes
10 * - speed-ups (list handling fixes, issued_list, optimizations.)
11 * - lots of cleanups.
12 *
13 * Copyright (c) 2003 Christoph Hellwig <hch@lst.de>
14 * - new-style, hotplug-aware pci probing and scsi registration
15 *
16 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
17 * <Seokmann.Ju@lsil.com>
18 *
19 * Description: Linux device driver for LSI Logic MegaRAID controller
20 *
21 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
22 * 518, 520, 531, 532
23 *
24 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
25 * and others. Please send updates to the mailing list
26 * linux-scsi@vger.kernel.org .
27 */
28
29#include <linux/mm.h>
30#include <linux/fs.h>
31#include <linux/blkdev.h>
32#include <linux/uaccess.h>
33#include <asm/io.h>
34#include <linux/completion.h>
35#include <linux/delay.h>
36#include <linux/proc_fs.h>
37#include <linux/seq_file.h>
38#include <linux/reboot.h>
39#include <linux/module.h>
40#include <linux/list.h>
41#include <linux/interrupt.h>
42#include <linux/pci.h>
43#include <linux/init.h>
44#include <linux/dma-mapping.h>
45#include <linux/mutex.h>
46#include <linux/slab.h>
47
48#include <scsi/scsi.h>
49#include <scsi/scsi_cmnd.h>
50#include <scsi/scsi_device.h>
51#include <scsi/scsi_eh.h>
52#include <scsi/scsi_host.h>
53#include <scsi/scsi_tcq.h>
54#include <scsi/scsicam.h>
55
56#include "megaraid.h"
57
58#define MEGARAID_MODULE_VERSION "2.00.4"
59
60MODULE_AUTHOR ("sju@lsil.com");
61MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
62MODULE_LICENSE ("GPL");
63MODULE_VERSION(MEGARAID_MODULE_VERSION);
64
65static DEFINE_MUTEX(megadev_mutex);
66static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
67module_param(max_cmd_per_lun, uint, 0);
68MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");
69
70static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
71module_param(max_sectors_per_io, ushort, 0);
72MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");
73
74
75static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
76module_param(max_mbox_busy_wait, ushort, 0);
77MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
78
79#define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
80#define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
81#define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
82#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
83
84/*
85 * Global variables
86 */
87
88static int hba_count;
89static adapter_t *hba_soft_state[MAX_CONTROLLERS];
90static struct proc_dir_entry *mega_proc_dir_entry;
91
92/* For controller re-ordering */
93static struct mega_hbas mega_hbas[MAX_CONTROLLERS];
94
95static long
96megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
97
98/*
99 * The File Operations structure for the serial/ioctl interface of the driver
100 */
101static const struct file_operations megadev_fops = {
102 .owner = THIS_MODULE,
103 .unlocked_ioctl = megadev_unlocked_ioctl,
104 .open = megadev_open,
105 .llseek = noop_llseek,
106};
107
108/*
109 * Array of structures for storing information about the controllers. This
110 * information is sent to user-level applications when they issue an ioctl
111 * for it.
112 */
113static struct mcontroller mcontroller[MAX_CONTROLLERS];
114
115/* The current driver version */
116static u32 driver_ver = 0x02000000;
117
118/* major number used by the device for character interface */
119static int major;
120
121#define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01)
122
123
124/*
125 * Debug variable to print some diagnostic messages
126 */
127static int trace_level;
128
129/**
130 * mega_setup_mailbox()
131 * @adapter: pointer to our soft state
132 *
133 * Allocates an 8-byte aligned memory area for the handshake mailbox.
134 */
135static int
136mega_setup_mailbox(adapter_t *adapter)
137{
138 unsigned long align;
139
140 adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
141 sizeof(mbox64_t),
142 &adapter->una_mbox64_dma,
143 GFP_KERNEL);
144
145 if( !adapter->una_mbox64 ) return -1;
146
147 adapter->mbox = &adapter->una_mbox64->mbox;
148
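	/*
	 * Round the embedded mailbox up to the next 16-byte boundary; the
	 * 64-bit wrapper (mbox64) is then taken to start 8 bytes before the
	 * aligned mailbox, so its extended address fields sit immediately in
	 * front of the regular mailbox. The same offset is added to the DMA
	 * handle below so that mbox_dma points at the aligned mailbox inside
	 * the coherent buffer.
	 */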
149 adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) &
150 (~0UL ^ 0xFUL));
151
152 adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);
153
154 align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);
155
156 adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;
157
158 /*
159 * Register the mailbox if the controller is an io-mapped controller
160 */
161 if( adapter->flag & BOARD_IOMAP ) {
162
163 outb(adapter->mbox_dma & 0xFF,
164 adapter->host->io_port + MBOX_PORT0);
165
166 outb((adapter->mbox_dma >> 8) & 0xFF,
167 adapter->host->io_port + MBOX_PORT1);
168
169 outb((adapter->mbox_dma >> 16) & 0xFF,
170 adapter->host->io_port + MBOX_PORT2);
171
172 outb((adapter->mbox_dma >> 24) & 0xFF,
173 adapter->host->io_port + MBOX_PORT3);
174
175 outb(ENABLE_MBOX_BYTE,
176 adapter->host->io_port + ENABLE_MBOX_REGION);
177
178 irq_ack(adapter);
179
180 irq_enable(adapter);
181 }
182
183 return 0;
184}
185
186
187/*
188 * mega_query_adapter()
189 * @adapter - pointer to our soft state
190 *
191 * Issue the adapter inquiry commands to the controller and find out
192 * information and parameters about the attached devices.
193 */
194static int
195mega_query_adapter(adapter_t *adapter)
196{
197 dma_addr_t prod_info_dma_handle;
198 mega_inquiry3 *inquiry3;
199 struct mbox_out mbox;
200 u8 *raw_mbox = (u8 *)&mbox;
201 int retval;
202
203 /* Initialize adapter inquiry mailbox */
204
205 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
206 memset(&mbox, 0, sizeof(mbox));
207
208 /*
209	 * Try to issue the Inquiry3 command; if that fails, issue the older
210	 * extended inquiry command (MEGA_MBOXCMD_ADPEXTINQ) and use its data
211	 * to fill in the enquiry3 structure.
212 */
213 mbox.xferaddr = (u32)adapter->buf_dma_handle;
214
215 inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
216
217 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
218 raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */
219 raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
220
221 /* Issue a blocking command to the card */
222 if ((retval = issue_scb_block(adapter, raw_mbox))) {
223 /* the adapter does not support 40ld */
224
225 mraid_ext_inquiry *ext_inq;
226 mraid_inquiry *inq;
227 dma_addr_t dma_handle;
228
229 ext_inq = dma_alloc_coherent(&adapter->dev->dev,
230 sizeof(mraid_ext_inquiry),
231 &dma_handle, GFP_KERNEL);
232
233 if( ext_inq == NULL ) return -1;
234
235 inq = &ext_inq->raid_inq;
236
237 mbox.xferaddr = (u32)dma_handle;
238
239 /*issue old 0x04 command to adapter */
240 mbox.cmd = MEGA_MBOXCMD_ADPEXTINQ;
241
242 issue_scb_block(adapter, raw_mbox);
243
244 /*
245 * update Enquiry3 and ProductInfo structures with
246 * mraid_inquiry structure
247 */
248 mega_8_to_40ld(inq, inquiry3,
249 (mega_product_info *)&adapter->product_info);
250
251 dma_free_coherent(&adapter->dev->dev,
252 sizeof(mraid_ext_inquiry), ext_inq,
253 dma_handle);
254
255 } else { /*adapter supports 40ld */
256 adapter->flag |= BOARD_40LD;
257
258 /*
259 * get product_info, which is static information and will be
260 * unchanged
261 */
262 prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
263 (void *)&adapter->product_info,
264 sizeof(mega_product_info),
265 DMA_FROM_DEVICE);
266
267 mbox.xferaddr = prod_info_dma_handle;
268
269 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
270 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
271
272 if ((retval = issue_scb_block(adapter, raw_mbox)))
273 dev_warn(&adapter->dev->dev,
274 "Product_info cmd failed with error: %d\n",
275 retval);
276
277 dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
278 sizeof(mega_product_info), DMA_FROM_DEVICE);
279 }
280
281
282 /*
283	 * the kernel scans channels 0 through max_channel, inclusive
284 */
285 adapter->host->max_channel =
286 adapter->product_info.nchannels + NVIRT_CHAN -1;
287
288 adapter->host->max_id = 16; /* max targets per channel */
289
290 adapter->host->max_lun = 7; /* Up to 7 luns for non disk devices */
291
292 adapter->host->cmd_per_lun = max_cmd_per_lun;
293
294 adapter->numldrv = inquiry3->num_ldrv;
295
296 adapter->max_cmds = adapter->product_info.max_commands;
297
298 if(adapter->max_cmds > MAX_COMMANDS)
299 adapter->max_cmds = MAX_COMMANDS;
300
301 adapter->host->can_queue = adapter->max_cmds - 1;
302
303 /*
304 * Get the maximum number of scatter-gather elements supported by this
305 * firmware
306 */
307 mega_get_max_sgl(adapter);
308
309 adapter->host->sg_tablesize = adapter->sglen;
310
311	/* Use the HP firmware and BIOS version encoding.
312	 * Note: fw_version[0|1] and bios_version[0|1] were originally shifted
313	 * right 8 bits, making them zero. This 0 value was hardcoded to fix
314	 * sparse warnings. */
315 if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
316 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
317 "%c%d%d.%d%d",
318 adapter->product_info.fw_version[2],
319 0,
320 adapter->product_info.fw_version[1] & 0x0f,
321 0,
322 adapter->product_info.fw_version[0] & 0x0f);
323		snprintf(adapter->bios_version, sizeof(adapter->bios_version),
324 "%c%d%d.%d%d",
325 adapter->product_info.bios_version[2],
326 0,
327 adapter->product_info.bios_version[1] & 0x0f,
328 0,
329 adapter->product_info.bios_version[0] & 0x0f);
330 } else {
331 memcpy(adapter->fw_version,
332 (char *)adapter->product_info.fw_version, 4);
333 adapter->fw_version[4] = 0;
334
335 memcpy(adapter->bios_version,
336 (char *)adapter->product_info.bios_version, 4);
337
338 adapter->bios_version[4] = 0;
339 }
340
341 dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
342 adapter->fw_version, adapter->bios_version, adapter->numldrv);
343
344 /*
345 * Do we support extended (>10 bytes) cdbs
346 */
347 adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
348 if (adapter->support_ext_cdb)
349 dev_notice(&adapter->dev->dev, "supports extended CDBs\n");
350
351
352 return 0;
353}
354
355/**
356 * mega_runpendq()
357 * @adapter: pointer to our soft state
358 *
359 * Runs through the list of pending requests.
360 */
361static inline void
362mega_runpendq(adapter_t *adapter)
363{
364 if(!list_empty(&adapter->pending_list))
365 __mega_runpendq(adapter);
366}
367
368/*
369 * megaraid_queue()
370 * @scmd - Issue this scsi command
372 *
373 * The command queuing entry point for the mid-layer.
374 */
375static int megaraid_queue_lck(struct scsi_cmnd *scmd)
376{
377 adapter_t *adapter;
378 scb_t *scb;
379 int busy=0;
380 unsigned long flags;
381
382 adapter = (adapter_t *)scmd->device->host->hostdata;
383
384 /*
385	 * Allocate and build an SCB request.
386	 * The busy flag is set if mega_build_cmd() could not allocate an
387	 * scb, and we return a non-zero status in that case.
388	 * NOTE: scb can be NULL even though certain commands complete
389	 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY; we return
390	 * 0 in that case.
391 */
392
393 spin_lock_irqsave(&adapter->lock, flags);
394 scb = mega_build_cmd(adapter, scmd, &busy);
395 if (!scb)
396 goto out;
397
398 scb->state |= SCB_PENDQ;
399 list_add_tail(&scb->list, &adapter->pending_list);
400
401 /*
402 * Check if the HBA is in quiescent state, e.g., during a
403	 * delete logical drive operation. If it is, don't run
404 * the pending_list.
405 */
406 if (atomic_read(&adapter->quiescent) == 0)
407 mega_runpendq(adapter);
408
409 busy = 0;
410 out:
411 spin_unlock_irqrestore(&adapter->lock, flags);
412 return busy;
413}
414
415static DEF_SCSI_QCMD(megaraid_queue)
416
417/**
418 * mega_allocate_scb()
419 * @adapter: pointer to our soft state
420 * @cmd: scsi command from the mid-layer
421 *
422 * Allocate a SCB structure. This is the central structure for controller
423 * commands.
424 */
425static inline scb_t *
426mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
427{
428 struct list_head *head = &adapter->free_list;
429 scb_t *scb;
430
431 /* Unlink command from Free List */
432 if( !list_empty(head) ) {
433
434 scb = list_entry(head->next, scb_t, list);
435
436 list_del_init(head->next);
437
438 scb->state = SCB_ACTIVE;
439 scb->cmd = cmd;
440 scb->dma_type = MEGA_DMA_TYPE_NONE;
441
442 return scb;
443 }
444
445 return NULL;
446}
447
448/**
449 * mega_get_ldrv_num()
450 * @adapter: pointer to our soft state
451 * @cmd: scsi mid layer command
452 * @channel: channel on the controller
453 *
454 * Calculate the logical drive number based on the information in scsi command
455 * and the channel number.
456 */
457static inline int
458mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
459{
460 int tgt;
461 int ldrv_num;
462
463 tgt = cmd->device->id;
464
465 if ( tgt > adapter->this_id )
466		tgt--;	/* we do not get inquiries for the initiator id */
467
468 ldrv_num = (channel * 15) + tgt;
469
470
471 /*
472 * If we have a logical drive with boot enabled, project it first
473 */
474 if( adapter->boot_ldrv_enabled ) {
475 if( ldrv_num == 0 ) {
476 ldrv_num = adapter->boot_ldrv;
477 }
478 else {
479 if( ldrv_num <= adapter->boot_ldrv ) {
480 ldrv_num--;
481 }
482 }
483 }
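	/*
	 * Worked example of the remap above (values are illustrative): with
	 * boot_ldrv == 2, raw number 0 maps to 2 (the boot drive is presented
	 * first), raw 1 maps to 0, raw 2 maps to 1, and raw numbers greater
	 * than boot_ldrv are left unchanged.
	 */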
484
485 /*
486	 * If the "delete logical drive" feature is enabled on this controller,
487	 * do this only if at least one delete logical drive operation was done.
488	 *
489	 * Also, after a logical drive deletion, the value returned should be
490	 * 0x80 + logical drive id instead of the logical drive number.
491	 *
492	 * This is valid only for I/O commands.
493 */
494
495 if (adapter->support_random_del && adapter->read_ldidmap )
496 switch (cmd->cmnd[0]) {
497 case READ_6:
498 case WRITE_6:
499 case READ_10:
500 case WRITE_10:
501 ldrv_num += 0x80;
502 }
503
504 return ldrv_num;
505}
506
507/**
508 * mega_build_cmd()
509 * @adapter: pointer to our soft state
510 * @cmd: Prepare using this scsi command
511 * @busy: busy flag if no resources
512 *
513 * Prepares a command and scatter gather list for the controller. This routine
514 * also finds out if the commands is intended for a logical drive or a
515 * physical device and prepares the controller command accordingly.
516 *
517 * We also re-order the logical drives and physical devices based on their
518 * boot settings.
519 */
520static scb_t *
521mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
522{
523 mega_passthru *pthru;
524 scb_t *scb;
525 mbox_t *mbox;
526 u32 seg;
527 char islogical;
528 int max_ldrv_num;
529 int channel = 0;
530 int target = 0;
531 int ldrv_num = 0; /* logical drive number */
532
533 /*
534 * We know what channels our logical drives are on - mega_find_card()
535 */
536 islogical = adapter->logdrv_chan[cmd->device->channel];
537
538 /*
539	 * The theory: if a physical drive is chosen for boot, all the physical
540	 * devices are exported before the logical drives; otherwise the physical
541	 * devices are pushed after the logical drives, in which case the kernel
542	 * sees the physical devices on a virtual channel, which is then converted
543	 * to the actual channel on the HBA.
544 */
545 if( adapter->boot_pdrv_enabled ) {
546 if( islogical ) {
547 /* logical channel */
548 channel = cmd->device->channel -
549 adapter->product_info.nchannels;
550 }
551 else {
552 /* this is physical channel */
553 channel = cmd->device->channel;
554 target = cmd->device->id;
555
556 /*
557			 * To boot from a physical disk, that disk needs to be
558			 * exposed first. If both channels are SCSI, booting
559			 * from the second channel is not allowed.
560 */
561 if( target == 0 ) {
562 target = adapter->boot_pdrv_tgt;
563 }
564 else if( target == adapter->boot_pdrv_tgt ) {
565 target = 0;
566 }
567 }
568 }
569 else {
570 if( islogical ) {
571 /* this is the logical channel */
572 channel = cmd->device->channel;
573 }
574 else {
575 /* physical channel */
576 channel = cmd->device->channel - NVIRT_CHAN;
577 target = cmd->device->id;
578 }
579 }
580
581
582 if(islogical) {
583
584		/* only LUN 0 is supported for each target on virtual channels */
585 if (cmd->device->lun) {
586 cmd->result = (DID_BAD_TARGET << 16);
587 scsi_done(cmd);
588 return NULL;
589 }
590
591 ldrv_num = mega_get_ldrv_num(adapter, cmd, channel);
592
593
594 max_ldrv_num = (adapter->flag & BOARD_40LD) ?
595 MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD;
596
597 /*
598 * max_ldrv_num increases by 0x80 if some logical drive was
599 * deleted.
600 */
601 if(adapter->read_ldidmap)
602 max_ldrv_num += 0x80;
603
604 if(ldrv_num > max_ldrv_num ) {
605 cmd->result = (DID_BAD_TARGET << 16);
606 scsi_done(cmd);
607 return NULL;
608 }
609
610 }
611 else {
612 if( cmd->device->lun > 7) {
613 /*
614 * Do not support lun >7 for physically accessed
615 * devices
616 */
617 cmd->result = (DID_BAD_TARGET << 16);
618 scsi_done(cmd);
619 return NULL;
620 }
621 }
622
623 /*
624 *
625 * Logical drive commands
626 *
627 */
628 if(islogical) {
629 switch (cmd->cmnd[0]) {
630 case TEST_UNIT_READY:
631#if MEGA_HAVE_CLUSTERING
632 /*
633			 * Do we support clustering and is the support enabled?
634			 * If not, always return success.
635 */
636 if( !adapter->has_cluster ) {
637 cmd->result = (DID_OK << 16);
638 scsi_done(cmd);
639 return NULL;
640 }
641
642 if(!(scb = mega_allocate_scb(adapter, cmd))) {
643 *busy = 1;
644 return NULL;
645 }
646
647 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
648 scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
649 scb->raw_mbox[3] = ldrv_num;
650
651 scb->dma_direction = DMA_NONE;
652
653 return scb;
654#else
655 cmd->result = (DID_OK << 16);
656 scsi_done(cmd);
657 return NULL;
658#endif
659
660 case MODE_SENSE: {
661 char *buf;
662 struct scatterlist *sg;
663
664 sg = scsi_sglist(cmd);
665 buf = kmap_atomic(sg_page(sg)) + sg->offset;
666
667 memset(buf, 0, cmd->cmnd[4]);
668 kunmap_atomic(buf - sg->offset);
669
670 cmd->result = (DID_OK << 16);
671 scsi_done(cmd);
672 return NULL;
673 }
674
675 case READ_CAPACITY:
676 case INQUIRY:
677
678 if(!(adapter->flag & (1L << cmd->device->channel))) {
679
680 dev_notice(&adapter->dev->dev,
681 "scsi%d: scanning scsi channel %d "
682 "for logical drives\n",
683 adapter->host->host_no,
684 cmd->device->channel);
685
686 adapter->flag |= (1L << cmd->device->channel);
687 }
688
689 /* Allocate a SCB and initialize passthru */
690 if(!(scb = mega_allocate_scb(adapter, cmd))) {
691 *busy = 1;
692 return NULL;
693 }
694 pthru = scb->pthru;
695
696 mbox = (mbox_t *)scb->raw_mbox;
697 memset(mbox, 0, sizeof(scb->raw_mbox));
698 memset(pthru, 0, sizeof(mega_passthru));
699
700 pthru->timeout = 0;
701 pthru->ars = 1;
702 pthru->reqsenselen = 14;
703 pthru->islogical = 1;
704 pthru->logdrv = ldrv_num;
705 pthru->cdblen = cmd->cmd_len;
706 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
707
708 if( adapter->has_64bit_addr ) {
709 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
710 }
711 else {
712 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
713 }
714
715 scb->dma_direction = DMA_FROM_DEVICE;
716
717 pthru->numsgelements = mega_build_sglist(adapter, scb,
718 &pthru->dataxferaddr, &pthru->dataxferlen);
719
720 mbox->m_out.xferaddr = scb->pthru_dma_addr;
721
722 return scb;
723
724 case READ_6:
725 case WRITE_6:
726 case READ_10:
727 case WRITE_10:
728 case READ_12:
729 case WRITE_12:
730
731 /* Allocate a SCB and initialize mailbox */
732 if(!(scb = mega_allocate_scb(adapter, cmd))) {
733 *busy = 1;
734 return NULL;
735 }
736 mbox = (mbox_t *)scb->raw_mbox;
737
738 memset(mbox, 0, sizeof(scb->raw_mbox));
739 mbox->m_out.logdrv = ldrv_num;
740
741 /*
742 * A little hack: 2nd bit is zero for all scsi read
743 * commands and is set for all scsi write commands
744 */
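			/* e.g. READ_10 (0x28) has bit 1 clear, WRITE_10 (0x2A) has it set */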
745 if( adapter->has_64bit_addr ) {
746 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
747 MEGA_MBOXCMD_LWRITE64:
748 MEGA_MBOXCMD_LREAD64 ;
749 }
750 else {
751 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
752 MEGA_MBOXCMD_LWRITE:
753 MEGA_MBOXCMD_LREAD ;
754 }
755
756 /*
757 * 6-byte READ(0x08) or WRITE(0x0A) cdb
758 */
759 if( cmd->cmd_len == 6 ) {
760 mbox->m_out.numsectors = (u32) cmd->cmnd[4];
761 mbox->m_out.lba =
762 ((u32)cmd->cmnd[1] << 16) |
763 ((u32)cmd->cmnd[2] << 8) |
764 (u32)cmd->cmnd[3];
765
766 mbox->m_out.lba &= 0x1FFFFF;
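				/*
				 * A 6-byte CDB carries only a 21-bit LBA; the
				 * top three bits of byte 1 are not address
				 * bits, hence the 0x1FFFFF mask above.
				 */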
767
768#if MEGA_HAVE_STATS
769 /*
770 * Take modulo 0x80, since the logical drive
771 * number increases by 0x80 when a logical
772				 * drive is deleted
773 */
774 if (*cmd->cmnd == READ_6) {
775 adapter->nreads[ldrv_num%0x80]++;
776 adapter->nreadblocks[ldrv_num%0x80] +=
777 mbox->m_out.numsectors;
778 } else {
779 adapter->nwrites[ldrv_num%0x80]++;
780 adapter->nwriteblocks[ldrv_num%0x80] +=
781 mbox->m_out.numsectors;
782 }
783#endif
784 }
785
786 /*
787 * 10-byte READ(0x28) or WRITE(0x2A) cdb
788 */
789 if( cmd->cmd_len == 10 ) {
790 mbox->m_out.numsectors =
791 (u32)cmd->cmnd[8] |
792 ((u32)cmd->cmnd[7] << 8);
793 mbox->m_out.lba =
794 ((u32)cmd->cmnd[2] << 24) |
795 ((u32)cmd->cmnd[3] << 16) |
796 ((u32)cmd->cmnd[4] << 8) |
797 (u32)cmd->cmnd[5];
798
799#if MEGA_HAVE_STATS
800 if (*cmd->cmnd == READ_10) {
801 adapter->nreads[ldrv_num%0x80]++;
802 adapter->nreadblocks[ldrv_num%0x80] +=
803 mbox->m_out.numsectors;
804 } else {
805 adapter->nwrites[ldrv_num%0x80]++;
806 adapter->nwriteblocks[ldrv_num%0x80] +=
807 mbox->m_out.numsectors;
808 }
809#endif
810 }
811
812 /*
813 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
814 */
815 if( cmd->cmd_len == 12 ) {
816 mbox->m_out.lba =
817 ((u32)cmd->cmnd[2] << 24) |
818 ((u32)cmd->cmnd[3] << 16) |
819 ((u32)cmd->cmnd[4] << 8) |
820 (u32)cmd->cmnd[5];
821
822 mbox->m_out.numsectors =
823 ((u32)cmd->cmnd[6] << 24) |
824 ((u32)cmd->cmnd[7] << 16) |
825 ((u32)cmd->cmnd[8] << 8) |
826 (u32)cmd->cmnd[9];
827
828#if MEGA_HAVE_STATS
829 if (*cmd->cmnd == READ_12) {
830 adapter->nreads[ldrv_num%0x80]++;
831 adapter->nreadblocks[ldrv_num%0x80] +=
832 mbox->m_out.numsectors;
833 } else {
834 adapter->nwrites[ldrv_num%0x80]++;
835 adapter->nwriteblocks[ldrv_num%0x80] +=
836 mbox->m_out.numsectors;
837 }
838#endif
839 }
840
841 /*
842 * If it is a read command
843 */
844 if( (*cmd->cmnd & 0x0F) == 0x08 ) {
845 scb->dma_direction = DMA_FROM_DEVICE;
846 }
847 else {
848 scb->dma_direction = DMA_TO_DEVICE;
849 }
850
851 /* Calculate Scatter-Gather info */
852 mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
853 (u32 *)&mbox->m_out.xferaddr, &seg);
854
855 return scb;
856
857#if MEGA_HAVE_CLUSTERING
858 case RESERVE:
859 case RELEASE:
860
861 /*
862			 * Do we support clustering and is the support enabled?
863 */
864 if( ! adapter->has_cluster ) {
865
866 cmd->result = (DID_BAD_TARGET << 16);
867 scsi_done(cmd);
868 return NULL;
869 }
870
871 /* Allocate a SCB and initialize mailbox */
872 if(!(scb = mega_allocate_scb(adapter, cmd))) {
873 *busy = 1;
874 return NULL;
875 }
876
877 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
878 scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
879 MEGA_RESERVE_LD : MEGA_RELEASE_LD;
880
881 scb->raw_mbox[3] = ldrv_num;
882
883 scb->dma_direction = DMA_NONE;
884
885 return scb;
886#endif
887
888 default:
889 cmd->result = (DID_BAD_TARGET << 16);
890 scsi_done(cmd);
891 return NULL;
892 }
893 }
894
895 /*
896 * Passthru drive commands
897 */
898 else {
899 /* Allocate a SCB and initialize passthru */
900 if(!(scb = mega_allocate_scb(adapter, cmd))) {
901 *busy = 1;
902 return NULL;
903 }
904
905 mbox = (mbox_t *)scb->raw_mbox;
906 memset(mbox, 0, sizeof(scb->raw_mbox));
907
908 if( adapter->support_ext_cdb ) {
909
910 mega_prepare_extpassthru(adapter, scb, cmd,
911 channel, target);
912
913 mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;
914
915 mbox->m_out.xferaddr = scb->epthru_dma_addr;
916
917 }
918 else {
919
920 pthru = mega_prepare_passthru(adapter, scb, cmd,
921 channel, target);
922
923 /* Initialize mailbox */
924 if( adapter->has_64bit_addr ) {
925 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
926 }
927 else {
928 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
929 }
930
931 mbox->m_out.xferaddr = scb->pthru_dma_addr;
932
933 }
934 return scb;
935 }
936 return NULL;
937}
938
939
940/**
941 * mega_prepare_passthru()
942 * @adapter: pointer to our soft state
943 * @scb: our scsi control block
944 * @cmd: scsi command from the mid-layer
945 * @channel: actual channel on the controller
946 * @target: actual id on the controller.
947 *
948 * prepare a command for the scsi physical devices.
949 */
950static mega_passthru *
951mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
952 int channel, int target)
953{
954 mega_passthru *pthru;
955
956 pthru = scb->pthru;
957 memset(pthru, 0, sizeof (mega_passthru));
958
959 /* 0=6sec/1=60sec/2=10min/3=3hrs */
960 pthru->timeout = 2;
961
962 pthru->ars = 1;
963 pthru->reqsenselen = 14;
964 pthru->islogical = 0;
965
966 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
967
968 pthru->target = (adapter->flag & BOARD_40LD) ?
969 (channel << 4) | target : target;
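	/*
	 * For 40LD-capable firmware the channel is packed into the upper
	 * nibble of the target field and the channel field is left zero;
	 * otherwise the channel and target fields are used separately.
	 */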
970
971 pthru->cdblen = cmd->cmd_len;
972 pthru->logdrv = cmd->device->lun;
973
974 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
975
976 /* Not sure about the direction */
977 scb->dma_direction = DMA_BIDIRECTIONAL;
978
979 /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
980 switch (cmd->cmnd[0]) {
981 case INQUIRY:
982 case READ_CAPACITY:
983 if(!(adapter->flag & (1L << cmd->device->channel))) {
984
985 dev_notice(&adapter->dev->dev,
986 "scsi%d: scanning scsi channel %d [P%d] "
987 "for physical devices\n",
988 adapter->host->host_no,
989 cmd->device->channel, channel);
990
991 adapter->flag |= (1L << cmd->device->channel);
992 }
993 fallthrough;
994 default:
995 pthru->numsgelements = mega_build_sglist(adapter, scb,
996 &pthru->dataxferaddr, &pthru->dataxferlen);
997 break;
998 }
999 return pthru;
1000}
1001
1002
1003/**
1004 * mega_prepare_extpassthru()
1005 * @adapter: pointer to our soft state
1006 * @scb: our scsi control block
1007 * @cmd: scsi command from the mid-layer
1008 * @channel: actual channel on the controller
1009 * @target: actual id on the controller.
1010 *
1011 * Prepare a command for the SCSI physical devices. This routine prepares
1012 * commands for devices which can take extended CDBs (>10 bytes)
1013 */
1014static mega_ext_passthru *
1015mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
1016 struct scsi_cmnd *cmd,
1017 int channel, int target)
1018{
1019 mega_ext_passthru *epthru;
1020
1021 epthru = scb->epthru;
1022 memset(epthru, 0, sizeof(mega_ext_passthru));
1023
1024 /* 0=6sec/1=60sec/2=10min/3=3hrs */
1025 epthru->timeout = 2;
1026
1027 epthru->ars = 1;
1028 epthru->reqsenselen = 14;
1029 epthru->islogical = 0;
1030
1031 epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
1032 epthru->target = (adapter->flag & BOARD_40LD) ?
1033 (channel << 4) | target : target;
1034
1035 epthru->cdblen = cmd->cmd_len;
1036 epthru->logdrv = cmd->device->lun;
1037
1038 memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);
1039
1040 /* Not sure about the direction */
1041 scb->dma_direction = DMA_BIDIRECTIONAL;
1042
1043 switch(cmd->cmnd[0]) {
1044 case INQUIRY:
1045 case READ_CAPACITY:
1046 if(!(adapter->flag & (1L << cmd->device->channel))) {
1047
1048 dev_notice(&adapter->dev->dev,
1049 "scsi%d: scanning scsi channel %d [P%d] "
1050 "for physical devices\n",
1051 adapter->host->host_no,
1052 cmd->device->channel, channel);
1053
1054 adapter->flag |= (1L << cmd->device->channel);
1055 }
1056 fallthrough;
1057 default:
1058 epthru->numsgelements = mega_build_sglist(adapter, scb,
1059 &epthru->dataxferaddr, &epthru->dataxferlen);
1060 break;
1061 }
1062
1063 return epthru;
1064}
1065
1066static void
1067__mega_runpendq(adapter_t *adapter)
1068{
1069 scb_t *scb;
1070 struct list_head *pos, *next;
1071
1072 /* Issue any pending commands to the card */
1073 list_for_each_safe(pos, next, &adapter->pending_list) {
1074
1075 scb = list_entry(pos, scb_t, list);
1076
1077 if( !(scb->state & SCB_ISSUED) ) {
1078
1079 if( issue_scb(adapter, scb) != 0 )
1080 return;
1081 }
1082 }
1083
1084 return;
1085}
1086
1087
1088/**
1089 * issue_scb()
1090 * @adapter: pointer to our soft state
1091 * @scb: scsi control block
1092 *
1093 * Post a command to the card if the mailbox is available; otherwise return
1094 * failure so that the caller can retry later. The scb is marked SCB_ISSUED
1095 * once it has been handed over to the firmware.
1096 */
1097static int
1098issue_scb(adapter_t *adapter, scb_t *scb)
1099{
1100 volatile mbox64_t *mbox64 = adapter->mbox64;
1101 volatile mbox_t *mbox = adapter->mbox;
1102 unsigned int i = 0;
1103
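	/*
	 * If the mailbox is busy, poll the busy bit for up to
	 * max_mbox_busy_wait microseconds and give up with -1 if it does not
	 * clear; the scb then stays on the pending list and is retried on a
	 * later __mega_runpendq() run.
	 */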
1104 if(unlikely(mbox->m_in.busy)) {
1105 do {
1106 udelay(1);
1107 i++;
1108 } while( mbox->m_in.busy && (i < max_mbox_busy_wait) );
1109
1110 if(mbox->m_in.busy) return -1;
1111 }
1112
1113 /* Copy mailbox data into host structure */
1114 memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
1115 sizeof(struct mbox_out));
1116
1117 mbox->m_out.cmdid = scb->idx; /* Set cmdid */
1118 mbox->m_in.busy = 1; /* Set busy */
1119
1120
1121 /*
1122 * Increment the pending queue counter
1123 */
1124 atomic_inc(&adapter->pend_cmds);
1125
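	/*
	 * For the 64-bit command variants the transfer address goes into the
	 * mbox64 xfer_segment fields, and xferaddr in the legacy mailbox is
	 * set to 0xFFFFFFFF, which presumably tells the firmware to use the
	 * 64-bit fields instead.
	 */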
1126 switch (mbox->m_out.cmd) {
1127 case MEGA_MBOXCMD_LREAD64:
1128 case MEGA_MBOXCMD_LWRITE64:
1129 case MEGA_MBOXCMD_PASSTHRU64:
1130 case MEGA_MBOXCMD_EXTPTHRU:
1131 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1132 mbox64->xfer_segment_hi = 0;
1133 mbox->m_out.xferaddr = 0xFFFFFFFF;
1134 break;
1135 default:
1136 mbox64->xfer_segment_lo = 0;
1137 mbox64->xfer_segment_hi = 0;
1138 }
1139
1140 /*
1141 * post the command
1142 */
1143 scb->state |= SCB_ISSUED;
1144
1145 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1146 mbox->m_in.poll = 0;
1147 mbox->m_in.ack = 0;
1148 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1149 }
1150 else {
1151 irq_enable(adapter);
1152 issue_command(adapter);
1153 }
1154
1155 return 0;
1156}
1157
1158/*
1159 * Wait until the controller's mailbox is available
1160 */
1161static inline int
1162mega_busywait_mbox (adapter_t *adapter)
1163{
1164 if (adapter->mbox->m_in.busy)
1165 return __mega_busywait_mbox(adapter);
1166 return 0;
1167}
1168
1169/**
1170 * issue_scb_block()
1171 * @adapter: pointer to our soft state
1172 * @raw_mbox: the mailbox
1173 *
1174 * Issue an scb in synchronous, non-interrupt mode
1175 */
1176static int
1177issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
1178{
1179 volatile mbox64_t *mbox64 = adapter->mbox64;
1180 volatile mbox_t *mbox = adapter->mbox;
1181 u8 byte;
1182
1183 /* Wait until mailbox is free */
1184 if(mega_busywait_mbox (adapter))
1185 goto bug_blocked_mailbox;
1186
1187 /* Copy mailbox data into host structure */
1188 memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
1189 mbox->m_out.cmdid = 0xFE;
1190 mbox->m_in.busy = 1;
1191
1192 switch (raw_mbox[0]) {
1193 case MEGA_MBOXCMD_LREAD64:
1194 case MEGA_MBOXCMD_LWRITE64:
1195 case MEGA_MBOXCMD_PASSTHRU64:
1196 case MEGA_MBOXCMD_EXTPTHRU:
1197 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1198 mbox64->xfer_segment_hi = 0;
1199 mbox->m_out.xferaddr = 0xFFFFFFFF;
1200 break;
1201 default:
1202 mbox64->xfer_segment_lo = 0;
1203 mbox64->xfer_segment_hi = 0;
1204 }
1205
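	/*
	 * Memory-mapped handshake used below: post the mailbox by writing its
	 * DMA address (with bit 0 set) to the inbound doorbell, busy-wait for
	 * numstatus and the poll byte to change, acknowledge with ack = 0x77
	 * plus a 0x2 doorbell write, then wait for the controller to clear
	 * that bit. I/O-mapped boards use the legacy issue_command() and
	 * irq_state() handshake instead.
	 */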
1206 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1207 mbox->m_in.poll = 0;
1208 mbox->m_in.ack = 0;
1209 mbox->m_in.numstatus = 0xFF;
1210 mbox->m_in.status = 0xFF;
1211 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1212
1213 while((volatile u8)mbox->m_in.numstatus == 0xFF)
1214 cpu_relax();
1215
1216 mbox->m_in.numstatus = 0xFF;
1217
1218 while( (volatile u8)mbox->m_in.poll != 0x77 )
1219 cpu_relax();
1220
1221 mbox->m_in.poll = 0;
1222 mbox->m_in.ack = 0x77;
1223
1224 WRINDOOR(adapter, adapter->mbox_dma | 0x2);
1225
1226 while(RDINDOOR(adapter) & 0x2)
1227 cpu_relax();
1228 }
1229 else {
1230 irq_disable(adapter);
1231 issue_command(adapter);
1232
1233 while (!((byte = irq_state(adapter)) & INTR_VALID))
1234 cpu_relax();
1235
1236 set_irq_state(adapter, byte);
1237 irq_enable(adapter);
1238 irq_ack(adapter);
1239 }
1240
1241 return mbox->m_in.status;
1242
1243bug_blocked_mailbox:
1244 dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
1245 udelay (1000);
1246 return -1;
1247}
1248
1249
1250/**
1251 * megaraid_isr_iomapped()
1252 * @irq: irq
1253 * @devp: pointer to our soft state
1254 *
1255 * Interrupt service routine for io-mapped controllers.
1256 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1257 * and service the completed commands.
1258 */
1259static irqreturn_t
1260megaraid_isr_iomapped(int irq, void *devp)
1261{
1262 adapter_t *adapter = devp;
1263 unsigned long flags;
1264 u8 status;
1265 u8 nstatus;
1266 u8 completed[MAX_FIRMWARE_STATUS];
1267 u8 byte;
1268 int handled = 0;
1269
1270
1271 /*
1272	 * Loop as long as the F/W has more commands for us to complete.
1273 */
1274 spin_lock_irqsave(&adapter->lock, flags);
1275
1276 do {
1277 /* Check if a valid interrupt is pending */
1278 byte = irq_state(adapter);
1279 if( (byte & VALID_INTR_BYTE) == 0 ) {
1280 /*
1281 * No more pending commands
1282 */
1283 goto out_unlock;
1284 }
1285 set_irq_state(adapter, byte);
1286
1287 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1288 == 0xFF)
1289 cpu_relax();
1290 adapter->mbox->m_in.numstatus = 0xFF;
1291
1292 status = adapter->mbox->m_in.status;
1293
1294 /*
1295 * decrement the pending queue counter
1296 */
1297 atomic_sub(nstatus, &adapter->pend_cmds);
1298
1299 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1300 nstatus);
1301
1302 /* Acknowledge interrupt */
1303 irq_ack(adapter);
1304
1305 mega_cmd_done(adapter, completed, nstatus, status);
1306
1307 mega_rundoneq(adapter);
1308
1309 handled = 1;
1310
1311 /* Loop through any pending requests */
1312 if(atomic_read(&adapter->quiescent) == 0) {
1313 mega_runpendq(adapter);
1314 }
1315
1316 } while(1);
1317
1318 out_unlock:
1319
1320 spin_unlock_irqrestore(&adapter->lock, flags);
1321
1322 return IRQ_RETVAL(handled);
1323}
1324
1325
1326/**
1327 * megaraid_isr_memmapped()
1328 * @irq: irq
1329 * @devp: pointer to our soft state
1330 *
1331 * Interrupt service routine for memory-mapped controllers.
1332 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1333 * and service the completed commands.
1334 */
1335static irqreturn_t
1336megaraid_isr_memmapped(int irq, void *devp)
1337{
1338 adapter_t *adapter = devp;
1339 unsigned long flags;
1340 u8 status;
1341 u32 dword = 0;
1342 u8 nstatus;
1343 u8 completed[MAX_FIRMWARE_STATUS];
1344 int handled = 0;
1345
1346
1347 /*
1348	 * Loop as long as the F/W has more commands for us to complete.
1349 */
1350 spin_lock_irqsave(&adapter->lock, flags);
1351
1352 do {
1353 /* Check if a valid interrupt is pending */
1354 dword = RDOUTDOOR(adapter);
1355 if(dword != 0x10001234) {
1356 /*
1357 * No more pending commands
1358 */
1359 goto out_unlock;
1360 }
1361 WROUTDOOR(adapter, 0x10001234);
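		/*
		 * The outbound doorbell reads 0x10001234 while completions are
		 * pending; writing the same value back appears to acknowledge
		 * it before we pick the completion status out of the mailbox.
		 */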
1362
1363 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1364 == 0xFF) {
1365 cpu_relax();
1366 }
1367 adapter->mbox->m_in.numstatus = 0xFF;
1368
1369 status = adapter->mbox->m_in.status;
1370
1371 /*
1372 * decrement the pending queue counter
1373 */
1374 atomic_sub(nstatus, &adapter->pend_cmds);
1375
1376 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1377 nstatus);
1378
1379 /* Acknowledge interrupt */
1380 WRINDOOR(adapter, 0x2);
1381
1382 handled = 1;
1383
1384 while( RDINDOOR(adapter) & 0x02 )
1385 cpu_relax();
1386
1387 mega_cmd_done(adapter, completed, nstatus, status);
1388
1389 mega_rundoneq(adapter);
1390
1391 /* Loop through any pending requests */
1392 if(atomic_read(&adapter->quiescent) == 0) {
1393 mega_runpendq(adapter);
1394 }
1395
1396 } while(1);
1397
1398 out_unlock:
1399
1400 spin_unlock_irqrestore(&adapter->lock, flags);
1401
1402 return IRQ_RETVAL(handled);
1403}
1404/**
1405 * mega_cmd_done()
1406 * @adapter: pointer to our soft state
1407 * @completed: array of ids of completed commands
1408 * @nstatus: number of completed commands
1409 * @status: status of the last command completed
1410 *
1411 * Complete the commands and call the scsi mid-layer callback hooks.
1412 */
1413static void
1414mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1415{
1416 mega_ext_passthru *epthru = NULL;
1417 struct scatterlist *sgl;
1418 struct scsi_cmnd *cmd = NULL;
1419 mega_passthru *pthru = NULL;
1420 mbox_t *mbox = NULL;
1421 u8 c;
1422 scb_t *scb;
1423 int islogical;
1424 int cmdid;
1425 int i;
1426
1427 /*
1428 * for all the commands completed, call the mid-layer callback routine
1429 * and free the scb.
1430 */
1431 for( i = 0; i < nstatus; i++ ) {
1432
1433 cmdid = completed[i];
1434
1435 /*
1436		 * Only free SCBs for the commands coming down from the
1437		 * mid-layer, not for those which were issued internally.
1438		 *
1439		 * For internal commands, restore the status returned by the
1440		 * firmware so that the user can interpret it.
1441 */
1442 if (cmdid == CMDID_INT_CMDS) {
1443 scb = &adapter->int_scb;
1444 cmd = scb->cmd;
1445
1446 list_del_init(&scb->list);
1447 scb->state = SCB_FREE;
1448
1449 adapter->int_status = status;
1450 complete(&adapter->int_waitq);
1451 } else {
1452 scb = &adapter->scb_list[cmdid];
1453
1454 /*
1455 * Make sure f/w has completed a valid command
1456 */
1457 if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
1458 dev_crit(&adapter->dev->dev, "invalid command "
1459 "Id %d, scb->state:%x, scsi cmd:%p\n",
1460 cmdid, scb->state, scb->cmd);
1461
1462 continue;
1463 }
1464
1465 /*
1466			 * Was an abort issued for this command?
1467 */
1468 if( scb->state & SCB_ABORT ) {
1469
1470 dev_warn(&adapter->dev->dev,
1471 "aborted cmd [%x] complete\n",
1472 scb->idx);
1473
1474 scb->cmd->result = (DID_ABORT << 16);
1475
1476 list_add_tail(SCSI_LIST(scb->cmd),
1477 &adapter->completed_list);
1478
1479 mega_free_scb(adapter, scb);
1480
1481 continue;
1482 }
1483
1484 /*
1485			 * Was a reset issued for this command?
1486 */
1487 if( scb->state & SCB_RESET ) {
1488
1489 dev_warn(&adapter->dev->dev,
1490 "reset cmd [%x] complete\n",
1491 scb->idx);
1492
1493 scb->cmd->result = (DID_RESET << 16);
1494
1495 list_add_tail(SCSI_LIST(scb->cmd),
1496 &adapter->completed_list);
1497
1498 mega_free_scb (adapter, scb);
1499
1500 continue;
1501 }
1502
1503 cmd = scb->cmd;
1504 pthru = scb->pthru;
1505 epthru = scb->epthru;
1506 mbox = (mbox_t *)scb->raw_mbox;
1507
1508#if MEGA_HAVE_STATS
1509 {
1510
1511 int logdrv = mbox->m_out.logdrv;
1512
1513 islogical = adapter->logdrv_chan[cmd->channel];
1514 /*
1515 * Maintain an error counter for the logical drive.
1516				 * Some applications, such as an SNMP agent, need
1517				 * such statistics.
1518 */
1519 if( status && islogical && (cmd->cmnd[0] == READ_6 ||
1520 cmd->cmnd[0] == READ_10 ||
1521 cmd->cmnd[0] == READ_12)) {
1522 /*
1523 * Logical drive number increases by 0x80 when
1524 * a logical drive is deleted
1525 */
1526 adapter->rd_errors[logdrv%0x80]++;
1527 }
1528
1529 if( status && islogical && (cmd->cmnd[0] == WRITE_6 ||
1530 cmd->cmnd[0] == WRITE_10 ||
1531 cmd->cmnd[0] == WRITE_12)) {
1532 /*
1533 * Logical drive number increases by 0x80 when
1534 * a logical drive is deleted
1535 */
1536 adapter->wr_errors[logdrv%0x80]++;
1537 }
1538
1539 }
1540#endif
1541 }
1542
1543 /*
1544		 * Do not report the presence of hard disks on the RAID channels:
1545		 * if an INQUIRY was sent on a non-logical channel and the returned
1546		 * data describes a (removable) hard disk, the request should
1547		 * return failure! - PJ
1548 */
1549 islogical = adapter->logdrv_chan[cmd->device->channel];
1550 if( cmd->cmnd[0] == INQUIRY && !islogical ) {
1551
1552 sgl = scsi_sglist(cmd);
1553 if( sg_page(sgl) ) {
1554 c = *(unsigned char *) sg_virt(&sgl[0]);
1555 } else {
1556 dev_warn(&adapter->dev->dev, "invalid sg\n");
1557 c = 0;
1558 }
1559
1560 if(IS_RAID_CH(adapter, cmd->device->channel) &&
1561 ((c & 0x1F ) == TYPE_DISK)) {
1562 status = 0xF0;
1563 }
1564 }
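		/*
		 * The 0xF0 value does not match any status handled below, so it
		 * falls through to the default case and the INQUIRY completes
		 * with DID_BAD_TARGET, keeping such disks hidden.
		 */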
1565
1566 /* clear result; otherwise, success returns corrupt value */
1567 cmd->result = 0;
1568
1569 /* Convert MegaRAID status to Linux error code */
1570 switch (status) {
1571 case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */
1572 cmd->result |= (DID_OK << 16);
1573 break;
1574
1575 case 0x02: /* ERROR_ABORTED, i.e.
1576 SCSI_STATUS_CHECK_CONDITION */
1577
1578 /* set sense_buffer and result fields */
1579 if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU ||
1580 mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) {
1581
1582 memcpy(cmd->sense_buffer, pthru->reqsensearea,
1583 14);
1584
1585 cmd->result = SAM_STAT_CHECK_CONDITION;
1586 }
1587 else {
1588 if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {
1589
1590 memcpy(cmd->sense_buffer,
1591 epthru->reqsensearea, 14);
1592
1593 cmd->result = SAM_STAT_CHECK_CONDITION;
1594 } else
1595 scsi_build_sense(cmd, 0,
1596 ABORTED_COMMAND, 0, 0);
1597 }
1598 break;
1599
1600 case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e.
1601 SCSI_STATUS_BUSY */
1602 cmd->result |= (DID_BUS_BUSY << 16) | status;
1603 break;
1604
1605 default:
1606#if MEGA_HAVE_CLUSTERING
1607 /*
1608 * If TEST_UNIT_READY fails, we know
1609 * MEGA_RESERVATION_STATUS failed
1610 */
1611 if( cmd->cmnd[0] == TEST_UNIT_READY ) {
1612 cmd->result |= (DID_ERROR << 16) |
1613 SAM_STAT_RESERVATION_CONFLICT;
1614 }
1615 else
1616 /*
1617 * Error code returned is 1 if Reserve or Release
1618 * failed or the input parameter is invalid
1619 */
1620 if( status == 1 &&
1621 (cmd->cmnd[0] == RESERVE ||
1622 cmd->cmnd[0] == RELEASE) ) {
1623
1624 cmd->result |= (DID_ERROR << 16) |
1625 SAM_STAT_RESERVATION_CONFLICT;
1626 }
1627 else
1628#endif
1629 cmd->result |= (DID_BAD_TARGET << 16)|status;
1630 }
1631
1632 mega_free_scb(adapter, scb);
1633
1634 /* Add Scsi_Command to end of completed queue */
1635 list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
1636 }
1637}
1638
1639
1640/*
1641 * mega_rundoneq()
1642 *
1643 * Run through the list of completed requests and finish them.
1644 */
1645static void
1646mega_rundoneq (adapter_t *adapter)
1647{
1648 struct megaraid_cmd_priv *cmd_priv;
1649
1650 list_for_each_entry(cmd_priv, &adapter->completed_list, entry)
1651 scsi_done(megaraid_to_scsi_cmd(cmd_priv));
1652
1653 INIT_LIST_HEAD(&adapter->completed_list);
1654}
1655
1656
1657/*
1658 * Free an SCB structure.
1659 * Note: we assume the scsi command associated with this scb has not been freed yet.
1660 */
1661static void
1662mega_free_scb(adapter_t *adapter, scb_t *scb)
1663{
1664 switch( scb->dma_type ) {
1665
1666 case MEGA_DMA_TYPE_NONE:
1667 break;
1668
1669 case MEGA_SGLIST:
1670 scsi_dma_unmap(scb->cmd);
1671 break;
1672 default:
1673 break;
1674 }
1675
1676 /*
1677 * Remove from the pending list
1678 */
1679 list_del_init(&scb->list);
1680
1681 /* Link the scb back into free list */
1682 scb->state = SCB_FREE;
1683 scb->cmd = NULL;
1684
1685 list_add(&scb->list, &adapter->free_list);
1686}
1687
1688
1689static int
1690__mega_busywait_mbox (adapter_t *adapter)
1691{
1692 volatile mbox_t *mbox = adapter->mbox;
1693 long counter;
1694
1695 for (counter = 0; counter < 10000; counter++) {
1696 if (!mbox->m_in.busy)
1697 return 0;
1698 udelay(100);
1699 cond_resched();
1700 }
1701 return -1; /* give up after 1 second */
1702}
1703
1704/*
1705 * Copies the scatter-gather segment descriptors into the controller's SG list.
1706 * Note: For 64 bit cards, we need a minimum of one SG element for read/write
1707 */
1708static int
1709mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1710{
1711 struct scatterlist *sg;
1712 struct scsi_cmnd *cmd;
1713 int sgcnt;
1714 int idx;
1715
1716 cmd = scb->cmd;
1717
1718 /*
1719 * Copy Scatter-Gather list info into controller structure.
1720 *
1721 * The number of sg elements returned must not exceed our limit
1722 */
1723 sgcnt = scsi_dma_map(cmd);
1724
1725 scb->dma_type = MEGA_SGLIST;
1726
1727 BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);
1728
1729 *len = 0;
1730
1731 if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
1732 sg = scsi_sglist(cmd);
1733 scb->dma_h_bulkdata = sg_dma_address(sg);
1734 *buf = (u32)scb->dma_h_bulkdata;
1735 *len = sg_dma_len(sg);
1736 return 0;
1737 }
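	/*
	 * A single segment on a 32-bit adapter is passed as a plain buffer
	 * address with an SG count of zero (handled above); in every other
	 * case the sgl/sgl64 array is filled in and its DMA address is
	 * returned through *buf along with the segment count.
	 */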
1738
1739 scsi_for_each_sg(cmd, sg, sgcnt, idx) {
1740 if (adapter->has_64bit_addr) {
1741 scb->sgl64[idx].address = sg_dma_address(sg);
1742 *len += scb->sgl64[idx].length = sg_dma_len(sg);
1743 } else {
1744 scb->sgl[idx].address = sg_dma_address(sg);
1745 *len += scb->sgl[idx].length = sg_dma_len(sg);
1746 }
1747 }
1748
1749 /* Reset pointer and length fields */
1750 *buf = scb->sgl_dma_addr;
1751
1752 /* Return count of SG requests */
1753 return sgcnt;
1754}
1755
1756
1757/*
1758 * mega_8_to_40ld()
1759 *
1760 * takes all info in AdapterInquiry structure and puts it into ProductInfo and
1761 * Enquiry3 structures for later use
1762 */
1763static void
1764mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3,
1765 mega_product_info *product_info)
1766{
1767 int i;
1768
1769 product_info->max_commands = inquiry->adapter_info.max_commands;
1770 enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate;
1771 product_info->nchannels = inquiry->adapter_info.nchannels;
1772
1773 for (i = 0; i < 4; i++) {
1774 product_info->fw_version[i] =
1775 inquiry->adapter_info.fw_version[i];
1776
1777 product_info->bios_version[i] =
1778 inquiry->adapter_info.bios_version[i];
1779 }
1780 enquiry3->cache_flush_interval =
1781 inquiry->adapter_info.cache_flush_interval;
1782
1783 product_info->dram_size = inquiry->adapter_info.dram_size;
1784
1785 enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv;
1786
1787 for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) {
1788 enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i];
1789 enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i];
1790 enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i];
1791 }
1792
1793 for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++)
1794 enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i];
1795}
1796
1797static inline void
1798mega_free_sgl(adapter_t *adapter)
1799{
1800 scb_t *scb;
1801 int i;
1802
1803 for(i = 0; i < adapter->max_cmds; i++) {
1804
1805 scb = &adapter->scb_list[i];
1806
1807 if( scb->sgl64 ) {
1808 dma_free_coherent(&adapter->dev->dev,
1809 sizeof(mega_sgl64) * adapter->sglen,
1810 scb->sgl64, scb->sgl_dma_addr);
1811
1812 scb->sgl64 = NULL;
1813 }
1814
1815 if( scb->pthru ) {
1816 dma_free_coherent(&adapter->dev->dev,
1817 sizeof(mega_passthru), scb->pthru,
1818 scb->pthru_dma_addr);
1819
1820 scb->pthru = NULL;
1821 }
1822
1823 if( scb->epthru ) {
1824 dma_free_coherent(&adapter->dev->dev,
1825 sizeof(mega_ext_passthru),
1826 scb->epthru, scb->epthru_dma_addr);
1827
1828 scb->epthru = NULL;
1829 }
1830
1831 }
1832}
1833
1834
1835/*
1836 * Get information about the card/driver
1837 */
1838const char *
1839megaraid_info(struct Scsi_Host *host)
1840{
1841 static char buffer[512];
1842 adapter_t *adapter;
1843
1844 adapter = (adapter_t *)host->hostdata;
1845
1846 sprintf (buffer,
1847 "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
1848 adapter->fw_version, adapter->product_info.max_commands,
1849 adapter->host->max_id, adapter->host->max_channel,
1850 (u32)adapter->host->max_lun);
1851 return buffer;
1852}
1853
1854/*
1855 * Abort a previous SCSI request. Only commands on the pending list can be
1856 * aborted. All the commands issued to the F/W must complete.
1857 */
1858static int
1859megaraid_abort(struct scsi_cmnd *cmd)
1860{
1861 adapter_t *adapter;
1862 int rval;
1863
1864 adapter = (adapter_t *)cmd->device->host->hostdata;
1865
1866 rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT);
1867
1868 /*
1869	 * This is required here so that any already-completed requests
1870	 * are communicated over to the mid-layer.
1871 */
1872 mega_rundoneq(adapter);
1873
1874 return rval;
1875}
1876
1877
1878static int
1879megaraid_reset(struct scsi_cmnd *cmd)
1880{
1881 adapter_t *adapter;
1882 megacmd_t mc;
1883 int rval;
1884
1885 adapter = (adapter_t *)cmd->device->host->hostdata;
1886
1887#if MEGA_HAVE_CLUSTERING
1888 mc.cmd = MEGA_CLUSTER_CMD;
1889 mc.opcode = MEGA_RESET_RESERVATIONS;
1890
1891 if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
1892 dev_warn(&adapter->dev->dev, "reservation reset failed\n");
1893 }
1894 else {
1895 dev_info(&adapter->dev->dev, "reservation reset\n");
1896 }
1897#endif
1898
1899 spin_lock_irq(&adapter->lock);
1900
1901 rval = megaraid_abort_and_reset(adapter, NULL, SCB_RESET);
1902
1903 /*
1904	 * This is required here so that any already-completed requests
1905	 * are communicated over to the mid-layer.
1906 */
1907 mega_rundoneq(adapter);
1908 spin_unlock_irq(&adapter->lock);
1909
1910 return rval;
1911}
1912
1913/**
1914 * megaraid_abort_and_reset()
1915 * @adapter: megaraid soft state
1916 * @cmd: scsi command to be aborted or reset
1917 * @aor: abort or reset flag
1918 *
1919 * Try to locate the scsi command in the pending queue. If it is found and has
1920 * not been issued to the controller, abort/reset it; otherwise return failure.
1921 */
1922static int
1923megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
1924{
1925 struct list_head *pos, *next;
1926 scb_t *scb;
1927
1928 if (aor == SCB_ABORT)
1929 dev_warn(&adapter->dev->dev,
1930 "ABORTING cmd=%x <c=%d t=%d l=%d>\n",
1931 cmd->cmnd[0], cmd->device->channel,
1932 cmd->device->id, (u32)cmd->device->lun);
1933 else
1934 dev_warn(&adapter->dev->dev, "RESETTING\n");
1935
1936 if(list_empty(&adapter->pending_list))
1937 return FAILED;
1938
1939 list_for_each_safe(pos, next, &adapter->pending_list) {
1940
1941 scb = list_entry(pos, scb_t, list);
1942
1943 if (!cmd || scb->cmd == cmd) { /* Found command */
1944
1945 scb->state |= aor;
1946
1947 /*
1948 * Check if this command has firmware ownership. If
1949 * yes, we cannot reset this command. Whenever f/w
1950 * completes this command, we will return appropriate
1951 * status from ISR.
1952 */
1953 if( scb->state & SCB_ISSUED ) {
1954
1955 dev_warn(&adapter->dev->dev,
1956 "%s[%x], fw owner\n",
1957 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1958 scb->idx);
1959
1960 return FAILED;
1961 }
1962 /*
1963 * Not yet issued! Remove from the pending
1964 * list
1965 */
1966 dev_warn(&adapter->dev->dev,
1967 "%s-[%x], driver owner\n",
1968 (cmd) ? "ABORTING":"RESET",
1969 scb->idx);
1970 mega_free_scb(adapter, scb);
1971
1972 if (cmd) {
1973 cmd->result = (DID_ABORT << 16);
1974 list_add_tail(SCSI_LIST(cmd),
1975 &adapter->completed_list);
1976 }
1977
1978 return SUCCESS;
1979 }
1980 }
1981
1982 return FAILED;
1983}
1984
1985static inline int
1986make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
1987{
1988 *pdev = pci_alloc_dev(NULL);
1989
1990 if( *pdev == NULL ) return -1;
1991
1992 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
1993
1994 if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
1995 kfree(*pdev);
1996 return -1;
1997 }
1998
1999 return 0;
2000}
2001
2002static inline void
2003free_local_pdev(struct pci_dev *pdev)
2004{
2005 kfree(pdev);
2006}
2007
2008/**
2009 * mega_allocate_inquiry()
2010 * @dma_handle: handle returned for dma address
2011 * @pdev: handle to pci device
2012 *
2013 * allocates memory for inquiry structure
2014 */
2015static inline void *
2016mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
2017{
2018 return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
2019 dma_handle, GFP_KERNEL);
2020}
2021
2022
2023static inline void
2024mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
2025{
2026 dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
2027 dma_handle);
2028}
2029
2030
2031#ifdef CONFIG_PROC_FS
2032/* Following code handles /proc fs */
2033
2034/**
2035 * proc_show_config()
2036 * @m: Synthetic file construction data
2037 * @v: File iterator
2038 *
2039 * Display configuration information about the controller.
2040 */
2041static int
2042proc_show_config(struct seq_file *m, void *v)
2043{
2044
2045 adapter_t *adapter = m->private;
2046
2047 seq_puts(m, MEGARAID_VERSION);
2048 if(adapter->product_info.product_name[0])
2049 seq_printf(m, "%s\n", adapter->product_info.product_name);
2050
2051 seq_puts(m, "Controller Type: ");
2052
2053 if( adapter->flag & BOARD_MEMMAP )
2054 seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
2055 else
2056 seq_puts(m, "418/428/434\n");
2057
2058 if(adapter->flag & BOARD_40LD)
2059 seq_puts(m, "Controller Supports 40 Logical Drives\n");
2060
2061 if(adapter->flag & BOARD_64BIT)
2062 seq_puts(m, "Controller capable of 64-bit memory addressing\n");
2063 if( adapter->has_64bit_addr )
2064 seq_puts(m, "Controller using 64-bit memory addressing\n");
2065 else
2066 seq_puts(m, "Controller is not using 64-bit memory addressing\n");
2067
2068 seq_printf(m, "Base = %08lx, Irq = %d, ",
2069 adapter->base, adapter->host->irq);
2070
2071 seq_printf(m, "Logical Drives = %d, Channels = %d\n",
2072 adapter->numldrv, adapter->product_info.nchannels);
2073
2074 seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
2075 adapter->fw_version, adapter->bios_version,
2076 adapter->product_info.dram_size);
2077
2078 seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
2079 adapter->product_info.max_commands, adapter->max_cmds);
2080
2081 seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
2082 seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
2083 seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
2084 seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
2085 seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
2086 seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
2087 seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
2088 seq_printf(m, "quiescent = %d\n",
2089 atomic_read(&adapter->quiescent));
2090 seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);
2091
2092 seq_puts(m, "\nModule Parameters:\n");
2093 seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
2094 seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
2095 return 0;
2096}
2097
2098/**
2099 * proc_show_stat()
2100 * @m: Synthetic file construction data
2101 * @v: File iterator
2102 *
2103 * Display statistical information about the I/O activity.
2104 */
2105static int
2106proc_show_stat(struct seq_file *m, void *v)
2107{
2108 adapter_t *adapter = m->private;
2109#if MEGA_HAVE_STATS
2110 int i;
2111#endif
2112
2113 seq_puts(m, "Statistical Information for this controller\n");
2114 seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
2115#if MEGA_HAVE_STATS
2116 for(i = 0; i < adapter->numldrv; i++) {
2117 seq_printf(m, "Logical Drive %d:\n", i);
2118 seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
2119 adapter->nreads[i], adapter->nwrites[i]);
2120 seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
2121 adapter->nreadblocks[i], adapter->nwriteblocks[i]);
2122 seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
2123 adapter->rd_errors[i], adapter->wr_errors[i]);
2124 }
2125#else
2126 seq_puts(m, "IO and error counters not compiled in driver.\n");
2127#endif
2128 return 0;
2129}
2130
2131
2132/**
2133 * proc_show_mbox()
2134 * @m: Synthetic file construction data
2135 * @v: File iterator
2136 *
2137 * Display mailbox information for the last command issued. This information
2138 * is good for debugging.
2139 */
2140static int
2141proc_show_mbox(struct seq_file *m, void *v)
2142{
2143 adapter_t *adapter = m->private;
2144 volatile mbox_t *mbox = adapter->mbox;
2145
2146 seq_puts(m, "Contents of Mail Box Structure\n");
2147 seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd);
2148 seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
2149 seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
2150 seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba);
2151 seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr);
2152 seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
2153 seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
2154 seq_printf(m, " Busy = %01x\n", mbox->m_in.busy);
2155 seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status);
2156 return 0;
2157}
2158
2159
2160/**
2161 * proc_show_rebuild_rate()
2162 * @m: Synthetic file construction data
2163 * @v: File iterator
2164 *
2165 * Display current rebuild rate
2166 */
2167static int
2168proc_show_rebuild_rate(struct seq_file *m, void *v)
2169{
2170 adapter_t *adapter = m->private;
2171 dma_addr_t dma_handle;
2172 caddr_t inquiry;
2173 struct pci_dev *pdev;
2174
2175 if( make_local_pdev(adapter, &pdev) != 0 )
2176 return 0;
2177
2178 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2179 goto free_pdev;
2180
2181 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2182 seq_puts(m, "Adapter inquiry failed.\n");
2183 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2184 goto free_inquiry;
2185 }
2186
2187 if( adapter->flag & BOARD_40LD )
2188 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2189 ((mega_inquiry3 *)inquiry)->rebuild_rate);
2190 else
2191 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2192 ((mraid_ext_inquiry *)
2193 inquiry)->raid_inq.adapter_info.rebuild_rate);
2194
2195free_inquiry:
2196 mega_free_inquiry(inquiry, dma_handle, pdev);
2197free_pdev:
2198 free_local_pdev(pdev);
2199 return 0;
2200}
2201
2202
2203/**
2204 * proc_show_battery()
2205 * @m: Synthetic file construction data
2206 * @v: File iterator
2207 *
2208 * Display information about the battery module on the controller.
2209 */
2210static int
2211proc_show_battery(struct seq_file *m, void *v)
2212{
2213 adapter_t *adapter = m->private;
2214 dma_addr_t dma_handle;
2215 caddr_t inquiry;
2216 struct pci_dev *pdev;
2217 u8 battery_status;
2218
2219 if( make_local_pdev(adapter, &pdev) != 0 )
2220 return 0;
2221
2222 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2223 goto free_pdev;
2224
2225 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2226 seq_puts(m, "Adapter inquiry failed.\n");
2227 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2228 goto free_inquiry;
2229 }
2230
2231 if( adapter->flag & BOARD_40LD ) {
2232 battery_status = ((mega_inquiry3 *)inquiry)->battery_status;
2233 }
2234 else {
2235 battery_status = ((mraid_ext_inquiry *)inquiry)->
2236 raid_inq.adapter_info.battery_status;
2237 }
2238
2239 /*
2240 * Decode the battery status
2241 */
2242 seq_printf(m, "Battery Status:[%d]", battery_status);
2243
2244 if(battery_status == MEGA_BATT_CHARGE_DONE)
2245 seq_puts(m, " Charge Done");
2246
2247 if(battery_status & MEGA_BATT_MODULE_MISSING)
2248 seq_puts(m, " Module Missing");
2249
2250 if(battery_status & MEGA_BATT_LOW_VOLTAGE)
2251 seq_puts(m, " Low Voltage");
2252
2253 if(battery_status & MEGA_BATT_TEMP_HIGH)
2254 seq_puts(m, " Temperature High");
2255
2256 if(battery_status & MEGA_BATT_PACK_MISSING)
2257 seq_puts(m, " Pack Missing");
2258
2259 if(battery_status & MEGA_BATT_CHARGE_INPROG)
2260 seq_puts(m, " Charge In-progress");
2261
2262 if(battery_status & MEGA_BATT_CHARGE_FAIL)
2263 seq_puts(m, " Charge Fail");
2264
2265 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED)
2266 seq_puts(m, " Cycles Exceeded");
2267
2268 seq_putc(m, '\n');
2269
2270free_inquiry:
2271 mega_free_inquiry(inquiry, dma_handle, pdev);
2272free_pdev:
2273 free_local_pdev(pdev);
2274 return 0;
2275}
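
/*
 * Example of the decoded output (illustrative only; the MEGA_BATT_* bit
 * values come from megaraid.h): if the firmware reports a battery_status
 * with the "module missing" and "low voltage" bits set, proc_show_battery()
 * renders a line of the form
 *
 *	Battery Status:[<raw value>] Module Missing Low Voltage
 *
 * where the number in brackets is the raw battery_status byte.
 */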
2276
2277
2278/*
2279 * Display scsi inquiry
2280 */
2281static void
2282mega_print_inquiry(struct seq_file *m, char *scsi_inq)
2283{
2284 int i;
2285
2286 seq_puts(m, " Vendor: ");
2287 seq_write(m, scsi_inq + 8, 8);
2288 seq_puts(m, " Model: ");
2289 seq_write(m, scsi_inq + 16, 16);
2290 seq_puts(m, " Rev: ");
2291 seq_write(m, scsi_inq + 32, 4);
2292 seq_putc(m, '\n');
2293
2294 i = scsi_inq[0] & 0x1f;
2295 seq_printf(m, " Type: %s ", scsi_device_type(i));
2296
2297 seq_printf(m, " ANSI SCSI revision: %02x",
2298 scsi_inq[2] & 0x07);
2299
2300 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 )
2301 seq_puts(m, " CCS\n");
2302 else
2303 seq_putc(m, '\n');
2304}
2305
2306/**
2307 * proc_show_pdrv()
2308 * @m: Synthetic file construction data
2309 * @adapter: pointer to our soft state
2310 * @channel: channel
2311 *
2312 * Display information about the physical drives.
2313 */
2314static int
2315proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
2316{
2317 dma_addr_t dma_handle;
2318 char *scsi_inq;
2319 dma_addr_t scsi_inq_dma_handle;
2320 caddr_t inquiry;
2321 struct pci_dev *pdev;
2322 u8 *pdrv_state;
2323 u8 state;
2324 int tgt;
2325 int max_channels;
2326 int i;
2327
2328 if( make_local_pdev(adapter, &pdev) != 0 )
2329 return 0;
2330
2331 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2332 goto free_pdev;
2333
2334 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2335 seq_puts(m, "Adapter inquiry failed.\n");
2336 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2337 goto free_inquiry;
2338 }
2339
2340
2341 scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle,
2342 GFP_KERNEL);
2343 if( scsi_inq == NULL ) {
2344 seq_puts(m, "memory not available for scsi inq.\n");
2345 goto free_inquiry;
2346 }
2347
2348 if( adapter->flag & BOARD_40LD ) {
2349 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state;
2350 }
2351 else {
2352 pdrv_state = ((mraid_ext_inquiry *)inquiry)->
2353 raid_inq.pdrv_info.pdrv_state;
2354 }
2355
2356 max_channels = adapter->product_info.nchannels;
2357
2358 if( channel >= max_channels ) {
2359 goto free_pci;
2360 }
2361
2362 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) {
2363
2364 i = channel*16 + tgt;
2365
2366 state = *(pdrv_state + i);
2367 switch( state & 0x0F ) {
2368 case PDRV_ONLINE:
2369 seq_printf(m, "Channel:%2d Id:%2d State: Online",
2370 channel, tgt);
2371 break;
2372
2373 case PDRV_FAILED:
2374 seq_printf(m, "Channel:%2d Id:%2d State: Failed",
2375 channel, tgt);
2376 break;
2377
2378 case PDRV_RBLD:
2379 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild",
2380 channel, tgt);
2381 break;
2382
2383 case PDRV_HOTSPARE:
2384 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare",
2385 channel, tgt);
2386 break;
2387
2388 default:
2389 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured",
2390 channel, tgt);
2391 break;
2392 }
2393
2394 /*
2395 * This interface displays inquiries for disk drives
2396		 * only. Inquiries for logical drives and non-disk
2397 * devices are available through /proc/scsi/scsi
2398 */
2399 memset(scsi_inq, 0, 256);
2400 if( mega_internal_dev_inquiry(adapter, channel, tgt,
2401 scsi_inq_dma_handle) ||
2402 (scsi_inq[0] & 0x1F) != TYPE_DISK ) {
2403 continue;
2404 }
2405
2406 /*
2407 * Check for overflow. We print less than 240
2408 * characters for inquiry
2409 */
2410 seq_puts(m, ".\n");
2411 mega_print_inquiry(m, scsi_inq);
2412 }
2413
2414free_pci:
2415 dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle);
2416free_inquiry:
2417 mega_free_inquiry(inquiry, dma_handle, pdev);
2418free_pdev:
2419 free_local_pdev(pdev);
2420 return 0;
2421}
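
/*
 * The pdrv_state array returned by the adapter inquiry appears to be laid
 * out as 16 targets per channel, which is why the code above indexes it
 * with "channel*16 + tgt". For example, channel 1, target 4 corresponds to
 * pdrv_state[20], and the low nibble of that byte selects one of the
 * PDRV_* states printed above.
 */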
2422
2423/**
2424 * proc_show_pdrv_ch0()
2425 * @m: Synthetic file construction data
2426 * @v: File iterator
2427 *
2428 * Display information about the physical drives on physical channel 0.
2429 */
2430static int
2431proc_show_pdrv_ch0(struct seq_file *m, void *v)
2432{
2433 return proc_show_pdrv(m, m->private, 0);
2434}
2435
2436
2437/**
2438 * proc_show_pdrv_ch1()
2439 * @m: Synthetic file construction data
2440 * @v: File iterator
2441 *
2442 * Display information about the physical drives on physical channel 1.
2443 */
2444static int
2445proc_show_pdrv_ch1(struct seq_file *m, void *v)
2446{
2447 return proc_show_pdrv(m, m->private, 1);
2448}
2449
2450
2451/**
2452 * proc_show_pdrv_ch2()
2453 * @m: Synthetic file construction data
2454 * @v: File iterator
2455 *
2456 * Display information about the physical drives on physical channel 2.
2457 */
2458static int
2459proc_show_pdrv_ch2(struct seq_file *m, void *v)
2460{
2461 return proc_show_pdrv(m, m->private, 2);
2462}
2463
2464
2465/**
2466 * proc_show_pdrv_ch3()
2467 * @m: Synthetic file construction data
2468 * @v: File iterator
2469 *
2470 * Display information about the physical drives on physical channel 3.
2471 */
2472static int
2473proc_show_pdrv_ch3(struct seq_file *m, void *v)
2474{
2475 return proc_show_pdrv(m, m->private, 3);
2476}
2477
2478
2479/**
2480 * proc_show_rdrv()
2481 * @m: Synthetic file construction data
2482 * @adapter: pointer to our soft state
2483 * @start: starting logical drive to display
2484 * @end: ending logical drive to display
2485 *
2486 * We do not print the inquiry information since it's already available through
2487 * the /proc/scsi/scsi interface
2488 */
2489static int
2490proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
2491{
2492 dma_addr_t dma_handle;
2493 logdrv_param *lparam;
2494 megacmd_t mc;
2495 char *disk_array;
2496 dma_addr_t disk_array_dma_handle;
2497 caddr_t inquiry;
2498 struct pci_dev *pdev;
2499 u8 *rdrv_state;
2500 int num_ldrv;
2501 u32 array_sz;
2502 int i;
2503
2504 if( make_local_pdev(adapter, &pdev) != 0 )
2505 return 0;
2506
2507 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2508 goto free_pdev;
2509
2510 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2511 seq_puts(m, "Adapter inquiry failed.\n");
2512 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2513 goto free_inquiry;
2514 }
2515
2516 memset(&mc, 0, sizeof(megacmd_t));
2517
2518 if( adapter->flag & BOARD_40LD ) {
2519 array_sz = sizeof(disk_array_40ld);
2520
2521 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state;
2522
2523 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv;
2524 }
2525 else {
2526 array_sz = sizeof(disk_array_8ld);
2527
2528 rdrv_state = ((mraid_ext_inquiry *)inquiry)->
2529 raid_inq.logdrv_info.ldrv_state;
2530
2531 num_ldrv = ((mraid_ext_inquiry *)inquiry)->
2532 raid_inq.logdrv_info.num_ldrv;
2533 }
2534
2535 disk_array = dma_alloc_coherent(&pdev->dev, array_sz,
2536 &disk_array_dma_handle, GFP_KERNEL);
2537
2538 if( disk_array == NULL ) {
2539 seq_puts(m, "memory not available.\n");
2540 goto free_inquiry;
2541 }
2542
2543 mc.xferaddr = (u32)disk_array_dma_handle;
2544
2545 if( adapter->flag & BOARD_40LD ) {
2546 mc.cmd = FC_NEW_CONFIG;
2547 mc.opcode = OP_DCMD_READ_CONFIG;
2548
2549 if( mega_internal_command(adapter, &mc, NULL) ) {
2550 seq_puts(m, "40LD read config failed.\n");
2551 goto free_pci;
2552 }
2553
2554 }
2555 else {
2556 mc.cmd = NEW_READ_CONFIG_8LD;
2557
2558 if( mega_internal_command(adapter, &mc, NULL) ) {
2559 mc.cmd = READ_CONFIG_8LD;
2560 if( mega_internal_command(adapter, &mc, NULL) ) {
2561 seq_puts(m, "8LD read config failed.\n");
2562 goto free_pci;
2563 }
2564 }
2565 }
2566
2567 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) {
2568
2569 if( adapter->flag & BOARD_40LD ) {
2570 lparam =
2571 &((disk_array_40ld *)disk_array)->ldrv[i].lparam;
2572 }
2573 else {
2574 lparam =
2575 &((disk_array_8ld *)disk_array)->ldrv[i].lparam;
2576 }
2577
2578 /*
2579 * Check for overflow. We print less than 240 characters for
2580 * information about each logical drive.
2581 */
2582 seq_printf(m, "Logical drive:%2d:, ", i);
2583
2584 switch( rdrv_state[i] & 0x0F ) {
2585 case RDRV_OFFLINE:
2586 seq_puts(m, "state: offline");
2587 break;
2588 case RDRV_DEGRADED:
2589 seq_puts(m, "state: degraded");
2590 break;
2591 case RDRV_OPTIMAL:
2592 seq_puts(m, "state: optimal");
2593 break;
2594 case RDRV_DELETED:
2595 seq_puts(m, "state: deleted");
2596 break;
2597 default:
2598 seq_puts(m, "state: unknown");
2599 break;
2600 }
2601
2602 /*
2603 * Check if check consistency or initialization is going on
2604 * for this logical drive.
2605 */
2606 if( (rdrv_state[i] & 0xF0) == 0x20 )
2607 seq_puts(m, ", check-consistency in progress");
2608 else if( (rdrv_state[i] & 0xF0) == 0x10 )
2609 seq_puts(m, ", initialization in progress");
2610
2611 seq_putc(m, '\n');
2612
2613 seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
2614 seq_printf(m, "RAID level:%3d, ", lparam->level);
2615 seq_printf(m, "Stripe size:%3d, ",
2616 lparam->stripe_sz ? lparam->stripe_sz/2: 128);
2617 seq_printf(m, "Row size:%3d\n", lparam->row_size);
2618
2619 seq_puts(m, "Read Policy: ");
2620 switch(lparam->read_ahead) {
2621 case NO_READ_AHEAD:
2622 seq_puts(m, "No read ahead, ");
2623 break;
2624 case READ_AHEAD:
2625 seq_puts(m, "Read ahead, ");
2626 break;
2627 case ADAP_READ_AHEAD:
2628 seq_puts(m, "Adaptive, ");
2629 break;
2630
2631 }
2632
2633 seq_puts(m, "Write Policy: ");
2634 switch(lparam->write_mode) {
2635 case WRMODE_WRITE_THRU:
2636 seq_puts(m, "Write thru, ");
2637 break;
2638 case WRMODE_WRITE_BACK:
2639 seq_puts(m, "Write back, ");
2640 break;
2641 }
2642
2643 seq_puts(m, "Cache Policy: ");
2644 switch(lparam->direct_io) {
2645 case CACHED_IO:
2646 seq_puts(m, "Cached IO\n\n");
2647 break;
2648 case DIRECT_IO:
2649 seq_puts(m, "Direct IO\n\n");
2650 break;
2651 }
2652 }
2653
2654free_pci:
2655 dma_free_coherent(&pdev->dev, array_sz, disk_array,
2656 disk_array_dma_handle);
2657free_inquiry:
2658 mega_free_inquiry(inquiry, dma_handle, pdev);
2659free_pdev:
2660 free_local_pdev(pdev);
2661 return 0;
2662}
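
/*
 * A note on the stripe size value printed above: lparam->stripe_sz appears
 * to be recorded in 512-byte sectors, so dividing it by two yields KB
 * (e.g., a stripe_sz of 128 would be printed as "Stripe size: 64"); when
 * the field is zero the code falls back to printing 128.
 */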
2663
2664/**
2665 * proc_show_rdrv_10()
2666 * @m: Synthetic file construction data
2667 * @v: File iterator
2668 *
2669 * Display real time information about the logical drives 0 through 9.
2670 */
2671static int
2672proc_show_rdrv_10(struct seq_file *m, void *v)
2673{
2674 return proc_show_rdrv(m, m->private, 0, 9);
2675}
2676
2677
2678/**
2679 * proc_show_rdrv_20()
2680 * @m: Synthetic file construction data
2681 * @v: File iterator
2682 *
2683 * Display real time information about the logical drives 10 through 19.
2684 */
2685static int
2686proc_show_rdrv_20(struct seq_file *m, void *v)
2687{
2688 return proc_show_rdrv(m, m->private, 10, 19);
2689}
2690
2691
2692/**
2693 * proc_show_rdrv_30()
2694 * @m: Synthetic file construction data
2695 * @v: File iterator
2696 *
2697 * Display real time information about the logical drives 20 through 29.
2698 */
2699static int
2700proc_show_rdrv_30(struct seq_file *m, void *v)
2701{
2702 return proc_show_rdrv(m, m->private, 20, 29);
2703}
2704
2705
2706/**
2707 * proc_show_rdrv_40()
2708 * @m: Synthetic file construction data
2709 * @v: File iterator
2710 *
2711 * Display real time information about the logical drives 30 through 39.
2712 */
2713static int
2714proc_show_rdrv_40(struct seq_file *m, void *v)
2715{
2716 return proc_show_rdrv(m, m->private, 30, 39);
2717}
2718
2719/**
2720 * mega_create_proc_entry()
2721 * @index: index in soft state array
2722 * @parent: parent node for this /proc entry
2723 *
2724 * Creates /proc entries for our controllers.
2725 */
2726static void
2727mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2728{
2729 adapter_t *adapter = hba_soft_state[index];
2730 struct proc_dir_entry *dir;
2731 u8 string[16];
2732
2733 sprintf(string, "hba%d", adapter->host->host_no);
2734 dir = proc_mkdir_data(string, 0, parent, adapter);
2735 if (!dir) {
2736 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n");
2737 return;
2738 }
2739
2740 proc_create_single_data("config", S_IRUSR, dir,
2741 proc_show_config, adapter);
2742 proc_create_single_data("stat", S_IRUSR, dir,
2743 proc_show_stat, adapter);
2744 proc_create_single_data("mailbox", S_IRUSR, dir,
2745 proc_show_mbox, adapter);
2746#if MEGA_HAVE_ENH_PROC
2747 proc_create_single_data("rebuild-rate", S_IRUSR, dir,
2748 proc_show_rebuild_rate, adapter);
2749 proc_create_single_data("battery-status", S_IRUSR, dir,
2750 proc_show_battery, adapter);
2751 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir,
2752 proc_show_pdrv_ch0, adapter);
2753 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir,
2754 proc_show_pdrv_ch1, adapter);
2755 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir,
2756 proc_show_pdrv_ch2, adapter);
2757 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir,
2758 proc_show_pdrv_ch3, adapter);
2759 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir,
2760 proc_show_rdrv_10, adapter);
2761 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir,
2762 proc_show_rdrv_20, adapter);
2763 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir,
2764 proc_show_rdrv_30, adapter);
2765 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir,
2766 proc_show_rdrv_40, adapter);
2767#endif
2768}
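
/*
 * The resulting /proc layout for each controller looks like this (the
 * parent directory is created at module init time and is not shown here):
 *
 *	.../hba<host_no>/config
 *	.../hba<host_no>/stat
 *	.../hba<host_no>/mailbox
 *	.../hba<host_no>/rebuild-rate		(MEGA_HAVE_ENH_PROC only)
 *	.../hba<host_no>/battery-status
 *	.../hba<host_no>/diskdrives-ch0 ... diskdrives-ch3
 *	.../hba<host_no>/raiddrives-0-9 ... raiddrives-30-39
 */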
2769
2770#else
2771static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2772{
2773}
2774#endif
2775
2776
2777/*
2778 * megaraid_biosparam()
2779 *
2780 * Return the disk geometry for a particular disk
2781 */
2782static int
2783megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
2784 sector_t capacity, int geom[])
2785{
2786 adapter_t *adapter;
2787 int heads;
2788 int sectors;
2789 int cylinders;
2790
2791 /* Get pointer to host config structure */
2792 adapter = (adapter_t *)sdev->host->hostdata;
2793
2794 if (IS_RAID_CH(adapter, sdev->channel)) {
2795 /* Default heads (64) & sectors (32) */
2796 heads = 64;
2797 sectors = 32;
2798 cylinders = (ulong)capacity / (heads * sectors);
2799
2800 /*
2801 * Handle extended translation size for logical drives
2802 * > 1Gb
2803 */
2804 if ((ulong)capacity >= 0x200000) {
2805 heads = 255;
2806 sectors = 63;
2807 cylinders = (ulong)capacity / (heads * sectors);
2808 }
2809
2810 /* return result */
2811 geom[0] = heads;
2812 geom[1] = sectors;
2813 geom[2] = cylinders;
2814 }
2815 else {
2816 if (scsi_partsize(bdev, capacity, geom))
2817 return 0;
2818
2819 dev_info(&adapter->dev->dev,
2820 "invalid partition on this disk on channel %d\n",
2821 sdev->channel);
2822
2823 /* Default heads (64) & sectors (32) */
2824 heads = 64;
2825 sectors = 32;
2826 cylinders = (ulong)capacity / (heads * sectors);
2827
2828 /* Handle extended translation size for logical drives > 1Gb */
2829 if ((ulong)capacity >= 0x200000) {
2830 heads = 255;
2831 sectors = 63;
2832 cylinders = (ulong)capacity / (heads * sectors);
2833 }
2834
2835 /* return result */
2836 geom[0] = heads;
2837 geom[1] = sectors;
2838 geom[2] = cylinders;
2839 }
2840
2841 return 0;
2842}
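
/*
 * Worked example of the geometry translation above: a 4 GiB logical drive
 * has 8388608 512-byte sectors, which is >= 0x200000, so the extended
 * translation applies:
 *
 *	heads     = 255
 *	sectors   = 63
 *	cylinders = 8388608 / (255 * 63) = 522
 *
 * A drive below the 1 GB threshold keeps the default 64 heads and 32
 * sectors per track.
 */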
2843
2844/**
2845 * mega_init_scb()
2846 * @adapter: pointer to our soft state
2847 *
2848 * Allocate memory for the various pointers in the scb structures:
2849 * scatter-gather list pointer, passthru and extended passthru structure
2850 * pointers.
2851 */
2852static int
2853mega_init_scb(adapter_t *adapter)
2854{
2855 scb_t *scb;
2856 int i;
2857
2858 for( i = 0; i < adapter->max_cmds; i++ ) {
2859
2860 scb = &adapter->scb_list[i];
2861
2862 scb->sgl64 = NULL;
2863 scb->sgl = NULL;
2864 scb->pthru = NULL;
2865 scb->epthru = NULL;
2866 }
2867
2868 for( i = 0; i < adapter->max_cmds; i++ ) {
2869
2870 scb = &adapter->scb_list[i];
2871
2872 scb->idx = i;
2873
2874 scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev,
2875 sizeof(mega_sgl64) * adapter->sglen,
2876 &scb->sgl_dma_addr, GFP_KERNEL);
2877
2878 scb->sgl = (mega_sglist *)scb->sgl64;
2879
2880 if( !scb->sgl ) {
2881 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n");
2882 mega_free_sgl(adapter);
2883 return -1;
2884 }
2885
2886 scb->pthru = dma_alloc_coherent(&adapter->dev->dev,
2887 sizeof(mega_passthru),
2888 &scb->pthru_dma_addr, GFP_KERNEL);
2889
2890 if( !scb->pthru ) {
2891 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
2892 mega_free_sgl(adapter);
2893 return -1;
2894 }
2895
2896 scb->epthru = dma_alloc_coherent(&adapter->dev->dev,
2897 sizeof(mega_ext_passthru),
2898 &scb->epthru_dma_addr, GFP_KERNEL);
2899
2900 if( !scb->epthru ) {
2901 dev_warn(&adapter->dev->dev,
2902 "Can't allocate extended passthru\n");
2903 mega_free_sgl(adapter);
2904 return -1;
2905 }
2906
2907
2908 scb->dma_type = MEGA_DMA_TYPE_NONE;
2909
2910 /*
2911		 * Link to the free list. A lock is not required since we
2912		 * are still loading the driver, so no commands are possible
2913		 * right now.
2914 */
2915 scb->state = SCB_FREE;
2916 scb->cmd = NULL;
2917 list_add(&scb->list, &adapter->free_list);
2918 }
2919
2920 return 0;
2921}
2922
2923
2924/**
2925 * megadev_open()
2926 * @inode: unused
2927 * @filep: unused
2928 *
2929 * Routines for the character/ioctl interface to the driver. Find out if this
2930 * is a valid open.
2931 */
2932static int
2933megadev_open (struct inode *inode, struct file *filep)
2934{
2935 /*
2936 * Only allow superuser to access private ioctl interface
2937 */
2938 if( !capable(CAP_SYS_ADMIN) ) return -EACCES;
2939
2940 return 0;
2941}
2942
2943
2944/**
2945 * megadev_ioctl()
2946 * @filep: Our device file
2947 * @cmd: ioctl command
2948 * @arg: user buffer
2949 *
2950 * ioctl entry point for our private ioctl interface. We move the data in from
2951 * the user space, prepare the command (if necessary, convert the old MIMD
2952 * ioctl to new ioctl command), and issue a synchronous command to the
2953 * controller.
2954 */
2955static int
2956megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2957{
2958 adapter_t *adapter;
2959 nitioctl_t uioc;
2960 int adapno;
2961 int rval;
2962 mega_passthru __user *upthru; /* user address for passthru */
2963 mega_passthru *pthru; /* copy user passthru here */
2964 dma_addr_t pthru_dma_hndl;
2965 void *data = NULL; /* data to be transferred */
2966 dma_addr_t data_dma_hndl; /* dma handle for data xfer area */
2967 megacmd_t mc;
2968#if MEGA_HAVE_STATS
2969 megastat_t __user *ustats = NULL;
2970 int num_ldrv = 0;
2971#endif
2972 u32 uxferaddr = 0;
2973 struct pci_dev *pdev;
2974
2975 /*
2976	 * Make sure only USCSICMD commands are issued through this interface.
2977	 * A MIMD application could still fire a different command.
2978 */
2979 if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
2980 return -EINVAL;
2981 }
2982
2983 /*
2984 * Check and convert a possible MIMD command to NIT command.
2985 * mega_m_to_n() copies the data from the user space, so we do not
2986 * have to do it here.
2987	 * NOTE: We will need some user address to copy out the data; therefore
2988	 * the interface layer will also provide us with the required user
2989	 * addresses.
2990 */
2991 memset(&uioc, 0, sizeof(nitioctl_t));
2992 if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
2993 return rval;
2994
2995
2996 switch( uioc.opcode ) {
2997
2998 case GET_DRIVER_VER:
2999 if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
3000 return (-EFAULT);
3001
3002 break;
3003
3004 case GET_N_ADAP:
3005 if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
3006 return (-EFAULT);
3007
3008 /*
3009		 * Shucks. The MIMD interface returns a positive value for the
3010		 * number of adapters. TODO: Change it to return 0 when no
3011		 * application is using the mimd interface.
3012 */
3013 return hba_count;
3014
3015 case GET_ADAP_INFO:
3016
3017 /*
3018 * Which adapter
3019 */
3020 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3021 return (-ENODEV);
3022
3023 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno,
3024 sizeof(struct mcontroller)) )
3025 return (-EFAULT);
3026 break;
3027
3028#if MEGA_HAVE_STATS
3029
3030 case GET_STATS:
3031 /*
3032 * Which adapter
3033 */
3034 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3035 return (-ENODEV);
3036
3037 adapter = hba_soft_state[adapno];
3038
3039 ustats = uioc.uioc_uaddr;
3040
3041 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) )
3042 return (-EFAULT);
3043
3044 /*
3045 * Check for the validity of the logical drive number
3046 */
3047 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL;
3048
3049 if( copy_to_user(ustats->nreads, adapter->nreads,
3050 num_ldrv*sizeof(u32)) )
3051 return -EFAULT;
3052
3053 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks,
3054 num_ldrv*sizeof(u32)) )
3055 return -EFAULT;
3056
3057 if( copy_to_user(ustats->nwrites, adapter->nwrites,
3058 num_ldrv*sizeof(u32)) )
3059 return -EFAULT;
3060
3061 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks,
3062 num_ldrv*sizeof(u32)) )
3063 return -EFAULT;
3064
3065 if( copy_to_user(ustats->rd_errors, adapter->rd_errors,
3066 num_ldrv*sizeof(u32)) )
3067 return -EFAULT;
3068
3069 if( copy_to_user(ustats->wr_errors, adapter->wr_errors,
3070 num_ldrv*sizeof(u32)) )
3071 return -EFAULT;
3072
3073 return 0;
3074
3075#endif
3076 case MBOX_CMD:
3077
3078 /*
3079 * Which adapter
3080 */
3081 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3082 return (-ENODEV);
3083
3084 adapter = hba_soft_state[adapno];
3085
3086 /*
3087		 * Deletion of a logical drive is a special case. The adapter
3088 * should be quiescent before this command is issued.
3089 */
3090 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV &&
3091 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) {
3092
3093 /*
3094 * Do we support this feature
3095 */
3096 if( !adapter->support_random_del ) {
3097 dev_warn(&adapter->dev->dev, "logdrv "
3098 "delete on non-supporting F/W\n");
3099
3100 return (-EINVAL);
3101 }
3102
3103 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] );
3104
3105 if( rval == 0 ) {
3106 memset(&mc, 0, sizeof(megacmd_t));
3107
3108 mc.status = rval;
3109
3110 rval = mega_n_to_m((void __user *)arg, &mc);
3111 }
3112
3113 return rval;
3114 }
3115 /*
3116		 * This interface only supports the regular passthru commands.
3117 * Reject extended passthru and 64-bit passthru
3118 */
3119 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
3120 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
3121
3122 dev_warn(&adapter->dev->dev, "rejected passthru\n");
3123
3124 return (-EINVAL);
3125 }
3126
3127 /*
3128 * For all internal commands, the buffer must be allocated in
3129 * <4GB address range
3130 */
3131 if( make_local_pdev(adapter, &pdev) != 0 )
3132 return -EIO;
3133
3134 /* Is it a passthru command or a DCMD */
3135 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
3136 /* Passthru commands */
3137
3138 pthru = dma_alloc_coherent(&pdev->dev,
3139 sizeof(mega_passthru),
3140 &pthru_dma_hndl, GFP_KERNEL);
3141
3142 if( pthru == NULL ) {
3143 free_local_pdev(pdev);
3144 return (-ENOMEM);
3145 }
3146
3147 /*
3148 * The user passthru structure
3149 */
3150 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
3151
3152 /*
3153 * Copy in the user passthru here.
3154 */
3155 if( copy_from_user(pthru, upthru,
3156 sizeof(mega_passthru)) ) {
3157
3158 dma_free_coherent(&pdev->dev,
3159 sizeof(mega_passthru),
3160 pthru, pthru_dma_hndl);
3161
3162 free_local_pdev(pdev);
3163
3164 return (-EFAULT);
3165 }
3166
3167 /*
3168 * Is there a data transfer
3169 */
3170 if( pthru->dataxferlen ) {
3171 data = dma_alloc_coherent(&pdev->dev,
3172 pthru->dataxferlen,
3173 &data_dma_hndl,
3174 GFP_KERNEL);
3175
3176 if( data == NULL ) {
3177 dma_free_coherent(&pdev->dev,
3178 sizeof(mega_passthru),
3179 pthru,
3180 pthru_dma_hndl);
3181
3182 free_local_pdev(pdev);
3183
3184 return (-ENOMEM);
3185 }
3186
3187 /*
3188 * Save the user address and point the kernel
3189 * address at just allocated memory
3190 */
3191 uxferaddr = pthru->dataxferaddr;
3192 pthru->dataxferaddr = data_dma_hndl;
3193 }
3194
3195
3196 /*
3197 * Is data coming down-stream
3198 */
3199 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) {
3200 /*
3201 * Get the user data
3202 */
3203 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3204 pthru->dataxferlen) ) {
3205 rval = (-EFAULT);
3206 goto freemem_and_return;
3207 }
3208 }
3209
3210 memset(&mc, 0, sizeof(megacmd_t));
3211
3212 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
3213 mc.xferaddr = (u32)pthru_dma_hndl;
3214
3215 /*
3216 * Issue the command
3217 */
3218 mega_internal_command(adapter, &mc, pthru);
3219
3220 rval = mega_n_to_m((void __user *)arg, &mc);
3221
3222 if( rval ) goto freemem_and_return;
3223
3224
3225 /*
3226 * Is data going up-stream
3227 */
3228 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
3229 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3230 pthru->dataxferlen) ) {
3231 rval = (-EFAULT);
3232 }
3233 }
3234
3235 /*
3236 * Send the request sense data also, irrespective of
3237 * whether the user has asked for it or not.
3238 */
3239 if (copy_to_user(upthru->reqsensearea,
3240 pthru->reqsensearea, 14))
3241 rval = -EFAULT;
3242
3243freemem_and_return:
3244 if( pthru->dataxferlen ) {
3245 dma_free_coherent(&pdev->dev,
3246 pthru->dataxferlen, data,
3247 data_dma_hndl);
3248 }
3249
3250 dma_free_coherent(&pdev->dev, sizeof(mega_passthru),
3251 pthru, pthru_dma_hndl);
3252
3253 free_local_pdev(pdev);
3254
3255 return rval;
3256 }
3257 else {
3258 /* DCMD commands */
3259
3260 /*
3261 * Is there a data transfer
3262 */
3263 if( uioc.xferlen ) {
3264 data = dma_alloc_coherent(&pdev->dev,
3265 uioc.xferlen,
3266 &data_dma_hndl,
3267 GFP_KERNEL);
3268
3269 if( data == NULL ) {
3270 free_local_pdev(pdev);
3271 return (-ENOMEM);
3272 }
3273
3274 uxferaddr = MBOX(uioc)->xferaddr;
3275 }
3276
3277 /*
3278 * Is data coming down-stream
3279 */
3280 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) {
3281 /*
3282 * Get the user data
3283 */
3284 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3285 uioc.xferlen) ) {
3286
3287 dma_free_coherent(&pdev->dev,
3288 uioc.xferlen, data,
3289 data_dma_hndl);
3290
3291 free_local_pdev(pdev);
3292
3293 return (-EFAULT);
3294 }
3295 }
3296
3297 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t));
3298
3299 mc.xferaddr = (u32)data_dma_hndl;
3300
3301 /*
3302 * Issue the command
3303 */
3304 mega_internal_command(adapter, &mc, NULL);
3305
3306 rval = mega_n_to_m((void __user *)arg, &mc);
3307
3308 if( rval ) {
3309 if( uioc.xferlen ) {
3310 dma_free_coherent(&pdev->dev,
3311 uioc.xferlen, data,
3312 data_dma_hndl);
3313 }
3314
3315 free_local_pdev(pdev);
3316
3317 return rval;
3318 }
3319
3320 /*
3321 * Is data going up-stream
3322 */
3323 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
3324 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3325 uioc.xferlen) ) {
3326
3327 rval = (-EFAULT);
3328 }
3329 }
3330
3331 if( uioc.xferlen ) {
3332 dma_free_coherent(&pdev->dev, uioc.xferlen,
3333 data, data_dma_hndl);
3334 }
3335
3336 free_local_pdev(pdev);
3337
3338 return rval;
3339 }
3340
3341 default:
3342 return (-EINVAL);
3343 }
3344
3345 return 0;
3346}
3347
3348static long
3349megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3350{
3351 int ret;
3352
3353 mutex_lock(&megadev_mutex);
3354 ret = megadev_ioctl(filep, cmd, arg);
3355 mutex_unlock(&megadev_mutex);
3356
3357 return ret;
3358}
3359
3360/**
3361 * mega_m_to_n()
3362 * @arg: user address
3363 * @uioc: new ioctl structure
3364 *
3365 * A thin layer to convert the older mimd interface ioctl structure to the
3366 * newer NIT ioctl structure
3367 *
3368 * Copies the user's ioctl structure in and maps the mimd opcodes to NIT opcodes
3369 */
3370static int
3371mega_m_to_n(void __user *arg, nitioctl_t *uioc)
3372{
3373 struct uioctl_t uioc_mimd;
3374 char signature[8] = {0};
3375 u8 opcode;
3376 u8 subopcode;
3377
3378
3379 /*
3380	 * Check if the application conforms to NIT. We do not have to do much
3381 * in that case.
3382 * We exploit the fact that the signature is stored in the very
3383 * beginning of the structure.
3384 */
3385
3386 if( copy_from_user(signature, arg, 7) )
3387 return (-EFAULT);
3388
3389 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3390
3391 /*
3392		 * NOTE NOTE: The nit ioctl is still in flux because of
3393		 * changes to the mailbox definition, in HPE. No applications yet
3394		 * use this interface; let's not have applications use this
3395		 * interface till the new specifications are in place.
3396 */
3397 return -EINVAL;
3398#if 0
3399 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) )
3400 return (-EFAULT);
3401 return 0;
3402#endif
3403 }
3404
3405 /*
3406 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t
3407 *
3408 * Get the user ioctl structure
3409 */
3410 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) )
3411 return (-EFAULT);
3412
3413
3414 /*
3415 * Get the opcode and subopcode for the commands
3416 */
3417 opcode = uioc_mimd.ui.fcs.opcode;
3418 subopcode = uioc_mimd.ui.fcs.subopcode;
3419
3420 switch (opcode) {
3421 case 0x82:
3422
3423 switch (subopcode) {
3424
3425 case MEGAIOC_QDRVRVER: /* Query driver version */
3426 uioc->opcode = GET_DRIVER_VER;
3427 uioc->uioc_uaddr = uioc_mimd.data;
3428 break;
3429
3430 case MEGAIOC_QNADAP: /* Get # of adapters */
3431 uioc->opcode = GET_N_ADAP;
3432 uioc->uioc_uaddr = uioc_mimd.data;
3433 break;
3434
3435 case MEGAIOC_QADAPINFO: /* Get adapter information */
3436 uioc->opcode = GET_ADAP_INFO;
3437 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3438 uioc->uioc_uaddr = uioc_mimd.data;
3439 break;
3440
3441 default:
3442 return(-EINVAL);
3443 }
3444
3445 break;
3446
3447
3448 case 0x81:
3449
3450 uioc->opcode = MBOX_CMD;
3451 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3452
3453 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3454
3455 uioc->xferlen = uioc_mimd.ui.fcs.length;
3456
3457 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3458 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3459
3460 break;
3461
3462 case 0x80:
3463
3464 uioc->opcode = MBOX_CMD;
3465 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3466
3467 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3468
3469 /*
3470		 * Choose the bigger of the input and output data lengths as xferlen
3471 */
3472 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ?
3473 uioc_mimd.outlen : uioc_mimd.inlen;
3474
3475 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3476 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3477
3478 break;
3479
3480 default:
3481 return (-EINVAL);
3482
3483 }
3484
3485 return 0;
3486}
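
/*
 * Example of the conversion performed above: an old MIMD application asking
 * for the adapter count passes opcode 0x82 with subopcode MEGAIOC_QNADAP;
 * this becomes uioc->opcode = GET_N_ADAP with the user's result pointer in
 * uioc->uioc_uaddr. A mailbox request (opcode 0x80/0x81) becomes MBOX_CMD:
 * the first 18 bytes of the user mailbox are copied into uioc_rmbox and the
 * UIOC_RD/UIOC_WR flags are derived from the outlen/inlen fields.
 */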
3487
3488/*
3489 * mega_n_to_m()
3490 * @arg: user address
3491 * @mc: mailbox command
3492 *
3493 * Updates the status information for the application, depending on whether it
3494 * conforms to the older mimd ioctl interface or the newer NIT ioctl interface
3495 */
3496static int
3497mega_n_to_m(void __user *arg, megacmd_t *mc)
3498{
3499 nitioctl_t __user *uiocp;
3500 megacmd_t __user *umc;
3501 mega_passthru __user *upthru;
3502 struct uioctl_t __user *uioc_mimd;
3503 char signature[8] = {0};
3504
3505 /*
3506	 * Check if the application conforms to NIT.
3507 */
3508 if( copy_from_user(signature, arg, 7) )
3509 return -EFAULT;
3510
3511 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3512
3513 uiocp = arg;
3514
3515 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) )
3516 return (-EFAULT);
3517
3518 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3519
3520 umc = MBOX_P(uiocp);
3521
3522 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3523 return -EFAULT;
3524
3525 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus))
3526 return (-EFAULT);
3527 }
3528 }
3529 else {
3530 uioc_mimd = arg;
3531
3532 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) )
3533 return (-EFAULT);
3534
3535 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3536
3537 umc = (megacmd_t __user *)uioc_mimd->mbox;
3538
3539 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3540 return (-EFAULT);
3541
3542 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) )
3543 return (-EFAULT);
3544 }
3545 }
3546
3547 return 0;
3548}
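
/*
 * For reference, this is where the completion status ends up for a MIMD
 * caller: mc->status is written to byte 17 of the user's mailbox copy, and
 * for a passthru command it is additionally written to the scsistatus field
 * of the user's mega_passthru structure.
 */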
3549
3550
3551/*
3552 * MEGARAID 'FW' commands.
3553 */
3554
3555/**
3556 * mega_is_bios_enabled()
3557 * @adapter: pointer to our soft state
3558 *
3559 * issue command to find out if the BIOS is enabled for this controller
3560 */
3561static int
3562mega_is_bios_enabled(adapter_t *adapter)
3563{
3564 struct mbox_out mbox;
3565 unsigned char *raw_mbox = (u8 *)&mbox;
3566
3567 memset(&mbox, 0, sizeof(mbox));
3568
3569 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3570
3571 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3572
3573 raw_mbox[0] = IS_BIOS_ENABLED;
3574 raw_mbox[2] = GET_BIOS;
3575
3576 issue_scb_block(adapter, raw_mbox);
3577
3578 return *(char *)adapter->mega_buffer;
3579}
3580
3581
3582/**
3583 * mega_enum_raid_scsi()
3584 * @adapter: pointer to our soft state
3585 *
3586 * Find out what channels are RAID/SCSI. This information is used to
3587 * differentiate the virtual channels from the physical channels and to support
3588 * the ROMB feature and non-disk devices.
3589 */
3590static void
3591mega_enum_raid_scsi(adapter_t *adapter)
3592{
3593 struct mbox_out mbox;
3594 unsigned char *raw_mbox = (u8 *)&mbox;
3595 int i;
3596
3597 memset(&mbox, 0, sizeof(mbox));
3598
3599 /*
3600 * issue command to find out what channels are raid/scsi
3601 */
3602 raw_mbox[0] = CHNL_CLASS;
3603 raw_mbox[2] = GET_CHNL_CLASS;
3604
3605 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3606
3607 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3608
3609 /*
3610	 * Non-ROMB firmware fails this command, so all channels
3611	 * must be shown as RAID
3612 */
3613 adapter->mega_ch_class = 0xFF;
3614
3615 if(!issue_scb_block(adapter, raw_mbox)) {
3616 adapter->mega_ch_class = *((char *)adapter->mega_buffer);
3617
3618 }
3619
3620 for( i = 0; i < adapter->product_info.nchannels; i++ ) {
3621 if( (adapter->mega_ch_class >> i) & 0x01 ) {
3622 dev_info(&adapter->dev->dev, "channel[%d] is raid\n",
3623 i);
3624 }
3625 else {
3626 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n",
3627 i);
3628 }
3629 }
3630
3631 return;
3632}
3633
3634
3635/**
3636 * mega_get_boot_drv()
3637 * @adapter: pointer to our soft state
3638 *
3639 * Find out which device is the boot device. Note, any logical drive or any
3640 * physical device (e.g., a CDROM) can be designated as a boot device.
3641 */
3642static void
3643mega_get_boot_drv(adapter_t *adapter)
3644{
3645 struct private_bios_data *prv_bios_data;
3646 struct mbox_out mbox;
3647 unsigned char *raw_mbox = (u8 *)&mbox;
3648 u16 cksum = 0;
3649 u8 *cksum_p;
3650 u8 boot_pdrv;
3651 int i;
3652
3653 memset(&mbox, 0, sizeof(mbox));
3654
3655 raw_mbox[0] = BIOS_PVT_DATA;
3656 raw_mbox[2] = GET_BIOS_PVT_DATA;
3657
3658 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3659
3660 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3661
3662 adapter->boot_ldrv_enabled = 0;
3663 adapter->boot_ldrv = 0;
3664
3665 adapter->boot_pdrv_enabled = 0;
3666 adapter->boot_pdrv_ch = 0;
3667 adapter->boot_pdrv_tgt = 0;
3668
3669 if(issue_scb_block(adapter, raw_mbox) == 0) {
3670 prv_bios_data =
3671 (struct private_bios_data *)adapter->mega_buffer;
3672
3673 cksum = 0;
3674 cksum_p = (char *)prv_bios_data;
3675 for (i = 0; i < 14; i++ ) {
3676 cksum += (u16)(*cksum_p++);
3677 }
3678
3679 if (prv_bios_data->cksum == (u16)(0-cksum) ) {
3680
3681 /*
3682 * If MSB is set, a physical drive is set as boot
3683 * device
3684 */
3685 if( prv_bios_data->boot_drv & 0x80 ) {
3686 adapter->boot_pdrv_enabled = 1;
3687 boot_pdrv = prv_bios_data->boot_drv & 0x7F;
3688 adapter->boot_pdrv_ch = boot_pdrv / 16;
3689 adapter->boot_pdrv_tgt = boot_pdrv % 16;
3690 }
3691 else {
3692 adapter->boot_ldrv_enabled = 1;
3693 adapter->boot_ldrv = prv_bios_data->boot_drv;
3694 }
3695 }
3696 }
3697
3698}
3699
3700/**
3701 * mega_support_random_del()
3702 * @adapter: pointer to our soft state
3703 *
3704 * Find out if this controller supports random deletion and addition of
3705 * logical drives
3706 */
3707static int
3708mega_support_random_del(adapter_t *adapter)
3709{
3710 struct mbox_out mbox;
3711 unsigned char *raw_mbox = (u8 *)&mbox;
3712 int rval;
3713
3714 memset(&mbox, 0, sizeof(mbox));
3715
3716 /*
3717 * issue command
3718 */
3719 raw_mbox[0] = FC_DEL_LOGDRV;
3720 raw_mbox[2] = OP_SUP_DEL_LOGDRV;
3721
3722 rval = issue_scb_block(adapter, raw_mbox);
3723
3724 return !rval;
3725}
3726
3727
3728/**
3729 * mega_support_ext_cdb()
3730 * @adapter: pointer to our soft state
3731 *
3732 * Find out if this firmware supports cdblen > 10
3733 */
3734static int
3735mega_support_ext_cdb(adapter_t *adapter)
3736{
3737 struct mbox_out mbox;
3738 unsigned char *raw_mbox = (u8 *)&mbox;
3739 int rval;
3740
3741 memset(&mbox, 0, sizeof(mbox));
3742 /*
3743 * issue command to find out if controller supports extended CDBs.
3744 */
3745 raw_mbox[0] = 0xA4;
3746 raw_mbox[2] = 0x16;
3747
3748 rval = issue_scb_block(adapter, raw_mbox);
3749
3750 return !rval;
3751}
3752
3753
3754/**
3755 * mega_del_logdrv()
3756 * @adapter: pointer to our soft state
3757 * @logdrv: logical drive to be deleted
3758 *
3759 * Delete the specified logical drive. It is the responsibility of the user
3760 * app to let the OS know about this operation.
3761 */
3762static int
3763mega_del_logdrv(adapter_t *adapter, int logdrv)
3764{
3765 unsigned long flags;
3766 scb_t *scb;
3767 int rval;
3768
3769 /*
3770 * Stop sending commands to the controller, queue them internally.
3771 * When deletion is complete, ISR will flush the queue.
3772 */
3773 atomic_set(&adapter->quiescent, 1);
3774
3775 /*
3776 * Wait till all the issued commands are complete and there are no
3777 * commands in the pending queue
3778 */
3779 while (atomic_read(&adapter->pend_cmds) > 0 ||
3780 !list_empty(&adapter->pending_list))
3781 msleep(1000); /* sleep for 1s */
3782
3783 rval = mega_do_del_logdrv(adapter, logdrv);
3784
3785 spin_lock_irqsave(&adapter->lock, flags);
3786
3787 /*
3788	 * If the delete operation was successful, add 0x80 to the logical drive
3789 * ids for commands in the pending queue.
3790 */
3791 if (adapter->read_ldidmap) {
3792 struct list_head *pos;
3793 list_for_each(pos, &adapter->pending_list) {
3794 scb = list_entry(pos, scb_t, list);
3795 if (scb->pthru->logdrv < 0x80 )
3796 scb->pthru->logdrv += 0x80;
3797 }
3798 }
3799
3800 atomic_set(&adapter->quiescent, 0);
3801
3802 mega_runpendq(adapter);
3803
3804 spin_unlock_irqrestore(&adapter->lock, flags);
3805
3806 return rval;
3807}
3808
3809
3810static int
3811mega_do_del_logdrv(adapter_t *adapter, int logdrv)
3812{
3813 megacmd_t mc;
3814 int rval;
3815
3816 memset( &mc, 0, sizeof(megacmd_t));
3817
3818 mc.cmd = FC_DEL_LOGDRV;
3819 mc.opcode = OP_DEL_LOGDRV;
3820 mc.subopcode = logdrv;
3821
3822 rval = mega_internal_command(adapter, &mc, NULL);
3823
3824 /* log this event */
3825 if(rval) {
3826 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv);
3827 return rval;
3828 }
3829
3830 /*
3831	 * After deleting the first logical drive, the logical drives must be
3832 * addressed by adding 0x80 to the logical drive id.
3833 */
3834 adapter->read_ldidmap = 1;
3835
3836 return rval;
3837}
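
/*
 * Example of the id remapping that follows a delete: once the first logical
 * drive has been deleted, read_ldidmap is set and mega_del_logdrv() above
 * re-addresses any passthru command still sitting in the pending queue, so
 * a command aimed at logical drive 2 is re-issued to id 0x82.
 */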
3838
3839
3840/**
3841 * mega_get_max_sgl()
3842 * @adapter: pointer to our soft state
3843 *
3844 * Find out the maximum number of scatter-gather elements supported by this
3845 * version of the firmware
3846 */
3847static void
3848mega_get_max_sgl(adapter_t *adapter)
3849{
3850 struct mbox_out mbox;
3851 unsigned char *raw_mbox = (u8 *)&mbox;
3852
3853 memset(&mbox, 0, sizeof(mbox));
3854
3855 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3856
3857 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3858
3859 raw_mbox[0] = MAIN_MISC_OPCODE;
3860 raw_mbox[2] = GET_MAX_SG_SUPPORT;
3861
3862
3863 if( issue_scb_block(adapter, raw_mbox) ) {
3864 /*
3865 * f/w does not support this command. Choose the default value
3866 */
3867 adapter->sglen = MIN_SGLIST;
3868 }
3869 else {
3870 adapter->sglen = *((char *)adapter->mega_buffer);
3871
3872 /*
3873 * Make sure this is not more than the resources we are
3874 * planning to allocate
3875 */
3876 if ( adapter->sglen > MAX_SGLIST )
3877 adapter->sglen = MAX_SGLIST;
3878 }
3879
3880 return;
3881}
3882
3883
3884/**
3885 * mega_support_cluster()
3886 * @adapter: pointer to our soft state
3887 *
3888 * Find out if this firmware supports cluster calls.
3889 */
3890static int
3891mega_support_cluster(adapter_t *adapter)
3892{
3893 struct mbox_out mbox;
3894 unsigned char *raw_mbox = (u8 *)&mbox;
3895
3896 memset(&mbox, 0, sizeof(mbox));
3897
3898 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3899
3900 mbox.xferaddr = (u32)adapter->buf_dma_handle;
3901
3902 /*
3903 * Try to get the initiator id. This command will succeed iff the
3904 * clustering is available on this HBA.
3905 */
3906 raw_mbox[0] = MEGA_GET_TARGET_ID;
3907
3908 if( issue_scb_block(adapter, raw_mbox) == 0 ) {
3909
3910 /*
3911 * Cluster support available. Get the initiator target id.
3912 * Tell our id to mid-layer too.
3913 */
3914 adapter->this_id = *(u32 *)adapter->mega_buffer;
3915 adapter->host->this_id = adapter->this_id;
3916
3917 return 1;
3918 }
3919
3920 return 0;
3921}
3922
3923#ifdef CONFIG_PROC_FS
3924/**
3925 * mega_adapinq()
3926 * @adapter: pointer to our soft state
3927 * @dma_handle: DMA address of the buffer
3928 *
3929 * Issue internal commands while interrupts are available.
3930 * We only issue direct mailbox commands from within the driver. The ioctl()
3931 * interface, which uses these routines, can also issue passthru commands.
3932 */
3933static int
3934mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle)
3935{
3936 megacmd_t mc;
3937
3938 memset(&mc, 0, sizeof(megacmd_t));
3939
3940 if( adapter->flag & BOARD_40LD ) {
3941 mc.cmd = FC_NEW_CONFIG;
3942 mc.opcode = NC_SUBOP_ENQUIRY3;
3943 mc.subopcode = ENQ3_GET_SOLICITED_FULL;
3944 }
3945 else {
3946 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ;
3947 }
3948
3949 mc.xferaddr = (u32)dma_handle;
3950
3951 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) {
3952 return -1;
3953 }
3954
3955 return 0;
3956}
3957
3958
3959/**
3960 * mega_internal_dev_inquiry()
3961 * @adapter: pointer to our soft state
3962 * @ch: channel for this device
3963 * @tgt: ID of this device
3964 * @buf_dma_handle: DMA address of the buffer
3965 *
3966 * Issue the scsi inquiry for the specified device.
3967 */
3968static int
3969mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
3970 dma_addr_t buf_dma_handle)
3971{
3972 mega_passthru *pthru;
3973 dma_addr_t pthru_dma_handle;
3974 megacmd_t mc;
3975 int rval;
3976 struct pci_dev *pdev;
3977
3978
3979 /*
3980 * For all internal commands, the buffer must be allocated in <4GB
3981 * address range
3982 */
3983 if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
3984
3985 pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru),
3986 &pthru_dma_handle, GFP_KERNEL);
3987
3988 if( pthru == NULL ) {
3989 free_local_pdev(pdev);
3990 return -1;
3991 }
3992
3993 pthru->timeout = 2;
3994 pthru->ars = 1;
3995 pthru->reqsenselen = 14;
3996 pthru->islogical = 0;
3997
3998 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch;
3999
4000 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt;
4001
4002 pthru->cdblen = 6;
4003
4004 pthru->cdb[0] = INQUIRY;
4005 pthru->cdb[1] = 0;
4006 pthru->cdb[2] = 0;
4007 pthru->cdb[3] = 0;
4008 pthru->cdb[4] = 255;
4009 pthru->cdb[5] = 0;
4010
4011
4012 pthru->dataxferaddr = (u32)buf_dma_handle;
4013 pthru->dataxferlen = 256;
4014
4015 memset(&mc, 0, sizeof(megacmd_t));
4016
4017 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
4018 mc.xferaddr = (u32)pthru_dma_handle;
4019
4020 rval = mega_internal_command(adapter, &mc, pthru);
4021
4022 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru,
4023 pthru_dma_handle);
4024
4025 free_local_pdev(pdev);
4026
4027 return rval;
4028}
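
/*
 * Example of the addressing used above: on a 40LD board the channel is
 * folded into the target field, so channel 2, target 5 is sent as
 * pthru->target = (2 << 4) | 5 = 0x25 with pthru->channel = 0; on older
 * 8LD boards the channel and target are passed through unchanged.
 */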
4029#endif
4030
4031/**
4032 * mega_internal_command()
4033 * @adapter: pointer to our soft state
4034 * @mc: the mailbox command
4035 * @pthru: Passthru structure for DCDB commands
4036 *
4037 * Issue the internal commands in interrupt mode.
4038 * The last argument is the address of the passthru structure if the command
4039 * to be fired is a passthru command
4040 *
4041 * Note: parameter 'pthru' is null for non-passthru commands.
4042 */
4043static int
4044mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4045{
4046 unsigned long flags;
4047 scb_t *scb;
4048 int rval;
4049
4050 /*
4051 * The internal commands share one command id and hence are
4052	 * serialized. This is because we want to reserve the maximum number
4053	 * of available command ids for the I/O commands.
4054 */
4055 mutex_lock(&adapter->int_mtx);
4056
4057 scb = &adapter->int_scb;
4058 memset(scb, 0, sizeof(scb_t));
4059
4060 scb->idx = CMDID_INT_CMDS;
4061 scb->state |= SCB_ACTIVE | SCB_PENDQ;
4062
4063 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
4064
4065 /*
4066 * Is it a passthru command
4067 */
4068 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
4069 scb->pthru = pthru;
4070
4071 spin_lock_irqsave(&adapter->lock, flags);
4072 list_add_tail(&scb->list, &adapter->pending_list);
4073 /*
4074 * Check if the HBA is in quiescent state, e.g., during a
4075	 * delete logical drive operation. If it is, don't run
4076 * the pending_list.
4077 */
4078 if (atomic_read(&adapter->quiescent) == 0)
4079 mega_runpendq(adapter);
4080 spin_unlock_irqrestore(&adapter->lock, flags);
4081
4082 wait_for_completion(&adapter->int_waitq);
4083
4084 mc->status = rval = adapter->int_status;
4085
4086 /*
4087 * Print a debug message for all failed commands. Applications can use
4088 * this information.
4089 */
4090 if (rval && trace_level) {
4091 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n",
4092 mc->cmd, mc->opcode, mc->subopcode, rval);
4093 }
4094
4095 mutex_unlock(&adapter->int_mtx);
4096 return rval;
4097}
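
/*
 * Minimal usage sketch for mega_internal_command(), illustrative only and
 * kept out of the build with #if 0; mega_adapinq() and
 * mega_internal_dev_inquiry() above are the real callers in this file. The
 * helper name example_issue_mbox_cmd is hypothetical. A mailbox-only
 * command fills a megacmd_t and passes a NULL passthru pointer:
 */
#if 0
static int example_issue_mbox_cmd(adapter_t *adapter, dma_addr_t dma_handle)
{
	megacmd_t mc;

	memset(&mc, 0, sizeof(megacmd_t));
	mc.cmd = FC_NEW_CONFIG;
	mc.opcode = NC_SUBOP_ENQUIRY3;
	mc.subopcode = ENQ3_GET_SOLICITED_FULL;
	mc.xferaddr = (u32)dma_handle;

	/* Waits for the command to complete and returns the firmware status. */
	return mega_internal_command(adapter, &mc, NULL);
}
#endif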
4098
4099static const struct scsi_host_template megaraid_template = {
4100 .module = THIS_MODULE,
4101 .name = "MegaRAID",
4102 .proc_name = "megaraid_legacy",
4103 .info = megaraid_info,
4104 .queuecommand = megaraid_queue,
4105 .bios_param = megaraid_biosparam,
4106 .max_sectors = MAX_SECTORS_PER_IO,
4107 .can_queue = MAX_COMMANDS,
4108 .this_id = DEFAULT_INITIATOR_ID,
4109 .sg_tablesize = MAX_SGLIST,
4110 .cmd_per_lun = DEF_CMD_PER_LUN,
4111 .eh_abort_handler = megaraid_abort,
4112 .eh_host_reset_handler = megaraid_reset,
4113 .no_write_same = 1,
4114 .cmd_size = sizeof(struct megaraid_cmd_priv),
4115};
4116
4117static int
4118megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4119{
4120 struct Scsi_Host *host;
4121 adapter_t *adapter;
4122 unsigned long mega_baseport, tbase, flag = 0;
4123 u16 subsysid, subsysvid;
4124 u8 pci_bus, pci_dev_func;
4125 int irq, i, j;
4126 int error = -ENODEV;
4127
4128 if (hba_count >= MAX_CONTROLLERS)
4129 goto out;
4130
4131 if (pci_enable_device(pdev))
4132 goto out;
4133 pci_set_master(pdev);
4134
4135 pci_bus = pdev->bus->number;
4136 pci_dev_func = pdev->devfn;
4137
4138 /*
4139 * The megaraid3 stuff reports the ID of the Intel part which is not
4140 * remotely specific to the megaraid
4141 */
4142 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
4143 u16 magic;
4144 /*
4145 * Don't fall over the Compaq management cards using the same
4146 * PCI identifier
4147 */
4148 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
4149 pdev->subsystem_device == 0xC000)
4150 goto out_disable_device;
4151 /* Now check the magic signature byte */
4152 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
4153 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
4154 goto out_disable_device;
4155 /* Ok it is probably a megaraid */
4156 }
4157
4158 /*
4159 * For these vendor and device ids, signature offsets are not
4160 * valid and 64 bit is implicit
4161 */
4162 if (id->driver_data & BOARD_64BIT)
4163 flag |= BOARD_64BIT;
4164 else {
4165 u32 magic64;
4166
4167 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64);
4168 if (magic64 == HBA_SIGNATURE_64BIT)
4169 flag |= BOARD_64BIT;
4170 }
4171
4172 subsysvid = pdev->subsystem_vendor;
4173 subsysid = pdev->subsystem_device;
4174
4175 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n",
4176 id->vendor, id->device);
4177
4178 /* Read the base port and IRQ from PCI */
4179 mega_baseport = pci_resource_start(pdev, 0);
4180 irq = pdev->irq;
4181
4182 tbase = mega_baseport;
4183 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) {
4184 flag |= BOARD_MEMMAP;
4185
4186 if (!request_mem_region(mega_baseport, 128, "megaraid")) {
4187 dev_warn(&pdev->dev, "mem region busy!\n");
4188 goto out_disable_device;
4189 }
4190
4191 mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
4192 if (!mega_baseport) {
4193 dev_warn(&pdev->dev, "could not map hba memory\n");
4194 goto out_release_region;
4195 }
4196 } else {
4197 flag |= BOARD_IOMAP;
4198 mega_baseport += 0x10;
4199
4200 if (!request_region(mega_baseport, 16, "megaraid"))
4201 goto out_disable_device;
4202 }
4203
4204 /* Initialize SCSI Host structure */
4205 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t));
4206 if (!host)
4207 goto out_iounmap;
4208
4209 adapter = (adapter_t *)host->hostdata;
4210 memset(adapter, 0, sizeof(adapter_t));
4211
4212 dev_notice(&pdev->dev,
4213 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
4214 host->host_no, mega_baseport, irq);
4215
4216 adapter->base = mega_baseport;
4217 if (flag & BOARD_MEMMAP)
4218 adapter->mmio_base = (void __iomem *) mega_baseport;
4219
4220 INIT_LIST_HEAD(&adapter->free_list);
4221 INIT_LIST_HEAD(&adapter->pending_list);
4222 INIT_LIST_HEAD(&adapter->completed_list);
4223
4224 adapter->flag = flag;
4225 spin_lock_init(&adapter->lock);
4226
4227 host->cmd_per_lun = max_cmd_per_lun;
4228 host->max_sectors = max_sectors_per_io;
4229
4230 adapter->dev = pdev;
4231 adapter->host = host;
4232
4233 adapter->host->irq = irq;
4234
4235 if (flag & BOARD_MEMMAP)
4236 adapter->host->base = tbase;
4237 else {
4238 adapter->host->io_port = tbase;
4239 adapter->host->n_io_port = 16;
4240 }
4241
4242 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func;
4243
4244 /*
4245 * Allocate buffer to issue internal commands.
4246 */
4247 adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev,
4248 MEGA_BUFFER_SIZE,
4249 &adapter->buf_dma_handle,
4250 GFP_KERNEL);
4251 if (!adapter->mega_buffer) {
4252 dev_warn(&pdev->dev, "out of RAM\n");
4253 goto out_host_put;
4254 }
4255
4256 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t),
4257 GFP_KERNEL);
4258 if (!adapter->scb_list) {
4259 dev_warn(&pdev->dev, "out of RAM\n");
4260 goto out_free_cmd_buffer;
4261 }
4262
4263 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
4264 megaraid_isr_memmapped : megaraid_isr_iomapped,
4265 IRQF_SHARED, "megaraid", adapter)) {
4266 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq);
4267 goto out_free_scb_list;
4268 }
4269
4270 if (mega_setup_mailbox(adapter))
4271 goto out_free_irq;
4272
4273 if (mega_query_adapter(adapter))
4274 goto out_free_mbox;
4275
4276 /*
4277 * Have checks for some buggy f/w
4278 */
4279 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) {
4280 /*
4281 * Which firmware
4282 */
4283 if (!strcmp(adapter->fw_version, "3.00") ||
4284 !strcmp(adapter->fw_version, "3.01")) {
4285
4286 dev_warn(&pdev->dev,
4287 "Your card is a Dell PERC "
4288 "2/SC RAID controller with "
4289 "firmware\nmegaraid: 3.00 or 3.01. "
4290 "This driver is known to have "
4291 "corruption issues\nmegaraid: with "
4292 "those firmware versions on this "
4293 "specific card. In order\nmegaraid: "
4294 "to protect your data, please upgrade "
4295 "your firmware to version\nmegaraid: "
4296 "3.10 or later, available from the "
4297 "Dell Technical Support web\n"
4298 "megaraid: site at\nhttp://support."
4299 "dell.com/us/en/filelib/download/"
4300 "index.asp?fileid=2940\n"
4301 );
4302 }
4303 }
4304
4305 /*
4306 * If we have a HP 1M(0x60E7)/2M(0x60E8) controller with
4307 * firmware H.01.07, H.01.08, and H.01.09 disable 64 bit
4308 * support, since this firmware cannot handle 64 bit
4309 * addressing
4310 */
4311 if ((subsysvid == PCI_VENDOR_ID_HP) &&
4312 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) {
4313 /*
4314 * which firmware
4315 */
4316 if (!strcmp(adapter->fw_version, "H01.07") ||
4317 !strcmp(adapter->fw_version, "H01.08") ||
4318 !strcmp(adapter->fw_version, "H01.09") ) {
4319 dev_warn(&pdev->dev,
4320 "Firmware H.01.07, "
4321 "H.01.08, and H.01.09 on 1M/2M "
4322 "controllers\n"
4323 "do not support 64 bit "
4324 "addressing.\nDISABLING "
4325 "64 bit support.\n");
4326 adapter->flag &= ~BOARD_64BIT;
4327 }
4328 }
4329
4330 if (mega_is_bios_enabled(adapter))
4331 mega_hbas[hba_count].is_bios_enabled = 1;
4332 mega_hbas[hba_count].hostdata_addr = adapter;
4333
4334 /*
4335 * Find out which channel is raid and which is scsi. This is
4336 * for ROMB support.
4337 */
4338 mega_enum_raid_scsi(adapter);
4339
4340 /*
4341 * Find out if a logical drive is set as the boot drive. If
4342	 * there is one, we will make it the first logical drive.
4343	 * ROMB: if we have to boot from a physical drive, then all
4344	 * the physical drives would appear before the logical disks.
4345	 * Else, all the physical drives would be exported to the mid
4346	 * layer after the logical drives.
4347 */
4348 mega_get_boot_drv(adapter);
4349
4350 if (adapter->boot_pdrv_enabled) {
4351 j = adapter->product_info.nchannels;
4352 for( i = 0; i < j; i++ )
4353 adapter->logdrv_chan[i] = 0;
4354 for( i = j; i < NVIRT_CHAN + j; i++ )
4355 adapter->logdrv_chan[i] = 1;
4356 } else {
4357 for (i = 0; i < NVIRT_CHAN; i++)
4358 adapter->logdrv_chan[i] = 1;
4359 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++)
4360 adapter->logdrv_chan[i] = 0;
4361 adapter->mega_ch_class <<= NVIRT_CHAN;
4362 }
4363
4364 /*
4365 * Do we support random deletion and addition of logical
4366 * drives
4367 */
4368 adapter->read_ldidmap = 0; /* set it after first logdrv
4369 delete cmd */
4370 adapter->support_random_del = mega_support_random_del(adapter);
4371
4372 /* Initialize SCBs */
4373 if (mega_init_scb(adapter))
4374 goto out_free_mbox;
4375
4376 /*
4377 * Reset the pending commands counter
4378 */
4379 atomic_set(&adapter->pend_cmds, 0);
4380
4381 /*
4382 * Reset the adapter quiescent flag
4383 */
4384 atomic_set(&adapter->quiescent, 0);
4385
4386 hba_soft_state[hba_count] = adapter;
4387
4388 /*
4389 * Fill in the structure which needs to be passed back to the
4390 * application when it does an ioctl() for controller related
4391 * information.
4392 */
4393 i = hba_count;
4394
4395 mcontroller[i].base = mega_baseport;
4396 mcontroller[i].irq = irq;
4397 mcontroller[i].numldrv = adapter->numldrv;
4398 mcontroller[i].pcibus = pci_bus;
4399 mcontroller[i].pcidev = id->device;
4400 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func);
4401 mcontroller[i].pciid = -1;
4402 mcontroller[i].pcivendor = id->vendor;
4403 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func);
4404 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func;
4405
4406
4407	/* Set the addressing mode to 64-bit if we can */
4408 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
4409 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4410 adapter->has_64bit_addr = 1;
4411 } else {
4412 dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4413 adapter->has_64bit_addr = 0;
4414 }
4415
4416 mutex_init(&adapter->int_mtx);
4417 init_completion(&adapter->int_waitq);
4418
4419 adapter->this_id = DEFAULT_INITIATOR_ID;
4420 adapter->host->this_id = DEFAULT_INITIATOR_ID;
4421
4422#if MEGA_HAVE_CLUSTERING
4423 /*
4424	 * Is cluster support enabled on this controller?
4425	 * Note: in a cluster the HBAs (the initiators) have different
4426	 * target IDs and we cannot assume them to be 7. The call to
4427	 * mega_support_cluster() also gets the target ids if cluster
4428	 * support is available.
4429 */
4430 adapter->has_cluster = mega_support_cluster(adapter);
4431 if (adapter->has_cluster) {
4432 dev_notice(&pdev->dev,
4433 "Cluster driver, initiator id:%d\n",
4434 adapter->this_id);
4435 }
4436#endif
4437
4438 pci_set_drvdata(pdev, host);
4439
4440 mega_create_proc_entry(hba_count, mega_proc_dir_entry);
4441
4442 error = scsi_add_host(host, &pdev->dev);
4443 if (error)
4444 goto out_free_mbox;
4445
4446 scsi_scan_host(host);
4447 hba_count++;
4448 return 0;
4449
4450 out_free_mbox:
4451 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
4452 adapter->una_mbox64, adapter->una_mbox64_dma);
4453 out_free_irq:
4454 free_irq(adapter->host->irq, adapter);
4455 out_free_scb_list:
4456 kfree(adapter->scb_list);
4457 out_free_cmd_buffer:
4458 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
4459 adapter->mega_buffer, adapter->buf_dma_handle);
4460 out_host_put:
4461 scsi_host_put(host);
4462 out_iounmap:
4463 if (flag & BOARD_MEMMAP)
4464 iounmap((void *)mega_baseport);
4465 out_release_region:
4466 if (flag & BOARD_MEMMAP)
4467 release_mem_region(tbase, 128);
4468 else
4469 release_region(mega_baseport, 16);
4470 out_disable_device:
4471 pci_disable_device(pdev);
4472 out:
4473 return error;
4474}
4475
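/**
 * __megaraid_shutdown()
 * @adapter: pointer to our soft state
 *
 * Flush the adapter cache and the disk caches with blocking mailbox
 * commands and wait long enough for the flushes to complete. Called from
 * both the PCI remove and shutdown paths.
 */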
4476static void
4477__megaraid_shutdown(adapter_t *adapter)
4478{
4479 u_char raw_mbox[sizeof(struct mbox_out)];
4480 mbox_t *mbox = (mbox_t *)raw_mbox;
4481 int i;
4482
4483 /* Flush adapter cache */
4484 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4485 raw_mbox[0] = FLUSH_ADAPTER;
4486
4487 free_irq(adapter->host->irq, adapter);
4488
4489 /* Issue a blocking (interrupts disabled) command to the card */
4490 issue_scb_block(adapter, raw_mbox);
4491
4492 /* Flush disks cache */
4493 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4494 raw_mbox[0] = FLUSH_SYSTEM;
4495
4496 /* Issue a blocking (interrupts disabled) command to the card */
4497 issue_scb_block(adapter, raw_mbox);
4498
4499 if (atomic_read(&adapter->pend_cmds) > 0)
4500 dev_warn(&adapter->dev->dev, "pending commands!!\n");
4501
4502 /*
4503	 * Have a deliberate delay to make sure all the caches are
4504 * actually flushed.
4505 */
4506 for (i = 0; i <= 10; i++)
4507 mdelay(1000);
4508}
4509
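/**
 * megaraid_remove_one()
 * @pdev: PCI device being removed
 *
 * Tear down one controller: remove the scsi host, flush the caches,
 * release the I/O or memory region, drop the proc entries, free the DMA
 * buffers and SCB list, and finally disable the PCI device.
 */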
4510static void
4511megaraid_remove_one(struct pci_dev *pdev)
4512{
4513 struct Scsi_Host *host = pci_get_drvdata(pdev);
4514 adapter_t *adapter = (adapter_t *)host->hostdata;
4515 char buf[12] = { 0 };
4516
4517 scsi_remove_host(host);
4518
4519 __megaraid_shutdown(adapter);
4520
4521 /* Free our resources */
4522 if (adapter->flag & BOARD_MEMMAP) {
4523 iounmap((void *)adapter->base);
4524 release_mem_region(adapter->host->base, 128);
4525 } else
4526 release_region(adapter->base, 16);
4527
4528 mega_free_sgl(adapter);
4529
4530 sprintf(buf, "hba%d", adapter->host->host_no);
4531 remove_proc_subtree(buf, mega_proc_dir_entry);
4532
4533 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
4534 adapter->mega_buffer, adapter->buf_dma_handle);
4535 kfree(adapter->scb_list);
4536 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
4537 adapter->una_mbox64, adapter->una_mbox64_dma);
4538
4539 scsi_host_put(host);
4540 pci_disable_device(pdev);
4541
4542 hba_count--;
4543}
4544
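/*
 * PCI shutdown hook: flush the adapter and disk caches so no cached data
 * is lost across a reboot or power-off.
 */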
4545static void
4546megaraid_shutdown(struct pci_dev *pdev)
4547{
4548 struct Scsi_Host *host = pci_get_drvdata(pdev);
4549 adapter_t *adapter = (adapter_t *)host->hostdata;
4550
4551 __megaraid_shutdown(adapter);
4552}
4553
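/* PCI IDs of the controllers handled by this legacy driver */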
4554static struct pci_device_id megaraid_pci_tbl[] = {
4555 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
4556 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4557 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
4558 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4559 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
4560 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4561 {0,}
4562};
4563MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
4564
4565static struct pci_driver megaraid_pci_driver = {
4566 .name = "megaraid_legacy",
4567 .id_table = megaraid_pci_tbl,
4568 .probe = megaraid_probe_one,
4569 .remove = megaraid_remove_one,
4570 .shutdown = megaraid_shutdown,
4571};
4572
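/*
 * Module initialization: validate the module parameters, create the
 * /proc/megaraid directory, register the PCI driver and register the
 * "megadev_legacy" character device used for the ioctl interface.
 */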
4573static int __init megaraid_init(void)
4574{
4575 int error;
4576
4577 if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
4578 max_cmd_per_lun = MAX_CMD_PER_LUN;
4579 if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
4580 max_mbox_busy_wait = MBOX_BUSY_WAIT;
4581
4582#ifdef CONFIG_PROC_FS
4583 mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
4584 if (!mega_proc_dir_entry) {
4585 printk(KERN_WARNING
4586 "megaraid: failed to create megaraid root\n");
4587 }
4588#endif
4589 error = pci_register_driver(&megaraid_pci_driver);
4590 if (error) {
4591#ifdef CONFIG_PROC_FS
4592 remove_proc_entry("megaraid", NULL);
4593#endif
4594 return error;
4595 }
4596
4597 /*
4598 * Register the driver as a character device, for applications
4599 * to access it for ioctls.
4600	 * Passing 0 as the first (major) argument to register_chrdev
4601	 * requests dynamic major number allocation.
4602 */
4603 major = register_chrdev(0, "megadev_legacy", &megadev_fops);
4604 if (major < 0) {
4605 printk(KERN_WARNING
4606 "megaraid: failed to register char device\n");
4607 }
4608
4609 return 0;
4610}
4611
4612static void __exit megaraid_exit(void)
4613{
4614 /*
4615 * Unregister the character device interface to the driver.
4616 */
4617 unregister_chrdev(major, "megadev_legacy");
4618
4619 pci_unregister_driver(&megaraid_pci_driver);
4620
4621#ifdef CONFIG_PROC_FS
4622 remove_proc_entry("megaraid", NULL);
4623#endif
4624}
4625
4626module_init(megaraid_init);
4627module_exit(megaraid_exit);
4628
4629/* vi: set ts=8 sw=8 tw=78: */
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 *
4 * Linux MegaRAID device driver
5 *
6 * Copyright (c) 2002 LSI Logic Corporation.
7 *
8 * Copyright (c) 2002 Red Hat, Inc. All rights reserved.
9 * - fixes
10 * - speed-ups (list handling fixes, issued_list, optimizations.)
11 * - lots of cleanups.
12 *
13 * Copyright (c) 2003 Christoph Hellwig <hch@lst.de>
14 * - new-style, hotplug-aware pci probing and scsi registration
15 *
16 * Version : v2.00.4 Mon Nov 14 14:02:43 EST 2005 - Seokmann Ju
17 * <Seokmann.Ju@lsil.com>
18 *
19 * Description: Linux device driver for LSI Logic MegaRAID controller
20 *
21 * Supported controllers: MegaRAID 418, 428, 438, 466, 762, 467, 471, 490, 493
22 * 518, 520, 531, 532
23 *
24 * This driver is supported by LSI Logic, with assistance from Red Hat, Dell,
25 * and others. Please send updates to the mailing list
26 * linux-scsi@vger.kernel.org .
27 */
28
29#include <linux/mm.h>
30#include <linux/fs.h>
31#include <linux/blkdev.h>
32#include <linux/uaccess.h>
33#include <asm/io.h>
34#include <linux/completion.h>
35#include <linux/delay.h>
36#include <linux/proc_fs.h>
37#include <linux/seq_file.h>
38#include <linux/reboot.h>
39#include <linux/module.h>
40#include <linux/list.h>
41#include <linux/interrupt.h>
42#include <linux/pci.h>
43#include <linux/init.h>
44#include <linux/dma-mapping.h>
45#include <linux/mutex.h>
46#include <linux/slab.h>
47#include <scsi/scsicam.h>
48
49#include "scsi.h"
50#include <scsi/scsi_host.h>
51
52#include "megaraid.h"
53
54#define MEGARAID_MODULE_VERSION "2.00.4"
55
56MODULE_AUTHOR ("sju@lsil.com");
57MODULE_DESCRIPTION ("LSI Logic MegaRAID legacy driver");
58MODULE_LICENSE ("GPL");
59MODULE_VERSION(MEGARAID_MODULE_VERSION);
60
61static DEFINE_MUTEX(megadev_mutex);
62static unsigned int max_cmd_per_lun = DEF_CMD_PER_LUN;
63module_param(max_cmd_per_lun, uint, 0);
64MODULE_PARM_DESC(max_cmd_per_lun, "Maximum number of commands which can be issued to a single LUN (default=DEF_CMD_PER_LUN=63)");
65
66static unsigned short int max_sectors_per_io = MAX_SECTORS_PER_IO;
67module_param(max_sectors_per_io, ushort, 0);
68MODULE_PARM_DESC(max_sectors_per_io, "Maximum number of sectors per I/O request (default=MAX_SECTORS_PER_IO=128)");
69
70
71static unsigned short int max_mbox_busy_wait = MBOX_BUSY_WAIT;
72module_param(max_mbox_busy_wait, ushort, 0);
73MODULE_PARM_DESC(max_mbox_busy_wait, "Maximum wait for mailbox in microseconds if busy (default=MBOX_BUSY_WAIT=10)");
74
75#define RDINDOOR(adapter) readl((adapter)->mmio_base + 0x20)
76#define RDOUTDOOR(adapter) readl((adapter)->mmio_base + 0x2C)
77#define WRINDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x20)
78#define WROUTDOOR(adapter,value) writel(value, (adapter)->mmio_base + 0x2C)
79
80/*
81 * Global variables
82 */
83
84static int hba_count;
85static adapter_t *hba_soft_state[MAX_CONTROLLERS];
86static struct proc_dir_entry *mega_proc_dir_entry;
87
88/* For controller re-ordering */
89static struct mega_hbas mega_hbas[MAX_CONTROLLERS];
90
91static long
92megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
93
94/*
95 * The File Operations structure for the serial/ioctl interface of the driver
96 */
97static const struct file_operations megadev_fops = {
98 .owner = THIS_MODULE,
99 .unlocked_ioctl = megadev_unlocked_ioctl,
100 .open = megadev_open,
101 .llseek = noop_llseek,
102};
103
104/*
105 * Array to structures for storing the information about the controllers. This
106 * information is sent to the user level applications, when they do an ioctl
107 * for this information.
108 */
109static struct mcontroller mcontroller[MAX_CONTROLLERS];
110
111/* The current driver version */
112static u32 driver_ver = 0x02000000;
113
114/* major number used by the device for character interface */
115static int major;
116
117#define IS_RAID_CH(hba, ch) (((hba)->mega_ch_class >> (ch)) & 0x01)
118
119
120/*
121 * Debug variable to print some diagnostic messages
122 */
123static int trace_level;
124
125/**
126 * mega_setup_mailbox()
127 * @adapter: pointer to our soft state
128 *
129 * Allocates an 8-byte aligned memory area for the handshake mailbox.
130 */
131static int
132mega_setup_mailbox(adapter_t *adapter)
133{
134 unsigned long align;
135
136 adapter->una_mbox64 = dma_alloc_coherent(&adapter->dev->dev,
137 sizeof(mbox64_t),
138 &adapter->una_mbox64_dma,
139 GFP_KERNEL);
140
141 if( !adapter->una_mbox64 ) return -1;
142
143 adapter->mbox = &adapter->una_mbox64->mbox;
144
145 adapter->mbox = (mbox_t *)((((unsigned long) adapter->mbox) + 15) &
146 (~0UL ^ 0xFUL));
147
148 adapter->mbox64 = (mbox64_t *)(((unsigned long)adapter->mbox) - 8);
149
150 align = ((void *)adapter->mbox) - ((void *)&adapter->una_mbox64->mbox);
151
152 adapter->mbox_dma = adapter->una_mbox64_dma + 8 + align;
153
154 /*
155 * Register the mailbox if the controller is an io-mapped controller
156 */
157 if( adapter->flag & BOARD_IOMAP ) {
158
159 outb(adapter->mbox_dma & 0xFF,
160 adapter->host->io_port + MBOX_PORT0);
161
162 outb((adapter->mbox_dma >> 8) & 0xFF,
163 adapter->host->io_port + MBOX_PORT1);
164
165 outb((adapter->mbox_dma >> 16) & 0xFF,
166 adapter->host->io_port + MBOX_PORT2);
167
168 outb((adapter->mbox_dma >> 24) & 0xFF,
169 adapter->host->io_port + MBOX_PORT3);
170
171 outb(ENABLE_MBOX_BYTE,
172 adapter->host->io_port + ENABLE_MBOX_REGION);
173
174 irq_ack(adapter);
175
176 irq_enable(adapter);
177 }
178
179 return 0;
180}
181
182
183/*
184 * mega_query_adapter()
185 * @adapter: pointer to our soft state
186 *
187 * Issue the adapter inquiry commands to the controller and find out
188 * information and parameters about the attached devices.
189 */
190static int
191mega_query_adapter(adapter_t *adapter)
192{
193 dma_addr_t prod_info_dma_handle;
194 mega_inquiry3 *inquiry3;
195 u8 raw_mbox[sizeof(struct mbox_out)];
196 mbox_t *mbox;
197 int retval;
198
199 /* Initialize adapter inquiry mailbox */
200
201 mbox = (mbox_t *)raw_mbox;
202
203 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
204 memset(&mbox->m_out, 0, sizeof(raw_mbox));
205
206 /*
207	 * Try to issue the Inquiry3 command;
208	 * if that does not succeed, issue the MEGA_MBOXCMD_ADPEXTINQ command
209	 * and update the enquiry3 structure.
210 */
211 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
212
213 inquiry3 = (mega_inquiry3 *)adapter->mega_buffer;
214
215 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
216 raw_mbox[2] = NC_SUBOP_ENQUIRY3; /* i.e. 0x0F */
217 raw_mbox[3] = ENQ3_GET_SOLICITED_FULL; /* i.e. 0x02 */
218
219 /* Issue a blocking command to the card */
220 if ((retval = issue_scb_block(adapter, raw_mbox))) {
221 /* the adapter does not support 40ld */
222
223 mraid_ext_inquiry *ext_inq;
224 mraid_inquiry *inq;
225 dma_addr_t dma_handle;
226
227 ext_inq = dma_alloc_coherent(&adapter->dev->dev,
228 sizeof(mraid_ext_inquiry),
229 &dma_handle, GFP_KERNEL);
230
231 if( ext_inq == NULL ) return -1;
232
233 inq = &ext_inq->raid_inq;
234
235 mbox->m_out.xferaddr = (u32)dma_handle;
236
237 /*issue old 0x04 command to adapter */
238 mbox->m_out.cmd = MEGA_MBOXCMD_ADPEXTINQ;
239
240 issue_scb_block(adapter, raw_mbox);
241
242 /*
243 * update Enquiry3 and ProductInfo structures with
244 * mraid_inquiry structure
245 */
246 mega_8_to_40ld(inq, inquiry3,
247 (mega_product_info *)&adapter->product_info);
248
249 dma_free_coherent(&adapter->dev->dev,
250 sizeof(mraid_ext_inquiry), ext_inq,
251 dma_handle);
252
253 } else { /*adapter supports 40ld */
254 adapter->flag |= BOARD_40LD;
255
256 /*
257 * get product_info, which is static information and will be
258 * unchanged
259 */
260 prod_info_dma_handle = dma_map_single(&adapter->dev->dev,
261 (void *)&adapter->product_info,
262 sizeof(mega_product_info),
263 DMA_FROM_DEVICE);
264
265 mbox->m_out.xferaddr = prod_info_dma_handle;
266
267 raw_mbox[0] = FC_NEW_CONFIG; /* i.e. mbox->cmd=0xA1 */
268 raw_mbox[2] = NC_SUBOP_PRODUCT_INFO; /* i.e. 0x0E */
269
270 if ((retval = issue_scb_block(adapter, raw_mbox)))
271 dev_warn(&adapter->dev->dev,
272 "Product_info cmd failed with error: %d\n",
273 retval);
274
275 dma_unmap_single(&adapter->dev->dev, prod_info_dma_handle,
276 sizeof(mega_product_info), DMA_FROM_DEVICE);
277 }
278
279
280 /*
281	 * The kernel scans channels from 0 up to and including max_channel
282 */
283 adapter->host->max_channel =
284 adapter->product_info.nchannels + NVIRT_CHAN -1;
285
286 adapter->host->max_id = 16; /* max targets per channel */
287
288 adapter->host->max_lun = 7; /* Up to 7 luns for non disk devices */
289
290 adapter->host->cmd_per_lun = max_cmd_per_lun;
291
292 adapter->numldrv = inquiry3->num_ldrv;
293
294 adapter->max_cmds = adapter->product_info.max_commands;
295
296 if(adapter->max_cmds > MAX_COMMANDS)
297 adapter->max_cmds = MAX_COMMANDS;
298
299 adapter->host->can_queue = adapter->max_cmds - 1;
300
301 /*
302 * Get the maximum number of scatter-gather elements supported by this
303 * firmware
304 */
305 mega_get_max_sgl(adapter);
306
307 adapter->host->sg_tablesize = adapter->sglen;
308
309 /* use HP firmware and bios version encoding
310 Note: fw_version[0|1] and bios_version[0|1] were originally shifted
311 right 8 bits making them zero. This 0 value was hardcoded to fix
312 sparse warnings. */
313 if (adapter->product_info.subsysvid == PCI_VENDOR_ID_HP) {
314 snprintf(adapter->fw_version, sizeof(adapter->fw_version),
315 "%c%d%d.%d%d",
316 adapter->product_info.fw_version[2],
317 0,
318 adapter->product_info.fw_version[1] & 0x0f,
319 0,
320 adapter->product_info.fw_version[0] & 0x0f);
321		snprintf(adapter->bios_version, sizeof(adapter->bios_version),
322 "%c%d%d.%d%d",
323 adapter->product_info.bios_version[2],
324 0,
325 adapter->product_info.bios_version[1] & 0x0f,
326 0,
327 adapter->product_info.bios_version[0] & 0x0f);
328 } else {
329 memcpy(adapter->fw_version,
330 (char *)adapter->product_info.fw_version, 4);
331 adapter->fw_version[4] = 0;
332
333 memcpy(adapter->bios_version,
334 (char *)adapter->product_info.bios_version, 4);
335
336 adapter->bios_version[4] = 0;
337 }
338
339 dev_notice(&adapter->dev->dev, "[%s:%s] detected %d logical drives\n",
340 adapter->fw_version, adapter->bios_version, adapter->numldrv);
341
342 /*
343 * Do we support extended (>10 bytes) cdbs
344 */
345 adapter->support_ext_cdb = mega_support_ext_cdb(adapter);
346 if (adapter->support_ext_cdb)
347 dev_notice(&adapter->dev->dev, "supports extended CDBs\n");
348
349
350 return 0;
351}
352
353/**
354 * mega_runpendq()
355 * @adapter: pointer to our soft state
356 *
357 * Runs through the list of pending requests.
358 */
359static inline void
360mega_runpendq(adapter_t *adapter)
361{
362 if(!list_empty(&adapter->pending_list))
363 __mega_runpendq(adapter);
364}
365
366/*
367 * megaraid_queue()
368 * @scmd: the scsi command to issue
369 * @done: the completion callback hook into the scsi mid-layer
370 *
371 * The command queuing entry point for the mid-layer.
372 */
373static int
374megaraid_queue_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *))
375{
376 adapter_t *adapter;
377 scb_t *scb;
378 int busy=0;
379 unsigned long flags;
380
381 adapter = (adapter_t *)scmd->device->host->hostdata;
382
383 scmd->scsi_done = done;
384
385
386 /*
387	 * Allocate and build an SCB request.
388	 * The busy flag will be set if mega_build_cmd() could not
389	 * allocate an scb; we return a non-zero status in that case.
390	 * NOTE: scb can be NULL even though certain commands completed
391	 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY; we
392	 * return 0 in that case.
393 */
394
395 spin_lock_irqsave(&adapter->lock, flags);
396 scb = mega_build_cmd(adapter, scmd, &busy);
397 if (!scb)
398 goto out;
399
400 scb->state |= SCB_PENDQ;
401 list_add_tail(&scb->list, &adapter->pending_list);
402
403 /*
404 * Check if the HBA is in quiescent state, e.g., during a
405	 * delete logical drive operation. If it is, don't run
406 * the pending_list.
407 */
408 if (atomic_read(&adapter->quiescent) == 0)
409 mega_runpendq(adapter);
410
411 busy = 0;
412 out:
413 spin_unlock_irqrestore(&adapter->lock, flags);
414 return busy;
415}
416
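/* DEF_SCSI_QCMD() generates the megaraid_queue() entry point wrapping megaraid_queue_lck() */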
417static DEF_SCSI_QCMD(megaraid_queue)
418
419/**
420 * mega_allocate_scb()
421 * @adapter: pointer to our soft state
422 * @cmd: scsi command from the mid-layer
423 *
424 * Allocate a SCB structure. This is the central structure for controller
425 * commands.
426 */
427static inline scb_t *
428mega_allocate_scb(adapter_t *adapter, struct scsi_cmnd *cmd)
429{
430 struct list_head *head = &adapter->free_list;
431 scb_t *scb;
432
433 /* Unlink command from Free List */
434 if( !list_empty(head) ) {
435
436 scb = list_entry(head->next, scb_t, list);
437
438 list_del_init(head->next);
439
440 scb->state = SCB_ACTIVE;
441 scb->cmd = cmd;
442 scb->dma_type = MEGA_DMA_TYPE_NONE;
443
444 return scb;
445 }
446
447 return NULL;
448}
449
450/**
451 * mega_get_ldrv_num()
452 * @adapter: pointer to our soft state
453 * @cmd: scsi mid layer command
454 * @channel: channel on the controller
455 *
456 * Calculate the logical drive number based on the information in the scsi
457 * command and the channel number.
458 */
459static inline int
460mega_get_ldrv_num(adapter_t *adapter, struct scsi_cmnd *cmd, int channel)
461{
462 int tgt;
463 int ldrv_num;
464
465 tgt = cmd->device->id;
466
467 if ( tgt > adapter->this_id )
468		tgt--;	/* we do not get inquiries for the initiator id */
469
470 ldrv_num = (channel * 15) + tgt;
471
472
473 /*
474	 * If we have a logical drive with boot enabled, present it first
475 */
476 if( adapter->boot_ldrv_enabled ) {
477 if( ldrv_num == 0 ) {
478 ldrv_num = adapter->boot_ldrv;
479 }
480 else {
481 if( ldrv_num <= adapter->boot_ldrv ) {
482 ldrv_num--;
483 }
484 }
485 }
486
487 /*
488	 * If the "delete logical drive" feature is enabled on this controller,
489	 * do this only if at least one delete logical drive operation was done.
490	 *
491	 * Also, after a logical drive deletion, instead of the logical drive
492	 * number, the value returned should be 0x80 + logical drive id.
493	 *
494	 * This is valid only for IO commands.
495 */
496
497 if (adapter->support_random_del && adapter->read_ldidmap )
498 switch (cmd->cmnd[0]) {
499 case READ_6:
500 case WRITE_6:
501 case READ_10:
502 case WRITE_10:
503 ldrv_num += 0x80;
504 }
505
506 return ldrv_num;
507}
508
509/**
510 * mega_build_cmd()
511 * @adapter: pointer to our soft state
512 * @cmd: Prepare using this scsi command
513 * @busy: busy flag if no resources
514 *
515 * Prepares a command and scatter gather list for the controller. This routine
516 * also finds out if the command is intended for a logical drive or a
517 * physical device and prepares the controller command accordingly.
518 *
519 * We also re-order the logical drives and physical devices based on their
520 * boot settings.
521 */
522static scb_t *
523mega_build_cmd(adapter_t *adapter, struct scsi_cmnd *cmd, int *busy)
524{
525 mega_passthru *pthru;
526 scb_t *scb;
527 mbox_t *mbox;
528 u32 seg;
529 char islogical;
530 int max_ldrv_num;
531 int channel = 0;
532 int target = 0;
533 int ldrv_num = 0; /* logical drive number */
534
535 /*
536 * We know what channels our logical drives are on - mega_find_card()
537 */
538 islogical = adapter->logdrv_chan[cmd->device->channel];
539
540 /*
541	 * The theory: if a physical drive is chosen for boot, all the physical
542	 * devices are exported before the logical drives; otherwise physical
543	 * devices are pushed after the logical drives, in which case the kernel
544	 * sees the physical devices on a virtual channel, which is then
545	 * converted to the actual channel on the HBA.
546 */
547 if( adapter->boot_pdrv_enabled ) {
548 if( islogical ) {
549 /* logical channel */
550 channel = cmd->device->channel -
551 adapter->product_info.nchannels;
552 }
553 else {
554 /* this is physical channel */
555 channel = cmd->device->channel;
556 target = cmd->device->id;
557
558 /*
559			 * When booting from a physical disk, that disk needs
560			 * to be exposed first. If both channels are SCSI,
561			 * booting from the second channel is not allowed.
562 */
563 if( target == 0 ) {
564 target = adapter->boot_pdrv_tgt;
565 }
566 else if( target == adapter->boot_pdrv_tgt ) {
567 target = 0;
568 }
569 }
570 }
571 else {
572 if( islogical ) {
573 /* this is the logical channel */
574 channel = cmd->device->channel;
575 }
576 else {
577 /* physical channel */
578 channel = cmd->device->channel - NVIRT_CHAN;
579 target = cmd->device->id;
580 }
581 }
582
583
584 if(islogical) {
585
586 /* have just LUN 0 for each target on virtual channels */
587 if (cmd->device->lun) {
588 cmd->result = (DID_BAD_TARGET << 16);
589 cmd->scsi_done(cmd);
590 return NULL;
591 }
592
593 ldrv_num = mega_get_ldrv_num(adapter, cmd, channel);
594
595
596 max_ldrv_num = (adapter->flag & BOARD_40LD) ?
597 MAX_LOGICAL_DRIVES_40LD : MAX_LOGICAL_DRIVES_8LD;
598
599 /*
600 * max_ldrv_num increases by 0x80 if some logical drive was
601 * deleted.
602 */
603 if(adapter->read_ldidmap)
604 max_ldrv_num += 0x80;
605
606 if(ldrv_num > max_ldrv_num ) {
607 cmd->result = (DID_BAD_TARGET << 16);
608 cmd->scsi_done(cmd);
609 return NULL;
610 }
611
612 }
613 else {
614 if( cmd->device->lun > 7) {
615 /*
616 * Do not support lun >7 for physically accessed
617 * devices
618 */
619 cmd->result = (DID_BAD_TARGET << 16);
620 cmd->scsi_done(cmd);
621 return NULL;
622 }
623 }
624
625 /*
626 *
627 * Logical drive commands
628 *
629 */
630 if(islogical) {
631 switch (cmd->cmnd[0]) {
632 case TEST_UNIT_READY:
633#if MEGA_HAVE_CLUSTERING
634 /*
635			 * Do we support clustering and is the support enabled?
636			 * If not, always return success.
637 */
638 if( !adapter->has_cluster ) {
639 cmd->result = (DID_OK << 16);
640 cmd->scsi_done(cmd);
641 return NULL;
642 }
643
644 if(!(scb = mega_allocate_scb(adapter, cmd))) {
645 *busy = 1;
646 return NULL;
647 }
648
649 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
650 scb->raw_mbox[2] = MEGA_RESERVATION_STATUS;
651 scb->raw_mbox[3] = ldrv_num;
652
653 scb->dma_direction = DMA_NONE;
654
655 return scb;
656#else
657 cmd->result = (DID_OK << 16);
658 cmd->scsi_done(cmd);
659 return NULL;
660#endif
661
662 case MODE_SENSE: {
663 char *buf;
664 struct scatterlist *sg;
665
666 sg = scsi_sglist(cmd);
667 buf = kmap_atomic(sg_page(sg)) + sg->offset;
668
669 memset(buf, 0, cmd->cmnd[4]);
670 kunmap_atomic(buf - sg->offset);
671
672 cmd->result = (DID_OK << 16);
673 cmd->scsi_done(cmd);
674 return NULL;
675 }
676
677 case READ_CAPACITY:
678 case INQUIRY:
679
680 if(!(adapter->flag & (1L << cmd->device->channel))) {
681
682 dev_notice(&adapter->dev->dev,
683 "scsi%d: scanning scsi channel %d "
684 "for logical drives\n",
685 adapter->host->host_no,
686 cmd->device->channel);
687
688 adapter->flag |= (1L << cmd->device->channel);
689 }
690
691 /* Allocate a SCB and initialize passthru */
692 if(!(scb = mega_allocate_scb(adapter, cmd))) {
693 *busy = 1;
694 return NULL;
695 }
696 pthru = scb->pthru;
697
698 mbox = (mbox_t *)scb->raw_mbox;
699 memset(mbox, 0, sizeof(scb->raw_mbox));
700 memset(pthru, 0, sizeof(mega_passthru));
701
702 pthru->timeout = 0;
703 pthru->ars = 1;
704 pthru->reqsenselen = 14;
705 pthru->islogical = 1;
706 pthru->logdrv = ldrv_num;
707 pthru->cdblen = cmd->cmd_len;
708 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
709
710 if( adapter->has_64bit_addr ) {
711 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
712 }
713 else {
714 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
715 }
716
717 scb->dma_direction = DMA_FROM_DEVICE;
718
719 pthru->numsgelements = mega_build_sglist(adapter, scb,
720 &pthru->dataxferaddr, &pthru->dataxferlen);
721
722 mbox->m_out.xferaddr = scb->pthru_dma_addr;
723
724 return scb;
725
726 case READ_6:
727 case WRITE_6:
728 case READ_10:
729 case WRITE_10:
730 case READ_12:
731 case WRITE_12:
732
733 /* Allocate a SCB and initialize mailbox */
734 if(!(scb = mega_allocate_scb(adapter, cmd))) {
735 *busy = 1;
736 return NULL;
737 }
738 mbox = (mbox_t *)scb->raw_mbox;
739
740 memset(mbox, 0, sizeof(scb->raw_mbox));
741 mbox->m_out.logdrv = ldrv_num;
742
743 /*
744 * A little hack: 2nd bit is zero for all scsi read
745 * commands and is set for all scsi write commands
746 */
747 if( adapter->has_64bit_addr ) {
748 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
749 MEGA_MBOXCMD_LWRITE64:
750 MEGA_MBOXCMD_LREAD64 ;
751 }
752 else {
753 mbox->m_out.cmd = (*cmd->cmnd & 0x02) ?
754 MEGA_MBOXCMD_LWRITE:
755 MEGA_MBOXCMD_LREAD ;
756 }
757
758 /*
759 * 6-byte READ(0x08) or WRITE(0x0A) cdb
760 */
761 if( cmd->cmd_len == 6 ) {
762 mbox->m_out.numsectors = (u32) cmd->cmnd[4];
763 mbox->m_out.lba =
764 ((u32)cmd->cmnd[1] << 16) |
765 ((u32)cmd->cmnd[2] << 8) |
766 (u32)cmd->cmnd[3];
767
768 mbox->m_out.lba &= 0x1FFFFF;
769
770#if MEGA_HAVE_STATS
771 /*
772 * Take modulo 0x80, since the logical drive
773 * number increases by 0x80 when a logical
774 * drive was deleted
775 */
776 if (*cmd->cmnd == READ_6) {
777 adapter->nreads[ldrv_num%0x80]++;
778 adapter->nreadblocks[ldrv_num%0x80] +=
779 mbox->m_out.numsectors;
780 } else {
781 adapter->nwrites[ldrv_num%0x80]++;
782 adapter->nwriteblocks[ldrv_num%0x80] +=
783 mbox->m_out.numsectors;
784 }
785#endif
786 }
787
788 /*
789 * 10-byte READ(0x28) or WRITE(0x2A) cdb
790 */
791 if( cmd->cmd_len == 10 ) {
792 mbox->m_out.numsectors =
793 (u32)cmd->cmnd[8] |
794 ((u32)cmd->cmnd[7] << 8);
795 mbox->m_out.lba =
796 ((u32)cmd->cmnd[2] << 24) |
797 ((u32)cmd->cmnd[3] << 16) |
798 ((u32)cmd->cmnd[4] << 8) |
799 (u32)cmd->cmnd[5];
800
801#if MEGA_HAVE_STATS
802 if (*cmd->cmnd == READ_10) {
803 adapter->nreads[ldrv_num%0x80]++;
804 adapter->nreadblocks[ldrv_num%0x80] +=
805 mbox->m_out.numsectors;
806 } else {
807 adapter->nwrites[ldrv_num%0x80]++;
808 adapter->nwriteblocks[ldrv_num%0x80] +=
809 mbox->m_out.numsectors;
810 }
811#endif
812 }
813
814 /*
815 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
816 */
817 if( cmd->cmd_len == 12 ) {
818 mbox->m_out.lba =
819 ((u32)cmd->cmnd[2] << 24) |
820 ((u32)cmd->cmnd[3] << 16) |
821 ((u32)cmd->cmnd[4] << 8) |
822 (u32)cmd->cmnd[5];
823
824 mbox->m_out.numsectors =
825 ((u32)cmd->cmnd[6] << 24) |
826 ((u32)cmd->cmnd[7] << 16) |
827 ((u32)cmd->cmnd[8] << 8) |
828 (u32)cmd->cmnd[9];
829
830#if MEGA_HAVE_STATS
831 if (*cmd->cmnd == READ_12) {
832 adapter->nreads[ldrv_num%0x80]++;
833 adapter->nreadblocks[ldrv_num%0x80] +=
834 mbox->m_out.numsectors;
835 } else {
836 adapter->nwrites[ldrv_num%0x80]++;
837 adapter->nwriteblocks[ldrv_num%0x80] +=
838 mbox->m_out.numsectors;
839 }
840#endif
841 }
842
843 /*
844 * If it is a read command
845 */
846 if( (*cmd->cmnd & 0x0F) == 0x08 ) {
847 scb->dma_direction = DMA_FROM_DEVICE;
848 }
849 else {
850 scb->dma_direction = DMA_TO_DEVICE;
851 }
852
853 /* Calculate Scatter-Gather info */
854 mbox->m_out.numsgelements = mega_build_sglist(adapter, scb,
855 (u32 *)&mbox->m_out.xferaddr, &seg);
856
857 return scb;
858
859#if MEGA_HAVE_CLUSTERING
860 case RESERVE:
861 case RELEASE:
862
863 /*
864			 * Do we support clustering and is the support enabled?
865 */
866 if( ! adapter->has_cluster ) {
867
868 cmd->result = (DID_BAD_TARGET << 16);
869 cmd->scsi_done(cmd);
870 return NULL;
871 }
872
873 /* Allocate a SCB and initialize mailbox */
874 if(!(scb = mega_allocate_scb(adapter, cmd))) {
875 *busy = 1;
876 return NULL;
877 }
878
879 scb->raw_mbox[0] = MEGA_CLUSTER_CMD;
880 scb->raw_mbox[2] = ( *cmd->cmnd == RESERVE ) ?
881 MEGA_RESERVE_LD : MEGA_RELEASE_LD;
882
883 scb->raw_mbox[3] = ldrv_num;
884
885 scb->dma_direction = DMA_NONE;
886
887 return scb;
888#endif
889
890 default:
891 cmd->result = (DID_BAD_TARGET << 16);
892 cmd->scsi_done(cmd);
893 return NULL;
894 }
895 }
896
897 /*
898 * Passthru drive commands
899 */
900 else {
901 /* Allocate a SCB and initialize passthru */
902 if(!(scb = mega_allocate_scb(adapter, cmd))) {
903 *busy = 1;
904 return NULL;
905 }
906
907 mbox = (mbox_t *)scb->raw_mbox;
908 memset(mbox, 0, sizeof(scb->raw_mbox));
909
910 if( adapter->support_ext_cdb ) {
911
912 mega_prepare_extpassthru(adapter, scb, cmd,
913 channel, target);
914
915 mbox->m_out.cmd = MEGA_MBOXCMD_EXTPTHRU;
916
917 mbox->m_out.xferaddr = scb->epthru_dma_addr;
918
919 }
920 else {
921
922 pthru = mega_prepare_passthru(adapter, scb, cmd,
923 channel, target);
924
925 /* Initialize mailbox */
926 if( adapter->has_64bit_addr ) {
927 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU64;
928 }
929 else {
930 mbox->m_out.cmd = MEGA_MBOXCMD_PASSTHRU;
931 }
932
933 mbox->m_out.xferaddr = scb->pthru_dma_addr;
934
935 }
936 return scb;
937 }
938 return NULL;
939}
940
941
942/**
943 * mega_prepare_passthru()
944 * @adapter: pointer to our soft state
945 * @scb: our scsi control block
946 * @cmd: scsi command from the mid-layer
947 * @channel: actual channel on the controller
948 * @target: actual id on the controller.
949 *
950 * prepare a command for the scsi physical devices.
951 */
952static mega_passthru *
953mega_prepare_passthru(adapter_t *adapter, scb_t *scb, struct scsi_cmnd *cmd,
954 int channel, int target)
955{
956 mega_passthru *pthru;
957
958 pthru = scb->pthru;
959 memset(pthru, 0, sizeof (mega_passthru));
960
961 /* 0=6sec/1=60sec/2=10min/3=3hrs */
962 pthru->timeout = 2;
963
964 pthru->ars = 1;
965 pthru->reqsenselen = 14;
966 pthru->islogical = 0;
967
968 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
969
970 pthru->target = (adapter->flag & BOARD_40LD) ?
971 (channel << 4) | target : target;
972
973 pthru->cdblen = cmd->cmd_len;
974 pthru->logdrv = cmd->device->lun;
975
976 memcpy(pthru->cdb, cmd->cmnd, cmd->cmd_len);
977
978 /* Not sure about the direction */
979 scb->dma_direction = DMA_BIDIRECTIONAL;
980
981 /* Special Code for Handling READ_CAPA/ INQ using bounce buffers */
982 switch (cmd->cmnd[0]) {
983 case INQUIRY:
984 case READ_CAPACITY:
985 if(!(adapter->flag & (1L << cmd->device->channel))) {
986
987 dev_notice(&adapter->dev->dev,
988 "scsi%d: scanning scsi channel %d [P%d] "
989 "for physical devices\n",
990 adapter->host->host_no,
991 cmd->device->channel, channel);
992
993 adapter->flag |= (1L << cmd->device->channel);
994 }
995 fallthrough;
996 default:
997 pthru->numsgelements = mega_build_sglist(adapter, scb,
998 &pthru->dataxferaddr, &pthru->dataxferlen);
999 break;
1000 }
1001 return pthru;
1002}
1003
1004
1005/**
1006 * mega_prepare_extpassthru()
1007 * @adapter: pointer to our soft state
1008 * @scb: our scsi control block
1009 * @cmd: scsi command from the mid-layer
1010 * @channel: actual channel on the controller
1011 * @target: actual id on the controller.
1012 *
1013 * prepare a command for the scsi physical devices. This routine prepares
1014 * commands for devices which can take extended CDBs (>10 bytes)
1015 */
1016static mega_ext_passthru *
1017mega_prepare_extpassthru(adapter_t *adapter, scb_t *scb,
1018 struct scsi_cmnd *cmd,
1019 int channel, int target)
1020{
1021 mega_ext_passthru *epthru;
1022
1023 epthru = scb->epthru;
1024 memset(epthru, 0, sizeof(mega_ext_passthru));
1025
1026 /* 0=6sec/1=60sec/2=10min/3=3hrs */
1027 epthru->timeout = 2;
1028
1029 epthru->ars = 1;
1030 epthru->reqsenselen = 14;
1031 epthru->islogical = 0;
1032
1033 epthru->channel = (adapter->flag & BOARD_40LD) ? 0 : channel;
1034 epthru->target = (adapter->flag & BOARD_40LD) ?
1035 (channel << 4) | target : target;
1036
1037 epthru->cdblen = cmd->cmd_len;
1038 epthru->logdrv = cmd->device->lun;
1039
1040 memcpy(epthru->cdb, cmd->cmnd, cmd->cmd_len);
1041
1042 /* Not sure about the direction */
1043 scb->dma_direction = DMA_BIDIRECTIONAL;
1044
1045 switch(cmd->cmnd[0]) {
1046 case INQUIRY:
1047 case READ_CAPACITY:
1048 if(!(adapter->flag & (1L << cmd->device->channel))) {
1049
1050 dev_notice(&adapter->dev->dev,
1051 "scsi%d: scanning scsi channel %d [P%d] "
1052 "for physical devices\n",
1053 adapter->host->host_no,
1054 cmd->device->channel, channel);
1055
1056 adapter->flag |= (1L << cmd->device->channel);
1057 }
1058 fallthrough;
1059 default:
1060 epthru->numsgelements = mega_build_sglist(adapter, scb,
1061 &epthru->dataxferaddr, &epthru->dataxferlen);
1062 break;
1063 }
1064
1065 return epthru;
1066}
1067
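/*
 * __mega_runpendq()
 * Issue every command on the pending list that has not yet been sent to
 * the firmware; stop as soon as the mailbox reports busy.
 */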
1068static void
1069__mega_runpendq(adapter_t *adapter)
1070{
1071 scb_t *scb;
1072 struct list_head *pos, *next;
1073
1074 /* Issue any pending commands to the card */
1075 list_for_each_safe(pos, next, &adapter->pending_list) {
1076
1077 scb = list_entry(pos, scb_t, list);
1078
1079 if( !(scb->state & SCB_ISSUED) ) {
1080
1081 if( issue_scb(adapter, scb) != 0 )
1082 return;
1083 }
1084 }
1085
1086 return;
1087}
1088
1089
1090/**
1091 * issue_scb()
1092 * @adapter: pointer to our soft state
1093 * @scb: scsi control block
1094 *
1095 * Post a command to the card if the mailbox is available, otherwise return
1096 * busy. We also take the scb from the pending list if the mailbox is
1097 * available.
1098 */
1099static int
1100issue_scb(adapter_t *adapter, scb_t *scb)
1101{
1102 volatile mbox64_t *mbox64 = adapter->mbox64;
1103 volatile mbox_t *mbox = adapter->mbox;
1104 unsigned int i = 0;
1105
1106 if(unlikely(mbox->m_in.busy)) {
1107 do {
1108 udelay(1);
1109 i++;
1110 } while( mbox->m_in.busy && (i < max_mbox_busy_wait) );
1111
1112 if(mbox->m_in.busy) return -1;
1113 }
1114
1115 /* Copy mailbox data into host structure */
1116 memcpy((char *)&mbox->m_out, (char *)scb->raw_mbox,
1117 sizeof(struct mbox_out));
1118
1119 mbox->m_out.cmdid = scb->idx; /* Set cmdid */
1120 mbox->m_in.busy = 1; /* Set busy */
1121
1122
1123 /*
1124 * Increment the pending queue counter
1125 */
1126 atomic_inc(&adapter->pend_cmds);
1127
1128 switch (mbox->m_out.cmd) {
1129 case MEGA_MBOXCMD_LREAD64:
1130 case MEGA_MBOXCMD_LWRITE64:
1131 case MEGA_MBOXCMD_PASSTHRU64:
1132 case MEGA_MBOXCMD_EXTPTHRU:
1133 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1134 mbox64->xfer_segment_hi = 0;
1135 mbox->m_out.xferaddr = 0xFFFFFFFF;
1136 break;
1137 default:
1138 mbox64->xfer_segment_lo = 0;
1139 mbox64->xfer_segment_hi = 0;
1140 }
1141
1142 /*
1143 * post the command
1144 */
1145 scb->state |= SCB_ISSUED;
1146
1147 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1148 mbox->m_in.poll = 0;
1149 mbox->m_in.ack = 0;
1150 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1151 }
1152 else {
1153 irq_enable(adapter);
1154 issue_command(adapter);
1155 }
1156
1157 return 0;
1158}
1159
1160/*
1161 * Wait until the controller's mailbox is available
1162 */
1163static inline int
1164mega_busywait_mbox (adapter_t *adapter)
1165{
1166 if (adapter->mbox->m_in.busy)
1167 return __mega_busywait_mbox(adapter);
1168 return 0;
1169}
1170
1171/**
1172 * issue_scb_block()
1173 * @adapter: pointer to our soft state
1174 * @raw_mbox: the mailbox
1175 *
1176 * Issue an scb in synchronous and non-interrupt (polled) mode
1177 */
1178static int
1179issue_scb_block(adapter_t *adapter, u_char *raw_mbox)
1180{
1181 volatile mbox64_t *mbox64 = adapter->mbox64;
1182 volatile mbox_t *mbox = adapter->mbox;
1183 u8 byte;
1184
1185 /* Wait until mailbox is free */
1186 if(mega_busywait_mbox (adapter))
1187 goto bug_blocked_mailbox;
1188
1189 /* Copy mailbox data into host structure */
1190 memcpy((char *) mbox, raw_mbox, sizeof(struct mbox_out));
1191 mbox->m_out.cmdid = 0xFE;
1192 mbox->m_in.busy = 1;
1193
1194 switch (raw_mbox[0]) {
1195 case MEGA_MBOXCMD_LREAD64:
1196 case MEGA_MBOXCMD_LWRITE64:
1197 case MEGA_MBOXCMD_PASSTHRU64:
1198 case MEGA_MBOXCMD_EXTPTHRU:
1199 mbox64->xfer_segment_lo = mbox->m_out.xferaddr;
1200 mbox64->xfer_segment_hi = 0;
1201 mbox->m_out.xferaddr = 0xFFFFFFFF;
1202 break;
1203 default:
1204 mbox64->xfer_segment_lo = 0;
1205 mbox64->xfer_segment_hi = 0;
1206 }
1207
1208 if( likely(adapter->flag & BOARD_MEMMAP) ) {
1209 mbox->m_in.poll = 0;
1210 mbox->m_in.ack = 0;
1211 mbox->m_in.numstatus = 0xFF;
1212 mbox->m_in.status = 0xFF;
1213 WRINDOOR(adapter, adapter->mbox_dma | 0x1);
1214
1215 while((volatile u8)mbox->m_in.numstatus == 0xFF)
1216 cpu_relax();
1217
1218 mbox->m_in.numstatus = 0xFF;
1219
1220 while( (volatile u8)mbox->m_in.poll != 0x77 )
1221 cpu_relax();
1222
1223 mbox->m_in.poll = 0;
1224 mbox->m_in.ack = 0x77;
1225
1226 WRINDOOR(adapter, adapter->mbox_dma | 0x2);
1227
1228 while(RDINDOOR(adapter) & 0x2)
1229 cpu_relax();
1230 }
1231 else {
1232 irq_disable(adapter);
1233 issue_command(adapter);
1234
1235 while (!((byte = irq_state(adapter)) & INTR_VALID))
1236 cpu_relax();
1237
1238 set_irq_state(adapter, byte);
1239 irq_enable(adapter);
1240 irq_ack(adapter);
1241 }
1242
1243 return mbox->m_in.status;
1244
1245bug_blocked_mailbox:
1246 dev_warn(&adapter->dev->dev, "Blocked mailbox......!!\n");
1247 udelay (1000);
1248 return -1;
1249}
1250
1251
1252/**
1253 * megaraid_isr_iomapped()
1254 * @irq: irq
1255 * @devp: pointer to our soft state
1256 *
1257 * Interrupt service routine for io-mapped controllers.
1258 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1259 * and service the completed commands.
1260 */
1261static irqreturn_t
1262megaraid_isr_iomapped(int irq, void *devp)
1263{
1264 adapter_t *adapter = devp;
1265 unsigned long flags;
1266 u8 status;
1267 u8 nstatus;
1268 u8 completed[MAX_FIRMWARE_STATUS];
1269 u8 byte;
1270 int handled = 0;
1271
1272
1273 /*
1274	 * Loop as long as the F/W has more commands for us to complete.
1275 */
1276 spin_lock_irqsave(&adapter->lock, flags);
1277
1278 do {
1279 /* Check if a valid interrupt is pending */
1280 byte = irq_state(adapter);
1281 if( (byte & VALID_INTR_BYTE) == 0 ) {
1282 /*
1283 * No more pending commands
1284 */
1285 goto out_unlock;
1286 }
1287 set_irq_state(adapter, byte);
1288
1289 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1290 == 0xFF)
1291 cpu_relax();
1292 adapter->mbox->m_in.numstatus = 0xFF;
1293
1294 status = adapter->mbox->m_in.status;
1295
1296 /*
1297 * decrement the pending queue counter
1298 */
1299 atomic_sub(nstatus, &adapter->pend_cmds);
1300
1301 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1302 nstatus);
1303
1304 /* Acknowledge interrupt */
1305 irq_ack(adapter);
1306
1307 mega_cmd_done(adapter, completed, nstatus, status);
1308
1309 mega_rundoneq(adapter);
1310
1311 handled = 1;
1312
1313 /* Loop through any pending requests */
1314 if(atomic_read(&adapter->quiescent) == 0) {
1315 mega_runpendq(adapter);
1316 }
1317
1318 } while(1);
1319
1320 out_unlock:
1321
1322 spin_unlock_irqrestore(&adapter->lock, flags);
1323
1324 return IRQ_RETVAL(handled);
1325}
1326
1327
1328/**
1329 * megaraid_isr_memmapped()
1330 * @irq: irq
1331 * @devp: pointer to our soft state
1332 *
1333 * Interrupt service routine for memory-mapped controllers.
1334 * Find out if our device is interrupting. If yes, acknowledge the interrupt
1335 * and service the completed commands.
1336 */
1337static irqreturn_t
1338megaraid_isr_memmapped(int irq, void *devp)
1339{
1340 adapter_t *adapter = devp;
1341 unsigned long flags;
1342 u8 status;
1343 u32 dword = 0;
1344 u8 nstatus;
1345 u8 completed[MAX_FIRMWARE_STATUS];
1346 int handled = 0;
1347
1348
1349 /*
1350	 * Loop as long as the F/W has more commands for us to complete.
1351 */
1352 spin_lock_irqsave(&adapter->lock, flags);
1353
1354 do {
1355 /* Check if a valid interrupt is pending */
1356 dword = RDOUTDOOR(adapter);
1357 if(dword != 0x10001234) {
1358 /*
1359 * No more pending commands
1360 */
1361 goto out_unlock;
1362 }
1363 WROUTDOOR(adapter, 0x10001234);
1364
1365 while((nstatus = (volatile u8)adapter->mbox->m_in.numstatus)
1366 == 0xFF) {
1367 cpu_relax();
1368 }
1369 adapter->mbox->m_in.numstatus = 0xFF;
1370
1371 status = adapter->mbox->m_in.status;
1372
1373 /*
1374 * decrement the pending queue counter
1375 */
1376 atomic_sub(nstatus, &adapter->pend_cmds);
1377
1378 memcpy(completed, (void *)adapter->mbox->m_in.completed,
1379 nstatus);
1380
1381 /* Acknowledge interrupt */
1382 WRINDOOR(adapter, 0x2);
1383
1384 handled = 1;
1385
1386 while( RDINDOOR(adapter) & 0x02 )
1387 cpu_relax();
1388
1389 mega_cmd_done(adapter, completed, nstatus, status);
1390
1391 mega_rundoneq(adapter);
1392
1393 /* Loop through any pending requests */
1394 if(atomic_read(&adapter->quiescent) == 0) {
1395 mega_runpendq(adapter);
1396 }
1397
1398 } while(1);
1399
1400 out_unlock:
1401
1402 spin_unlock_irqrestore(&adapter->lock, flags);
1403
1404 return IRQ_RETVAL(handled);
1405}
1406/**
1407 * mega_cmd_done()
1408 * @adapter: pointer to our soft state
1409 * @completed: array of ids of completed commands
1410 * @nstatus: number of completed commands
1411 * @status: status of the last command completed
1412 *
1413 * Complete the commands and call the scsi mid-layer callback hooks.
1414 */
1415static void
1416mega_cmd_done(adapter_t *adapter, u8 completed[], int nstatus, int status)
1417{
1418 mega_ext_passthru *epthru = NULL;
1419 struct scatterlist *sgl;
1420 struct scsi_cmnd *cmd = NULL;
1421 mega_passthru *pthru = NULL;
1422 mbox_t *mbox = NULL;
1423 u8 c;
1424 scb_t *scb;
1425 int islogical;
1426 int cmdid;
1427 int i;
1428
1429 /*
1430 * for all the commands completed, call the mid-layer callback routine
1431 * and free the scb.
1432 */
1433 for( i = 0; i < nstatus; i++ ) {
1434
1435 cmdid = completed[i];
1436
1437 /*
1438		 * Only free SCBs for the commands coming down from the
1439		 * mid-layer, not for those issued internally.
1440		 *
1441		 * For internal commands, restore the status returned by the
1442		 * firmware so that the user can interpret it.
1443 */
1444 if (cmdid == CMDID_INT_CMDS) {
1445 scb = &adapter->int_scb;
1446
1447 list_del_init(&scb->list);
1448 scb->state = SCB_FREE;
1449
1450 adapter->int_status = status;
1451 complete(&adapter->int_waitq);
1452 } else {
1453 scb = &adapter->scb_list[cmdid];
1454
1455 /*
1456 * Make sure f/w has completed a valid command
1457 */
1458 if( !(scb->state & SCB_ISSUED) || scb->cmd == NULL ) {
1459 dev_crit(&adapter->dev->dev, "invalid command "
1460 "Id %d, scb->state:%x, scsi cmd:%p\n",
1461 cmdid, scb->state, scb->cmd);
1462
1463 continue;
1464 }
1465
1466 /*
1467			 * Was an abort issued for this command?
1468 */
1469 if( scb->state & SCB_ABORT ) {
1470
1471 dev_warn(&adapter->dev->dev,
1472 "aborted cmd [%x] complete\n",
1473 scb->idx);
1474
1475 scb->cmd->result = (DID_ABORT << 16);
1476
1477 list_add_tail(SCSI_LIST(scb->cmd),
1478 &adapter->completed_list);
1479
1480 mega_free_scb(adapter, scb);
1481
1482 continue;
1483 }
1484
1485 /*
1486			 * Was a reset issued for this command?
1487 */
1488 if( scb->state & SCB_RESET ) {
1489
1490 dev_warn(&adapter->dev->dev,
1491 "reset cmd [%x] complete\n",
1492 scb->idx);
1493
1494 scb->cmd->result = (DID_RESET << 16);
1495
1496 list_add_tail(SCSI_LIST(scb->cmd),
1497 &adapter->completed_list);
1498
1499 mega_free_scb (adapter, scb);
1500
1501 continue;
1502 }
1503
1504 cmd = scb->cmd;
1505 pthru = scb->pthru;
1506 epthru = scb->epthru;
1507 mbox = (mbox_t *)scb->raw_mbox;
1508
1509#if MEGA_HAVE_STATS
1510 {
1511
1512 int logdrv = mbox->m_out.logdrv;
1513
1514 islogical = adapter->logdrv_chan[cmd->channel];
1515 /*
1516 * Maintain an error counter for the logical drive.
1517			 * Some applications, like an SNMP agent, need such
1518			 * statistics.
1519 */
1520 if( status && islogical && (cmd->cmnd[0] == READ_6 ||
1521 cmd->cmnd[0] == READ_10 ||
1522 cmd->cmnd[0] == READ_12)) {
1523 /*
1524 * Logical drive number increases by 0x80 when
1525 * a logical drive is deleted
1526 */
1527 adapter->rd_errors[logdrv%0x80]++;
1528 }
1529
1530 if( status && islogical && (cmd->cmnd[0] == WRITE_6 ||
1531 cmd->cmnd[0] == WRITE_10 ||
1532 cmd->cmnd[0] == WRITE_12)) {
1533 /*
1534 * Logical drive number increases by 0x80 when
1535 * a logical drive is deleted
1536 */
1537 adapter->wr_errors[logdrv%0x80]++;
1538 }
1539
1540 }
1541#endif
1542 }
1543
1544 /*
1545		 * Do not report the presence of hard disks on non-logical
1546		 * channels: if an INQUIRY was sent on a physical channel and
1547		 * the returned device type is a (removable) hard disk, the
1548		 * request should return failure! - PJ
1549 */
1550 islogical = adapter->logdrv_chan[cmd->device->channel];
1551 if( cmd->cmnd[0] == INQUIRY && !islogical ) {
1552
1553 sgl = scsi_sglist(cmd);
1554 if( sg_page(sgl) ) {
1555 c = *(unsigned char *) sg_virt(&sgl[0]);
1556 } else {
1557 dev_warn(&adapter->dev->dev, "invalid sg\n");
1558 c = 0;
1559 }
1560
1561 if(IS_RAID_CH(adapter, cmd->device->channel) &&
1562 ((c & 0x1F ) == TYPE_DISK)) {
1563 status = 0xF0;
1564 }
1565 }
1566
1567 /* clear result; otherwise, success returns corrupt value */
1568 cmd->result = 0;
1569
1570 /* Convert MegaRAID status to Linux error code */
1571 switch (status) {
1572 case 0x00: /* SUCCESS , i.e. SCSI_STATUS_GOOD */
1573 cmd->result |= (DID_OK << 16);
1574 break;
1575
1576 case 0x02: /* ERROR_ABORTED, i.e.
1577 SCSI_STATUS_CHECK_CONDITION */
1578
1579 /* set sense_buffer and result fields */
1580 if( mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU ||
1581 mbox->m_out.cmd == MEGA_MBOXCMD_PASSTHRU64 ) {
1582
1583 memcpy(cmd->sense_buffer, pthru->reqsensearea,
1584 14);
1585
1586 cmd->result = SAM_STAT_CHECK_CONDITION;
1587 }
1588 else {
1589 if (mbox->m_out.cmd == MEGA_MBOXCMD_EXTPTHRU) {
1590
1591 memcpy(cmd->sense_buffer,
1592 epthru->reqsensearea, 14);
1593
1594 cmd->result = SAM_STAT_CHECK_CONDITION;
1595 } else
1596 scsi_build_sense(cmd, 0,
1597 ABORTED_COMMAND, 0, 0);
1598 }
1599 break;
1600
1601 case 0x08: /* ERR_DEST_DRIVE_FAILED, i.e.
1602 SCSI_STATUS_BUSY */
1603 cmd->result |= (DID_BUS_BUSY << 16) | status;
1604 break;
1605
1606 default:
1607#if MEGA_HAVE_CLUSTERING
1608 /*
1609 * If TEST_UNIT_READY fails, we know
1610 * MEGA_RESERVATION_STATUS failed
1611 */
1612 if( cmd->cmnd[0] == TEST_UNIT_READY ) {
1613 cmd->result |= (DID_ERROR << 16) |
1614 SAM_STAT_RESERVATION_CONFLICT;
1615 }
1616 else
1617 /*
1618 * Error code returned is 1 if Reserve or Release
1619 * failed or the input parameter is invalid
1620 */
1621 if( status == 1 &&
1622 (cmd->cmnd[0] == RESERVE ||
1623 cmd->cmnd[0] == RELEASE) ) {
1624
1625 cmd->result |= (DID_ERROR << 16) |
1626 SAM_STAT_RESERVATION_CONFLICT;
1627 }
1628 else
1629#endif
1630 cmd->result |= (DID_BAD_TARGET << 16)|status;
1631 }
1632
1633 mega_free_scb(adapter, scb);
1634
1635 /* Add Scsi_Command to end of completed queue */
1636 list_add_tail(SCSI_LIST(cmd), &adapter->completed_list);
1637 }
1638}
1639
1640
1641/*
1642 * mega_rundoneq()
1643 *
1644 * Run through the list of completed requests and finish them.
1645 */
1646static void
1647mega_rundoneq (adapter_t *adapter)
1648{
1649 struct scsi_cmnd *cmd;
1650 struct list_head *pos;
1651
1652 list_for_each(pos, &adapter->completed_list) {
1653
1654 struct scsi_pointer* spos = (struct scsi_pointer *)pos;
1655
1656 cmd = list_entry(spos, struct scsi_cmnd, SCp);
1657 cmd->scsi_done(cmd);
1658 }
1659
1660 INIT_LIST_HEAD(&adapter->completed_list);
1661}
1662
1663
1664/*
1665 * Free an SCB structure
1666 * Note: we assume the scsi command associated with this scb is not freed yet.
1667 */
1668static void
1669mega_free_scb(adapter_t *adapter, scb_t *scb)
1670{
1671 switch( scb->dma_type ) {
1672
1673 case MEGA_DMA_TYPE_NONE:
1674 break;
1675
1676 case MEGA_SGLIST:
1677 scsi_dma_unmap(scb->cmd);
1678 break;
1679 default:
1680 break;
1681 }
1682
1683 /*
1684 * Remove from the pending list
1685 */
1686 list_del_init(&scb->list);
1687
1688 /* Link the scb back into free list */
1689 scb->state = SCB_FREE;
1690 scb->cmd = NULL;
1691
1692 list_add(&scb->list, &adapter->free_list);
1693}
1694
1695
1696static int
1697__mega_busywait_mbox (adapter_t *adapter)
1698{
1699 volatile mbox_t *mbox = adapter->mbox;
1700 long counter;
1701
1702 for (counter = 0; counter < 10000; counter++) {
1703 if (!mbox->m_in.busy)
1704 return 0;
1705 udelay(100);
1706 cond_resched();
1707 }
1708 return -1; /* give up after 1 second */
1709}
1710
1711/*
1712 * Copy the scatter-gather list info into the controller's SG list.
1713 * Note: for 64-bit cards, we need a minimum of one SG element for read/write.
1714 */
1715static int
1716mega_build_sglist(adapter_t *adapter, scb_t *scb, u32 *buf, u32 *len)
1717{
1718 struct scatterlist *sg;
1719 struct scsi_cmnd *cmd;
1720 int sgcnt;
1721 int idx;
1722
1723 cmd = scb->cmd;
1724
1725 /*
1726 * Copy Scatter-Gather list info into controller structure.
1727 *
1728 * The number of sg elements returned must not exceed our limit
1729 */
1730 sgcnt = scsi_dma_map(cmd);
1731
1732 scb->dma_type = MEGA_SGLIST;
1733
1734 BUG_ON(sgcnt > adapter->sglen || sgcnt < 0);
1735
1736 *len = 0;
1737
1738 if (scsi_sg_count(cmd) == 1 && !adapter->has_64bit_addr) {
1739 sg = scsi_sglist(cmd);
1740 scb->dma_h_bulkdata = sg_dma_address(sg);
1741 *buf = (u32)scb->dma_h_bulkdata;
1742 *len = sg_dma_len(sg);
1743 return 0;
1744 }
1745
1746 scsi_for_each_sg(cmd, sg, sgcnt, idx) {
1747 if (adapter->has_64bit_addr) {
1748 scb->sgl64[idx].address = sg_dma_address(sg);
1749 *len += scb->sgl64[idx].length = sg_dma_len(sg);
1750 } else {
1751 scb->sgl[idx].address = sg_dma_address(sg);
1752 *len += scb->sgl[idx].length = sg_dma_len(sg);
1753 }
1754 }
1755
1756 /* Reset pointer and length fields */
1757 *buf = scb->sgl_dma_addr;
1758
1759 /* Return count of SG requests */
1760 return sgcnt;
1761}
1762
1763
1764/*
1765 * mega_8_to_40ld()
1766 *
1767 * takes all info in AdapterInquiry structure and puts it into ProductInfo and
1768 * Enquiry3 structures for later use
1769 */
1770static void
1771mega_8_to_40ld(mraid_inquiry *inquiry, mega_inquiry3 *enquiry3,
1772 mega_product_info *product_info)
1773{
1774 int i;
1775
1776 product_info->max_commands = inquiry->adapter_info.max_commands;
1777 enquiry3->rebuild_rate = inquiry->adapter_info.rebuild_rate;
1778 product_info->nchannels = inquiry->adapter_info.nchannels;
1779
1780 for (i = 0; i < 4; i++) {
1781 product_info->fw_version[i] =
1782 inquiry->adapter_info.fw_version[i];
1783
1784 product_info->bios_version[i] =
1785 inquiry->adapter_info.bios_version[i];
1786 }
1787 enquiry3->cache_flush_interval =
1788 inquiry->adapter_info.cache_flush_interval;
1789
1790 product_info->dram_size = inquiry->adapter_info.dram_size;
1791
1792 enquiry3->num_ldrv = inquiry->logdrv_info.num_ldrv;
1793
1794 for (i = 0; i < MAX_LOGICAL_DRIVES_8LD; i++) {
1795 enquiry3->ldrv_size[i] = inquiry->logdrv_info.ldrv_size[i];
1796 enquiry3->ldrv_prop[i] = inquiry->logdrv_info.ldrv_prop[i];
1797 enquiry3->ldrv_state[i] = inquiry->logdrv_info.ldrv_state[i];
1798 }
1799
1800 for (i = 0; i < (MAX_PHYSICAL_DRIVES); i++)
1801 enquiry3->pdrv_state[i] = inquiry->pdrv_info.pdrv_state[i];
1802}
1803
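/*
 * mega_free_sgl()
 * Free the scatter-gather lists and the (extended) passthru structures
 * allocated for each SCB of this adapter.
 */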
1804static inline void
1805mega_free_sgl(adapter_t *adapter)
1806{
1807 scb_t *scb;
1808 int i;
1809
1810 for(i = 0; i < adapter->max_cmds; i++) {
1811
1812 scb = &adapter->scb_list[i];
1813
1814 if( scb->sgl64 ) {
1815 dma_free_coherent(&adapter->dev->dev,
1816 sizeof(mega_sgl64) * adapter->sglen,
1817 scb->sgl64, scb->sgl_dma_addr);
1818
1819 scb->sgl64 = NULL;
1820 }
1821
1822 if( scb->pthru ) {
1823 dma_free_coherent(&adapter->dev->dev,
1824 sizeof(mega_passthru), scb->pthru,
1825 scb->pthru_dma_addr);
1826
1827 scb->pthru = NULL;
1828 }
1829
1830 if( scb->epthru ) {
1831 dma_free_coherent(&adapter->dev->dev,
1832 sizeof(mega_ext_passthru),
1833 scb->epthru, scb->epthru_dma_addr);
1834
1835 scb->epthru = NULL;
1836 }
1837
1838 }
1839}
1840
1841
1842/*
1843 * Get information about the card/driver
1844 */
1845const char *
1846megaraid_info(struct Scsi_Host *host)
1847{
1848 static char buffer[512];
1849 adapter_t *adapter;
1850
1851 adapter = (adapter_t *)host->hostdata;
1852
1853 sprintf (buffer,
1854 "LSI Logic MegaRAID %s %d commands %d targs %d chans %d luns",
1855 adapter->fw_version, adapter->product_info.max_commands,
1856 adapter->host->max_id, adapter->host->max_channel,
1857 (u32)adapter->host->max_lun);
1858 return buffer;
1859}
1860
1861/*
1862 * Abort a previous SCSI request. Only commands on the pending list can be
1863 * aborted. All the commands issued to the F/W must complete.
1864 */
1865static int
1866megaraid_abort(struct scsi_cmnd *cmd)
1867{
1868 adapter_t *adapter;
1869 int rval;
1870
1871 adapter = (adapter_t *)cmd->device->host->hostdata;
1872
1873 rval = megaraid_abort_and_reset(adapter, cmd, SCB_ABORT);
1874
1875 /*
1876	 * This is required here so that any completed requests
1877	 * are communicated over to the mid layer.
1878 */
1879 mega_rundoneq(adapter);
1880
1881 return rval;
1882}
1883
1884
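/*
 * megaraid_reset()
 * Reset handler: release any cluster reservations (when cluster support
 * is compiled in) and then try to abort/reset the command if it is still
 * on the pending list.
 */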
1885static int
1886megaraid_reset(struct scsi_cmnd *cmd)
1887{
1888 adapter_t *adapter;
1889 megacmd_t mc;
1890 int rval;
1891
1892 adapter = (adapter_t *)cmd->device->host->hostdata;
1893
1894#if MEGA_HAVE_CLUSTERING
1895 mc.cmd = MEGA_CLUSTER_CMD;
1896 mc.opcode = MEGA_RESET_RESERVATIONS;
1897
1898 if( mega_internal_command(adapter, &mc, NULL) != 0 ) {
1899 dev_warn(&adapter->dev->dev, "reservation reset failed\n");
1900 }
1901 else {
1902 dev_info(&adapter->dev->dev, "reservation reset\n");
1903 }
1904#endif
1905
1906 spin_lock_irq(&adapter->lock);
1907
1908 rval = megaraid_abort_and_reset(adapter, cmd, SCB_RESET);
1909
1910 /*
1911	 * This is required here so that any completed requests
1912	 * are communicated over to the mid layer.
1913 */
1914 mega_rundoneq(adapter);
1915 spin_unlock_irq(&adapter->lock);
1916
1917 return rval;
1918}
1919
1920/**
1921 * megaraid_abort_and_reset()
1922 * @adapter: megaraid soft state
1923 * @cmd: scsi command to be aborted or reset
1924 * @aor: abort or reset flag
1925 *
1926 * Try to locate the scsi command in the pending queue. If it is found and
1927 * has not been issued to the controller, abort/reset it; otherwise return failure.
1928 */
1929static int
1930megaraid_abort_and_reset(adapter_t *adapter, struct scsi_cmnd *cmd, int aor)
1931{
1932 struct list_head *pos, *next;
1933 scb_t *scb;
1934
1935 dev_warn(&adapter->dev->dev, "%s cmd=%x <c=%d t=%d l=%d>\n",
1936 (aor == SCB_ABORT)? "ABORTING":"RESET",
1937 cmd->cmnd[0], cmd->device->channel,
1938 cmd->device->id, (u32)cmd->device->lun);
1939
1940 if(list_empty(&adapter->pending_list))
1941 return FAILED;
1942
1943 list_for_each_safe(pos, next, &adapter->pending_list) {
1944
1945 scb = list_entry(pos, scb_t, list);
1946
1947 if (scb->cmd == cmd) { /* Found command */
1948
1949 scb->state |= aor;
1950
1951 /*
1952 * Check if this command has firmware ownership. If
1953 * yes, we cannot reset this command. Whenever f/w
1954 * completes this command, we will return appropriate
1955 * status from ISR.
1956 */
1957 if( scb->state & SCB_ISSUED ) {
1958
1959 dev_warn(&adapter->dev->dev,
1960 "%s[%x], fw owner\n",
1961 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1962 scb->idx);
1963
1964 return FAILED;
1965 }
1966 else {
1967
1968 /*
1969 * Not yet issued! Remove from the pending
1970 * list
1971 */
1972 dev_warn(&adapter->dev->dev,
1973 "%s-[%x], driver owner\n",
1974 (aor==SCB_ABORT) ? "ABORTING":"RESET",
1975 scb->idx);
1976
1977 mega_free_scb(adapter, scb);
1978
1979 if( aor == SCB_ABORT ) {
1980 cmd->result = (DID_ABORT << 16);
1981 }
1982 else {
1983 cmd->result = (DID_RESET << 16);
1984 }
1985
1986 list_add_tail(SCSI_LIST(cmd),
1987 &adapter->completed_list);
1988
1989 return SUCCESS;
1990 }
1991 }
1992 }
1993
1994 return FAILED;
1995}
1996
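/*
 * make_local_pdev()/free_local_pdev(): internal commands carry only 32-bit
 * DMA addresses in the mailbox (xferaddr is a u32), so their buffers must
 * come from below 4GB. Rather than touching the adapter's real pci_dev, a
 * throw-away copy with a 32-bit DMA mask is used for these allocations.
 * A minimal usage sketch, mirroring the /proc and ioctl callers below:
 *
 *	struct pci_dev *pdev;
 *	dma_addr_t dma_handle;
 *	caddr_t inquiry;
 *
 *	if (make_local_pdev(adapter, &pdev) != 0)
 *		return 0;
 *	if ((inquiry = mega_allocate_inquiry(&dma_handle, pdev)) != NULL) {
 *		... issue mega_adapinq(adapter, dma_handle) ...
 *		mega_free_inquiry(inquiry, dma_handle, pdev);
 *	}
 *	free_local_pdev(pdev);
 */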
1997static inline int
1998make_local_pdev(adapter_t *adapter, struct pci_dev **pdev)
1999{
2000 *pdev = pci_alloc_dev(NULL);
2001
2002 if( *pdev == NULL ) return -1;
2003
2004 memcpy(*pdev, adapter->dev, sizeof(struct pci_dev));
2005
2006 if (dma_set_mask(&(*pdev)->dev, DMA_BIT_MASK(32)) != 0) {
2007 kfree(*pdev);
2008 return -1;
2009 }
2010
2011 return 0;
2012}
2013
2014static inline void
2015free_local_pdev(struct pci_dev *pdev)
2016{
2017 kfree(pdev);
2018}
2019
2020/**
2021 * mega_allocate_inquiry()
2022 * @dma_handle: handle returned for dma address
2023 * @pdev: handle to pci device
2024 *
2025 * allocates memory for inquiry structure
2026 */
2027static inline void *
2028mega_allocate_inquiry(dma_addr_t *dma_handle, struct pci_dev *pdev)
2029{
2030 return dma_alloc_coherent(&pdev->dev, sizeof(mega_inquiry3),
2031 dma_handle, GFP_KERNEL);
2032}
2033
2034
2035static inline void
2036mega_free_inquiry(void *inquiry, dma_addr_t dma_handle, struct pci_dev *pdev)
2037{
2038 dma_free_coherent(&pdev->dev, sizeof(mega_inquiry3), inquiry,
2039 dma_handle);
2040}
2041
2042
2043#ifdef CONFIG_PROC_FS
2044/* Following code handles /proc fs */
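/*
 * Each controller gets its own directory of read-only entries under the
 * driver's /proc root (see mega_create_proc_entry() below). For host
 * number 0, and assuming the usual "megaraid" root directory name, the
 * layout looks like:
 *
 *	/proc/megaraid/hba0/config
 *	/proc/megaraid/hba0/stat
 *	/proc/megaraid/hba0/mailbox
 *	/proc/megaraid/hba0/rebuild-rate
 *	/proc/megaraid/hba0/battery-status
 *	/proc/megaraid/hba0/diskdrives-ch0 ... diskdrives-ch3
 *	/proc/megaraid/hba0/raiddrives-0-9 ... raiddrives-30-39
 */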
2045
2046/**
2047 * proc_show_config()
2048 * @m: Synthetic file construction data
2049 * @v: File iterator
2050 *
2051 * Display configuration information about the controller.
2052 */
2053static int
2054proc_show_config(struct seq_file *m, void *v)
2055{
2056
2057 adapter_t *adapter = m->private;
2058
2059 seq_puts(m, MEGARAID_VERSION);
2060 if(adapter->product_info.product_name[0])
2061 seq_printf(m, "%s\n", adapter->product_info.product_name);
2062
2063 seq_puts(m, "Controller Type: ");
2064
2065 if( adapter->flag & BOARD_MEMMAP )
2066 seq_puts(m, "438/466/467/471/493/518/520/531/532\n");
2067 else
2068 seq_puts(m, "418/428/434\n");
2069
2070 if(adapter->flag & BOARD_40LD)
2071 seq_puts(m, "Controller Supports 40 Logical Drives\n");
2072
2073 if(adapter->flag & BOARD_64BIT)
2074 seq_puts(m, "Controller capable of 64-bit memory addressing\n");
2075 if( adapter->has_64bit_addr )
2076 seq_puts(m, "Controller using 64-bit memory addressing\n");
2077 else
2078 seq_puts(m, "Controller is not using 64-bit memory addressing\n");
2079
2080 seq_printf(m, "Base = %08lx, Irq = %d, ",
2081 adapter->base, adapter->host->irq);
2082
2083 seq_printf(m, "Logical Drives = %d, Channels = %d\n",
2084 adapter->numldrv, adapter->product_info.nchannels);
2085
2086 seq_printf(m, "Version =%s:%s, DRAM = %dMb\n",
2087 adapter->fw_version, adapter->bios_version,
2088 adapter->product_info.dram_size);
2089
2090 seq_printf(m, "Controller Queue Depth = %d, Driver Queue Depth = %d\n",
2091 adapter->product_info.max_commands, adapter->max_cmds);
2092
2093 seq_printf(m, "support_ext_cdb = %d\n", adapter->support_ext_cdb);
2094 seq_printf(m, "support_random_del = %d\n", adapter->support_random_del);
2095 seq_printf(m, "boot_ldrv_enabled = %d\n", adapter->boot_ldrv_enabled);
2096 seq_printf(m, "boot_ldrv = %d\n", adapter->boot_ldrv);
2097 seq_printf(m, "boot_pdrv_enabled = %d\n", adapter->boot_pdrv_enabled);
2098 seq_printf(m, "boot_pdrv_ch = %d\n", adapter->boot_pdrv_ch);
2099 seq_printf(m, "boot_pdrv_tgt = %d\n", adapter->boot_pdrv_tgt);
2100 seq_printf(m, "quiescent = %d\n",
2101 atomic_read(&adapter->quiescent));
2102 seq_printf(m, "has_cluster = %d\n", adapter->has_cluster);
2103
2104 seq_puts(m, "\nModule Parameters:\n");
2105 seq_printf(m, "max_cmd_per_lun = %d\n", max_cmd_per_lun);
2106 seq_printf(m, "max_sectors_per_io = %d\n", max_sectors_per_io);
2107 return 0;
2108}
2109
2110/**
2111 * proc_show_stat()
2112 * @m: Synthetic file construction data
2113 * @v: File iterator
2114 *
2115 * Display statistical information about the I/O activity.
2116 */
2117static int
2118proc_show_stat(struct seq_file *m, void *v)
2119{
2120 adapter_t *adapter = m->private;
2121#if MEGA_HAVE_STATS
2122 int i;
2123#endif
2124
2125 seq_puts(m, "Statistical Information for this controller\n");
2126 seq_printf(m, "pend_cmds = %d\n", atomic_read(&adapter->pend_cmds));
2127#if MEGA_HAVE_STATS
2128 for(i = 0; i < adapter->numldrv; i++) {
2129 seq_printf(m, "Logical Drive %d:\n", i);
2130 seq_printf(m, "\tReads Issued = %lu, Writes Issued = %lu\n",
2131 adapter->nreads[i], adapter->nwrites[i]);
2132 seq_printf(m, "\tSectors Read = %lu, Sectors Written = %lu\n",
2133 adapter->nreadblocks[i], adapter->nwriteblocks[i]);
2134 seq_printf(m, "\tRead errors = %lu, Write errors = %lu\n\n",
2135 adapter->rd_errors[i], adapter->wr_errors[i]);
2136 }
2137#else
2138 seq_puts(m, "IO and error counters not compiled in driver.\n");
2139#endif
2140 return 0;
2141}
2142
2143
2144/**
2145 * proc_show_mbox()
2146 * @m: Synthetic file construction data
2147 * @v: File iterator
2148 *
2149 * Display mailbox information for the last command issued. This information
2150 * is good for debugging.
2151 */
2152static int
2153proc_show_mbox(struct seq_file *m, void *v)
2154{
2155 adapter_t *adapter = m->private;
2156 volatile mbox_t *mbox = adapter->mbox;
2157
2158 seq_puts(m, "Contents of Mail Box Structure\n");
2159 seq_printf(m, " Fw Command = 0x%02x\n", mbox->m_out.cmd);
2160 seq_printf(m, " Cmd Sequence = 0x%02x\n", mbox->m_out.cmdid);
2161 seq_printf(m, " No of Sectors= %04d\n", mbox->m_out.numsectors);
2162 seq_printf(m, " LBA = 0x%02x\n", mbox->m_out.lba);
2163 seq_printf(m, " DTA = 0x%08x\n", mbox->m_out.xferaddr);
2164 seq_printf(m, " Logical Drive= 0x%02x\n", mbox->m_out.logdrv);
2165 seq_printf(m, " No of SG Elmt= 0x%02x\n", mbox->m_out.numsgelements);
2166 seq_printf(m, " Busy = %01x\n", mbox->m_in.busy);
2167 seq_printf(m, " Status = 0x%02x\n", mbox->m_in.status);
2168 return 0;
2169}
2170
2171
2172/**
2173 * proc_show_rebuild_rate()
2174 * @m: Synthetic file construction data
2175 * @v: File iterator
2176 *
2177 * Display current rebuild rate
2178 */
2179static int
2180proc_show_rebuild_rate(struct seq_file *m, void *v)
2181{
2182 adapter_t *adapter = m->private;
2183 dma_addr_t dma_handle;
2184 caddr_t inquiry;
2185 struct pci_dev *pdev;
2186
2187 if( make_local_pdev(adapter, &pdev) != 0 )
2188 return 0;
2189
2190 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2191 goto free_pdev;
2192
2193 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2194 seq_puts(m, "Adapter inquiry failed.\n");
2195 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2196 goto free_inquiry;
2197 }
2198
2199 if( adapter->flag & BOARD_40LD )
2200 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2201 ((mega_inquiry3 *)inquiry)->rebuild_rate);
2202 else
2203 seq_printf(m, "Rebuild Rate: [%d%%]\n",
2204 ((mraid_ext_inquiry *)
2205 inquiry)->raid_inq.adapter_info.rebuild_rate);
2206
2207free_inquiry:
2208 mega_free_inquiry(inquiry, dma_handle, pdev);
2209free_pdev:
2210 free_local_pdev(pdev);
2211 return 0;
2212}
2213
2214
2215/**
2216 * proc_show_battery()
2217 * @m: Synthetic file construction data
2218 * @v: File iterator
2219 *
2220 * Display information about the battery module on the controller.
2221 */
2222static int
2223proc_show_battery(struct seq_file *m, void *v)
2224{
2225 adapter_t *adapter = m->private;
2226 dma_addr_t dma_handle;
2227 caddr_t inquiry;
2228 struct pci_dev *pdev;
2229 u8 battery_status;
2230
2231 if( make_local_pdev(adapter, &pdev) != 0 )
2232 return 0;
2233
2234 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2235 goto free_pdev;
2236
2237 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2238 seq_puts(m, "Adapter inquiry failed.\n");
2239 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2240 goto free_inquiry;
2241 }
2242
2243 if( adapter->flag & BOARD_40LD ) {
2244 battery_status = ((mega_inquiry3 *)inquiry)->battery_status;
2245 }
2246 else {
2247 battery_status = ((mraid_ext_inquiry *)inquiry)->
2248 raid_inq.adapter_info.battery_status;
2249 }
2250
2251 /*
2252 * Decode the battery status
2253 */
2254 seq_printf(m, "Battery Status:[%d]", battery_status);
2255
2256 if(battery_status == MEGA_BATT_CHARGE_DONE)
2257 seq_puts(m, " Charge Done");
2258
2259 if(battery_status & MEGA_BATT_MODULE_MISSING)
2260 seq_puts(m, " Module Missing");
2261
2262 if(battery_status & MEGA_BATT_LOW_VOLTAGE)
2263 seq_puts(m, " Low Voltage");
2264
2265 if(battery_status & MEGA_BATT_TEMP_HIGH)
2266 seq_puts(m, " Temperature High");
2267
2268 if(battery_status & MEGA_BATT_PACK_MISSING)
2269 seq_puts(m, " Pack Missing");
2270
2271 if(battery_status & MEGA_BATT_CHARGE_INPROG)
2272 seq_puts(m, " Charge In-progress");
2273
2274 if(battery_status & MEGA_BATT_CHARGE_FAIL)
2275 seq_puts(m, " Charge Fail");
2276
2277 if(battery_status & MEGA_BATT_CYCLES_EXCEEDED)
2278 seq_puts(m, " Cycles Exceeded");
2279
2280 seq_putc(m, '\n');
2281
2282free_inquiry:
2283 mega_free_inquiry(inquiry, dma_handle, pdev);
2284free_pdev:
2285 free_local_pdev(pdev);
2286 return 0;
2287}
2288
2289
2290/*
2291 * Display scsi inquiry
2292 */
2293static void
2294mega_print_inquiry(struct seq_file *m, char *scsi_inq)
2295{
2296 int i;
2297
2298 seq_puts(m, " Vendor: ");
2299 seq_write(m, scsi_inq + 8, 8);
2300 seq_puts(m, " Model: ");
2301 seq_write(m, scsi_inq + 16, 16);
2302 seq_puts(m, " Rev: ");
2303 seq_write(m, scsi_inq + 32, 4);
2304 seq_putc(m, '\n');
2305
2306 i = scsi_inq[0] & 0x1f;
2307 seq_printf(m, " Type: %s ", scsi_device_type(i));
2308
2309 seq_printf(m, " ANSI SCSI revision: %02x",
2310 scsi_inq[2] & 0x07);
2311
2312 if( (scsi_inq[2] & 0x07) == 1 && (scsi_inq[3] & 0x0f) == 1 )
2313 seq_puts(m, " CCS\n");
2314 else
2315 seq_putc(m, '\n');
2316}
2317
2318/**
2319 * proc_show_pdrv()
2320 * @m: Synthetic file construction data
2321 * @adapter: pointer to our soft state
2322 * @channel: channel
2323 *
2324 * Display information about the physical drives.
2325 */
2326static int
2327proc_show_pdrv(struct seq_file *m, adapter_t *adapter, int channel)
2328{
2329 dma_addr_t dma_handle;
2330 char *scsi_inq;
2331 dma_addr_t scsi_inq_dma_handle;
2332 caddr_t inquiry;
2333 struct pci_dev *pdev;
2334 u8 *pdrv_state;
2335 u8 state;
2336 int tgt;
2337 int max_channels;
2338 int i;
2339
2340 if( make_local_pdev(adapter, &pdev) != 0 )
2341 return 0;
2342
2343 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2344 goto free_pdev;
2345
2346 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2347 seq_puts(m, "Adapter inquiry failed.\n");
2348 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2349 goto free_inquiry;
2350 }
2351
2352
2353 scsi_inq = dma_alloc_coherent(&pdev->dev, 256, &scsi_inq_dma_handle,
2354 GFP_KERNEL);
2355 if( scsi_inq == NULL ) {
2356 seq_puts(m, "memory not available for scsi inq.\n");
2357 goto free_inquiry;
2358 }
2359
2360 if( adapter->flag & BOARD_40LD ) {
2361 pdrv_state = ((mega_inquiry3 *)inquiry)->pdrv_state;
2362 }
2363 else {
2364 pdrv_state = ((mraid_ext_inquiry *)inquiry)->
2365 raid_inq.pdrv_info.pdrv_state;
2366 }
2367
2368 max_channels = adapter->product_info.nchannels;
2369
2370 if( channel >= max_channels ) {
2371 goto free_pci;
2372 }
2373
2374 for( tgt = 0; tgt <= MAX_TARGET; tgt++ ) {
2375
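		/* pdrv_state[] carries one status byte per target, 16 targets per channel */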
2376 i = channel*16 + tgt;
2377
2378 state = *(pdrv_state + i);
2379 switch( state & 0x0F ) {
2380 case PDRV_ONLINE:
2381 seq_printf(m, "Channel:%2d Id:%2d State: Online",
2382 channel, tgt);
2383 break;
2384
2385 case PDRV_FAILED:
2386 seq_printf(m, "Channel:%2d Id:%2d State: Failed",
2387 channel, tgt);
2388 break;
2389
2390 case PDRV_RBLD:
2391 seq_printf(m, "Channel:%2d Id:%2d State: Rebuild",
2392 channel, tgt);
2393 break;
2394
2395 case PDRV_HOTSPARE:
2396 seq_printf(m, "Channel:%2d Id:%2d State: Hot spare",
2397 channel, tgt);
2398 break;
2399
2400 default:
2401 seq_printf(m, "Channel:%2d Id:%2d State: Un-configured",
2402 channel, tgt);
2403 break;
2404 }
2405
2406 /*
2407 * This interface displays inquiries for disk drives
2408		 * only. Inquiries for logical drives and non-disk
2409 * devices are available through /proc/scsi/scsi
2410 */
2411 memset(scsi_inq, 0, 256);
2412 if( mega_internal_dev_inquiry(adapter, channel, tgt,
2413 scsi_inq_dma_handle) ||
2414 (scsi_inq[0] & 0x1F) != TYPE_DISK ) {
2415 continue;
2416 }
2417
2418 /*
2419 * Check for overflow. We print less than 240
2420 * characters for inquiry
2421 */
2422 seq_puts(m, ".\n");
2423 mega_print_inquiry(m, scsi_inq);
2424 }
2425
2426free_pci:
2427 dma_free_coherent(&pdev->dev, 256, scsi_inq, scsi_inq_dma_handle);
2428free_inquiry:
2429 mega_free_inquiry(inquiry, dma_handle, pdev);
2430free_pdev:
2431 free_local_pdev(pdev);
2432 return 0;
2433}
2434
2435/**
2436 * proc_show_pdrv_ch0()
2437 * @m: Synthetic file construction data
2438 * @v: File iterator
2439 *
2440 * Display information about the physical drives on physical channel 0.
2441 */
2442static int
2443proc_show_pdrv_ch0(struct seq_file *m, void *v)
2444{
2445 return proc_show_pdrv(m, m->private, 0);
2446}
2447
2448
2449/**
2450 * proc_show_pdrv_ch1()
2451 * @m: Synthetic file construction data
2452 * @v: File iterator
2453 *
2454 * Display information about the physical drives on physical channel 1.
2455 */
2456static int
2457proc_show_pdrv_ch1(struct seq_file *m, void *v)
2458{
2459 return proc_show_pdrv(m, m->private, 1);
2460}
2461
2462
2463/**
2464 * proc_show_pdrv_ch2()
2465 * @m: Synthetic file construction data
2466 * @v: File iterator
2467 *
2468 * Display information about the physical drives on physical channel 2.
2469 */
2470static int
2471proc_show_pdrv_ch2(struct seq_file *m, void *v)
2472{
2473 return proc_show_pdrv(m, m->private, 2);
2474}
2475
2476
2477/**
2478 * proc_show_pdrv_ch3()
2479 * @m: Synthetic file construction data
2480 * @v: File iterator
2481 *
2482 * Display information about the physical drives on physical channel 3.
2483 */
2484static int
2485proc_show_pdrv_ch3(struct seq_file *m, void *v)
2486{
2487 return proc_show_pdrv(m, m->private, 3);
2488}
2489
2490
2491/**
2492 * proc_show_rdrv()
2493 * @m: Synthetic file construction data
2494 * @adapter: pointer to our soft state
2495 * @start: starting logical drive to display
2496 * @end: ending logical drive to display
2497 *
2498 * We do not print the inquiry information since it is already available
2499 * through the /proc/scsi/scsi interface.
2500 */
2501static int
2502proc_show_rdrv(struct seq_file *m, adapter_t *adapter, int start, int end )
2503{
2504 dma_addr_t dma_handle;
2505 logdrv_param *lparam;
2506 megacmd_t mc;
2507 char *disk_array;
2508 dma_addr_t disk_array_dma_handle;
2509 caddr_t inquiry;
2510 struct pci_dev *pdev;
2511 u8 *rdrv_state;
2512 int num_ldrv;
2513 u32 array_sz;
2514 int i;
2515
2516 if( make_local_pdev(adapter, &pdev) != 0 )
2517 return 0;
2518
2519 if( (inquiry = mega_allocate_inquiry(&dma_handle, pdev)) == NULL )
2520 goto free_pdev;
2521
2522 if( mega_adapinq(adapter, dma_handle) != 0 ) {
2523 seq_puts(m, "Adapter inquiry failed.\n");
2524 dev_warn(&adapter->dev->dev, "inquiry failed\n");
2525 goto free_inquiry;
2526 }
2527
2528 memset(&mc, 0, sizeof(megacmd_t));
2529
2530 if( adapter->flag & BOARD_40LD ) {
2531 array_sz = sizeof(disk_array_40ld);
2532
2533 rdrv_state = ((mega_inquiry3 *)inquiry)->ldrv_state;
2534
2535 num_ldrv = ((mega_inquiry3 *)inquiry)->num_ldrv;
2536 }
2537 else {
2538 array_sz = sizeof(disk_array_8ld);
2539
2540 rdrv_state = ((mraid_ext_inquiry *)inquiry)->
2541 raid_inq.logdrv_info.ldrv_state;
2542
2543 num_ldrv = ((mraid_ext_inquiry *)inquiry)->
2544 raid_inq.logdrv_info.num_ldrv;
2545 }
2546
2547 disk_array = dma_alloc_coherent(&pdev->dev, array_sz,
2548 &disk_array_dma_handle, GFP_KERNEL);
2549
2550 if( disk_array == NULL ) {
2551 seq_puts(m, "memory not available.\n");
2552 goto free_inquiry;
2553 }
2554
2555 mc.xferaddr = (u32)disk_array_dma_handle;
2556
2557 if( adapter->flag & BOARD_40LD ) {
2558 mc.cmd = FC_NEW_CONFIG;
2559 mc.opcode = OP_DCMD_READ_CONFIG;
2560
2561 if( mega_internal_command(adapter, &mc, NULL) ) {
2562 seq_puts(m, "40LD read config failed.\n");
2563 goto free_pci;
2564 }
2565
2566 }
2567 else {
2568 mc.cmd = NEW_READ_CONFIG_8LD;
2569
2570 if( mega_internal_command(adapter, &mc, NULL) ) {
2571 mc.cmd = READ_CONFIG_8LD;
2572 if( mega_internal_command(adapter, &mc, NULL) ) {
2573 seq_puts(m, "8LD read config failed.\n");
2574 goto free_pci;
2575 }
2576 }
2577 }
2578
2579 for( i = start; i < ( (end+1 < num_ldrv) ? end+1 : num_ldrv ); i++ ) {
2580
2581 if( adapter->flag & BOARD_40LD ) {
2582 lparam =
2583 &((disk_array_40ld *)disk_array)->ldrv[i].lparam;
2584 }
2585 else {
2586 lparam =
2587 &((disk_array_8ld *)disk_array)->ldrv[i].lparam;
2588 }
2589
2590 /*
2591 * Check for overflow. We print less than 240 characters for
2592 * information about each logical drive.
2593 */
2594 seq_printf(m, "Logical drive:%2d:, ", i);
2595
2596 switch( rdrv_state[i] & 0x0F ) {
2597 case RDRV_OFFLINE:
2598 seq_puts(m, "state: offline");
2599 break;
2600 case RDRV_DEGRADED:
2601 seq_puts(m, "state: degraded");
2602 break;
2603 case RDRV_OPTIMAL:
2604 seq_puts(m, "state: optimal");
2605 break;
2606 case RDRV_DELETED:
2607 seq_puts(m, "state: deleted");
2608 break;
2609 default:
2610 seq_puts(m, "state: unknown");
2611 break;
2612 }
2613
2614 /*
2615 * Check if check consistency or initialization is going on
2616 * for this logical drive.
2617 */
2618 if( (rdrv_state[i] & 0xF0) == 0x20 )
2619 seq_puts(m, ", check-consistency in progress");
2620 else if( (rdrv_state[i] & 0xF0) == 0x10 )
2621 seq_puts(m, ", initialization in progress");
2622
2623 seq_putc(m, '\n');
2624
2625 seq_printf(m, "Span depth:%3d, ", lparam->span_depth);
2626 seq_printf(m, "RAID level:%3d, ", lparam->level);
2627 seq_printf(m, "Stripe size:%3d, ",
2628 lparam->stripe_sz ? lparam->stripe_sz/2: 128);
2629 seq_printf(m, "Row size:%3d\n", lparam->row_size);
2630
2631 seq_puts(m, "Read Policy: ");
2632 switch(lparam->read_ahead) {
2633 case NO_READ_AHEAD:
2634 seq_puts(m, "No read ahead, ");
2635 break;
2636 case READ_AHEAD:
2637 seq_puts(m, "Read ahead, ");
2638 break;
2639 case ADAP_READ_AHEAD:
2640 seq_puts(m, "Adaptive, ");
2641 break;
2642
2643 }
2644
2645 seq_puts(m, "Write Policy: ");
2646 switch(lparam->write_mode) {
2647 case WRMODE_WRITE_THRU:
2648 seq_puts(m, "Write thru, ");
2649 break;
2650 case WRMODE_WRITE_BACK:
2651 seq_puts(m, "Write back, ");
2652 break;
2653 }
2654
2655 seq_puts(m, "Cache Policy: ");
2656 switch(lparam->direct_io) {
2657 case CACHED_IO:
2658 seq_puts(m, "Cached IO\n\n");
2659 break;
2660 case DIRECT_IO:
2661 seq_puts(m, "Direct IO\n\n");
2662 break;
2663 }
2664 }
2665
2666free_pci:
2667 dma_free_coherent(&pdev->dev, array_sz, disk_array,
2668 disk_array_dma_handle);
2669free_inquiry:
2670 mega_free_inquiry(inquiry, dma_handle, pdev);
2671free_pdev:
2672 free_local_pdev(pdev);
2673 return 0;
2674}
2675
2676/**
2677 * proc_show_rdrv_10()
2678 * @m: Synthetic file construction data
2679 * @v: File iterator
2680 *
2681 * Display real time information about the logical drives 0 through 9.
2682 */
2683static int
2684proc_show_rdrv_10(struct seq_file *m, void *v)
2685{
2686 return proc_show_rdrv(m, m->private, 0, 9);
2687}
2688
2689
2690/**
2691 * proc_show_rdrv_20()
2692 * @m: Synthetic file construction data
2693 * @v: File iterator
2694 *
2695 * Display real time information about the logical drives 10 through 19.
2696 */
2697static int
2698proc_show_rdrv_20(struct seq_file *m, void *v)
2699{
2700 return proc_show_rdrv(m, m->private, 10, 19);
2701}
2702
2703
2704/**
2705 * proc_show_rdrv_30()
2706 * @m: Synthetic file construction data
2707 * @v: File iterator
2708 *
2709 * Display real time information about the logical drives 20 through 29.
2710 */
2711static int
2712proc_show_rdrv_30(struct seq_file *m, void *v)
2713{
2714 return proc_show_rdrv(m, m->private, 20, 29);
2715}
2716
2717
2718/**
2719 * proc_show_rdrv_40()
2720 * @m: Synthetic file construction data
2721 * @v: File iterator
2722 *
2723 * Display real time information about the logical drives 30 through 39.
2724 */
2725static int
2726proc_show_rdrv_40(struct seq_file *m, void *v)
2727{
2728 return proc_show_rdrv(m, m->private, 30, 39);
2729}
2730
2731/**
2732 * mega_create_proc_entry()
2733 * @index: index in soft state array
2734 * @parent: parent node for this /proc entry
2735 *
2736 * Creates /proc entries for our controllers.
2737 */
2738static void
2739mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2740{
2741 adapter_t *adapter = hba_soft_state[index];
2742 struct proc_dir_entry *dir;
2743 u8 string[16];
2744
2745 sprintf(string, "hba%d", adapter->host->host_no);
2746 dir = proc_mkdir_data(string, 0, parent, adapter);
2747 if (!dir) {
2748 dev_warn(&adapter->dev->dev, "proc_mkdir failed\n");
2749 return;
2750 }
2751
2752 proc_create_single_data("config", S_IRUSR, dir,
2753 proc_show_config, adapter);
2754 proc_create_single_data("stat", S_IRUSR, dir,
2755 proc_show_stat, adapter);
2756 proc_create_single_data("mailbox", S_IRUSR, dir,
2757 proc_show_mbox, adapter);
2758#if MEGA_HAVE_ENH_PROC
2759 proc_create_single_data("rebuild-rate", S_IRUSR, dir,
2760 proc_show_rebuild_rate, adapter);
2761 proc_create_single_data("battery-status", S_IRUSR, dir,
2762 proc_show_battery, adapter);
2763 proc_create_single_data("diskdrives-ch0", S_IRUSR, dir,
2764 proc_show_pdrv_ch0, adapter);
2765 proc_create_single_data("diskdrives-ch1", S_IRUSR, dir,
2766 proc_show_pdrv_ch1, adapter);
2767 proc_create_single_data("diskdrives-ch2", S_IRUSR, dir,
2768 proc_show_pdrv_ch2, adapter);
2769 proc_create_single_data("diskdrives-ch3", S_IRUSR, dir,
2770 proc_show_pdrv_ch3, adapter);
2771 proc_create_single_data("raiddrives-0-9", S_IRUSR, dir,
2772 proc_show_rdrv_10, adapter);
2773 proc_create_single_data("raiddrives-10-19", S_IRUSR, dir,
2774 proc_show_rdrv_20, adapter);
2775 proc_create_single_data("raiddrives-20-29", S_IRUSR, dir,
2776 proc_show_rdrv_30, adapter);
2777 proc_create_single_data("raiddrives-30-39", S_IRUSR, dir,
2778 proc_show_rdrv_40, adapter);
2779#endif
2780}
2781
2782#else
2783static inline void mega_create_proc_entry(int index, struct proc_dir_entry *parent)
2784{
2785}
2786#endif
2787
2788
2789/*
2790 * megaraid_biosparam()
2791 *
2792 * Return the disk geometry for a particular disk
2793 */
2794static int
2795megaraid_biosparam(struct scsi_device *sdev, struct block_device *bdev,
2796 sector_t capacity, int geom[])
2797{
2798 adapter_t *adapter;
2799 int heads;
2800 int sectors;
2801 int cylinders;
2802
2803 /* Get pointer to host config structure */
2804 adapter = (adapter_t *)sdev->host->hostdata;
2805
2806 if (IS_RAID_CH(adapter, sdev->channel)) {
2807 /* Default heads (64) & sectors (32) */
2808 heads = 64;
2809 sectors = 32;
2810 cylinders = (ulong)capacity / (heads * sectors);
2811
2812 /*
2813 * Handle extended translation size for logical drives
2814 * > 1Gb
2815 */
2816 if ((ulong)capacity >= 0x200000) {
2817 heads = 255;
2818 sectors = 63;
2819 cylinders = (ulong)capacity / (heads * sectors);
2820 }
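
		/*
		 * Worked example: a 1GB logical drive of 0x200000 (2097152)
		 * 512-byte sectors takes the extended mapping above, giving
		 * heads = 255, sectors = 63 and
		 * cylinders = 2097152 / (255 * 63) = 130.
		 */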
2821
2822 /* return result */
2823 geom[0] = heads;
2824 geom[1] = sectors;
2825 geom[2] = cylinders;
2826 }
2827 else {
2828 if (scsi_partsize(bdev, capacity, geom))
2829 return 0;
2830
2831 dev_info(&adapter->dev->dev,
2832 "invalid partition on this disk on channel %d\n",
2833 sdev->channel);
2834
2835 /* Default heads (64) & sectors (32) */
2836 heads = 64;
2837 sectors = 32;
2838 cylinders = (ulong)capacity / (heads * sectors);
2839
2840 /* Handle extended translation size for logical drives > 1Gb */
2841 if ((ulong)capacity >= 0x200000) {
2842 heads = 255;
2843 sectors = 63;
2844 cylinders = (ulong)capacity / (heads * sectors);
2845 }
2846
2847 /* return result */
2848 geom[0] = heads;
2849 geom[1] = sectors;
2850 geom[2] = cylinders;
2851 }
2852
2853 return 0;
2854}
2855
2856/**
2857 * mega_init_scb()
2858 * @adapter: pointer to our soft state
2859 *
2860 * Allocate memory for the various pointers in the scb structures:
2861 * scatter-gather list pointer, passthru and extended passthru structure
2862 * pointers.
2863 */
2864static int
2865mega_init_scb(adapter_t *adapter)
2866{
2867 scb_t *scb;
2868 int i;
2869
2870 for( i = 0; i < adapter->max_cmds; i++ ) {
2871
2872 scb = &adapter->scb_list[i];
2873
2874 scb->sgl64 = NULL;
2875 scb->sgl = NULL;
2876 scb->pthru = NULL;
2877 scb->epthru = NULL;
2878 }
2879
2880 for( i = 0; i < adapter->max_cmds; i++ ) {
2881
2882 scb = &adapter->scb_list[i];
2883
2884 scb->idx = i;
2885
2886 scb->sgl64 = dma_alloc_coherent(&adapter->dev->dev,
2887 sizeof(mega_sgl64) * adapter->sglen,
2888 &scb->sgl_dma_addr, GFP_KERNEL);
2889
2890 scb->sgl = (mega_sglist *)scb->sgl64;
2891
2892 if( !scb->sgl ) {
2893 dev_warn(&adapter->dev->dev, "RAID: Can't allocate sglist\n");
2894 mega_free_sgl(adapter);
2895 return -1;
2896 }
2897
2898 scb->pthru = dma_alloc_coherent(&adapter->dev->dev,
2899 sizeof(mega_passthru),
2900 &scb->pthru_dma_addr, GFP_KERNEL);
2901
2902 if( !scb->pthru ) {
2903 dev_warn(&adapter->dev->dev, "RAID: Can't allocate passthru\n");
2904 mega_free_sgl(adapter);
2905 return -1;
2906 }
2907
2908 scb->epthru = dma_alloc_coherent(&adapter->dev->dev,
2909 sizeof(mega_ext_passthru),
2910 &scb->epthru_dma_addr, GFP_KERNEL);
2911
2912 if( !scb->epthru ) {
2913 dev_warn(&adapter->dev->dev,
2914 "Can't allocate extended passthru\n");
2915 mega_free_sgl(adapter);
2916 return -1;
2917 }
2918
2919
2920 scb->dma_type = MEGA_DMA_TYPE_NONE;
2921
2922 /*
2923		 * Link to the free list.
2924		 * No lock is required since we are still loading the driver,
2925		 * so no commands can be issued yet.
2926 */
2927 scb->state = SCB_FREE;
2928 scb->cmd = NULL;
2929 list_add(&scb->list, &adapter->free_list);
2930 }
2931
2932 return 0;
2933}
2934
2935
2936/**
2937 * megadev_open()
2938 * @inode: unused
2939 * @filep: unused
2940 *
2941 * Routines for the character/ioctl interface to the driver. Find out if this
2942 * is a valid open.
2943 */
2944static int
2945megadev_open (struct inode *inode, struct file *filep)
2946{
2947 /*
2948 * Only allow superuser to access private ioctl interface
2949 */
2950 if( !capable(CAP_SYS_ADMIN) ) return -EACCES;
2951
2952 return 0;
2953}
2954
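/*
 * A minimal user-space sketch of the older MIMD calling convention that
 * this handler accepts. Only the uioctl_t fields mirror mega_m_to_n()
 * below; the device node name and the use of USCSICMD as the request
 * number are illustrative assumptions:
 *
 *	struct uioctl_t uioc = { 0 };
 *	unsigned int nadap = 0;
 *	int fd = open("/dev/megadev0", O_RDONLY);
 *
 *	uioc.ui.fcs.opcode    = 0x82;
 *	uioc.ui.fcs.subopcode = MEGAIOC_QNADAP;
 *	uioc.data             = (caddr_t)&nadap;
 *	ioctl(fd, USCSICMD, &uioc);
 *
 * The adapter count is both returned by the ioctl and written to nadap
 * (see the GET_N_ADAP case below).
 */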
2955
2956/**
2957 * megadev_ioctl()
2958 * @filep: Our device file
2959 * @cmd: ioctl command
2960 * @arg: user buffer
2961 *
2962 * ioctl entry point for our private ioctl interface. We move the data in from
2963 * the user space, prepare the command (if necessary, convert the old MIMD
2964 * ioctl to new ioctl command), and issue a synchronous command to the
2965 * controller.
2966 */
2967static int
2968megadev_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
2969{
2970 adapter_t *adapter;
2971 nitioctl_t uioc;
2972 int adapno;
2973 int rval;
2974 mega_passthru __user *upthru; /* user address for passthru */
2975 mega_passthru *pthru; /* copy user passthru here */
2976 dma_addr_t pthru_dma_hndl;
2977 void *data = NULL; /* data to be transferred */
2978 dma_addr_t data_dma_hndl; /* dma handle for data xfer area */
2979 megacmd_t mc;
2980#if MEGA_HAVE_STATS
2981 megastat_t __user *ustats = NULL;
2982 int num_ldrv = 0;
2983#endif
2984 u32 uxferaddr = 0;
2985 struct pci_dev *pdev;
2986
2987 /*
2988	 * Make sure only USCSICMD is issued through this interface.
2989	 * A MIMD application may still fire a different command.
2990 */
2991 if( (_IOC_TYPE(cmd) != MEGAIOC_MAGIC) && (cmd != USCSICMD) ) {
2992 return -EINVAL;
2993 }
2994
2995 /*
2996 * Check and convert a possible MIMD command to NIT command.
2997 * mega_m_to_n() copies the data from the user space, so we do not
2998 * have to do it here.
2999 * NOTE: We will need some user address to copyout the data, therefore
3000	 * the interface layer will also provide us with the required user
3001 * addresses.
3002 */
3003 memset(&uioc, 0, sizeof(nitioctl_t));
3004 if( (rval = mega_m_to_n( (void __user *)arg, &uioc)) != 0 )
3005 return rval;
3006
3007
3008 switch( uioc.opcode ) {
3009
3010 case GET_DRIVER_VER:
3011 if( put_user(driver_ver, (u32 __user *)uioc.uioc_uaddr) )
3012 return (-EFAULT);
3013
3014 break;
3015
3016 case GET_N_ADAP:
3017 if( put_user(hba_count, (u32 __user *)uioc.uioc_uaddr) )
3018 return (-EFAULT);
3019
3020 /*
3021 * Shucks. MIMD interface returns a positive value for number
3022 * of adapters. TODO: Change it to return 0 when there is no
3023		 * application using the mimd interface.
3024 */
3025 return hba_count;
3026
3027 case GET_ADAP_INFO:
3028
3029 /*
3030 * Which adapter
3031 */
3032 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3033 return (-ENODEV);
3034
3035 if( copy_to_user(uioc.uioc_uaddr, mcontroller+adapno,
3036 sizeof(struct mcontroller)) )
3037 return (-EFAULT);
3038 break;
3039
3040#if MEGA_HAVE_STATS
3041
3042 case GET_STATS:
3043 /*
3044 * Which adapter
3045 */
3046 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3047 return (-ENODEV);
3048
3049 adapter = hba_soft_state[adapno];
3050
3051 ustats = uioc.uioc_uaddr;
3052
3053 if( copy_from_user(&num_ldrv, &ustats->num_ldrv, sizeof(int)) )
3054 return (-EFAULT);
3055
3056 /*
3057 * Check for the validity of the logical drive number
3058 */
3059 if( num_ldrv >= MAX_LOGICAL_DRIVES_40LD ) return -EINVAL;
3060
3061 if( copy_to_user(ustats->nreads, adapter->nreads,
3062 num_ldrv*sizeof(u32)) )
3063 return -EFAULT;
3064
3065 if( copy_to_user(ustats->nreadblocks, adapter->nreadblocks,
3066 num_ldrv*sizeof(u32)) )
3067 return -EFAULT;
3068
3069 if( copy_to_user(ustats->nwrites, adapter->nwrites,
3070 num_ldrv*sizeof(u32)) )
3071 return -EFAULT;
3072
3073 if( copy_to_user(ustats->nwriteblocks, adapter->nwriteblocks,
3074 num_ldrv*sizeof(u32)) )
3075 return -EFAULT;
3076
3077 if( copy_to_user(ustats->rd_errors, adapter->rd_errors,
3078 num_ldrv*sizeof(u32)) )
3079 return -EFAULT;
3080
3081 if( copy_to_user(ustats->wr_errors, adapter->wr_errors,
3082 num_ldrv*sizeof(u32)) )
3083 return -EFAULT;
3084
3085 return 0;
3086
3087#endif
3088 case MBOX_CMD:
3089
3090 /*
3091 * Which adapter
3092 */
3093 if( (adapno = GETADAP(uioc.adapno)) >= hba_count )
3094 return (-ENODEV);
3095
3096 adapter = hba_soft_state[adapno];
3097
3098 /*
3099 * Deletion of logical drive is a special case. The adapter
3100 * should be quiescent before this command is issued.
3101 */
3102 if( uioc.uioc_rmbox[0] == FC_DEL_LOGDRV &&
3103 uioc.uioc_rmbox[2] == OP_DEL_LOGDRV ) {
3104
3105 /*
3106 * Do we support this feature
3107 */
3108 if( !adapter->support_random_del ) {
3109 dev_warn(&adapter->dev->dev, "logdrv "
3110 "delete on non-supporting F/W\n");
3111
3112 return (-EINVAL);
3113 }
3114
3115 rval = mega_del_logdrv( adapter, uioc.uioc_rmbox[3] );
3116
3117 if( rval == 0 ) {
3118 memset(&mc, 0, sizeof(megacmd_t));
3119
3120 mc.status = rval;
3121
3122 rval = mega_n_to_m((void __user *)arg, &mc);
3123 }
3124
3125 return rval;
3126 }
3127 /*
3128		 * This interface only supports the regular passthru commands.
3129 * Reject extended passthru and 64-bit passthru
3130 */
3131 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU64 ||
3132 uioc.uioc_rmbox[0] == MEGA_MBOXCMD_EXTPTHRU ) {
3133
3134 dev_warn(&adapter->dev->dev, "rejected passthru\n");
3135
3136 return (-EINVAL);
3137 }
3138
3139 /*
3140 * For all internal commands, the buffer must be allocated in
3141 * <4GB address range
3142 */
3143 if( make_local_pdev(adapter, &pdev) != 0 )
3144 return -EIO;
3145
3146 /* Is it a passthru command or a DCMD */
3147 if( uioc.uioc_rmbox[0] == MEGA_MBOXCMD_PASSTHRU ) {
3148 /* Passthru commands */
3149
3150 pthru = dma_alloc_coherent(&pdev->dev,
3151 sizeof(mega_passthru),
3152 &pthru_dma_hndl, GFP_KERNEL);
3153
3154 if( pthru == NULL ) {
3155 free_local_pdev(pdev);
3156 return (-ENOMEM);
3157 }
3158
3159 /*
3160 * The user passthru structure
3161 */
3162 upthru = (mega_passthru __user *)(unsigned long)MBOX(uioc)->xferaddr;
3163
3164 /*
3165 * Copy in the user passthru here.
3166 */
3167 if( copy_from_user(pthru, upthru,
3168 sizeof(mega_passthru)) ) {
3169
3170 dma_free_coherent(&pdev->dev,
3171 sizeof(mega_passthru),
3172 pthru, pthru_dma_hndl);
3173
3174 free_local_pdev(pdev);
3175
3176 return (-EFAULT);
3177 }
3178
3179 /*
3180 * Is there a data transfer
3181 */
3182 if( pthru->dataxferlen ) {
3183 data = dma_alloc_coherent(&pdev->dev,
3184 pthru->dataxferlen,
3185 &data_dma_hndl,
3186 GFP_KERNEL);
3187
3188 if( data == NULL ) {
3189 dma_free_coherent(&pdev->dev,
3190 sizeof(mega_passthru),
3191 pthru,
3192 pthru_dma_hndl);
3193
3194 free_local_pdev(pdev);
3195
3196 return (-ENOMEM);
3197 }
3198
3199 /*
3200 * Save the user address and point the kernel
3201			 * address at the just-allocated memory
3202 */
3203 uxferaddr = pthru->dataxferaddr;
3204 pthru->dataxferaddr = data_dma_hndl;
3205 }
3206
3207
3208 /*
3209 * Is data coming down-stream
3210 */
3211 if( pthru->dataxferlen && (uioc.flags & UIOC_WR) ) {
3212 /*
3213 * Get the user data
3214 */
3215 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3216 pthru->dataxferlen) ) {
3217 rval = (-EFAULT);
3218 goto freemem_and_return;
3219 }
3220 }
3221
3222 memset(&mc, 0, sizeof(megacmd_t));
3223
3224 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
3225 mc.xferaddr = (u32)pthru_dma_hndl;
3226
3227 /*
3228 * Issue the command
3229 */
3230 mega_internal_command(adapter, &mc, pthru);
3231
3232 rval = mega_n_to_m((void __user *)arg, &mc);
3233
3234 if( rval ) goto freemem_and_return;
3235
3236
3237 /*
3238 * Is data going up-stream
3239 */
3240 if( pthru->dataxferlen && (uioc.flags & UIOC_RD) ) {
3241 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3242 pthru->dataxferlen) ) {
3243 rval = (-EFAULT);
3244 }
3245 }
3246
3247 /*
3248 * Send the request sense data also, irrespective of
3249 * whether the user has asked for it or not.
3250 */
3251 if (copy_to_user(upthru->reqsensearea,
3252 pthru->reqsensearea, 14))
3253 rval = -EFAULT;
3254
3255freemem_and_return:
3256 if( pthru->dataxferlen ) {
3257 dma_free_coherent(&pdev->dev,
3258 pthru->dataxferlen, data,
3259 data_dma_hndl);
3260 }
3261
3262 dma_free_coherent(&pdev->dev, sizeof(mega_passthru),
3263 pthru, pthru_dma_hndl);
3264
3265 free_local_pdev(pdev);
3266
3267 return rval;
3268 }
3269 else {
3270 /* DCMD commands */
3271
3272 /*
3273 * Is there a data transfer
3274 */
3275 if( uioc.xferlen ) {
3276 data = dma_alloc_coherent(&pdev->dev,
3277 uioc.xferlen,
3278 &data_dma_hndl,
3279 GFP_KERNEL);
3280
3281 if( data == NULL ) {
3282 free_local_pdev(pdev);
3283 return (-ENOMEM);
3284 }
3285
3286 uxferaddr = MBOX(uioc)->xferaddr;
3287 }
3288
3289 /*
3290 * Is data coming down-stream
3291 */
3292 if( uioc.xferlen && (uioc.flags & UIOC_WR) ) {
3293 /*
3294 * Get the user data
3295 */
3296 if( copy_from_user(data, (char __user *)(unsigned long) uxferaddr,
3297 uioc.xferlen) ) {
3298
3299 dma_free_coherent(&pdev->dev,
3300 uioc.xferlen, data,
3301 data_dma_hndl);
3302
3303 free_local_pdev(pdev);
3304
3305 return (-EFAULT);
3306 }
3307 }
3308
3309 memcpy(&mc, MBOX(uioc), sizeof(megacmd_t));
3310
3311 mc.xferaddr = (u32)data_dma_hndl;
3312
3313 /*
3314 * Issue the command
3315 */
3316 mega_internal_command(adapter, &mc, NULL);
3317
3318 rval = mega_n_to_m((void __user *)arg, &mc);
3319
3320 if( rval ) {
3321 if( uioc.xferlen ) {
3322 dma_free_coherent(&pdev->dev,
3323 uioc.xferlen, data,
3324 data_dma_hndl);
3325 }
3326
3327 free_local_pdev(pdev);
3328
3329 return rval;
3330 }
3331
3332 /*
3333 * Is data going up-stream
3334 */
3335 if( uioc.xferlen && (uioc.flags & UIOC_RD) ) {
3336 if( copy_to_user((char __user *)(unsigned long) uxferaddr, data,
3337 uioc.xferlen) ) {
3338
3339 rval = (-EFAULT);
3340 }
3341 }
3342
3343 if( uioc.xferlen ) {
3344 dma_free_coherent(&pdev->dev, uioc.xferlen,
3345 data, data_dma_hndl);
3346 }
3347
3348 free_local_pdev(pdev);
3349
3350 return rval;
3351 }
3352
3353 default:
3354 return (-EINVAL);
3355 }
3356
3357 return 0;
3358}
3359
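/*
 * Serialize all ioctls on the private character interface with
 * megadev_mutex before handing them to megadev_ioctl().
 */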
3360static long
3361megadev_unlocked_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
3362{
3363 int ret;
3364
3365 mutex_lock(&megadev_mutex);
3366 ret = megadev_ioctl(filep, cmd, arg);
3367 mutex_unlock(&megadev_mutex);
3368
3369 return ret;
3370}
3371
3372/**
3373 * mega_m_to_n()
3374 * @arg: user address
3375 * @uioc: new ioctl structure
3376 *
3377 * A thin layer to convert older mimd interface ioctl structure to NIT ioctl
3378 * structure
3379 *
3380 * Converts the older mimd ioctl structure to newer NIT structure
3381 */
3382static int
3383mega_m_to_n(void __user *arg, nitioctl_t *uioc)
3384{
3385 struct uioctl_t uioc_mimd;
3386 char signature[8] = {0};
3387 u8 opcode;
3388 u8 subopcode;
3389
3390
3391 /*
3392	 * check if the application conforms to NIT. We do not have to do much
3393 * in that case.
3394 * We exploit the fact that the signature is stored in the very
3395 * beginning of the structure.
3396 */
3397
3398 if( copy_from_user(signature, arg, 7) )
3399 return (-EFAULT);
3400
3401 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3402
3403 /*
3404 * NOTE NOTE: The nit ioctl is still under flux because of
3405 * change of mailbox definition, in HPE. No applications yet
3406 * use this interface and let's not have applications use this
3407		 * interface till the new specifications are in place.
3408 */
3409 return -EINVAL;
3410#if 0
3411 if( copy_from_user(uioc, arg, sizeof(nitioctl_t)) )
3412 return (-EFAULT);
3413 return 0;
3414#endif
3415 }
3416
3417 /*
3418 * Else assume we have mimd uioctl_t as arg. Convert to nitioctl_t
3419 *
3420 * Get the user ioctl structure
3421 */
3422 if( copy_from_user(&uioc_mimd, arg, sizeof(struct uioctl_t)) )
3423 return (-EFAULT);
3424
3425
3426 /*
3427 * Get the opcode and subopcode for the commands
3428 */
3429 opcode = uioc_mimd.ui.fcs.opcode;
3430 subopcode = uioc_mimd.ui.fcs.subopcode;
3431
3432 switch (opcode) {
3433 case 0x82:
3434
3435 switch (subopcode) {
3436
3437 case MEGAIOC_QDRVRVER: /* Query driver version */
3438 uioc->opcode = GET_DRIVER_VER;
3439 uioc->uioc_uaddr = uioc_mimd.data;
3440 break;
3441
3442 case MEGAIOC_QNADAP: /* Get # of adapters */
3443 uioc->opcode = GET_N_ADAP;
3444 uioc->uioc_uaddr = uioc_mimd.data;
3445 break;
3446
3447 case MEGAIOC_QADAPINFO: /* Get adapter information */
3448 uioc->opcode = GET_ADAP_INFO;
3449 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3450 uioc->uioc_uaddr = uioc_mimd.data;
3451 break;
3452
3453 default:
3454 return(-EINVAL);
3455 }
3456
3457 break;
3458
3459
3460 case 0x81:
3461
3462 uioc->opcode = MBOX_CMD;
3463 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3464
3465 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3466
3467 uioc->xferlen = uioc_mimd.ui.fcs.length;
3468
3469 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3470 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3471
3472 break;
3473
3474 case 0x80:
3475
3476 uioc->opcode = MBOX_CMD;
3477 uioc->adapno = uioc_mimd.ui.fcs.adapno;
3478
3479 memcpy(uioc->uioc_rmbox, uioc_mimd.mbox, 18);
3480
3481 /*
3482		 * Choose as xferlen the bigger of the input and output data lengths
3483 */
3484 uioc->xferlen = uioc_mimd.outlen > uioc_mimd.inlen ?
3485 uioc_mimd.outlen : uioc_mimd.inlen;
3486
3487 if( uioc_mimd.outlen ) uioc->flags = UIOC_RD;
3488 if( uioc_mimd.inlen ) uioc->flags |= UIOC_WR;
3489
3490 break;
3491
3492 default:
3493 return (-EINVAL);
3494
3495 }
3496
3497 return 0;
3498}
3499
3500/*
3501 * mega_n_to_m()
3502 * @arg: user address
3503 * @mc: mailbox command
3504 *
3505 * Updates the status information to the application, depending on application
3506 * conforms to older mimd ioctl interface or newer NIT ioctl interface
3507 */
3508static int
3509mega_n_to_m(void __user *arg, megacmd_t *mc)
3510{
3511 nitioctl_t __user *uiocp;
3512 megacmd_t __user *umc;
3513 mega_passthru __user *upthru;
3514 struct uioctl_t __user *uioc_mimd;
3515 char signature[8] = {0};
3516
3517 /*
3518	 * check if the application conforms to NIT.
3519 */
3520 if( copy_from_user(signature, arg, 7) )
3521 return -EFAULT;
3522
3523 if( memcmp(signature, "MEGANIT", 7) == 0 ) {
3524
3525 uiocp = arg;
3526
3527 if( put_user(mc->status, (u8 __user *)&MBOX_P(uiocp)->status) )
3528 return (-EFAULT);
3529
3530 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3531
3532 umc = MBOX_P(uiocp);
3533
3534 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3535 return -EFAULT;
3536
3537 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus))
3538 return (-EFAULT);
3539 }
3540 }
3541 else {
3542 uioc_mimd = arg;
3543
3544 if( put_user(mc->status, (u8 __user *)&uioc_mimd->mbox[17]) )
3545 return (-EFAULT);
3546
3547 if( mc->cmd == MEGA_MBOXCMD_PASSTHRU ) {
3548
3549 umc = (megacmd_t __user *)uioc_mimd->mbox;
3550
3551 if (get_user(upthru, (mega_passthru __user * __user *)&umc->xferaddr))
3552 return (-EFAULT);
3553
3554 if( put_user(mc->status, (u8 __user *)&upthru->scsistatus) )
3555 return (-EFAULT);
3556 }
3557 }
3558
3559 return 0;
3560}
3561
3562
3563/*
3564 * MEGARAID 'FW' commands.
3565 */
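
/*
 * The probe helpers below all follow the same pattern: build a raw mailbox
 * on the stack, point xferaddr at the adapter's internal DMA buffer
 * (adapter->mega_buffer / buf_dma_handle) when a reply is expected, set the
 * command and sub-command bytes in raw_mbox[0] and raw_mbox[2], and issue
 * it synchronously with issue_scb_block(). A non-zero return means the
 * firmware rejected or does not implement that command.
 */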
3566
3567/**
3568 * mega_is_bios_enabled()
3569 * @adapter: pointer to our soft state
3570 *
3571 * issue command to find out if the BIOS is enabled for this controller
3572 */
3573static int
3574mega_is_bios_enabled(adapter_t *adapter)
3575{
3576 unsigned char raw_mbox[sizeof(struct mbox_out)];
3577 mbox_t *mbox;
3578
3579 mbox = (mbox_t *)raw_mbox;
3580
3581 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3582
3583 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3584
3585 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3586
3587 raw_mbox[0] = IS_BIOS_ENABLED;
3588 raw_mbox[2] = GET_BIOS;
3589
3590 issue_scb_block(adapter, raw_mbox);
3591
3592 return *(char *)adapter->mega_buffer;
3593}
3594
3595
3596/**
3597 * mega_enum_raid_scsi()
3598 * @adapter: pointer to our soft state
3599 *
3600 * Find out what channels are RAID/SCSI. This information is used to
3601 * differentiate the virtual channels and physical channels and to support
3602 * ROMB feature and non-disk devices.
3603 */
3604static void
3605mega_enum_raid_scsi(adapter_t *adapter)
3606{
3607 unsigned char raw_mbox[sizeof(struct mbox_out)];
3608 mbox_t *mbox;
3609 int i;
3610
3611 mbox = (mbox_t *)raw_mbox;
3612
3613 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3614
3615 /*
3616 * issue command to find out what channels are raid/scsi
3617 */
3618 raw_mbox[0] = CHNL_CLASS;
3619 raw_mbox[2] = GET_CHNL_CLASS;
3620
3621 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3622
3623 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3624
3625 /*
3626	 * Non-ROMB firmware fails this command, so all channels
3627	 * must be shown as RAID
3628 */
3629 adapter->mega_ch_class = 0xFF;
3630
3631 if(!issue_scb_block(adapter, raw_mbox)) {
3632 adapter->mega_ch_class = *((char *)adapter->mega_buffer);
3633
3634 }
3635
3636 for( i = 0; i < adapter->product_info.nchannels; i++ ) {
3637 if( (adapter->mega_ch_class >> i) & 0x01 ) {
3638 dev_info(&adapter->dev->dev, "channel[%d] is raid\n",
3639 i);
3640 }
3641 else {
3642 dev_info(&adapter->dev->dev, "channel[%d] is scsi\n",
3643 i);
3644 }
3645 }
3646
3647 return;
3648}
3649
3650
3651/**
3652 * mega_get_boot_drv()
3653 * @adapter: pointer to our soft state
3654 *
3655 * Find out which device is the boot device. Note, any logical drive or any
3656 * physical device (e.g., a CDROM) can be designated as a boot device.
3657 */
3658static void
3659mega_get_boot_drv(adapter_t *adapter)
3660{
3661 struct private_bios_data *prv_bios_data;
3662 unsigned char raw_mbox[sizeof(struct mbox_out)];
3663 mbox_t *mbox;
3664 u16 cksum = 0;
3665 u8 *cksum_p;
3666 u8 boot_pdrv;
3667 int i;
3668
3669 mbox = (mbox_t *)raw_mbox;
3670
3671 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3672
3673 raw_mbox[0] = BIOS_PVT_DATA;
3674 raw_mbox[2] = GET_BIOS_PVT_DATA;
3675
3676 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3677
3678 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3679
3680 adapter->boot_ldrv_enabled = 0;
3681 adapter->boot_ldrv = 0;
3682
3683 adapter->boot_pdrv_enabled = 0;
3684 adapter->boot_pdrv_ch = 0;
3685 adapter->boot_pdrv_tgt = 0;
3686
3687 if(issue_scb_block(adapter, raw_mbox) == 0) {
3688 prv_bios_data =
3689 (struct private_bios_data *)adapter->mega_buffer;
3690
3691 cksum = 0;
3692 cksum_p = (char *)prv_bios_data;
3693 for (i = 0; i < 14; i++ ) {
3694 cksum += (u16)(*cksum_p++);
3695 }
3696
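		/*
		 * The BIOS private data is trusted only if the 14 bytes summed
		 * above match the stored two's-complement checksum.
		 */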
3697 if (prv_bios_data->cksum == (u16)(0-cksum) ) {
3698
3699 /*
3700 * If MSB is set, a physical drive is set as boot
3701 * device
3702 */
3703 if( prv_bios_data->boot_drv & 0x80 ) {
3704 adapter->boot_pdrv_enabled = 1;
3705 boot_pdrv = prv_bios_data->boot_drv & 0x7F;
3706 adapter->boot_pdrv_ch = boot_pdrv / 16;
3707 adapter->boot_pdrv_tgt = boot_pdrv % 16;
3708 }
3709 else {
3710 adapter->boot_ldrv_enabled = 1;
3711 adapter->boot_ldrv = prv_bios_data->boot_drv;
3712 }
3713 }
3714 }
3715
3716}
3717
3718/**
3719 * mega_support_random_del()
3720 * @adapter: pointer to our soft state
3721 *
3722 * Find out if this controller supports random deletion and addition of
3723 * logical drives
3724 */
3725static int
3726mega_support_random_del(adapter_t *adapter)
3727{
3728 unsigned char raw_mbox[sizeof(struct mbox_out)];
3729 mbox_t *mbox;
3730 int rval;
3731
3732 mbox = (mbox_t *)raw_mbox;
3733
3734 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3735
3736 /*
3737 * issue command
3738 */
3739 raw_mbox[0] = FC_DEL_LOGDRV;
3740 raw_mbox[2] = OP_SUP_DEL_LOGDRV;
3741
3742 rval = issue_scb_block(adapter, raw_mbox);
3743
3744 return !rval;
3745}
3746
3747
3748/**
3749 * mega_support_ext_cdb()
3750 * @adapter: pointer to our soft state
3751 *
3752 * Find out if this firmware supports cdblen > 10
3753 */
3754static int
3755mega_support_ext_cdb(adapter_t *adapter)
3756{
3757 unsigned char raw_mbox[sizeof(struct mbox_out)];
3758 mbox_t *mbox;
3759 int rval;
3760
3761 mbox = (mbox_t *)raw_mbox;
3762
3763 memset(&mbox->m_out, 0, sizeof(raw_mbox));
3764 /*
3765 * issue command to find out if controller supports extended CDBs.
3766 */
3767 raw_mbox[0] = 0xA4;
3768 raw_mbox[2] = 0x16;
3769
3770 rval = issue_scb_block(adapter, raw_mbox);
3771
3772 return !rval;
3773}
3774
3775
3776/**
3777 * mega_del_logdrv()
3778 * @adapter: pointer to our soft state
3779 * @logdrv: logical drive to be deleted
3780 *
3781 * Delete the specified logical drive. It is the responsibility of the user
3782 * app to let the OS know about this operation.
3783 */
3784static int
3785mega_del_logdrv(adapter_t *adapter, int logdrv)
3786{
3787 unsigned long flags;
3788 scb_t *scb;
3789 int rval;
3790
3791 /*
3792 * Stop sending commands to the controller, queue them internally.
3793 * When deletion is complete, ISR will flush the queue.
3794 */
3795 atomic_set(&adapter->quiescent, 1);
3796
3797 /*
3798 * Wait till all the issued commands are complete and there are no
3799 * commands in the pending queue
3800 */
3801 while (atomic_read(&adapter->pend_cmds) > 0 ||
3802 !list_empty(&adapter->pending_list))
3803 msleep(1000); /* sleep for 1s */
3804
3805 rval = mega_do_del_logdrv(adapter, logdrv);
3806
3807 spin_lock_irqsave(&adapter->lock, flags);
3808
3809 /*
3810 * If delete operation was successful, add 0x80 to the logical drive
3811 * ids for commands in the pending queue.
3812 */
3813 if (adapter->read_ldidmap) {
3814 struct list_head *pos;
3815 list_for_each(pos, &adapter->pending_list) {
3816 scb = list_entry(pos, scb_t, list);
3817 if (scb->pthru->logdrv < 0x80 )
3818 scb->pthru->logdrv += 0x80;
3819 }
3820 }
3821
3822 atomic_set(&adapter->quiescent, 0);
3823
3824 mega_runpendq(adapter);
3825
3826 spin_unlock_irqrestore(&adapter->lock, flags);
3827
3828 return rval;
3829}
3830
3831
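/**
 * mega_do_del_logdrv()
 * @adapter: pointer to our soft state
 * @logdrv: logical drive to be deleted
 *
 * Issue the FC_DEL_LOGDRV/OP_DEL_LOGDRV mailbox command for the given
 * logical drive and, on success, record that logical drive ids must now
 * be remapped by adding 0x80.
 */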
3832static int
3833mega_do_del_logdrv(adapter_t *adapter, int logdrv)
3834{
3835 megacmd_t mc;
3836 int rval;
3837
3838 memset( &mc, 0, sizeof(megacmd_t));
3839
3840 mc.cmd = FC_DEL_LOGDRV;
3841 mc.opcode = OP_DEL_LOGDRV;
3842 mc.subopcode = logdrv;
3843
3844 rval = mega_internal_command(adapter, &mc, NULL);
3845
3846 /* log this event */
3847 if(rval) {
3848 dev_warn(&adapter->dev->dev, "Delete LD-%d failed", logdrv);
3849 return rval;
3850 }
3851
3852 /*
3853	 * After deleting the first logical drive, the logical drives must be
3854 * addressed by adding 0x80 to the logical drive id.
3855 */
3856 adapter->read_ldidmap = 1;
3857
3858 return rval;
3859}
3860
3861
3862/**
3863 * mega_get_max_sgl()
3864 * @adapter: pointer to our soft state
3865 *
3866 * Find out the maximum number of scatter-gather elements supported by this
3867 * version of the firmware
3868 */
3869static void
3870mega_get_max_sgl(adapter_t *adapter)
3871{
3872 unsigned char raw_mbox[sizeof(struct mbox_out)];
3873 mbox_t *mbox;
3874
3875 mbox = (mbox_t *)raw_mbox;
3876
3877 memset(mbox, 0, sizeof(raw_mbox));
3878
3879 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3880
3881 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3882
3883 raw_mbox[0] = MAIN_MISC_OPCODE;
3884 raw_mbox[2] = GET_MAX_SG_SUPPORT;
3885
3886
3887 if( issue_scb_block(adapter, raw_mbox) ) {
3888 /*
3889 * f/w does not support this command. Choose the default value
3890 */
3891 adapter->sglen = MIN_SGLIST;
3892 }
3893 else {
3894 adapter->sglen = *((char *)adapter->mega_buffer);
3895
3896 /*
3897 * Make sure this is not more than the resources we are
3898 * planning to allocate
3899 */
3900 if ( adapter->sglen > MAX_SGLIST )
3901 adapter->sglen = MAX_SGLIST;
3902 }
3903
3904 return;
3905}
3906
3907
3908/**
3909 * mega_support_cluster()
3910 * @adapter: pointer to our soft state
3911 *
3912 * Find out if this firmware supports cluster calls.
3913 */
3914static int
3915mega_support_cluster(adapter_t *adapter)
3916{
3917 unsigned char raw_mbox[sizeof(struct mbox_out)];
3918 mbox_t *mbox;
3919
3920 mbox = (mbox_t *)raw_mbox;
3921
3922 memset(mbox, 0, sizeof(raw_mbox));
3923
3924 memset((void *)adapter->mega_buffer, 0, MEGA_BUFFER_SIZE);
3925
3926 mbox->m_out.xferaddr = (u32)adapter->buf_dma_handle;
3927
3928 /*
3929 * Try to get the initiator id. This command will succeed iff the
3930 * clustering is available on this HBA.
3931 */
3932 raw_mbox[0] = MEGA_GET_TARGET_ID;
3933
3934 if( issue_scb_block(adapter, raw_mbox) == 0 ) {
3935
3936 /*
3937 * Cluster support available. Get the initiator target id.
3938 * Tell our id to mid-layer too.
3939 */
3940 adapter->this_id = *(u32 *)adapter->mega_buffer;
3941 adapter->host->this_id = adapter->this_id;
3942
3943 return 1;
3944 }
3945
3946 return 0;
3947}
3948
3949#ifdef CONFIG_PROC_FS
3950/**
3951 * mega_adapinq()
3952 * @adapter: pointer to our soft state
3953 * @dma_handle: DMA address of the buffer
3954 *
3955 * Issue internal commands while interrupts are available.
3956 * We only issue direct mailbox commands from within the driver; the ioctl()
3957 * interface, which uses these routines, can also issue passthru commands.
3958 */
3959static int
3960mega_adapinq(adapter_t *adapter, dma_addr_t dma_handle)
3961{
3962 megacmd_t mc;
3963
3964 memset(&mc, 0, sizeof(megacmd_t));
3965
3966 if( adapter->flag & BOARD_40LD ) {
3967 mc.cmd = FC_NEW_CONFIG;
3968 mc.opcode = NC_SUBOP_ENQUIRY3;
3969 mc.subopcode = ENQ3_GET_SOLICITED_FULL;
3970 }
3971 else {
3972 mc.cmd = MEGA_MBOXCMD_ADPEXTINQ;
3973 }
3974
3975 mc.xferaddr = (u32)dma_handle;
3976
3977 if ( mega_internal_command(adapter, &mc, NULL) != 0 ) {
3978 return -1;
3979 }
3980
3981 return 0;
3982}
3983
3984
3985/**
3986 * mega_internal_dev_inquiry()
3987 * @adapter: pointer to our soft state
3988 * @ch: channel for this device
3989 * @tgt: ID of this device
3990 * @buf_dma_handle: DMA address of the buffer
3991 *
3992 * Issue the scsi inquiry for the specified device.
3993 */
3994static int
3995mega_internal_dev_inquiry(adapter_t *adapter, u8 ch, u8 tgt,
3996 dma_addr_t buf_dma_handle)
3997{
3998 mega_passthru *pthru;
3999 dma_addr_t pthru_dma_handle;
4000 megacmd_t mc;
4001 int rval;
4002 struct pci_dev *pdev;
4003
4004
4005 /*
4006 * For all internal commands, the buffer must be allocated in <4GB
4007 * address range
4008 */
4009 if( make_local_pdev(adapter, &pdev) != 0 ) return -1;
4010
4011 pthru = dma_alloc_coherent(&pdev->dev, sizeof(mega_passthru),
4012 &pthru_dma_handle, GFP_KERNEL);
4013
4014 if( pthru == NULL ) {
4015 free_local_pdev(pdev);
4016 return -1;
4017 }
4018
4019 pthru->timeout = 2;
4020 pthru->ars = 1;
4021 pthru->reqsenselen = 14;
4022 pthru->islogical = 0;
4023
4024 pthru->channel = (adapter->flag & BOARD_40LD) ? 0 : ch;
4025
4026 pthru->target = (adapter->flag & BOARD_40LD) ? (ch << 4)|tgt : tgt;
4027
4028 pthru->cdblen = 6;
4029
4030 pthru->cdb[0] = INQUIRY;
4031 pthru->cdb[1] = 0;
4032 pthru->cdb[2] = 0;
4033 pthru->cdb[3] = 0;
4034 pthru->cdb[4] = 255;
4035 pthru->cdb[5] = 0;
4036
4037
4038 pthru->dataxferaddr = (u32)buf_dma_handle;
4039 pthru->dataxferlen = 256;
4040
4041 memset(&mc, 0, sizeof(megacmd_t));
4042
4043 mc.cmd = MEGA_MBOXCMD_PASSTHRU;
4044 mc.xferaddr = (u32)pthru_dma_handle;
4045
4046 rval = mega_internal_command(adapter, &mc, pthru);
4047
4048 dma_free_coherent(&pdev->dev, sizeof(mega_passthru), pthru,
4049 pthru_dma_handle);
4050
4051 free_local_pdev(pdev);
4052
4053 return rval;
4054}
4055#endif
4056
4057/**
4058 * mega_internal_command()
4059 * @adapter: pointer to our soft state
4060 * @mc: the mailbox command
4061 * @pthru: Passthru structure for DCDB commands
4062 *
4063 * Issue the internal commands in interrupt mode.
4064 * The last argument is the address of the passthru structure if the command
4065 * to be fired is a passthru command
4066 *
4067 * Note: parameter 'pthru' is null for non-passthru commands.
4068 */
4069static int
4070mega_internal_command(adapter_t *adapter, megacmd_t *mc, mega_passthru *pthru)
4071{
4072 unsigned long flags;
4073 scb_t *scb;
4074 int rval;
4075
4076 /*
4077 * The internal commands share one command id and hence are
4078	 * serialized. This is so because we want to reserve the maximum number of
4079 * available command ids for the I/O commands.
4080 */
4081 mutex_lock(&adapter->int_mtx);
4082
4083 scb = &adapter->int_scb;
4084 memset(scb, 0, sizeof(scb_t));
4085
4086 scb->idx = CMDID_INT_CMDS;
4087 scb->state |= SCB_ACTIVE | SCB_PENDQ;
4088
4089 memcpy(scb->raw_mbox, mc, sizeof(megacmd_t));
4090
4091 /*
4092 * Is it a passthru command
4093 */
4094 if (mc->cmd == MEGA_MBOXCMD_PASSTHRU)
4095 scb->pthru = pthru;
4096
4097 spin_lock_irqsave(&adapter->lock, flags);
4098 list_add_tail(&scb->list, &adapter->pending_list);
4099 /*
4100 * Check if the HBA is in quiescent state, e.g., during a
4101	 * delete logical drive operation. If it is, don't run
4102 * the pending_list.
4103 */
4104 if (atomic_read(&adapter->quiescent) == 0)
4105 mega_runpendq(adapter);
4106 spin_unlock_irqrestore(&adapter->lock, flags);
4107
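	/*
	 * Wait for the completion path to signal int_waitq and fill in
	 * int_status once this CMDID_INT_CMDS command finishes.
	 */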
4108 wait_for_completion(&adapter->int_waitq);
4109
4110 mc->status = rval = adapter->int_status;
4111
4112 /*
4113 * Print a debug message for all failed commands. Applications can use
4114 * this information.
4115 */
4116 if (rval && trace_level) {
4117 dev_info(&adapter->dev->dev, "cmd [%x, %x, %x] status:[%x]\n",
4118 mc->cmd, mc->opcode, mc->subopcode, rval);
4119 }
4120
4121 mutex_unlock(&adapter->int_mtx);
4122 return rval;
4123}
4124
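/*
 * SCSI host template. Error recovery is routed through megaraid_abort()
 * and megaraid_reset() above; the queue limits set here are overridden per
 * host in megaraid_probe_one() from the max_cmd_per_lun and
 * max_sectors_per_io module parameters.
 */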
4125static struct scsi_host_template megaraid_template = {
4126 .module = THIS_MODULE,
4127 .name = "MegaRAID",
4128 .proc_name = "megaraid_legacy",
4129 .info = megaraid_info,
4130 .queuecommand = megaraid_queue,
4131 .bios_param = megaraid_biosparam,
4132 .max_sectors = MAX_SECTORS_PER_IO,
4133 .can_queue = MAX_COMMANDS,
4134 .this_id = DEFAULT_INITIATOR_ID,
4135 .sg_tablesize = MAX_SGLIST,
4136 .cmd_per_lun = DEF_CMD_PER_LUN,
4137 .eh_abort_handler = megaraid_abort,
4138 .eh_device_reset_handler = megaraid_reset,
4139 .eh_bus_reset_handler = megaraid_reset,
4140 .eh_host_reset_handler = megaraid_reset,
4141 .no_write_same = 1,
4142};
4143
4144static int
4145megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
4146{
4147 struct Scsi_Host *host;
4148 adapter_t *adapter;
4149 unsigned long mega_baseport, tbase, flag = 0;
4150 u16 subsysid, subsysvid;
4151 u8 pci_bus, pci_dev_func;
4152 int irq, i, j;
4153 int error = -ENODEV;
4154
4155 if (hba_count >= MAX_CONTROLLERS)
4156 goto out;
4157
4158 if (pci_enable_device(pdev))
4159 goto out;
4160 pci_set_master(pdev);
4161
4162 pci_bus = pdev->bus->number;
4163 pci_dev_func = pdev->devfn;
4164
4165 /*
4166 * The megaraid3 stuff reports the ID of the Intel part which is not
4167 * remotely specific to the megaraid
4168 */
4169 if (pdev->vendor == PCI_VENDOR_ID_INTEL) {
4170 u16 magic;
4171 /*
4172 * Don't fall over the Compaq management cards using the same
4173 * PCI identifier
4174 */
4175 if (pdev->subsystem_vendor == PCI_VENDOR_ID_COMPAQ &&
4176 pdev->subsystem_device == 0xC000)
4177 goto out_disable_device;
4178 /* Now check the magic signature byte */
4179 pci_read_config_word(pdev, PCI_CONF_AMISIG, &magic);
4180 if (magic != HBA_SIGNATURE_471 && magic != HBA_SIGNATURE)
4181 goto out_disable_device;
4182 /* Ok it is probably a megaraid */
4183 }
4184
4185 /*
4186 * For these vendor and device ids, signature offsets are not
4187 * valid and 64 bit is implicit
4188 */
4189 if (id->driver_data & BOARD_64BIT)
4190 flag |= BOARD_64BIT;
4191 else {
4192 u32 magic64;
4193
4194 pci_read_config_dword(pdev, PCI_CONF_AMISIG64, &magic64);
4195 if (magic64 == HBA_SIGNATURE_64BIT)
4196 flag |= BOARD_64BIT;
4197 }
4198
4199 subsysvid = pdev->subsystem_vendor;
4200 subsysid = pdev->subsystem_device;
4201
4202 dev_notice(&pdev->dev, "found 0x%4.04x:0x%4.04x\n",
4203 id->vendor, id->device);
4204
4205 /* Read the base port and IRQ from PCI */
4206 mega_baseport = pci_resource_start(pdev, 0);
4207 irq = pdev->irq;
4208
4209 tbase = mega_baseport;
4210 if (pci_resource_flags(pdev, 0) & IORESOURCE_MEM) {
4211 flag |= BOARD_MEMMAP;
4212
4213 if (!request_mem_region(mega_baseport, 128, "megaraid")) {
4214 dev_warn(&pdev->dev, "mem region busy!\n");
4215 goto out_disable_device;
4216 }
4217
4218 mega_baseport = (unsigned long)ioremap(mega_baseport, 128);
4219 if (!mega_baseport) {
4220 dev_warn(&pdev->dev, "could not map hba memory\n");
4221 goto out_release_region;
4222 }
4223 } else {
4224 flag |= BOARD_IOMAP;
4225 mega_baseport += 0x10;
4226
4227 if (!request_region(mega_baseport, 16, "megaraid"))
4228 goto out_disable_device;
4229 }
4230
4231 /* Initialize SCSI Host structure */
4232 host = scsi_host_alloc(&megaraid_template, sizeof(adapter_t));
4233 if (!host)
4234 goto out_iounmap;
4235
4236 adapter = (adapter_t *)host->hostdata;
4237 memset(adapter, 0, sizeof(adapter_t));
4238
4239 dev_notice(&pdev->dev,
4240 "scsi%d:Found MegaRAID controller at 0x%lx, IRQ:%d\n",
4241 host->host_no, mega_baseport, irq);
4242
4243 adapter->base = mega_baseport;
4244 if (flag & BOARD_MEMMAP)
4245 adapter->mmio_base = (void __iomem *) mega_baseport;
4246
4247 INIT_LIST_HEAD(&adapter->free_list);
4248 INIT_LIST_HEAD(&adapter->pending_list);
4249 INIT_LIST_HEAD(&adapter->completed_list);
4250
4251 adapter->flag = flag;
4252 spin_lock_init(&adapter->lock);
4253
4254 host->cmd_per_lun = max_cmd_per_lun;
4255 host->max_sectors = max_sectors_per_io;
4256
4257 adapter->dev = pdev;
4258 adapter->host = host;
4259
4260 adapter->host->irq = irq;
4261
4262 if (flag & BOARD_MEMMAP)
4263 adapter->host->base = tbase;
4264 else {
4265 adapter->host->io_port = tbase;
4266 adapter->host->n_io_port = 16;
4267 }
4268
4269 adapter->host->unique_id = (pci_bus << 8) | pci_dev_func;
4270
4271 /*
4272 * Allocate buffer to issue internal commands.
4273 */
4274 adapter->mega_buffer = dma_alloc_coherent(&adapter->dev->dev,
4275 MEGA_BUFFER_SIZE,
4276 &adapter->buf_dma_handle,
4277 GFP_KERNEL);
4278 if (!adapter->mega_buffer) {
4279 dev_warn(&pdev->dev, "out of RAM\n");
4280 goto out_host_put;
4281 }
4282
4283 adapter->scb_list = kmalloc_array(MAX_COMMANDS, sizeof(scb_t),
4284 GFP_KERNEL);
4285 if (!adapter->scb_list) {
4286 dev_warn(&pdev->dev, "out of RAM\n");
4287 goto out_free_cmd_buffer;
4288 }
4289
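	/*
	 * Register the shared interrupt handler, picking the
	 * memory-mapped or I/O-mapped variant to match how the
	 * controller was mapped above.
	 */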
4290 if (request_irq(irq, (adapter->flag & BOARD_MEMMAP) ?
4291 megaraid_isr_memmapped : megaraid_isr_iomapped,
4292 IRQF_SHARED, "megaraid", adapter)) {
4293 dev_warn(&pdev->dev, "Couldn't register IRQ %d!\n", irq);
4294 goto out_free_scb_list;
4295 }
4296
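	/*
	 * Set up the mailbox used to talk to the firmware, then query
	 * the adapter for its properties (firmware version, number of
	 * logical drives, etc.).
	 */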
4297 if (mega_setup_mailbox(adapter))
4298 goto out_free_irq;
4299
4300 if (mega_query_adapter(adapter))
4301 goto out_free_mbox;
4302
4303 /*
	 * Check for some known-buggy firmware
4305 */
4306 if ((subsysid == 0x1111) && (subsysvid == 0x1111)) {
4307 /*
4308 * Which firmware
4309 */
4310 if (!strcmp(adapter->fw_version, "3.00") ||
4311 !strcmp(adapter->fw_version, "3.01")) {
4312
4313 dev_warn(&pdev->dev,
4314 "Your card is a Dell PERC "
4315 "2/SC RAID controller with "
4316 "firmware\nmegaraid: 3.00 or 3.01. "
4317 "This driver is known to have "
4318 "corruption issues\nmegaraid: with "
4319 "those firmware versions on this "
4320 "specific card. In order\nmegaraid: "
4321 "to protect your data, please upgrade "
4322 "your firmware to version\nmegaraid: "
4323 "3.10 or later, available from the "
4324 "Dell Technical Support web\n"
4325 "megaraid: site at\nhttp://support."
4326 "dell.com/us/en/filelib/download/"
4327 "index.asp?fileid=2940\n"
4328 );
4329 }
4330 }
4331
4332 /*
	 * If we have an HP 1M (0x60E7) or 2M (0x60E8) controller with
	 * firmware H.01.07, H.01.08 or H.01.09, disable 64 bit
	 * support, since these firmware versions cannot handle 64 bit
	 * addressing.
4337 */
4338 if ((subsysvid == PCI_VENDOR_ID_HP) &&
4339 ((subsysid == 0x60E7) || (subsysid == 0x60E8))) {
4340 /*
4341 * which firmware
4342 */
4343 if (!strcmp(adapter->fw_version, "H01.07") ||
4344 !strcmp(adapter->fw_version, "H01.08") ||
4345 !strcmp(adapter->fw_version, "H01.09") ) {
4346 dev_warn(&pdev->dev,
4347 "Firmware H.01.07, "
4348 "H.01.08, and H.01.09 on 1M/2M "
4349 "controllers\n"
4350 "do not support 64 bit "
4351 "addressing.\nDISABLING "
4352 "64 bit support.\n");
4353 adapter->flag &= ~BOARD_64BIT;
4354 }
4355 }
4356
4357 if (mega_is_bios_enabled(adapter))
4358 mega_hbas[hba_count].is_bios_enabled = 1;
4359 mega_hbas[hba_count].hostdata_addr = adapter;
4360
4361 /*
4362 * Find out which channel is raid and which is scsi. This is
4363 * for ROMB support.
4364 */
4365 mega_enum_raid_scsi(adapter);
4366
4367 /*
	 * Find out if a logical drive is set as the boot drive. If
	 * there is one, make it the first logical drive.
	 * ROMB: if we have to boot from a physical drive, all the
	 * physical drives appear before the logical disks.
	 * Otherwise, all the physical drives are exported to the mid
	 * layer after the logical drives.
4374 */
4375 mega_get_boot_drv(adapter);
4376
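	/*
	 * Lay out the channel map: if we boot from a physical drive,
	 * the physical channels are exported first and the NVIRT_CHAN
	 * virtual (logical drive) channels follow; otherwise the
	 * virtual channels come first.
	 */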
4377 if (adapter->boot_pdrv_enabled) {
4378 j = adapter->product_info.nchannels;
4379 for( i = 0; i < j; i++ )
4380 adapter->logdrv_chan[i] = 0;
4381 for( i = j; i < NVIRT_CHAN + j; i++ )
4382 adapter->logdrv_chan[i] = 1;
4383 } else {
4384 for (i = 0; i < NVIRT_CHAN; i++)
4385 adapter->logdrv_chan[i] = 1;
4386 for (i = NVIRT_CHAN; i < MAX_CHANNELS+NVIRT_CHAN; i++)
4387 adapter->logdrv_chan[i] = 0;
4388 adapter->mega_ch_class <<= NVIRT_CHAN;
4389 }
4390
4391 /*
4392 * Do we support random deletion and addition of logical
	 * drives?
4394 */
4395 adapter->read_ldidmap = 0; /* set it after first logdrv
4396 delete cmd */
4397 adapter->support_random_del = mega_support_random_del(adapter);
4398
4399 /* Initialize SCBs */
4400 if (mega_init_scb(adapter))
4401 goto out_free_mbox;
4402
4403 /*
4404 * Reset the pending commands counter
4405 */
4406 atomic_set(&adapter->pend_cmds, 0);
4407
4408 /*
4409 * Reset the adapter quiescent flag
4410 */
4411 atomic_set(&adapter->quiescent, 0);
4412
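	/* Remember this adapter's soft state in the global table */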
4413 hba_soft_state[hba_count] = adapter;
4414
4415 /*
4416 * Fill in the structure which needs to be passed back to the
4417 * application when it does an ioctl() for controller related
4418 * information.
4419 */
4420 i = hba_count;
4421
4422 mcontroller[i].base = mega_baseport;
4423 mcontroller[i].irq = irq;
4424 mcontroller[i].numldrv = adapter->numldrv;
4425 mcontroller[i].pcibus = pci_bus;
4426 mcontroller[i].pcidev = id->device;
4427 mcontroller[i].pcifun = PCI_FUNC (pci_dev_func);
4428 mcontroller[i].pciid = -1;
4429 mcontroller[i].pcivendor = id->vendor;
4430 mcontroller[i].pcislot = PCI_SLOT(pci_dev_func);
4431 mcontroller[i].uid = (pci_bus << 8) | pci_dev_func;
4432
4433
4434 /* Set the Mode of addressing to 64 bit if we can */
4435 if ((adapter->flag & BOARD_64BIT) && (sizeof(dma_addr_t) == 8)) {
4436 dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
4437 adapter->has_64bit_addr = 1;
4438 } else {
4439 dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
4440 adapter->has_64bit_addr = 0;
4441 }
4442
4443 mutex_init(&adapter->int_mtx);
4444 init_completion(&adapter->int_waitq);
4445
4446 adapter->this_id = DEFAULT_INITIATOR_ID;
4447 adapter->host->this_id = DEFAULT_INITIATOR_ID;
4448
4449#if MEGA_HAVE_CLUSTERING
4450 /*
	 * Is cluster support enabled on this controller?
	 * Note: In a cluster the HBAs (the initiators) will have
	 * different target IDs and we cannot assume them to be 7. The
	 * call to mega_support_cluster() also retrieves the target id
	 * if cluster support is available.
4456 */
4457 adapter->has_cluster = mega_support_cluster(adapter);
4458 if (adapter->has_cluster) {
4459 dev_notice(&pdev->dev,
4460 "Cluster driver, initiator id:%d\n",
4461 adapter->this_id);
4462 }
4463#endif
4464
4465 pci_set_drvdata(pdev, host);
4466
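	/* Create this controller's hbaN entries under /proc/megaraid */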
4467 mega_create_proc_entry(hba_count, mega_proc_dir_entry);
4468
4469 error = scsi_add_host(host, &pdev->dev);
4470 if (error)
4471 goto out_free_mbox;
4472
4473 scsi_scan_host(host);
4474 hba_count++;
4475 return 0;
4476
4477 out_free_mbox:
4478 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
4479 adapter->una_mbox64, adapter->una_mbox64_dma);
4480 out_free_irq:
4481 free_irq(adapter->host->irq, adapter);
4482 out_free_scb_list:
4483 kfree(adapter->scb_list);
4484 out_free_cmd_buffer:
4485 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
4486 adapter->mega_buffer, adapter->buf_dma_handle);
4487 out_host_put:
4488 scsi_host_put(host);
4489 out_iounmap:
4490 if (flag & BOARD_MEMMAP)
		iounmap((void __iomem *)mega_baseport);
4492 out_release_region:
4493 if (flag & BOARD_MEMMAP)
4494 release_mem_region(tbase, 128);
4495 else
4496 release_region(mega_baseport, 16);
4497 out_disable_device:
4498 pci_disable_device(pdev);
4499 out:
4500 return error;
4501}
4502
4503static void
4504__megaraid_shutdown(adapter_t *adapter)
4505{
4506 u_char raw_mbox[sizeof(struct mbox_out)];
4507 mbox_t *mbox = (mbox_t *)raw_mbox;
4508 int i;
4509
4510 /* Flush adapter cache */
4511 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4512 raw_mbox[0] = FLUSH_ADAPTER;
4513
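	/*
	 * Interrupts are no longer needed; the flush commands below are
	 * issued in blocking (polled) mode.
	 */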
4514 free_irq(adapter->host->irq, adapter);
4515
4516 /* Issue a blocking (interrupts disabled) command to the card */
4517 issue_scb_block(adapter, raw_mbox);
4518
4519 /* Flush disks cache */
4520 memset(&mbox->m_out, 0, sizeof(raw_mbox));
4521 raw_mbox[0] = FLUSH_SYSTEM;
4522
4523 /* Issue a blocking (interrupts disabled) command to the card */
4524 issue_scb_block(adapter, raw_mbox);
4525
4526 if (atomic_read(&adapter->pend_cmds) > 0)
4527 dev_warn(&adapter->dev->dev, "pending commands!!\n");
4528
4529 /*
	 * Have a deliberate delay to make sure all the caches are
4531 * actually flushed.
4532 */
4533 for (i = 0; i <= 10; i++)
4534 mdelay(1000);
4535}
4536
4537static void
4538megaraid_remove_one(struct pci_dev *pdev)
4539{
4540 struct Scsi_Host *host = pci_get_drvdata(pdev);
4541 adapter_t *adapter = (adapter_t *)host->hostdata;
4542 char buf[12] = { 0 };
4543
4544 scsi_remove_host(host);
4545
4546 __megaraid_shutdown(adapter);
4547
4548 /* Free our resources */
4549 if (adapter->flag & BOARD_MEMMAP) {
		iounmap((void __iomem *)adapter->base);
4551 release_mem_region(adapter->host->base, 128);
4552 } else
4553 release_region(adapter->base, 16);
4554
4555 mega_free_sgl(adapter);
4556
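	/* Remove this controller's hbaN subtree from /proc/megaraid */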
4557 sprintf(buf, "hba%d", adapter->host->host_no);
4558 remove_proc_subtree(buf, mega_proc_dir_entry);
4559
4560 dma_free_coherent(&adapter->dev->dev, MEGA_BUFFER_SIZE,
4561 adapter->mega_buffer, adapter->buf_dma_handle);
4562 kfree(adapter->scb_list);
4563 dma_free_coherent(&adapter->dev->dev, sizeof(mbox64_t),
4564 adapter->una_mbox64, adapter->una_mbox64_dma);
4565
4566 scsi_host_put(host);
4567 pci_disable_device(pdev);
4568
4569 hba_count--;
4570}
4571
4572static void
4573megaraid_shutdown(struct pci_dev *pdev)
4574{
4575 struct Scsi_Host *host = pci_get_drvdata(pdev);
4576 adapter_t *adapter = (adapter_t *)host->hostdata;
4577
4578 __megaraid_shutdown(adapter);
4579}
4580
static const struct pci_device_id megaraid_pci_tbl[] = {
4582 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID,
4583 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4584 {PCI_VENDOR_ID_AMI, PCI_DEVICE_ID_AMI_MEGARAID2,
4585 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4586 {PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_AMI_MEGARAID3,
4587 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
4588 {0,}
4589};
4590MODULE_DEVICE_TABLE(pci, megaraid_pci_tbl);
4591
4592static struct pci_driver megaraid_pci_driver = {
4593 .name = "megaraid_legacy",
4594 .id_table = megaraid_pci_tbl,
4595 .probe = megaraid_probe_one,
4596 .remove = megaraid_remove_one,
4597 .shutdown = megaraid_shutdown,
4598};
4599
4600static int __init megaraid_init(void)
4601{
4602 int error;
4603
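	/* Clamp out-of-range module parameters to their allowed limits */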
4604 if ((max_cmd_per_lun <= 0) || (max_cmd_per_lun > MAX_CMD_PER_LUN))
4605 max_cmd_per_lun = MAX_CMD_PER_LUN;
4606 if (max_mbox_busy_wait > MBOX_BUSY_WAIT)
4607 max_mbox_busy_wait = MBOX_BUSY_WAIT;
4608
4609#ifdef CONFIG_PROC_FS
4610 mega_proc_dir_entry = proc_mkdir("megaraid", NULL);
4611 if (!mega_proc_dir_entry) {
4612 printk(KERN_WARNING
4613 "megaraid: failed to create megaraid root\n");
4614 }
4615#endif
4616 error = pci_register_driver(&megaraid_pci_driver);
4617 if (error) {
4618#ifdef CONFIG_PROC_FS
4619 remove_proc_entry("megaraid", NULL);
4620#endif
4621 return error;
4622 }
4623
4624 /*
4625 * Register the driver as a character device, for applications
4626 * to access it for ioctls.
	 * Passing 0 as the first argument (major) to register_chrdev
	 * requests dynamic major number allocation.
4629 */
4630 major = register_chrdev(0, "megadev_legacy", &megadev_fops);
	if (major < 0) {
4632 printk(KERN_WARNING
4633 "megaraid: failed to register char device\n");
4634 }
4635
4636 return 0;
4637}
4638
4639static void __exit megaraid_exit(void)
4640{
4641 /*
4642 * Unregister the character device interface to the driver.
4643 */
4644 unregister_chrdev(major, "megadev_legacy");
4645
4646 pci_unregister_driver(&megaraid_pci_driver);
4647
4648#ifdef CONFIG_PROC_FS
4649 remove_proc_entry("megaraid", NULL);
4650#endif
4651}
4652
4653module_init(megaraid_init);
4654module_exit(megaraid_exit);
4655
4656/* vi: set ts=8 sw=8 tw=78: */