/*
   3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.

   Written By: Adam Radford <linuxraid@lsi.com>
   Modifications By: Tom Couch <linuxraid@lsi.com>

   Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
   Copyright (C) 2010 LSI Corporation.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; version 2 of the License.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
   GNU General Public License for more details.

   NO WARRANTY
   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
   solely responsible for determining the appropriateness of using and
   distributing the Program and assumes all risks associated with its
   exercise of rights under this Agreement, including but not limited to
   the risks and costs of program errors, damage to or loss of data,
   programs or equipment, and unavailability or interruption of operations.

   DISCLAIMER OF LIABILITY
   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA

   Bugs/Comments/Suggestions should be mailed to:
   linuxraid@lsi.com

   For more information, go to:
   http://www.lsi.com

   Note: This version of the driver does not contain a bundled firmware
         image.

   History
   -------
   2.26.02.000 - Driver cleanup for kernel submission.
   2.26.02.001 - Replace schedule_timeout() calls with msleep().
   2.26.02.002 - Add support for PAE mode.
                 Add lun support.
                 Fix twa_remove() to free irq handler/unregister_chrdev()
                 before shutting down card.
                 Change to new 'change_queue_depth' api.
                 Fix 'handled=1' ISR usage, remove bogus IRQ check.
                 Remove un-needed eh_abort handler.
                 Add support for embedded firmware error strings.
   2.26.02.003 - Correctly handle single sgl's with use_sg=1.
   2.26.02.004 - Add support for 9550SX controllers.
   2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
   2.26.02.006 - Fix 9550SX pchip reset timeout.
                 Add big endian support.
   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
   2.26.02.008 - Free irq handler in __twa_shutdown().
                 Serialize reset code.
                 Add support for 9650SE controllers.
   2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
   2.26.02.010 - Add support for 9690SA controllers.
   2.26.02.011 - Increase max AENs drained to 256.
                 Add MSI support and "use_msi" module parameter.
                 Fix bug in twa_get_param() on 4GB+.
                 Use pci_resource_len() for ioremap().
   2.26.02.012 - Add power management support.
   2.26.02.013 - Fix bug in twa_load_sgl().
   2.26.02.014 - Force 60 second timeout default.
*/

#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_cmnd.h>
#include "3w-9xxx.h"

/* Globals */
#define TW_DRIVER_VERSION "2.26.02.014"
static DEFINE_MUTEX(twa_chrdev_mutex);
static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
static unsigned int twa_device_extension_count;
static int twa_major = -1;
extern struct timezone sys_tz;

/* Module parameters */
MODULE_AUTHOR ("LSI");
MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(TW_DRIVER_VERSION);

static int use_msi = 0;
module_param(use_msi, int, S_IRUGO);
MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
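/* Example: enable MSI at load time with "modprobe 3w-9xxx use_msi=1". */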

/* Function prototypes */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
static char *twa_aen_severity_lookup(unsigned char severity_code);
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
static int twa_chrdev_open(struct inode *inode, struct file *file);
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result);
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);

/* Functions */

/* Show some statistics about the card */
static ssize_t twa_show_stats(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *host = class_to_shost(dev);
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
	unsigned long flags = 0;
	ssize_t len;

	spin_lock_irqsave(tw_dev->host->host_lock, flags);
	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
		       "Current commands posted: %4d\n"
		       "Max commands posted: %4d\n"
		       "Current pending commands: %4d\n"
		       "Max pending commands: %4d\n"
		       "Last sgl length: %4d\n"
		       "Max sgl length: %4d\n"
		       "Last sector count: %4d\n"
		       "Max sector count: %4d\n"
		       "SCSI Host Resets: %4d\n"
		       "AEN's: %4d\n",
		       TW_DRIVER_VERSION,
		       tw_dev->posted_request_count,
		       tw_dev->max_posted_request_count,
		       tw_dev->pending_request_count,
		       tw_dev->max_pending_request_count,
		       tw_dev->sgl_entries,
		       tw_dev->max_sgl_entries,
		       tw_dev->sector_count,
		       tw_dev->max_sector_count,
		       tw_dev->num_resets,
		       tw_dev->aen_count);
	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
	return len;
} /* End twa_show_stats() */

/* This function will set a device's queue depth */
static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
				  int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

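	/* Keep a couple of slots below TW_Q_LENGTH free for internal driver
	   commands (e.g. AEN reads and chrdev ioctls). */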
	if (queue_depth > TW_Q_LENGTH-2)
		queue_depth = TW_Q_LENGTH-2;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
} /* End twa_change_queue_depth() */

/* Create sysfs 'stats' entry */
static struct device_attribute twa_host_stats_attr = {
	.attr = {
		.name = "stats",
		.mode = S_IRUGO,
	},
	.show = twa_show_stats
};

/* Host attributes initializer */
static struct device_attribute *twa_host_attrs[] = {
	&twa_host_stats_attr,
	NULL,
};

/* File operations struct for character device */
static const struct file_operations twa_fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = twa_chrdev_ioctl,
	.open = twa_chrdev_open,
	.release = NULL,
	.llseek = noop_llseek,
};

/* This function will complete an aen request from the isr */
static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int retval = 1;

	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
	tw_dev->posted_request_count--;
	aen = le16_to_cpu(header->status_block.error);
	full_command_packet = tw_dev->command_packet_virt[request_id];
	command_packet = &full_command_packet->command.oldcommand;

	/* First check for internal completion of set param for time sync */
	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
		/* Keep reading the queue in case there are more aen's */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}

	switch (aen) {
	case TW_AEN_QUEUE_EMPTY:
		/* Quit reading the queue if this is the last one */
		break;
	case TW_AEN_SYNC_TIME_WITH_HOST:
		twa_aen_sync_time(tw_dev, request_id);
		retval = 0;
		goto out;
	default:
		twa_aen_queue_event(tw_dev, header);

		/* If there are more aen's, keep reading the queue */
		if (twa_aen_read_queue(tw_dev, request_id))
			goto out2;
		else {
			retval = 0;
			goto out;
		}
	}
	retval = 0;
out2:
	tw_dev->state[request_id] = TW_S_COMPLETED;
	twa_free_request_id(tw_dev, request_id);
	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
out:
	return retval;
} /* End twa_aen_complete() */

/* This function will drain aen queue */
static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
{
	int request_id = 0;
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	int finished = 0, count = 0;
	TW_Command_Full *full_command_packet;
	TW_Command_Apache_Header *header;
	unsigned short aen;
	int first_reset = 0, queue = 0, retval = 1;

	if (no_check_reset)
		first_reset = 0;
	else
		first_reset = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
		goto out;
	}

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	do {
		/* Send command to the board */
		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
			goto out;
		}

		/* Now poll for completion */
		if (twa_poll_response(tw_dev, request_id, 30)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
			tw_dev->posted_request_count--;
			goto out;
		}

		tw_dev->posted_request_count--;
		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
		aen = le16_to_cpu(header->status_block.error);
		queue = 0;
		count++;

		switch (aen) {
		case TW_AEN_QUEUE_EMPTY:
			if (first_reset != 1)
				goto out;
			else
				finished = 1;
			break;
		case TW_AEN_SOFT_RESET:
			if (first_reset == 0)
				first_reset = 1;
			else
				queue = 1;
			break;
		case TW_AEN_SYNC_TIME_WITH_HOST:
			break;
		default:
			queue = 1;
		}

		/* Now queue an event info */
		if (queue)
			twa_aen_queue_event(tw_dev, header);
	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));

	if (count == TW_MAX_AEN_DRAIN)
		goto out;

	retval = 0;
out:
	tw_dev->state[request_id] = TW_S_INITIAL;
	return retval;
} /* End twa_aen_drain_queue() */

/* This function will queue an event */
static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
{
	u32 local_time;
	struct timeval time;
	TW_Event *event;
	unsigned short aen;
	char host[16];
	char *error_str;

	tw_dev->aen_count++;

	/* Fill out event info */
	event = tw_dev->event_queue[tw_dev->error_index];

	/* Check for clobber */
	host[0] = '\0';
	if (tw_dev->host) {
		sprintf(host, " scsi%d:", tw_dev->host->host_no);
		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
			tw_dev->aen_clobber = 1;
	}

	aen = le16_to_cpu(header->status_block.error);
	memset(event, 0, sizeof(TW_Event));

	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
	do_gettimeofday(&time);
	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
	event->time_stamp_sec = local_time;
	event->aen_code = aen;
	event->retrieved = TW_AEN_NOT_RETRIEVED;
	event->sequence_id = tw_dev->error_sequence_id;
	tw_dev->error_sequence_id++;

	/* Check for embedded error string */
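	/* If the firmware appended an extra string, it sits immediately after
	   the NUL that terminates err_specific_desc. */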
	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);

	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
	event->parameter_len = strlen(header->err_specific_desc);
	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
	if (event->severity != TW_AEN_SEVERITY_DEBUG)
		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
		       host,
		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
		       header->err_specific_desc);
	else
		tw_dev->aen_count--;

	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
		tw_dev->event_queue_wrapped = 1;
	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
} /* End twa_aen_queue_event() */

/* This function will read the aen queue from the isr */
static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
{
	char cdb[TW_MAX_CDB_LEN];
	TW_SG_Entry sglist[1];
	TW_Command_Full *full_command_packet;
	int retval = 1;

	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));

	/* Initialize cdb */
	memset(&cdb, 0, TW_MAX_CDB_LEN);
	cdb[0] = REQUEST_SENSE; /* opcode */
	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */

	/* Initialize sglist */
	memset(&sglist, 0, sizeof(TW_SG_Entry));
	sglist[0].length = TW_SECTOR_SIZE;
	sglist[0].address = tw_dev->generic_buffer_phys[request_id];

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command packet */
	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_aen_read_queue() */

/* This function will look up an AEN severity string */
static char *twa_aen_severity_lookup(unsigned char severity_code)
{
	char *retval = NULL;

	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
		goto out;

	retval = twa_aen_severity_table[severity_code];
out:
	return retval;
} /* End twa_aen_severity_lookup() */

/* This function will sync firmware time with the host time */
static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
{
	u32 schedulertime;
	struct timeval utc;
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	u32 local_time;

	/* Fill out the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;
	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
	command_packet->request_id = request_id;
	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);

	/* Setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
	param->parameter_size_bytes = cpu_to_le16(4);

	/* Convert system time in UTC to local time seconds since last
	   Sunday 12:00AM */
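	/* The Unix epoch (Jan 1, 1970) fell on a Thursday, so subtracting
	   three days aligns the weekly modulo (604800 seconds) to a Sunday. */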
	do_gettimeofday(&utc);
	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
	schedulertime = local_time - (3 * 86400);
	schedulertime = cpu_to_le32(schedulertime % 604800);

	memcpy(param->data, &schedulertime, sizeof(u32));

	/* Mark internal command */
	tw_dev->srb[request_id] = NULL;

	/* Now post the command */
	twa_post_command_packet(tw_dev, request_id, 1);
} /* End twa_aen_sync_time() */

/* This function will allocate memory and check if it is correctly aligned */
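/* 'which' selects the pool: 0 = command packet buffers, 1 = generic (AEN/param) buffers */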
static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
{
	int i;
	dma_addr_t dma_handle;
	unsigned long *cpu_addr;
	int retval = 1;

	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
	if (!cpu_addr) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
		goto out;
	}

	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
		goto out;
	}

	memset(cpu_addr, 0, size*TW_Q_LENGTH);

	for (i = 0; i < TW_Q_LENGTH; i++) {
		switch(which) {
		case 0:
			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
			break;
		case 1:
			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
			break;
		}
	}
	retval = 0;
out:
	return retval;
} /* End twa_allocate_memory() */

/* This function will check the status register for unexpected bits */
static int twa_check_bits(u32 status_reg_value)
{
	int retval = 1;

	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
		goto out;
	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
		goto out;

	retval = 0;
out:
	return retval;
} /* End twa_check_bits() */

/* This function will check the srl and decide if we are compatible */
static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
{
	int retval = 1;
	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
	u32 init_connect_result = 0;

	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
			       &fw_on_ctlr_build, &init_connect_result)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
		goto out;
	}

	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;

	/* Try base mode compatibility */
	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
				       TW_EXTENDED_INIT_CONNECT,
				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
				       &init_connect_result)) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
			goto out;
		}
		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
			} else {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
			}
			goto out;
		}
		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
	}

	/* Load rest of compatibility struct */
	strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;

	retval = 0;
out:
	return retval;
} /* End twa_check_srl() */

/* This function handles ioctl for the character device */
static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	long timeout;
	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
	dma_addr_t dma_handle;
	int request_id = 0;
	unsigned int sequence_id = 0;
	unsigned char event_index, start_index;
	TW_Ioctl_Driver_Command driver_command;
	TW_Ioctl_Buf_Apache *tw_ioctl;
	TW_Lock *tw_lock;
	TW_Command_Full *full_command_packet;
	TW_Compatibility_Info *tw_compat_info;
	TW_Event *event;
	struct timeval current_time;
	u32 current_time_ms;
	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
	int retval = TW_IOCTL_ERROR_OS_EFAULT;
	void __user *argp = (void __user *)arg;

	mutex_lock(&twa_chrdev_mutex);

	/* Only let one of these through at a time */
	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
		retval = TW_IOCTL_ERROR_OS_EINTR;
		goto out;
	}

	/* First copy down the driver command */
	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
		goto out2;

	/* Check data buffer size */
	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
		retval = TW_IOCTL_ERROR_OS_EINVAL;
		goto out2;
	}

	/* Hardware can only do multiple of 512 byte transfers */
	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;

	/* Now allocate ioctl buf memory */
	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
	if (!cpu_addr) {
		retval = TW_IOCTL_ERROR_OS_ENOMEM;
		goto out2;
	}
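	/* The "- 1" in the size above is assumed to fold in the single
	   placeholder byte TW_Ioctl_Buf_Apache reserves for data_buffer[]. */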

	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;

	/* Now copy down the entire ioctl */
	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
		goto out3;

	/* See which ioctl we are doing */
	switch (cmd) {
	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		twa_get_request_id(tw_dev, &request_id);

		/* Flag internal command */
		tw_dev->srb[request_id] = NULL;

		/* Flag chrdev ioctl */
		tw_dev->chrdev_request_id = request_id;

		full_command_packet = &tw_ioctl->firmware_command;

		/* Load request id and sglist for both command types */
		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);

		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));

		/* Now post the command packet to the controller */
		twa_post_command_packet(tw_dev, request_id, 1);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);

		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;

		/* Now wait for command to complete */
		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);

		/* We timed out, and didn't get an interrupt */
		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
			/* Now we need to reset the board */
			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
			       tw_dev->host->host_no, TW_DRIVER, 0x37,
			       cmd);
			retval = TW_IOCTL_ERROR_OS_EIO;
			twa_reset_device_extension(tw_dev);
			goto out3;
		}

		/* Now copy in the command packet response */
		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));

		/* Now complete the io */
		spin_lock_irqsave(tw_dev->host->host_lock, flags);
		tw_dev->posted_request_count--;
		tw_dev->state[request_id] = TW_S_COMPLETED;
		twa_free_request_id(tw_dev, request_id);
		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
		break;
	case TW_IOCTL_GET_COMPATIBILITY_INFO:
		tw_ioctl->driver_command.status = 0;
		/* Copy compatibility struct into ioctl data buffer */
		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
		break;
	case TW_IOCTL_GET_LAST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
		}
		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_FIRST_EVENT:
		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			} else
				tw_ioctl->driver_command.status = 0;
			event_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			tw_ioctl->driver_command.status = 0;
			event_index = 0;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_NEXT_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
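		/* Translate the caller's last-seen sequence id into the
		   circular index of the following event */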
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_PREVIOUS_EVENT:
		event = (TW_Event *)tw_ioctl->data_buffer;
		sequence_id = event->sequence_id;
		tw_ioctl->driver_command.status = 0;

		if (tw_dev->event_queue_wrapped) {
			if (tw_dev->aen_clobber) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
				tw_dev->aen_clobber = 0;
			}
			start_index = tw_dev->error_index;
		} else {
			if (!tw_dev->error_index) {
				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
				break;
			}
			start_index = 0;
		}
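		/* Same translation as above, but stepping back to the event
		   preceding the given sequence id */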
		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;

		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
				tw_dev->aen_clobber = 1;
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
			break;
		}
		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
		break;
	case TW_IOCTL_GET_LOCK:
		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
		do_gettimeofday(&current_time);
		current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);

		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
			tw_dev->ioctl_sem_lock = 1;
			tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
			tw_ioctl->driver_command.status = 0;
			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
			tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
		}
		break;
	case TW_IOCTL_RELEASE_LOCK:
		if (tw_dev->ioctl_sem_lock == 1) {
			tw_dev->ioctl_sem_lock = 0;
			tw_ioctl->driver_command.status = 0;
		} else {
			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
		}
		break;
	default:
		retval = TW_IOCTL_ERROR_OS_ENOTTY;
		goto out3;
	}

	/* Now copy the entire response to userspace */
	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
		retval = 0;
out3:
	/* Now free ioctl buf memory */
	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
out2:
	mutex_unlock(&tw_dev->ioctl_lock);
out:
	mutex_unlock(&twa_chrdev_mutex);
	return retval;
} /* End twa_chrdev_ioctl() */

/* This function handles open for the character device */
/* NOTE that this function will race with remove. */
static int twa_chrdev_open(struct inode *inode, struct file *file)
{
	unsigned int minor_number;
	int retval = TW_IOCTL_ERROR_OS_ENODEV;

	minor_number = iminor(inode);
	if (minor_number >= twa_device_extension_count)
		goto out;
	retval = 0;
out:
	return retval;
} /* End twa_chrdev_open() */

/* This function will print readable messages from status register errors */
static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
{
	int retval = 1;

	/* Check for various error conditions and handle them appropriately */
	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_PCI_ABORT) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
	}

	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
	}

	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
		if (tw_dev->reset_print == 0) {
			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
			tw_dev->reset_print = 1;
		}
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_decode_bits() */

/* This function will empty the response queue */
static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
{
	u32 status_reg_value, response_que_value;
	int count = 0, retval = 1;

	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
		response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
		count++;
	}
	if (count == TW_MAX_RESPONSE_DRAIN)
		goto out;

	retval = 0;
out:
	return retval;
} /* End twa_empty_response_queue() */

/* This function will clear the pchip/response queue on 9550SX */
static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
{
	u32 response_que_value = 0;
	unsigned long before;
	int retval = 1;

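	/* Only 9550SX and newer controllers have the large response queue
	   drain register; older 9000-series boards skip the drain. */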
	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
		before = jiffies;
		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
			msleep(1);
			if (time_after(jiffies, before + HZ * 30))
				goto out;
		}
		/* P-chip settle time */
		msleep(500);
		retval = 0;
	} else
		retval = 0;
out:
	return retval;
} /* End twa_empty_response_queue_large() */

/* This function passes sense keys from firmware to scsi layer */
static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
{
	TW_Command_Full *full_command_packet;
	unsigned short error;
	int retval = 1;
	char *error_str;

	full_command_packet = tw_dev->command_packet_virt[request_id];

	/* Check for embedded error string */
	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);

	/* Don't print error for Logical unit not supported during rollcall */
	error = le16_to_cpu(full_command_packet->header.status_block.error);
	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
		if (print_host)
			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       tw_dev->host->host_no,
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
			       full_command_packet->header.status_block.error,
			       error_str[0] == '\0' ?
			       twa_string_lookup(twa_error_table,
						 full_command_packet->header.status_block.error) : error_str,
			       full_command_packet->header.err_specific_desc);
		else
			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
			       full_command_packet->header.status_block.error,
			       error_str[0] == '\0' ?
			       twa_string_lookup(twa_error_table,
						 full_command_packet->header.status_block.error) : error_str,
			       full_command_packet->header.err_specific_desc);
	}

	if (copy_sense) {
		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
		retval = TW_ISR_DONT_RESULT;
		goto out;
	}
	retval = 0;
out:
	return retval;
} /* End twa_fill_sense() */

/* This function will free up device extension resources */
static void twa_free_device_extension(TW_Device_Extension *tw_dev)
{
	if (tw_dev->command_packet_virt[0])
		pci_free_consistent(tw_dev->tw_pci_dev,
				    sizeof(TW_Command_Full)*TW_Q_LENGTH,
				    tw_dev->command_packet_virt[0],
				    tw_dev->command_packet_phys[0]);

	if (tw_dev->generic_buffer_virt[0])
		pci_free_consistent(tw_dev->tw_pci_dev,
				    TW_SECTOR_SIZE*TW_Q_LENGTH,
				    tw_dev->generic_buffer_virt[0],
				    tw_dev->generic_buffer_phys[0]);

	kfree(tw_dev->event_queue[0]);
} /* End twa_free_device_extension() */

/* This function will free a request id */
static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
{
	tw_dev->free_queue[tw_dev->free_tail] = request_id;
	tw_dev->state[request_id] = TW_S_FINISHED;
	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
} /* End twa_free_request_id() */

/* This function will get parameter table entries from the firmware */
static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
{
	TW_Command_Full *full_command_packet;
	TW_Command *command_packet;
	TW_Param_Apache *param;
	void *retval = NULL;

	/* Setup the command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	command_packet = &full_command_packet->command.oldcommand;

	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
	command_packet->size = TW_COMMAND_SIZE;
	command_packet->request_id = request_id;
	command_packet->byte6_offset.block_count = cpu_to_le16(1);

	/* Now setup the param */
	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
	memset(param, 0, TW_SECTOR_SIZE);
	param->table_id = cpu_to_le16(table_id | 0x8000);
	param->parameter_id = cpu_to_le16(parameter_id);
	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);

	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);

	/* Post the command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30))
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
	else
		retval = (void *)&(param->data[0]);

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_get_param() */

/* This function will assign an available request id */
static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
{
	*request_id = tw_dev->free_queue[tw_dev->free_head];
	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
	tw_dev->state[*request_id] = TW_S_STARTED;
} /* End twa_get_request_id() */

/* This function will send an initconnection command to controller */
static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
			      u32 set_features, unsigned short current_fw_srl,
			      unsigned short current_fw_arch_id,
			      unsigned short current_fw_branch,
			      unsigned short current_fw_build,
			      unsigned short *fw_on_ctlr_srl,
			      unsigned short *fw_on_ctlr_arch_id,
			      unsigned short *fw_on_ctlr_branch,
			      unsigned short *fw_on_ctlr_build,
			      u32 *init_connect_result)
{
	TW_Command_Full *full_command_packet;
	TW_Initconnect *tw_initconnect;
	int request_id = 0, retval = 1;

	/* Initialize InitConnection command packet */
	full_command_packet = tw_dev->command_packet_virt[request_id];
	memset(full_command_packet, 0, sizeof(TW_Command_Full));
	full_command_packet->header.header_desc.size_header = 128;

	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
	tw_initconnect->request_id = request_id;
	tw_initconnect->message_credits = cpu_to_le16(message_credits);
	tw_initconnect->features = set_features;

	/* Turn on 64-bit sgl support if we need to */
	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;

	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);

	if (set_features & TW_EXTENDED_INIT_CONNECT) {
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
	} else
		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;

	/* Send command packet to the board */
	twa_post_command_packet(tw_dev, request_id, 1);

	/* Poll for completion */
	if (twa_poll_response(tw_dev, request_id, 30)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
	} else {
		if (set_features & TW_EXTENDED_INIT_CONNECT) {
			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
			*init_connect_result = le32_to_cpu(tw_initconnect->result);
		}
		retval = 0;
	}

	tw_dev->posted_request_count--;
	tw_dev->state[request_id] = TW_S_INITIAL;

	return retval;
} /* End twa_initconnection() */

/* This function will initialize the fields of a device extension */
static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
{
	int i, retval = 1;

	/* Initialize command packet buffers */
	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
		goto out;
	}

	/* Initialize generic buffer */
	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
		goto out;
	}

	/* Allocate event info space */
	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
	if (!tw_dev->event_queue[0]) {
		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
		goto out;
	}

	for (i = 0; i < TW_Q_LENGTH; i++) {
		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
		tw_dev->free_queue[i] = i;
		tw_dev->state[i] = TW_S_INITIAL;
	}

	tw_dev->pending_head = TW_Q_START;
	tw_dev->pending_tail = TW_Q_START;
	tw_dev->free_head = TW_Q_START;
	tw_dev->free_tail = TW_Q_START;
	tw_dev->error_sequence_id = 1;
	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;

	mutex_init(&tw_dev->ioctl_lock);
	init_waitqueue_head(&tw_dev->ioctl_wqueue);

	retval = 0;
out:
	return retval;
} /* End twa_initialize_device_extension() */

/* This function is the interrupt service routine */
static irqreturn_t twa_interrupt(int irq, void *dev_instance)
{
	int request_id, error = 0;
	u32 status_reg_value;
	TW_Response_Queue response_que;
	TW_Command_Full *full_command_packet;
	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
	int handled = 0;

	/* Get the per adapter lock */
	spin_lock(tw_dev->host->host_lock);

	/* Read the registers */
	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));

	/* Check if this is our interrupt, otherwise bail */
	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
		goto twa_interrupt_bail;

	handled = 1;

	/* If we are resetting, bail */
	if (test_bit(TW_IN_RESET, &tw_dev->flags))
		goto twa_interrupt_bail;

	/* Check controller for errors */
	if (twa_check_bits(status_reg_value)) {
		if (twa_decode_bits(tw_dev, status_reg_value)) {
			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
			goto twa_interrupt_bail;
		}
	}

	/* Handle host interrupt */
	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
		TW_CLEAR_HOST_INTERRUPT(tw_dev);

	/* Handle attention interrupt */
	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
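		/* Start an AEN drain only if one isn't already in progress */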
		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
			twa_get_request_id(tw_dev, &request_id);

			error = twa_aen_read_queue(tw_dev, request_id);
			if (error) {
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
			}
		}
	}

	/* Handle command interrupt */
	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
		TW_MASK_COMMAND_INTERRUPT(tw_dev);
		/* Drain as many pending commands as we can */
		while (tw_dev->pending_request_count > 0) {
			request_id = tw_dev->pending_queue[tw_dev->pending_head];
			if (tw_dev->state[request_id] != TW_S_PENDING) {
				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
				goto twa_interrupt_bail;
			}
			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
				tw_dev->pending_request_count--;
			} else {
				/* If we get here, we will continue re-posting on the next command interrupt */
				break;
			}
		}
	}

	/* Handle response interrupt */
	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {

		/* Drain the response queue from the board */
		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
			/* Complete the response */
			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
			request_id = TW_RESID_OUT(response_que.response_id);
			full_command_packet = tw_dev->command_packet_virt[request_id];
			error = 0;
			/* Check for command packet errors */
			if (full_command_packet->command.newcommand.status != 0) {
				if (tw_dev->srb[request_id] != NULL) {
					error = twa_fill_sense(tw_dev, request_id, 1, 1);
				} else {
					/* Skip ioctl error prints */
					if (request_id != tw_dev->chrdev_request_id) {
						error = twa_fill_sense(tw_dev, request_id, 0, 1);
					}
				}
			}

			/* Check for correct state */
			if (tw_dev->state[request_id] != TW_S_POSTED) {
				if (tw_dev->srb[request_id] != NULL) {
					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}

			/* Check for internal command completion */
			if (tw_dev->srb[request_id] == NULL) {
				if (request_id != tw_dev->chrdev_request_id) {
					if (twa_aen_complete(tw_dev, request_id))
						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
				} else {
					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
					wake_up(&tw_dev->ioctl_wqueue);
				}
			} else {
				struct scsi_cmnd *cmd;

				cmd = tw_dev->srb[request_id];

				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
				/* If no error, the command was a success */
				if (error == 0) {
					cmd->result = (DID_OK << 16);
				}

				/* If error, command failed */
				if (error == 1) {
					/* Ask for a host reset */
					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
				}

				/* Report residual bytes for single sgl */
				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
				}

				/* Now complete the io */
				tw_dev->state[request_id] = TW_S_COMPLETED;
				twa_free_request_id(tw_dev, request_id);
				tw_dev->posted_request_count--;
				tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
				twa_unmap_scsi_data(tw_dev, request_id);
			}

			/* Check for valid status after each drain */
			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
			if (twa_check_bits(status_reg_value)) {
				if (twa_decode_bits(tw_dev, status_reg_value)) {
					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
					goto twa_interrupt_bail;
				}
			}
		}
	}

twa_interrupt_bail:
	spin_unlock(tw_dev->host->host_lock);
	return IRQ_RETVAL(handled);
} /* End twa_interrupt() */

/* This function will load the request id and various sgls for ioctls */
static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
{
	TW_Command *oldcommand;
	TW_Command_Apache *newcommand;
	TW_SG_Entry *sgl;
	unsigned int pae = 0;

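	/* pae is set on 32-bit kernels built with 64-bit dma_addr_t; the sgl
	   offset and command size below grow by one 32-bit word in that case. */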
1386 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1387 pae = 1;
1388
1389 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1390 newcommand = &full_command_packet->command.newcommand;
1391 newcommand->request_id__lunl =
1392 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1393 if (length) {
1394 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1395 newcommand->sg_list[0].length = cpu_to_le32(length);
1396 }
1397 newcommand->sgl_entries__lunh =
1398 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1399 } else {
1400 oldcommand = &full_command_packet->command.oldcommand;
1401 oldcommand->request_id = request_id;
1402
1403 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1404 /* Load the sg list */
1405 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1406 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1407 else
1408 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1409 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1410 sgl->length = cpu_to_le32(length);
1411
1412 oldcommand->size += pae;
1413 }
1414 }
1415} /* End twa_load_sgl() */
1416
1417/* This function will perform a pci-dma mapping for a scatter gather list */
1418static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1419{
1420 int use_sg;
1421 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1422
1423 use_sg = scsi_dma_map(cmd);
1424 if (!use_sg)
1425 return 0;
1426 else if (use_sg < 0) {
1427 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1428 return 0;
1429 }
1430
1431 cmd->SCp.phase = TW_PHASE_SGLIST;
1432 cmd->SCp.have_data_in = use_sg;
1433
1434 return use_sg;
1435} /* End twa_map_scsi_sg_data() */
1436
1437/* This function will poll for a response interrupt of a request */
1438static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1439{
1440 int retval = 1, found = 0, response_request_id;
1441 TW_Response_Queue response_queue;
1442 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1443
1444 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1445 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1446 response_request_id = TW_RESID_OUT(response_queue.response_id);
1447 if (request_id != response_request_id) {
1448 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1449 goto out;
1450 }
1451 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1452 if (full_command_packet->command.newcommand.status != 0) {
1453 /* bad response */
1454 twa_fill_sense(tw_dev, request_id, 0, 0);
1455 goto out;
1456 }
1457 found = 1;
1458 } else {
1459 if (full_command_packet->command.oldcommand.status != 0) {
1460 /* bad response */
1461 twa_fill_sense(tw_dev, request_id, 0, 0);
1462 goto out;
1463 }
1464 found = 1;
1465 }
1466 }
1467
1468 if (found)
1469 retval = 0;
1470out:
1471 return retval;
1472} /* End twa_poll_response() */
1473
1474/* This function will poll the status register for a flag */
1475static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1476{
1477 u32 status_reg_value;
1478 unsigned long before;
1479 int retval = 1;
1480
1481 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1482 before = jiffies;
1483
1484 if (twa_check_bits(status_reg_value))
1485 twa_decode_bits(tw_dev, status_reg_value);
1486
1487 while ((status_reg_value & flag) != flag) {
1488 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1489
1490 if (twa_check_bits(status_reg_value))
1491 twa_decode_bits(tw_dev, status_reg_value);
1492
1493 if (time_after(jiffies, before + HZ * seconds))
1494 goto out;
1495
1496 msleep(50);
1497 }
1498 retval = 0;
1499out:
1500 return retval;
1501} /* End twa_poll_status() */
1502
1503/* This function will poll the status register for disappearance of a flag */
1504static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1505{
1506 u32 status_reg_value;
1507 unsigned long before;
1508 int retval = 1;
1509
1510 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1511 before = jiffies;
1512
1513 if (twa_check_bits(status_reg_value))
1514 twa_decode_bits(tw_dev, status_reg_value);
1515
1516 while ((status_reg_value & flag) != 0) {
1517 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1518 if (twa_check_bits(status_reg_value))
1519 twa_decode_bits(tw_dev, status_reg_value);
1520
1521 if (time_after(jiffies, before + HZ * seconds))
1522 goto out;
1523
1524 msleep(50);
1525 }
1526 retval = 0;
1527out:
1528 return retval;
1529} /* End twa_poll_status_gone() */
1530
1531/* This function will attempt to post a command packet to the board */
1532static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1533{
1534 u32 status_reg_value;
1535 dma_addr_t command_que_value;
1536 int retval = 1;
1537
1538 command_que_value = tw_dev->command_packet_phys[request_id];
1539
1540 /* For 9650SE write low 4 bytes first */
1541 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1542 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1543 command_que_value += TW_COMMAND_OFFSET;
1544 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1545 }
1546
1547 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1548
1549 if (twa_check_bits(status_reg_value))
1550 twa_decode_bits(tw_dev, status_reg_value);
1551
1552 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1553
1554 /* Only pend internal driver commands */
1555 if (!internal) {
1556 retval = SCSI_MLQUEUE_HOST_BUSY;
1557 goto out;
1558 }
1559
1560 /* Couldn't post the command packet, so we do it later */
1561 if (tw_dev->state[request_id] != TW_S_PENDING) {
1562 tw_dev->state[request_id] = TW_S_PENDING;
1563 tw_dev->pending_request_count++;
1564 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1565 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1566 }
1567 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1568 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1569 }
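		/* Unmask the command interrupt so the pending request can be
		   posted later, once the controller signals that its command
		   queue has room again. */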
1570 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1571 goto out;
1572 } else {
1573 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1574 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1575 /* Now write upper 4 bytes */
1576 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1577 } else {
1578 if (sizeof(dma_addr_t) > 4) {
1579 command_que_value += TW_COMMAND_OFFSET;
1580 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1581 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1582 } else {
1583 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1584 }
1585 }
1586 tw_dev->state[request_id] = TW_S_POSTED;
1587 tw_dev->posted_request_count++;
1588 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1589 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1590 }
1591 }
1592 retval = 0;
1593out:
1594 return retval;
1595} /* End twa_post_command_packet() */
1596
1597/* This function will reset a device extension */
1598static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1599{
1600 int i = 0;
1601 int retval = 1;
1602 unsigned long flags = 0;
1603
1604 set_bit(TW_IN_RESET, &tw_dev->flags);
1605 TW_DISABLE_INTERRUPTS(tw_dev);
1606 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1607 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1608
1609 /* Abort all requests that are in progress */
1610 for (i = 0; i < TW_Q_LENGTH; i++) {
1611 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1612 (tw_dev->state[i] != TW_S_INITIAL) &&
1613 (tw_dev->state[i] != TW_S_COMPLETED)) {
1614 if (tw_dev->srb[i]) {
1615 tw_dev->srb[i]->result = (DID_RESET << 16);
1616 tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
1617 twa_unmap_scsi_data(tw_dev, i);
1618 }
1619 }
1620 }
1621
1622 /* Reset queues and counts */
1623 for (i = 0; i < TW_Q_LENGTH; i++) {
1624 tw_dev->free_queue[i] = i;
1625 tw_dev->state[i] = TW_S_INITIAL;
1626 }
1627 tw_dev->free_head = TW_Q_START;
1628 tw_dev->free_tail = TW_Q_START;
1629 tw_dev->posted_request_count = 0;
1630 tw_dev->pending_request_count = 0;
1631 tw_dev->pending_head = TW_Q_START;
1632 tw_dev->pending_tail = TW_Q_START;
1633 tw_dev->reset_print = 0;
1634
1635 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1636
1637 if (twa_reset_sequence(tw_dev, 1))
1638 goto out;
1639
1640 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1641 clear_bit(TW_IN_RESET, &tw_dev->flags);
1642 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1643
1644 retval = 0;
1645out:
1646 return retval;
1647} /* End twa_reset_device_extension() */
1648
1649/* This function will reset a controller */
1650static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1651{
1652 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1653
1654 while (tries < TW_MAX_RESET_TRIES) {
1655 if (do_soft_reset) {
1656 TW_SOFT_RESET(tw_dev);
1657 /* Clear pchip/response queue on 9550SX */
1658 if (twa_empty_response_queue_large(tw_dev)) {
1659 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1660 do_soft_reset = 1;
1661 tries++;
1662 continue;
1663 }
1664 }
1665
1666 /* Make sure controller is in a good state */
1667 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1668 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1669 do_soft_reset = 1;
1670 tries++;
1671 continue;
1672 }
1673
1674 /* Empty response queue */
1675 if (twa_empty_response_queue(tw_dev)) {
1676 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1677 do_soft_reset = 1;
1678 tries++;
1679 continue;
1680 }
1681
1682 flashed = 0;
1683
1684 /* Check for compatibility/flash */
1685 if (twa_check_srl(tw_dev, &flashed)) {
1686 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1687 do_soft_reset = 1;
1688 tries++;
1689 continue;
1690 } else {
1691 if (flashed) {
1692 tries++;
1693 continue;
1694 }
1695 }
1696
1697 /* Drain the AEN queue */
1698 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1699 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1700 do_soft_reset = 1;
1701 tries++;
1702 continue;
1703 }
1704
1705 /* If we got here, controller is in a good state */
1706 retval = 0;
1707 goto out;
1708 }
1709out:
1710 return retval;
1711} /* End twa_reset_sequence() */
1712
1713/* This function returns unit geometry in cylinders/heads/sectors */
1714static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1715{
1716 int heads, sectors, cylinders;
1717 TW_Device_Extension *tw_dev;
1718
1719 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1720
1721 if (capacity >= 0x200000) {
1722 heads = 255;
1723 sectors = 63;
1724 cylinders = sector_div(capacity, heads * sectors);
1725 } else {
1726 heads = 64;
1727 sectors = 32;
1728 cylinders = sector_div(capacity, heads * sectors);
1729 }
1730
1731 geom[0] = heads;
1732 geom[1] = sectors;
1733 geom[2] = cylinders;
1734
1735 return 0;
1736} /* End twa_scsi_biosparam() */
1737
1738/* This is the new scsi eh reset function */
1739static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1740{
1741 TW_Device_Extension *tw_dev = NULL;
1742 int retval = FAILED;
1743
1744 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1745
1746 tw_dev->num_resets++;
1747
1748 sdev_printk(KERN_WARNING, SCpnt->device,
1749 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1750 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1751
1752 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1753 mutex_lock(&tw_dev->ioctl_lock);
1754
1755 /* Now reset the card and some of the device extension data */
1756 if (twa_reset_device_extension(tw_dev)) {
1757 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1758 goto out;
1759 }
1760
1761 retval = SUCCESS;
1762out:
1763 mutex_unlock(&tw_dev->ioctl_lock);
1764 return retval;
1765} /* End twa_scsi_eh_reset() */
1766
1767/* This is the main scsi queue function to handle scsi opcodes */
1768static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1769{
1770 int request_id, retval;
1771 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1772
1773 /* If we are resetting due to timed out ioctl, report as busy */
1774 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1775 retval = SCSI_MLQUEUE_HOST_BUSY;
1776 goto out;
1777 }
1778
1779 /* Check if this FW supports luns */
1780 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1781 SCpnt->result = (DID_BAD_TARGET << 16);
1782 done(SCpnt);
1783 retval = 0;
1784 goto out;
1785 }
1786
1787 /* Save done function into scsi_cmnd struct */
1788 SCpnt->scsi_done = done;
1789
1790 /* Get a free request id */
1791 twa_get_request_id(tw_dev, &request_id);
1792
1793 /* Save the scsi command for use by the ISR */
1794 tw_dev->srb[request_id] = SCpnt;
1795
1796 /* Initialize phase to zero */
1797 SCpnt->SCp.phase = TW_PHASE_INITIAL;
1798
1799 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
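	/* twa_scsiop_execute_scsi() returns 0 when the command was posted,
	   SCSI_MLQUEUE_HOST_BUSY when the midlayer should retry, or 1 for a
	   driver-side failure that is completed below with DID_ERROR. */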
1800 switch (retval) {
1801 case SCSI_MLQUEUE_HOST_BUSY:
1802 twa_free_request_id(tw_dev, request_id);
1803 twa_unmap_scsi_data(tw_dev, request_id);
1804 break;
1805 case 1:
1806 tw_dev->state[request_id] = TW_S_COMPLETED;
1807 twa_free_request_id(tw_dev, request_id);
1808 twa_unmap_scsi_data(tw_dev, request_id);
1809 SCpnt->result = (DID_ERROR << 16);
1810 done(SCpnt);
1811 retval = 0;
1812 }
1813out:
1814 return retval;
1815} /* End twa_scsi_queue_lck() */
1816
1817static DEF_SCSI_QCMD(twa_scsi_queue)
1818
1819/* This function hands scsi cdb's to the firmware */
1820static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1821{
1822 TW_Command_Full *full_command_packet;
1823 TW_Command_Apache *command_packet;
1824 u32 num_sectors = 0x0;
1825 int i, sg_count;
1826 struct scsi_cmnd *srb = NULL;
1827 struct scatterlist *sglist = NULL, *sg;
1828 int retval = 1;
1829
1830 if (tw_dev->srb[request_id]) {
1831 srb = tw_dev->srb[request_id];
1832 if (scsi_sglist(srb))
1833 sglist = scsi_sglist(srb);
1834 }
1835
1836 /* Initialize command packet */
1837 full_command_packet = tw_dev->command_packet_virt[request_id];
1838 full_command_packet->header.header_desc.size_header = 128;
1839 full_command_packet->header.status_block.error = 0;
1840 full_command_packet->header.status_block.severity__reserved = 0;
1841
1842 command_packet = &full_command_packet->command.newcommand;
1843 command_packet->status = 0;
1844 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1845
1846 /* We forced 16 byte cdb use earlier */
1847 if (!cdb)
1848 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1849 else
1850 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1851
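	/* The LUN is split across two fields: TW_REQ_LUN_IN() carries its
	   low nibble with the request id here, and its high nibble with the
	   SG entry count in sgl_entries__lunh further down. */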
1852 if (srb) {
1853 command_packet->unit = srb->device->id;
1854 command_packet->request_id__lunl =
1855 cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1856 } else {
1857 command_packet->request_id__lunl =
1858 cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1859 command_packet->unit = 0;
1860 }
1861
1862 command_packet->sgl_offset = 16;
1863
1864 if (!sglistarg) {
1865 /* Map sglist from scsi layer to cmd packet */
1866
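		/* Small single-entry transfers (below TW_MIN_SGL_LENGTH) are
		   bounced through the per-request generic buffer instead of
		   being DMA-mapped; write data is copied in here, and read data
		   is copied back in twa_scsiop_execute_scsi_complete(). */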
1867 if (scsi_sg_count(srb)) {
1868 if ((scsi_sg_count(srb) == 1) &&
1869 (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
1870 if (srb->sc_data_direction == DMA_TO_DEVICE ||
1871 srb->sc_data_direction == DMA_BIDIRECTIONAL)
1872 scsi_sg_copy_to_buffer(srb,
1873 tw_dev->generic_buffer_virt[request_id],
1874 TW_SECTOR_SIZE);
1875 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1876 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1877 } else {
1878 sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
1879 if (sg_count == 0)
1880 goto out;
1881
1882 scsi_for_each_sg(srb, sg, sg_count, i) {
1883 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1884 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1885 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1886 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1887 goto out;
1888 }
1889 }
1890 }
1891 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1892 }
1893 } else {
1894 /* Internal cdb post */
1895 for (i = 0; i < use_sg; i++) {
1896 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1897 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1898 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1899 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1900 goto out;
1901 }
1902 }
1903 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1904 }
1905
1906 if (srb) {
1907 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1908 num_sectors = (u32)srb->cmnd[4];
1909
1910 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1911 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1912 }
1913
1914 /* Update sector statistic */
1915 tw_dev->sector_count = num_sectors;
1916 if (tw_dev->sector_count > tw_dev->max_sector_count)
1917 tw_dev->max_sector_count = tw_dev->sector_count;
1918
1919 /* Update SG statistics */
1920 if (srb) {
1921 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1922 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1923 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1924 }
1925
1926 /* Now post the command to the board */
1927 if (srb) {
1928 retval = twa_post_command_packet(tw_dev, request_id, 0);
1929 } else {
1930 twa_post_command_packet(tw_dev, request_id, 1);
1931 retval = 0;
1932 }
1933out:
1934 return retval;
1935} /* End twa_scsiop_execute_scsi() */
1936
1937/* This function completes an execute scsi operation */
1938static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1939{
1940 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1941
1942 if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
1943 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1944 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1945 if (scsi_sg_count(cmd) == 1) {
1946 void *buf = tw_dev->generic_buffer_virt[request_id];
1947
1948 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1949 }
1950 }
1951} /* End twa_scsiop_execute_scsi_complete() */
1952
1953/* This function tells the controller to shut down */
1954static void __twa_shutdown(TW_Device_Extension *tw_dev)
1955{
1956 /* Disable interrupts */
1957 TW_DISABLE_INTERRUPTS(tw_dev);
1958
1959 /* Free up the IRQ */
1960 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1961
1962 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1963
1964 /* Tell the card we are shutting down */
1965 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1966 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1967 } else {
1968 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1969 }
1970
1971 /* Clear all interrupts just before exit */
1972 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1973} /* End __twa_shutdown() */
1974
1975/* Wrapper for __twa_shutdown */
1976static void twa_shutdown(struct pci_dev *pdev)
1977{
1978 struct Scsi_Host *host = pci_get_drvdata(pdev);
1979 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1980
1981 __twa_shutdown(tw_dev);
1982} /* End twa_shutdown() */
1983
1984/* This function will look up a string */
1985static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1986{
1987 int index;
1988
1989 for (index = 0; ((code != table[index].code) &&
1990 (table[index].text != (char *)0)); index++);
1991 return(table[index].text);
1992} /* End twa_string_lookup() */
1993
1994/* This function will perform a pci-dma unmap */
1995static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1996{
1997 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1998
1999 if (cmd->SCp.phase == TW_PHASE_SGLIST)
2000 scsi_dma_unmap(cmd);
2001} /* End twa_unmap_scsi_data() */
2002
2003/* This function gets called when a disk is coming on-line */
2004static int twa_slave_configure(struct scsi_device *sdev)
2005{
2006 /* Force 60 second timeout */
2007 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
2008
2009 return 0;
2010} /* End twa_slave_configure() */
2011
2012/* scsi_host_template initializer */
2013static struct scsi_host_template driver_template = {
2014 .module = THIS_MODULE,
2015 .name = "3ware 9000 Storage Controller",
2016 .queuecommand = twa_scsi_queue,
2017 .eh_host_reset_handler = twa_scsi_eh_reset,
2018 .bios_param = twa_scsi_biosparam,
2019 .change_queue_depth = twa_change_queue_depth,
2020 .can_queue = TW_Q_LENGTH-2,
2021 .slave_configure = twa_slave_configure,
2022 .this_id = -1,
2023 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
2024 .max_sectors = TW_MAX_SECTORS,
2025 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
2026 .use_clustering = ENABLE_CLUSTERING,
2027 .shost_attrs = twa_host_attrs,
2028 .emulated = 1
2029};
2030
2031/* This function will probe and initialize a card */
2032static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2033{
2034 struct Scsi_Host *host = NULL;
2035 TW_Device_Extension *tw_dev;
2036 unsigned long mem_addr, mem_len;
2037 int retval = -ENODEV;
2038
2039 retval = pci_enable_device(pdev);
2040 if (retval) {
2041 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2042 goto out_disable_device;
2043 }
2044
2045 pci_set_master(pdev);
2046 pci_try_set_mwi(pdev);
2047
2048 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2049 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2050 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2051 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2052 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2053 retval = -ENODEV;
2054 goto out_disable_device;
2055 }
2056
2057 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2058 if (!host) {
2059 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2060 retval = -ENOMEM;
2061 goto out_disable_device;
2062 }
2063 tw_dev = (TW_Device_Extension *)host->hostdata;
2064
2065 /* Save values to device extension */
2066 tw_dev->host = host;
2067 tw_dev->tw_pci_dev = pdev;
2068
2069 if (twa_initialize_device_extension(tw_dev)) {
2070 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2071 goto out_free_device_extension;
2072 }
2073
2074 /* Request IO regions */
2075 retval = pci_request_regions(pdev, "3w-9xxx");
2076 if (retval) {
2077 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2078 goto out_free_device_extension;
2079 }
2080
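	/* The original 9000 exposes its registers in BAR 1; the later
	   9550SX/9650SE/9690SA controllers use BAR 2. */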
2081 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2082 mem_addr = pci_resource_start(pdev, 1);
2083 mem_len = pci_resource_len(pdev, 1);
2084 } else {
2085 mem_addr = pci_resource_start(pdev, 2);
2086 mem_len = pci_resource_len(pdev, 2);
2087 }
2088
2089 /* Save base address */
2090 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2091 if (!tw_dev->base_addr) {
2092 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2093 goto out_release_mem_region;
2094 }
2095
2096 /* Disable interrupts on the card */
2097 TW_DISABLE_INTERRUPTS(tw_dev);
2098
2099 /* Initialize the card */
2100 if (twa_reset_sequence(tw_dev, 0))
2101 goto out_iounmap;
2102
2103 /* Set host specific parameters */
2104 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2105 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2106 host->max_id = TW_MAX_UNITS_9650SE;
2107 else
2108 host->max_id = TW_MAX_UNITS;
2109
2110 host->max_cmd_len = TW_MAX_CDB_LEN;
2111
2112	/* Channels aren't supported by the adapter */
2113 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2114 host->max_channel = 0;
2115
2116 /* Register the card with the kernel SCSI layer */
2117 retval = scsi_add_host(host, &pdev->dev);
2118 if (retval) {
2119 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2120 goto out_iounmap;
2121 }
2122
2123 pci_set_drvdata(pdev, host);
2124
2125 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2126 host->host_no, mem_addr, pdev->irq);
2127 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2128 host->host_no,
2129 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2130 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2131 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2132 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2133 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2134 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2135
2136 /* Try to enable MSI */
2137 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2138 !pci_enable_msi(pdev))
2139 set_bit(TW_USING_MSI, &tw_dev->flags);
2140
2141 /* Now setup the interrupt handler */
2142 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2143 if (retval) {
2144 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2145 goto out_remove_host;
2146 }
2147
2148 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2149 twa_device_extension_count++;
2150
2151 /* Re-enable interrupts on the card */
2152 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2153
2154 /* Finally, scan the host */
2155 scsi_scan_host(host);
2156
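	/* The "twa" character device is registered only once, on the first
	   successful probe; additional controllers share the same major
	   number and are addressed by minor number (see twa_chrdev_ioctl). */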
2157 if (twa_major == -1) {
2158 if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2159 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2160 }
2161 return 0;
2162
2163out_remove_host:
2164 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2165 pci_disable_msi(pdev);
2166 scsi_remove_host(host);
2167out_iounmap:
2168 iounmap(tw_dev->base_addr);
2169out_release_mem_region:
2170 pci_release_regions(pdev);
2171out_free_device_extension:
2172 twa_free_device_extension(tw_dev);
2173 scsi_host_put(host);
2174out_disable_device:
2175 pci_disable_device(pdev);
2176
2177 return retval;
2178} /* End twa_probe() */
2179
2180/* This function is called to remove a device */
2181static void twa_remove(struct pci_dev *pdev)
2182{
2183 struct Scsi_Host *host = pci_get_drvdata(pdev);
2184 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2185
2186 scsi_remove_host(tw_dev->host);
2187
2188 /* Unregister character device */
2189 if (twa_major >= 0) {
2190 unregister_chrdev(twa_major, "twa");
2191 twa_major = -1;
2192 }
2193
2194 /* Shutdown the card */
2195 __twa_shutdown(tw_dev);
2196
2197 /* Disable MSI if enabled */
2198 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2199 pci_disable_msi(pdev);
2200
2201 /* Free IO remapping */
2202 iounmap(tw_dev->base_addr);
2203
2204 /* Free up the mem region */
2205 pci_release_regions(pdev);
2206
2207 /* Free up device extension resources */
2208 twa_free_device_extension(tw_dev);
2209
2210 scsi_host_put(tw_dev->host);
2211 pci_disable_device(pdev);
2212 twa_device_extension_count--;
2213} /* End twa_remove() */
2214
2215#ifdef CONFIG_PM
2216/* This function is called on PCI suspend */
2217static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2218{
2219 struct Scsi_Host *host = pci_get_drvdata(pdev);
2220 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2221
2222 printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2223
2224 TW_DISABLE_INTERRUPTS(tw_dev);
2225 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2226
2227 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2228 pci_disable_msi(pdev);
2229
2230 /* Tell the card we are shutting down */
2231 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2232 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2233 } else {
2234 printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2235 }
2236 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2237
2238 pci_save_state(pdev);
2239 pci_disable_device(pdev);
2240 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2241
2242 return 0;
2243} /* End twa_suspend() */
2244
2245/* This function is called on PCI resume */
2246static int twa_resume(struct pci_dev *pdev)
2247{
2248 int retval = 0;
2249 struct Scsi_Host *host = pci_get_drvdata(pdev);
2250 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2251
2252 printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2253 pci_set_power_state(pdev, PCI_D0);
2254 pci_enable_wake(pdev, PCI_D0, 0);
2255 pci_restore_state(pdev);
2256
2257 retval = pci_enable_device(pdev);
2258 if (retval) {
2259 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2260 return retval;
2261 }
2262
2263 pci_set_master(pdev);
2264 pci_try_set_mwi(pdev);
2265
2266 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2267 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2268 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2269 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2270 TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2271 retval = -ENODEV;
2272 goto out_disable_device;
2273 }
2274
2275 /* Initialize the card */
2276 if (twa_reset_sequence(tw_dev, 0)) {
2277 retval = -ENODEV;
2278 goto out_disable_device;
2279 }
2280
2281 /* Now setup the interrupt handler */
2282 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2283 if (retval) {
2284 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2285 retval = -ENODEV;
2286 goto out_disable_device;
2287 }
2288
2289	/* Re-enable MSI if it was in use before suspend */
2290 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2291 pci_enable_msi(pdev);
2292
2293 /* Re-enable interrupts on the card */
2294 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2295
2296 printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2297 return 0;
2298
2299out_disable_device:
2300 scsi_remove_host(host);
2301 pci_disable_device(pdev);
2302
2303 return retval;
2304} /* End twa_resume() */
2305#endif
2306
2307/* PCI Devices supported by this driver */
2308static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2309 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2310 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2311 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2312 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2313 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2314 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2315 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2316 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2317 { }
2318};
2319MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2320
2321/* pci_driver initializer */
2322static struct pci_driver twa_driver = {
2323 .name = "3w-9xxx",
2324 .id_table = twa_pci_tbl,
2325 .probe = twa_probe,
2326 .remove = twa_remove,
2327#ifdef CONFIG_PM
2328 .suspend = twa_suspend,
2329 .resume = twa_resume,
2330#endif
2331 .shutdown = twa_shutdown
2332};
2333
2334/* This function is called on driver initialization */
2335static int __init twa_init(void)
2336{
2337 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2338
2339 return pci_register_driver(&twa_driver);
2340} /* End twa_init() */
2341
2342/* This function is called on driver exit */
2343static void __exit twa_exit(void)
2344{
2345 pci_unregister_driver(&twa_driver);
2346} /* End twa_exit() */
2347
2348module_init(twa_init);
2349module_exit(twa_exit);
2350
1/*
2 3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
3
4 Written By: Adam Radford <aradford@gmail.com>
5 Modifications By: Tom Couch
6
7 Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
8 Copyright (C) 2010 LSI Corporation.
9
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation; version 2 of the License.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 NO WARRANTY
20 THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
21 CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
22 LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
23 MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
24 solely responsible for determining the appropriateness of using and
25 distributing the Program and assumes all risks associated with its
26 exercise of rights under this Agreement, including but not limited to
27 the risks and costs of program errors, damage to or loss of data,
28 programs or equipment, and unavailability or interruption of operations.
29
30 DISCLAIMER OF LIABILITY
31 NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
32 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
34 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
35 TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
36 USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
37 HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
38
39 You should have received a copy of the GNU General Public License
40 along with this program; if not, write to the Free Software
41 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
42
43 Bugs/Comments/Suggestions should be mailed to:
44 aradford@gmail.com
45
46 Note: This version of the driver does not contain a bundled firmware
47 image.
48
49 History
50 -------
51 2.26.02.000 - Driver cleanup for kernel submission.
52 2.26.02.001 - Replace schedule_timeout() calls with msleep().
53 2.26.02.002 - Add support for PAE mode.
54 Add lun support.
55 Fix twa_remove() to free irq handler/unregister_chrdev()
56 before shutting down card.
57 Change to new 'change_queue_depth' api.
58 Fix 'handled=1' ISR usage, remove bogus IRQ check.
59 Remove un-needed eh_abort handler.
60 Add support for embedded firmware error strings.
61 2.26.02.003 - Correctly handle single sgl's with use_sg=1.
62 2.26.02.004 - Add support for 9550SX controllers.
63 2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
64 2.26.02.006 - Fix 9550SX pchip reset timeout.
65 Add big endian support.
66 2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
67 2.26.02.008 - Free irq handler in __twa_shutdown().
68 Serialize reset code.
69 Add support for 9650SE controllers.
70 2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
71 2.26.02.010 - Add support for 9690SA controllers.
72 2.26.02.011 - Increase max AENs drained to 256.
73 Add MSI support and "use_msi" module parameter.
74 Fix bug in twa_get_param() on 4GB+.
75 Use pci_resource_len() for ioremap().
76 2.26.02.012 - Add power management support.
77 2.26.02.013 - Fix bug in twa_load_sgl().
78 2.26.02.014 - Force 60 second timeout default.
79*/
80
81#include <linux/module.h>
82#include <linux/reboot.h>
83#include <linux/spinlock.h>
84#include <linux/interrupt.h>
85#include <linux/moduleparam.h>
86#include <linux/errno.h>
87#include <linux/types.h>
88#include <linux/delay.h>
89#include <linux/pci.h>
90#include <linux/time.h>
91#include <linux/mutex.h>
92#include <linux/slab.h>
93#include <asm/io.h>
94#include <asm/irq.h>
95#include <linux/uaccess.h>
96#include <scsi/scsi.h>
97#include <scsi/scsi_host.h>
98#include <scsi/scsi_tcq.h>
99#include <scsi/scsi_cmnd.h>
100#include "3w-9xxx.h"
101
102/* Globals */
103#define TW_DRIVER_VERSION "2.26.02.014"
104static DEFINE_MUTEX(twa_chrdev_mutex);
105static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
106static unsigned int twa_device_extension_count;
107static int twa_major = -1;
108extern struct timezone sys_tz;
109
110/* Module parameters */
111MODULE_AUTHOR ("LSI");
112MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
113MODULE_LICENSE("GPL");
114MODULE_VERSION(TW_DRIVER_VERSION);
115
116static int use_msi = 0;
117module_param(use_msi, int, S_IRUGO);
118MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts. Default: 0");
119
120/* Function prototypes */
121static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
122static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
123static char *twa_aen_severity_lookup(unsigned char severity_code);
124static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
125static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
126static int twa_chrdev_open(struct inode *inode, struct file *file);
127static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
128static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
129static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
130static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
131 u32 set_features, unsigned short current_fw_srl,
132 unsigned short current_fw_arch_id,
133 unsigned short current_fw_branch,
134 unsigned short current_fw_build,
135 unsigned short *fw_on_ctlr_srl,
136 unsigned short *fw_on_ctlr_arch_id,
137 unsigned short *fw_on_ctlr_branch,
138 unsigned short *fw_on_ctlr_build,
139 u32 *init_connect_result);
140static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
141static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
142static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
143static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
144static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
145static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
146static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
147static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
148static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
149
150/* Functions */
151
152/* Show some statistics about the card */
153static ssize_t twa_show_stats(struct device *dev,
154 struct device_attribute *attr, char *buf)
155{
156 struct Scsi_Host *host = class_to_shost(dev);
157 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
158 unsigned long flags = 0;
159 ssize_t len;
160
161 spin_lock_irqsave(tw_dev->host->host_lock, flags);
162 len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
163 "Current commands posted: %4d\n"
164 "Max commands posted: %4d\n"
165 "Current pending commands: %4d\n"
166 "Max pending commands: %4d\n"
167 "Last sgl length: %4d\n"
168 "Max sgl length: %4d\n"
169 "Last sector count: %4d\n"
170 "Max sector count: %4d\n"
171 "SCSI Host Resets: %4d\n"
172 "AEN's: %4d\n",
173 TW_DRIVER_VERSION,
174 tw_dev->posted_request_count,
175 tw_dev->max_posted_request_count,
176 tw_dev->pending_request_count,
177 tw_dev->max_pending_request_count,
178 tw_dev->sgl_entries,
179 tw_dev->max_sgl_entries,
180 tw_dev->sector_count,
181 tw_dev->max_sector_count,
182 tw_dev->num_resets,
183 tw_dev->aen_count);
184 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
185 return len;
186} /* End twa_show_stats() */
187
188/* Create sysfs 'stats' entry */
189static struct device_attribute twa_host_stats_attr = {
190 .attr = {
191 .name = "stats",
192 .mode = S_IRUGO,
193 },
194 .show = twa_show_stats
195};
196
197/* Host attributes initializer */
198static struct device_attribute *twa_host_attrs[] = {
199 &twa_host_stats_attr,
200 NULL,
201};
202
203/* File operations struct for character device */
204static const struct file_operations twa_fops = {
205 .owner = THIS_MODULE,
206 .unlocked_ioctl = twa_chrdev_ioctl,
207 .open = twa_chrdev_open,
208 .release = NULL,
209 .llseek = noop_llseek,
210};
211
212/*
213 * The controllers use an inline buffer instead of a mapped SGL for small,
214 * single entry buffers. Note that we treat a zero-length transfer like
215 * a mapped SGL.
216 */
217static bool twa_command_mapped(struct scsi_cmnd *cmd)
218{
219 return scsi_sg_count(cmd) != 1 ||
220 scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
221}
222
223/* This function will complete an aen request from the isr */
224static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
225{
226 TW_Command_Full *full_command_packet;
227 TW_Command *command_packet;
228 TW_Command_Apache_Header *header;
229 unsigned short aen;
230 int retval = 1;
231
232 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
233 tw_dev->posted_request_count--;
234 aen = le16_to_cpu(header->status_block.error);
235 full_command_packet = tw_dev->command_packet_virt[request_id];
236 command_packet = &full_command_packet->command.oldcommand;
237
238 /* First check for internal completion of set param for time sync */
239 if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
240 /* Keep reading the queue in case there are more aen's */
241 if (twa_aen_read_queue(tw_dev, request_id))
242 goto out2;
243 else {
244 retval = 0;
245 goto out;
246 }
247 }
248
249 switch (aen) {
250 case TW_AEN_QUEUE_EMPTY:
251 /* Quit reading the queue if this is the last one */
252 break;
253 case TW_AEN_SYNC_TIME_WITH_HOST:
254 twa_aen_sync_time(tw_dev, request_id);
255 retval = 0;
256 goto out;
257 default:
258 twa_aen_queue_event(tw_dev, header);
259
260 /* If there are more aen's, keep reading the queue */
261 if (twa_aen_read_queue(tw_dev, request_id))
262 goto out2;
263 else {
264 retval = 0;
265 goto out;
266 }
267 }
268 retval = 0;
269out2:
270 tw_dev->state[request_id] = TW_S_COMPLETED;
271 twa_free_request_id(tw_dev, request_id);
272 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
273out:
274 return retval;
275} /* End twa_aen_complete() */
276
277/* This function will drain the aen queue */
278static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
279{
280 int request_id = 0;
281 char cdb[TW_MAX_CDB_LEN];
282 TW_SG_Entry sglist[1];
283 int finished = 0, count = 0;
284 TW_Command_Full *full_command_packet;
285 TW_Command_Apache_Header *header;
286 unsigned short aen;
287 int first_reset = 0, queue = 0, retval = 1;
288
289 if (no_check_reset)
290 first_reset = 0;
291 else
292 first_reset = 1;
293
294 full_command_packet = tw_dev->command_packet_virt[request_id];
295 memset(full_command_packet, 0, sizeof(TW_Command_Full));
296
297 /* Initialize cdb */
298 memset(&cdb, 0, TW_MAX_CDB_LEN);
299 cdb[0] = REQUEST_SENSE; /* opcode */
300 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
301
302 /* Initialize sglist */
303 memset(&sglist, 0, sizeof(TW_SG_Entry));
304 sglist[0].length = TW_SECTOR_SIZE;
305 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
306
307 if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
308 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
309 goto out;
310 }
311
312 /* Mark internal command */
313 tw_dev->srb[request_id] = NULL;
314
315 do {
316 /* Send command to the board */
317 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
318 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
319 goto out;
320 }
321
322 /* Now poll for completion */
323 if (twa_poll_response(tw_dev, request_id, 30)) {
324 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
325 tw_dev->posted_request_count--;
326 goto out;
327 }
328
329 tw_dev->posted_request_count--;
330 header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
331 aen = le16_to_cpu(header->status_block.error);
332 queue = 0;
333 count++;
334
335 switch (aen) {
336 case TW_AEN_QUEUE_EMPTY:
337 if (first_reset != 1)
338 goto out;
339 else
340 finished = 1;
341 break;
342 case TW_AEN_SOFT_RESET:
343 if (first_reset == 0)
344 first_reset = 1;
345 else
346 queue = 1;
347 break;
348 case TW_AEN_SYNC_TIME_WITH_HOST:
349 break;
350 default:
351 queue = 1;
352 }
353
354 /* Now queue an event info */
355 if (queue)
356 twa_aen_queue_event(tw_dev, header);
357 } while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
358
359 if (count == TW_MAX_AEN_DRAIN)
360 goto out;
361
362 retval = 0;
363out:
364 tw_dev->state[request_id] = TW_S_INITIAL;
365 return retval;
366} /* End twa_aen_drain_queue() */
367
368/* This function will queue an event */
369static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
370{
371 u32 local_time;
372 TW_Event *event;
373 unsigned short aen;
374 char host[16];
375 char *error_str;
376
377 tw_dev->aen_count++;
378
379 /* Fill out event info */
380 event = tw_dev->event_queue[tw_dev->error_index];
381
382 /* Check for clobber */
383 host[0] = '\0';
384 if (tw_dev->host) {
385 sprintf(host, " scsi%d:", tw_dev->host->host_no);
386 if (event->retrieved == TW_AEN_NOT_RETRIEVED)
387 tw_dev->aen_clobber = 1;
388 }
389
390 aen = le16_to_cpu(header->status_block.error);
391 memset(event, 0, sizeof(TW_Event));
392
393 event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
394 /* event->time_stamp_sec overflows in y2106 */
395 local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
396 event->time_stamp_sec = local_time;
397 event->aen_code = aen;
398 event->retrieved = TW_AEN_NOT_RETRIEVED;
399 event->sequence_id = tw_dev->error_sequence_id;
400 tw_dev->error_sequence_id++;
401
402	/* Check for an embedded error string (firmware may append a second NUL-terminated string right after err_specific_desc) */
403 error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
404
405 header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
406 event->parameter_len = strlen(header->err_specific_desc);
407 memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
408 if (event->severity != TW_AEN_SEVERITY_DEBUG)
409 printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
410 host,
411 twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
412 TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
413 error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
414 header->err_specific_desc);
415 else
416 tw_dev->aen_count--;
417
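	/* Advance the circular event queue; once it has wrapped,
	   error_index always points at the oldest retained event, which the
	   character-device event retrieval ioctls depend on. */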
418 if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
419 tw_dev->event_queue_wrapped = 1;
420 tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
421} /* End twa_aen_queue_event() */
422
423/* This function will read the aen queue from the isr */
424static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
425{
426 char cdb[TW_MAX_CDB_LEN];
427 TW_SG_Entry sglist[1];
428 TW_Command_Full *full_command_packet;
429 int retval = 1;
430
431 full_command_packet = tw_dev->command_packet_virt[request_id];
432 memset(full_command_packet, 0, sizeof(TW_Command_Full));
433
434 /* Initialize cdb */
435 memset(&cdb, 0, TW_MAX_CDB_LEN);
436 cdb[0] = REQUEST_SENSE; /* opcode */
437 cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
438
439 /* Initialize sglist */
440 memset(&sglist, 0, sizeof(TW_SG_Entry));
441 sglist[0].length = TW_SECTOR_SIZE;
442 sglist[0].address = tw_dev->generic_buffer_phys[request_id];
443
444 /* Mark internal command */
445 tw_dev->srb[request_id] = NULL;
446
447 /* Now post the command packet */
448 if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
449 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
450 goto out;
451 }
452 retval = 0;
453out:
454 return retval;
455} /* End twa_aen_read_queue() */
456
457/* This function will look up an AEN severity string */
458static char *twa_aen_severity_lookup(unsigned char severity_code)
459{
460 char *retval = NULL;
461
462 if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
463 (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
464 goto out;
465
466 retval = twa_aen_severity_table[severity_code];
467out:
468 return retval;
469} /* End twa_aen_severity_lookup() */
470
471/* This function will sync firmware time with the host time */
472static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
473{
474 u32 schedulertime;
475 TW_Command_Full *full_command_packet;
476 TW_Command *command_packet;
477 TW_Param_Apache *param;
478 time64_t local_time;
479
480 /* Fill out the command packet */
481 full_command_packet = tw_dev->command_packet_virt[request_id];
482 memset(full_command_packet, 0, sizeof(TW_Command_Full));
483 command_packet = &full_command_packet->command.oldcommand;
484 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
485 command_packet->request_id = request_id;
486 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
487 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
488 command_packet->size = TW_COMMAND_SIZE;
489 command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
490
491 /* Setup the param */
492 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
493 memset(param, 0, TW_SECTOR_SIZE);
494 param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
495 param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
496 param->parameter_size_bytes = cpu_to_le16(4);
497
498	/* Convert system time in UTC to local time seconds since last
499	   Sunday 12:00AM: the Unix epoch fell on a Thursday, so shift back 3 days (3 * 86400) and take the remainder modulo 604800 (seconds per week) */
500 local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
501 div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
502 schedulertime = cpu_to_le32(schedulertime % 604800);
503
504 memcpy(param->data, &schedulertime, sizeof(u32));
505
506 /* Mark internal command */
507 tw_dev->srb[request_id] = NULL;
508
509 /* Now post the command */
510 twa_post_command_packet(tw_dev, request_id, 1);
511} /* End twa_aen_sync_time() */
512
513/* This function will allocate memory and check if it is correctly aligned */
514static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
515{
516 int i;
517 dma_addr_t dma_handle;
518 unsigned long *cpu_addr;
519 int retval = 1;
520
521 cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
522 if (!cpu_addr) {
523 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
524 goto out;
525 }
526
527 if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
528 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
529 pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
530 goto out;
531 }
532
533 memset(cpu_addr, 0, size*TW_Q_LENGTH);
534
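	/* Slice the single coherent allocation into TW_Q_LENGTH per-request
	   pieces: 'which' == 0 fills the command packet pool, 'which' == 1
	   fills the generic (AEN/parameter/bounce) buffer pool. */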
535 for (i = 0; i < TW_Q_LENGTH; i++) {
536 switch(which) {
537 case 0:
538 tw_dev->command_packet_phys[i] = dma_handle+(i*size);
539 tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
540 break;
541 case 1:
542 tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
543 tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
544 break;
545 }
546 }
547 retval = 0;
548out:
549 return retval;
550} /* End twa_allocate_memory() */
551
552/* This function will check the status register for unexpected bits */
553static int twa_check_bits(u32 status_reg_value)
554{
555 int retval = 1;
556
557 if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
558 goto out;
559 if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
560 goto out;
561
562 retval = 0;
563out:
564 return retval;
565} /* End twa_check_bits() */
566
567/* This function will check the srl and decide if we are compatible */
568static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
569{
570 int retval = 1;
571 unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
572 unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
573 u32 init_connect_result = 0;
574
575 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
576 TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
577 TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
578 TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
579 &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
580 &fw_on_ctlr_build, &init_connect_result)) {
581 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
582 goto out;
583 }
584
585 tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
586 tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
587 tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
588
589 /* Try base mode compatibility */
590 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
591 if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
592 TW_EXTENDED_INIT_CONNECT,
593 TW_BASE_FW_SRL, TW_9000_ARCH_ID,
594 TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
595 &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
596 &fw_on_ctlr_branch, &fw_on_ctlr_build,
597 &init_connect_result)) {
598 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
599 goto out;
600 }
601 if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
602 if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
603 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
604 } else {
605 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
606 }
607 goto out;
608 }
609 tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
610 tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
611 tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
612 }
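	/* The negotiated working_srl is used later to gate features, e.g.
	   twa_scsi_queue_lck() rejects non-zero LUNs when it is below
	   TW_FW_SRL_LUNS_SUPPORTED. */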
613
614 /* Load rest of compatibility struct */
615 strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
616 sizeof(tw_dev->tw_compat_info.driver_version));
617 tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
618 tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
619 tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
620 tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
621 tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
622 tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
623 tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
624 tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
625 tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
626
627 retval = 0;
628out:
629 return retval;
630} /* End twa_check_srl() */
631
632/* This function handles ioctl for the character device */
633static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
634{
635 struct inode *inode = file_inode(file);
636 long timeout;
637 unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
638 dma_addr_t dma_handle;
639 int request_id = 0;
640 unsigned int sequence_id = 0;
641 unsigned char event_index, start_index;
642 TW_Ioctl_Driver_Command driver_command;
643 TW_Ioctl_Buf_Apache *tw_ioctl;
644 TW_Lock *tw_lock;
645 TW_Command_Full *full_command_packet;
646 TW_Compatibility_Info *tw_compat_info;
647 TW_Event *event;
648 ktime_t current_time;
649 TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
650 int retval = TW_IOCTL_ERROR_OS_EFAULT;
651 void __user *argp = (void __user *)arg;
652
653 mutex_lock(&twa_chrdev_mutex);
654
655 /* Only let one of these through at a time */
656 if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
657 retval = TW_IOCTL_ERROR_OS_EINTR;
658 goto out;
659 }
660
661 /* First copy down the driver command */
662 if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
663 goto out2;
664
665 /* Check data buffer size */
666 if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
667 retval = TW_IOCTL_ERROR_OS_EINVAL;
668 goto out2;
669 }
670
671	/* Hardware can only do transfers in multiples of 512 bytes */
672 data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
673
674 /* Now allocate ioctl buf memory */
675 cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
676 if (!cpu_addr) {
677 retval = TW_IOCTL_ERROR_OS_ENOMEM;
678 goto out2;
679 }
680
681 tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
682
683 /* Now copy down the entire ioctl */
684 if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
685 goto out3;
686
687 /* See which ioctl we are doing */
688 switch (cmd) {
689 case TW_IOCTL_FIRMWARE_PASS_THROUGH:
690 spin_lock_irqsave(tw_dev->host->host_lock, flags);
691 twa_get_request_id(tw_dev, &request_id);
692
693 /* Flag internal command */
694 tw_dev->srb[request_id] = NULL;
695
696 /* Flag chrdev ioctl */
697 tw_dev->chrdev_request_id = request_id;
698
699 full_command_packet = &tw_ioctl->firmware_command;
700
701 /* Load request id and sglist for both command types */
702 twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
703
704 memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
705
706 /* Now post the command packet to the controller */
707 twa_post_command_packet(tw_dev, request_id, 1);
708 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
709
710 timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
711
712 /* Now wait for command to complete */
713 timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
714
715 /* We timed out, and didn't get an interrupt */
716 if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
717 /* Now we need to reset the board */
718 printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
719 tw_dev->host->host_no, TW_DRIVER, 0x37,
720 cmd);
721 retval = TW_IOCTL_ERROR_OS_EIO;
722 twa_reset_device_extension(tw_dev);
723 goto out3;
724 }
725
726 /* Now copy in the command packet response */
727 memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
728
729 /* Now complete the io */
730 spin_lock_irqsave(tw_dev->host->host_lock, flags);
731 tw_dev->posted_request_count--;
732 tw_dev->state[request_id] = TW_S_COMPLETED;
733 twa_free_request_id(tw_dev, request_id);
734 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
735 break;
736 case TW_IOCTL_GET_COMPATIBILITY_INFO:
737 tw_ioctl->driver_command.status = 0;
738 /* Copy compatibility struct into ioctl data buffer */
739 tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
740 memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
741 break;
742 case TW_IOCTL_GET_LAST_EVENT:
743 if (tw_dev->event_queue_wrapped) {
744 if (tw_dev->aen_clobber) {
745 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
746 tw_dev->aen_clobber = 0;
747 } else
748 tw_ioctl->driver_command.status = 0;
749 } else {
750 if (!tw_dev->error_index) {
751 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
752 break;
753 }
754 tw_ioctl->driver_command.status = 0;
755 }
756 event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
757 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
758 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
759 break;
760 case TW_IOCTL_GET_FIRST_EVENT:
761 if (tw_dev->event_queue_wrapped) {
762 if (tw_dev->aen_clobber) {
763 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
764 tw_dev->aen_clobber = 0;
765 } else
766 tw_ioctl->driver_command.status = 0;
767 event_index = tw_dev->error_index;
768 } else {
769 if (!tw_dev->error_index) {
770 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
771 break;
772 }
773 tw_ioctl->driver_command.status = 0;
774 event_index = 0;
775 }
776 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
777 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
778 break;
779 case TW_IOCTL_GET_NEXT_EVENT:
780 event = (TW_Event *)tw_ioctl->data_buffer;
781 sequence_id = event->sequence_id;
782 tw_ioctl->driver_command.status = 0;
783
784 if (tw_dev->event_queue_wrapped) {
785 if (tw_dev->aen_clobber) {
786 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
787 tw_dev->aen_clobber = 0;
788 }
789 start_index = tw_dev->error_index;
790 } else {
791 if (!tw_dev->error_index) {
792 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
793 break;
794 }
795 start_index = 0;
796 }
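		/* start_index is the oldest retained event; offset the caller's
		   sequence_id from it to find the slot that should hold the next
		   event, then verify below that the slot really is newer. */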
797 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
798
799 if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
800 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
801 tw_dev->aen_clobber = 1;
802 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
803 break;
804 }
805 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
806 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
807 break;
808 case TW_IOCTL_GET_PREVIOUS_EVENT:
809 event = (TW_Event *)tw_ioctl->data_buffer;
810 sequence_id = event->sequence_id;
811 tw_ioctl->driver_command.status = 0;
812
813 if (tw_dev->event_queue_wrapped) {
814 if (tw_dev->aen_clobber) {
815 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
816 tw_dev->aen_clobber = 0;
817 }
818 start_index = tw_dev->error_index;
819 } else {
820 if (!tw_dev->error_index) {
821 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
822 break;
823 }
824 start_index = 0;
825 }
826 event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
827
828 if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
829 if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
830 tw_dev->aen_clobber = 1;
831 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
832 break;
833 }
834 memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
835 tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
836 break;
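	/*
	 * GET_LOCK/RELEASE_LOCK implement an advisory, self-expiring lock for
	 * management tools: the lock is granted when it is free, when the
	 * caller sets force_flag, or when the previous holder's timeout_msec
	 * has already elapsed, so a crashed application cannot wedge the
	 * ioctl path.  On contention the caller is told how many milliseconds
	 * remain before the current lock expires.
	 */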
837 case TW_IOCTL_GET_LOCK:
838 tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
839 current_time = ktime_get();
840
841 if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
842 ktime_after(current_time, tw_dev->ioctl_time)) {
843 tw_dev->ioctl_sem_lock = 1;
844 tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
845 tw_ioctl->driver_command.status = 0;
846 tw_lock->time_remaining_msec = tw_lock->timeout_msec;
847 } else {
848 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
849 tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
850 }
851 break;
852 case TW_IOCTL_RELEASE_LOCK:
853 if (tw_dev->ioctl_sem_lock == 1) {
854 tw_dev->ioctl_sem_lock = 0;
855 tw_ioctl->driver_command.status = 0;
856 } else {
857 tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
858 }
859 break;
860 default:
861 retval = TW_IOCTL_ERROR_OS_ENOTTY;
862 goto out3;
863 }
864
865 /* Now copy the entire response to userspace */
866 if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
867 retval = 0;
868out3:
869 /* Now free ioctl buf memory */
870 dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
871out2:
872 mutex_unlock(&tw_dev->ioctl_lock);
873out:
874 mutex_unlock(&twa_chrdev_mutex);
875 return retval;
876} /* End twa_chrdev_ioctl() */
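/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * userspace management tool might drive the ioctl path above, assuming the
 * TW_IOCTL_* command codes and the TW_Ioctl_Buf_Apache/TW_Compatibility_Info
 * layouts from 3w-9xxx.h, and a device node for the dynamically registered
 * "twa" character major (typically /dev/twa0 for the first card):
 *
 *   int fd = open("/dev/twa0", O_RDWR);   // minor number selects the card
 *   TW_Ioctl_Buf_Apache *buf =
 *           calloc(1, sizeof(*buf) + sizeof(TW_Compatibility_Info) - 1);
 *   buf->driver_command.buffer_length = sizeof(TW_Compatibility_Info);
 *   if (ioctl(fd, TW_IOCTL_GET_COMPATIBILITY_INFO, buf) == 0 &&
 *       buf->driver_command.status == 0) {
 *           // buf->data_buffer now holds the TW_Compatibility_Info that
 *           // the TW_IOCTL_GET_COMPATIBILITY_INFO case copied in above
 *   }
 *
 * The event and lock ioctls handled above follow the same call pattern.
 */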
877
878/* This function handles open for the character device */
879/* NOTE that this function will race with remove. */
880static int twa_chrdev_open(struct inode *inode, struct file *file)
881{
882 unsigned int minor_number;
883 int retval = TW_IOCTL_ERROR_OS_ENODEV;
884
885 minor_number = iminor(inode);
886 if (minor_number >= twa_device_extension_count)
887 goto out;
888 retval = 0;
889out:
890 return retval;
891} /* End twa_chrdev_open() */
892
893/* This function will print readable messages from status register errors */
894static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
895{
896 int retval = 1;
897
898 /* Check for various error conditions and handle them appropriately */
899 if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
900 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
901 writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
902 }
903
904 if (status_reg_value & TW_STATUS_PCI_ABORT) {
905 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
906 writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
907 pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
908 }
909
910 if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
911 if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
912 (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
913 (!test_bit(TW_IN_RESET, &tw_dev->flags)))
914 TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
915 writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
916 }
917
918 if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
919 if (tw_dev->reset_print == 0) {
920 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
921 tw_dev->reset_print = 1;
922 }
923 goto out;
924 }
925 retval = 0;
926out:
927 return retval;
928} /* End twa_decode_bits() */
929
930/* This function will empty the response queue */
931static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
932{
933 u32 status_reg_value, response_que_value;
934 int count = 0, retval = 1;
935
936 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
937
938 while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
939 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
940 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
941 count++;
942 }
943 if (count == TW_MAX_RESPONSE_DRAIN)
944 goto out;
945
946 retval = 0;
947out:
948 return retval;
949} /* End twa_empty_response_queue() */
950
951/* This function will clear the pchip/response queue on 9550SX */
952static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
953{
954 u32 response_que_value = 0;
955 unsigned long before;
956 int retval = 1;
957
958 if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
959 before = jiffies;
960 while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
961 response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
962 msleep(1);
963 if (time_after(jiffies, before + HZ * 30))
964 goto out;
965 }
966 /* P-chip settle time */
967 msleep(500);
968 retval = 0;
969 } else
970 retval = 0;
971out:
972 return retval;
973} /* End twa_empty_response_queue_large() */
974
975/* This function passes sense keys from firmware to scsi layer */
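/*
 * The firmware packs two consecutive NUL-terminated strings into
 * header.err_specific_desc: the error-specific description first, followed
 * by an optional embedded error string.  When that second string is empty,
 * the printable error name is taken from twa_error_table instead.
 */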
976static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
977{
978 TW_Command_Full *full_command_packet;
979 unsigned short error;
980 int retval = 1;
981 char *error_str;
982
983 full_command_packet = tw_dev->command_packet_virt[request_id];
984
985 /* Check for embedded error string */
986 error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
987
988 /* Don't print error for Logical unit not supported during rollcall */
989 error = le16_to_cpu(full_command_packet->header.status_block.error);
990 if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
991 if (print_host)
992 printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
993 tw_dev->host->host_no,
994 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
995 full_command_packet->header.status_block.error,
996 error_str[0] == '\0' ?
997 twa_string_lookup(twa_error_table,
998 full_command_packet->header.status_block.error) : error_str,
999 full_command_packet->header.err_specific_desc);
1000 else
1001 printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1002 TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1003 full_command_packet->header.status_block.error,
1004 error_str[0] == '\0' ?
1005 twa_string_lookup(twa_error_table,
1006 full_command_packet->header.status_block.error) : error_str,
1007 full_command_packet->header.err_specific_desc);
1008 }
1009
1010 if (copy_sense) {
1011 memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1012 tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1013 retval = TW_ISR_DONT_RESULT;
1014 goto out;
1015 }
1016 retval = 0;
1017out:
1018 return retval;
1019} /* End twa_fill_sense() */
1020
1021/* This function will free up device extension resources */
1022static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1023{
1024 if (tw_dev->command_packet_virt[0])
1025 pci_free_consistent(tw_dev->tw_pci_dev,
1026 sizeof(TW_Command_Full)*TW_Q_LENGTH,
1027 tw_dev->command_packet_virt[0],
1028 tw_dev->command_packet_phys[0]);
1029
1030 if (tw_dev->generic_buffer_virt[0])
1031 pci_free_consistent(tw_dev->tw_pci_dev,
1032 TW_SECTOR_SIZE*TW_Q_LENGTH,
1033 tw_dev->generic_buffer_virt[0],
1034 tw_dev->generic_buffer_phys[0]);
1035
1036 kfree(tw_dev->event_queue[0]);
1037} /* End twa_free_device_extension() */
1038
1039/* This function will free a request id */
1040static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1041{
1042 tw_dev->free_queue[tw_dev->free_tail] = request_id;
1043 tw_dev->state[request_id] = TW_S_FINISHED;
1044 tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1045} /* End twa_free_request_id() */
1046
1047/* This function will get parameter table entries from the firmware */
1048static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1049{
1050 TW_Command_Full *full_command_packet;
1051 TW_Command *command_packet;
1052 TW_Param_Apache *param;
1053 void *retval = NULL;
1054
1055 /* Setup the command packet */
1056 full_command_packet = tw_dev->command_packet_virt[request_id];
1057 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1058 command_packet = &full_command_packet->command.oldcommand;
1059
1060 command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1061 command_packet->size = TW_COMMAND_SIZE;
1062 command_packet->request_id = request_id;
1063 command_packet->byte6_offset.block_count = cpu_to_le16(1);
1064
1065 /* Now setup the param */
1066 param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1067 memset(param, 0, TW_SECTOR_SIZE);
1068 param->table_id = cpu_to_le16(table_id | 0x8000);
1069 param->parameter_id = cpu_to_le16(parameter_id);
1070 param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1071
1072 command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1073 command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1074
1075 /* Post the command packet to the board */
1076 twa_post_command_packet(tw_dev, request_id, 1);
1077
1078 /* Poll for completion */
1079 if (twa_poll_response(tw_dev, request_id, 30))
1080 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1081 else
1082 retval = (void *)&(param->data[0]);
1083
1084 tw_dev->posted_request_count--;
1085 tw_dev->state[request_id] = TW_S_INITIAL;
1086
1087 return retval;
1088} /* End twa_get_param() */
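/*
 * Usage sketch, mirroring the calls made from twa_probe() below.  The
 * returned pointer aliases generic_buffer_virt[request_id], so it is only
 * valid until that request id is reused, and it is NULL when no valid
 * response arrived, so callers should check it before dereferencing:
 *
 *   void *p = twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
 *                           TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH);
 *   int ports = p ? le32_to_cpu(*(int *)p) : 0;
 */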
1089
1090/* This function will assign an available request id */
1091static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1092{
1093 *request_id = tw_dev->free_queue[tw_dev->free_head];
1094 tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1095 tw_dev->state[*request_id] = TW_S_STARTED;
1096} /* End twa_get_request_id() */
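/*
 * Together with twa_free_request_id() above, this forms a simple ring of
 * request ids: free_queue[] holds TW_Q_LENGTH ids, free_head is where the
 * next id is handed out (state -> TW_S_STARTED) and free_tail is where
 * completed ids are returned (state -> TW_S_FINISHED).  After N gets and
 * N frees both indices sit at slot N % TW_Q_LENGTH.
 */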
1097
1098/* This function will send an initconnection command to the controller */
1099static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1100 u32 set_features, unsigned short current_fw_srl,
1101 unsigned short current_fw_arch_id,
1102 unsigned short current_fw_branch,
1103 unsigned short current_fw_build,
1104 unsigned short *fw_on_ctlr_srl,
1105 unsigned short *fw_on_ctlr_arch_id,
1106 unsigned short *fw_on_ctlr_branch,
1107 unsigned short *fw_on_ctlr_build,
1108 u32 *init_connect_result)
1109{
1110 TW_Command_Full *full_command_packet;
1111 TW_Initconnect *tw_initconnect;
1112 int request_id = 0, retval = 1;
1113
1114 /* Initialize InitConnection command packet */
1115 full_command_packet = tw_dev->command_packet_virt[request_id];
1116 memset(full_command_packet, 0, sizeof(TW_Command_Full));
1117 full_command_packet->header.header_desc.size_header = 128;
1118
1119 tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1120 tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1121 tw_initconnect->request_id = request_id;
1122 tw_initconnect->message_credits = cpu_to_le16(message_credits);
1123 tw_initconnect->features = set_features;
1124
1125 /* Turn on 64-bit sgl support if we need to */
1126 tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1127
1128 tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1129
1130 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1131 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1132 tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1133 tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1134 tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1135 tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1136 } else
1137 tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1138
1139 /* Send command packet to the board */
1140 twa_post_command_packet(tw_dev, request_id, 1);
1141
1142 /* Poll for completion */
1143 if (twa_poll_response(tw_dev, request_id, 30)) {
1144 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1145 } else {
1146 if (set_features & TW_EXTENDED_INIT_CONNECT) {
1147 *fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1148 *fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1149 *fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1150 *fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1151 *init_connect_result = le32_to_cpu(tw_initconnect->result);
1152 }
1153 retval = 0;
1154 }
1155
1156 tw_dev->posted_request_count--;
1157 tw_dev->state[request_id] = TW_S_INITIAL;
1158
1159 return retval;
1160} /* End twa_initconnection() */
1161
1162/* This function will initialize the fields of a device extension */
1163static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1164{
1165 int i, retval = 1;
1166
1167 /* Initialize command packet buffers */
1168 if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1169 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1170 goto out;
1171 }
1172
1173 /* Initialize generic buffer */
1174 if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1175 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1176 goto out;
1177 }
1178
1179 /* Allocate event info space */
1180 tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1181 if (!tw_dev->event_queue[0]) {
1182 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1183 goto out;
1184 }
1185
1186
1187 for (i = 0; i < TW_Q_LENGTH; i++) {
1188 tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1189 tw_dev->free_queue[i] = i;
1190 tw_dev->state[i] = TW_S_INITIAL;
1191 }
1192
1193 tw_dev->pending_head = TW_Q_START;
1194 tw_dev->pending_tail = TW_Q_START;
1195 tw_dev->free_head = TW_Q_START;
1196 tw_dev->free_tail = TW_Q_START;
1197 tw_dev->error_sequence_id = 1;
1198 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1199
1200 mutex_init(&tw_dev->ioctl_lock);
1201 init_waitqueue_head(&tw_dev->ioctl_wqueue);
1202
1203 retval = 0;
1204out:
1205 return retval;
1206} /* End twa_initialize_device_extension() */
1207
1208/* This function is the interrupt service routine */
1209static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1210{
1211 int request_id, error = 0;
1212 u32 status_reg_value;
1213 TW_Response_Queue response_que;
1214 TW_Command_Full *full_command_packet;
1215 TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1216 int handled = 0;
1217
1218 /* Get the per adapter lock */
1219 spin_lock(tw_dev->host->host_lock);
1220
1221 /* Read the registers */
1222 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1223
1224 /* Check if this is our interrupt, otherwise bail */
1225 if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1226 goto twa_interrupt_bail;
1227
1228 handled = 1;
1229
1230 /* If we are resetting, bail */
1231 if (test_bit(TW_IN_RESET, &tw_dev->flags))
1232 goto twa_interrupt_bail;
1233
1234 /* Check controller for errors */
1235 if (twa_check_bits(status_reg_value)) {
1236 if (twa_decode_bits(tw_dev, status_reg_value)) {
1237 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1238 goto twa_interrupt_bail;
1239 }
1240 }
1241
1242 /* Handle host interrupt */
1243 if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1244 TW_CLEAR_HOST_INTERRUPT(tw_dev);
1245
1246 /* Handle attention interrupt */
1247 if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1248 TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1249 if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1250 twa_get_request_id(tw_dev, &request_id);
1251
1252 error = twa_aen_read_queue(tw_dev, request_id);
1253 if (error) {
1254 tw_dev->state[request_id] = TW_S_COMPLETED;
1255 twa_free_request_id(tw_dev, request_id);
1256 clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1257 }
1258 }
1259 }
1260
1261 /* Handle command interrupt */
1262 if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1263 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1264 /* Drain as many pending commands as we can */
1265 while (tw_dev->pending_request_count > 0) {
1266 request_id = tw_dev->pending_queue[tw_dev->pending_head];
1267 if (tw_dev->state[request_id] != TW_S_PENDING) {
1268 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1269 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1270 goto twa_interrupt_bail;
1271 }
1272 if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1273 tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1274 tw_dev->pending_request_count--;
1275 } else {
1276 /* If we get here, we will continue re-posting on the next command interrupt */
1277 break;
1278 }
1279 }
1280 }
1281
1282 /* Handle response interrupt */
1283 if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1284
1285 /* Drain the response queue from the board */
1286 while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1287 /* Complete the response */
1288 response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1289 request_id = TW_RESID_OUT(response_que.response_id);
1290 full_command_packet = tw_dev->command_packet_virt[request_id];
1291 error = 0;
1292 /* Check for command packet errors */
1293 if (full_command_packet->command.newcommand.status != 0) {
1294 if (tw_dev->srb[request_id] != NULL) {
1295 error = twa_fill_sense(tw_dev, request_id, 1, 1);
1296 } else {
1297 /* Skip ioctl error prints */
1298 if (request_id != tw_dev->chrdev_request_id) {
1299 error = twa_fill_sense(tw_dev, request_id, 0, 1);
1300 }
1301 }
1302 }
1303
1304 /* Check for correct state */
1305 if (tw_dev->state[request_id] != TW_S_POSTED) {
1306 if (tw_dev->srb[request_id] != NULL) {
1307 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1308 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1309 goto twa_interrupt_bail;
1310 }
1311 }
1312
1313 /* Check for internal command completion */
1314 if (tw_dev->srb[request_id] == NULL) {
1315 if (request_id != tw_dev->chrdev_request_id) {
1316 if (twa_aen_complete(tw_dev, request_id))
1317 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1318 } else {
1319 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1320 wake_up(&tw_dev->ioctl_wqueue);
1321 }
1322 } else {
1323 struct scsi_cmnd *cmd;
1324
1325 cmd = tw_dev->srb[request_id];
1326
1327 twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1328				/* If there was no error, the command was a success */
1329 if (error == 0) {
1330 cmd->result = (DID_OK << 16);
1331 }
1332
1333 /* If error, command failed */
1334 if (error == 1) {
1335 /* Ask for a host reset */
1336 cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1337 }
1338
1339 /* Report residual bytes for single sgl */
1340 if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1341 if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1342 scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1343 }
1344
1345 /* Now complete the io */
1346 if (twa_command_mapped(cmd))
1347 scsi_dma_unmap(cmd);
1348 cmd->scsi_done(cmd);
1349 tw_dev->state[request_id] = TW_S_COMPLETED;
1350 twa_free_request_id(tw_dev, request_id);
1351 tw_dev->posted_request_count--;
1352 }
1353
1354 /* Check for valid status after each drain */
1355 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1356 if (twa_check_bits(status_reg_value)) {
1357 if (twa_decode_bits(tw_dev, status_reg_value)) {
1358 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1359 goto twa_interrupt_bail;
1360 }
1361 }
1362 }
1363 }
1364
1365twa_interrupt_bail:
1366 spin_unlock(tw_dev->host->host_lock);
1367 return IRQ_RETVAL(handled);
1368} /* End twa_interrupt() */
1369
1370/* This function will load the request id and various sgls for ioctls */
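/*
 * The pae flag below is set on 32-bit kernels built with a 64-bit
 * dma_addr_t (PAE).  When it is set, the sgl pointer computed for 9690SA
 * controllers and the stored old-style command size are each adjusted
 * upward by one 32-bit word.
 */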
1371static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1372{
1373 TW_Command *oldcommand;
1374 TW_Command_Apache *newcommand;
1375 TW_SG_Entry *sgl;
1376 unsigned int pae = 0;
1377
1378 if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1379 pae = 1;
1380
1381 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1382 newcommand = &full_command_packet->command.newcommand;
1383 newcommand->request_id__lunl =
1384 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1385 if (length) {
1386 newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1387 newcommand->sg_list[0].length = cpu_to_le32(length);
1388 }
1389 newcommand->sgl_entries__lunh =
1390 cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1391 } else {
1392 oldcommand = &full_command_packet->command.oldcommand;
1393 oldcommand->request_id = request_id;
1394
1395 if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1396 /* Load the sg list */
1397 if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1398 sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1399 else
1400 sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1401 sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1402 sgl->length = cpu_to_le32(length);
1403
1404 oldcommand->size += pae;
1405 }
1406 }
1407} /* End twa_load_sgl() */
1408
1409/* This function will poll for a response interrupt of a request */
1410static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1411{
1412 int retval = 1, found = 0, response_request_id;
1413 TW_Response_Queue response_queue;
1414 TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1415
1416 if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1417 response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1418 response_request_id = TW_RESID_OUT(response_queue.response_id);
1419 if (request_id != response_request_id) {
1420 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1421 goto out;
1422 }
1423 if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1424 if (full_command_packet->command.newcommand.status != 0) {
1425 /* bad response */
1426 twa_fill_sense(tw_dev, request_id, 0, 0);
1427 goto out;
1428 }
1429 found = 1;
1430 } else {
1431 if (full_command_packet->command.oldcommand.status != 0) {
1432 /* bad response */
1433 twa_fill_sense(tw_dev, request_id, 0, 0);
1434 goto out;
1435 }
1436 found = 1;
1437 }
1438 }
1439
1440 if (found)
1441 retval = 0;
1442out:
1443 return retval;
1444} /* End twa_poll_response() */
1445
1446/* This function will poll the status register for a flag */
1447static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1448{
1449 u32 status_reg_value;
1450 unsigned long before;
1451 int retval = 1;
1452
1453 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1454 before = jiffies;
1455
1456 if (twa_check_bits(status_reg_value))
1457 twa_decode_bits(tw_dev, status_reg_value);
1458
1459 while ((status_reg_value & flag) != flag) {
1460 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1461
1462 if (twa_check_bits(status_reg_value))
1463 twa_decode_bits(tw_dev, status_reg_value);
1464
1465 if (time_after(jiffies, before + HZ * seconds))
1466 goto out;
1467
1468 msleep(50);
1469 }
1470 retval = 0;
1471out:
1472 return retval;
1473} /* End twa_poll_status() */
1474
1475/* This function will poll the status register for disappearance of a flag */
1476static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1477{
1478 u32 status_reg_value;
1479 unsigned long before;
1480 int retval = 1;
1481
1482 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1483 before = jiffies;
1484
1485 if (twa_check_bits(status_reg_value))
1486 twa_decode_bits(tw_dev, status_reg_value);
1487
1488 while ((status_reg_value & flag) != 0) {
1489 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1490 if (twa_check_bits(status_reg_value))
1491 twa_decode_bits(tw_dev, status_reg_value);
1492
1493 if (time_after(jiffies, before + HZ * seconds))
1494 goto out;
1495
1496 msleep(50);
1497 }
1498 retval = 0;
1499out:
1500 return retval;
1501} /* End twa_poll_status_gone() */
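/*
 * twa_poll_status() and twa_poll_status_gone() are the same busy-wait with
 * the test inverted (wait for the flag to appear vs. to clear).  Both use
 * the usual jiffies-deadline idiom, restated here for reference:
 *
 *   unsigned long deadline = jiffies + HZ * seconds;
 *   while (!condition) {
 *           if (time_after(jiffies, deadline))
 *                   return 1;        // timed out
 *           msleep(50);
 *   }
 *   return 0;                        // condition met in time
 */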
1502
1503/* This function will attempt to post a command packet to the board */
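/*
 * Posting rules, as implemented below: 9650SE/9690SA controllers are given
 * the low 32 bits of the command address up front and the high 32 bits
 * only once the status register shows the queue can accept the command.
 * If the command queue is full (or other requests are already pended),
 * internal driver commands are placed on the pending queue and re-posted
 * from the command interrupt handler, while host SCSI commands are bounced
 * back with SCSI_MLQUEUE_HOST_BUSY.
 */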
1504static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1505{
1506 u32 status_reg_value;
1507 dma_addr_t command_que_value;
1508 int retval = 1;
1509
1510 command_que_value = tw_dev->command_packet_phys[request_id];
1511
1512 /* For 9650SE write low 4 bytes first */
1513 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1514 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1515 command_que_value += TW_COMMAND_OFFSET;
1516 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1517 }
1518
1519 status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1520
1521 if (twa_check_bits(status_reg_value))
1522 twa_decode_bits(tw_dev, status_reg_value);
1523
1524 if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1525
1526 /* Only pend internal driver commands */
1527 if (!internal) {
1528 retval = SCSI_MLQUEUE_HOST_BUSY;
1529 goto out;
1530 }
1531
1532 /* Couldn't post the command packet, so we do it later */
1533 if (tw_dev->state[request_id] != TW_S_PENDING) {
1534 tw_dev->state[request_id] = TW_S_PENDING;
1535 tw_dev->pending_request_count++;
1536 if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1537 tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1538 }
1539 tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1540 tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1541 }
1542 TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1543 goto out;
1544 } else {
1545 if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1546 (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1547 /* Now write upper 4 bytes */
1548 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1549 } else {
1550 if (sizeof(dma_addr_t) > 4) {
1551 command_que_value += TW_COMMAND_OFFSET;
1552 writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1553 writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1554 } else {
1555 writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1556 }
1557 }
1558 tw_dev->state[request_id] = TW_S_POSTED;
1559 tw_dev->posted_request_count++;
1560 if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1561 tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1562 }
1563 }
1564 retval = 0;
1565out:
1566 return retval;
1567} /* End twa_post_command_packet() */
1568
1569/* This function will reset a device extension */
1570static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1571{
1572 int i = 0;
1573 int retval = 1;
1574 unsigned long flags = 0;
1575
1576 set_bit(TW_IN_RESET, &tw_dev->flags);
1577 TW_DISABLE_INTERRUPTS(tw_dev);
1578 TW_MASK_COMMAND_INTERRUPT(tw_dev);
1579 spin_lock_irqsave(tw_dev->host->host_lock, flags);
1580
1581 /* Abort all requests that are in progress */
1582 for (i = 0; i < TW_Q_LENGTH; i++) {
1583 if ((tw_dev->state[i] != TW_S_FINISHED) &&
1584 (tw_dev->state[i] != TW_S_INITIAL) &&
1585 (tw_dev->state[i] != TW_S_COMPLETED)) {
1586 if (tw_dev->srb[i]) {
1587 struct scsi_cmnd *cmd = tw_dev->srb[i];
1588
1589 cmd->result = (DID_RESET << 16);
1590 if (twa_command_mapped(cmd))
1591 scsi_dma_unmap(cmd);
1592 cmd->scsi_done(cmd);
1593 }
1594 }
1595 }
1596
1597 /* Reset queues and counts */
1598 for (i = 0; i < TW_Q_LENGTH; i++) {
1599 tw_dev->free_queue[i] = i;
1600 tw_dev->state[i] = TW_S_INITIAL;
1601 }
1602 tw_dev->free_head = TW_Q_START;
1603 tw_dev->free_tail = TW_Q_START;
1604 tw_dev->posted_request_count = 0;
1605 tw_dev->pending_request_count = 0;
1606 tw_dev->pending_head = TW_Q_START;
1607 tw_dev->pending_tail = TW_Q_START;
1608 tw_dev->reset_print = 0;
1609
1610 spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1611
1612 if (twa_reset_sequence(tw_dev, 1))
1613 goto out;
1614
1615 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1616 clear_bit(TW_IN_RESET, &tw_dev->flags);
1617 tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1618
1619 retval = 0;
1620out:
1621 return retval;
1622} /* End twa_reset_device_extension() */
1623
1624/* This function will reset a controller */
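/*
 * Each attempt runs the same sequence -- optional soft reset, clear the
 * 9550SX pchip/response queue, wait for the microcontroller to become
 * ready, drain the response queue, run the SRL compatibility check (which
 * may flash the firmware and force another pass) and finally drain any
 * queued AENs -- and the whole sequence is retried up to
 * TW_MAX_RESET_TRIES times before giving up.
 */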
1625static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1626{
1627 int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1628
1629 while (tries < TW_MAX_RESET_TRIES) {
1630 if (do_soft_reset) {
1631 TW_SOFT_RESET(tw_dev);
1632 /* Clear pchip/response queue on 9550SX */
1633 if (twa_empty_response_queue_large(tw_dev)) {
1634 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1635 do_soft_reset = 1;
1636 tries++;
1637 continue;
1638 }
1639 }
1640
1641 /* Make sure controller is in a good state */
1642 if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1643 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1644 do_soft_reset = 1;
1645 tries++;
1646 continue;
1647 }
1648
1649 /* Empty response queue */
1650 if (twa_empty_response_queue(tw_dev)) {
1651 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1652 do_soft_reset = 1;
1653 tries++;
1654 continue;
1655 }
1656
1657 flashed = 0;
1658
1659 /* Check for compatibility/flash */
1660 if (twa_check_srl(tw_dev, &flashed)) {
1661 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1662 do_soft_reset = 1;
1663 tries++;
1664 continue;
1665 } else {
1666 if (flashed) {
1667 tries++;
1668 continue;
1669 }
1670 }
1671
1672 /* Drain the AEN queue */
1673 if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1674 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1675 do_soft_reset = 1;
1676 tries++;
1677 continue;
1678 }
1679
1680 /* If we got here, controller is in a good state */
1681 retval = 0;
1682 goto out;
1683 }
1684out:
1685 return retval;
1686} /* End twa_reset_sequence() */
1687
1688/* This function returns unit geometry in cylinders/heads/sectors */
1689static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1690{
1691 int heads, sectors, cylinders;
1692 TW_Device_Extension *tw_dev;
1693
1694 tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1695
1696 if (capacity >= 0x200000) {
1697 heads = 255;
1698 sectors = 63;
1699 cylinders = sector_div(capacity, heads * sectors);
1700 } else {
1701 heads = 64;
1702 sectors = 32;
1703 cylinders = sector_div(capacity, heads * sectors);
1704 }
1705
1706 geom[0] = heads;
1707 geom[1] = sectors;
1708 geom[2] = cylinders;
1709
1710 return 0;
1711} /* End twa_scsi_biosparam() */
1712
1713/* This is the new scsi eh reset function */
1714static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1715{
1716 TW_Device_Extension *tw_dev = NULL;
1717 int retval = FAILED;
1718
1719 tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1720
1721 tw_dev->num_resets++;
1722
1723 sdev_printk(KERN_WARNING, SCpnt->device,
1724 "WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1725 TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1726
1727 /* Make sure we are not issuing an ioctl or resetting from ioctl */
1728 mutex_lock(&tw_dev->ioctl_lock);
1729
1730 /* Now reset the card and some of the device extension data */
1731 if (twa_reset_device_extension(tw_dev)) {
1732 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1733 goto out;
1734 }
1735
1736 retval = SUCCESS;
1737out:
1738 mutex_unlock(&tw_dev->ioctl_lock);
1739 return retval;
1740} /* End twa_scsi_eh_reset() */
1741
1742/* This is the main scsi queue function to handle scsi opcodes */
1743static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1744{
1745 int request_id, retval;
1746 TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1747
1748 /* If we are resetting due to timed out ioctl, report as busy */
1749 if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1750 retval = SCSI_MLQUEUE_HOST_BUSY;
1751 goto out;
1752 }
1753
1754 /* Check if this FW supports luns */
1755 if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1756 SCpnt->result = (DID_BAD_TARGET << 16);
1757 done(SCpnt);
1758 retval = 0;
1759 goto out;
1760 }
1761
1762 /* Save done function into scsi_cmnd struct */
1763 SCpnt->scsi_done = done;
1764
1765 /* Get a free request id */
1766 twa_get_request_id(tw_dev, &request_id);
1767
1768 /* Save the scsi command for use by the ISR */
1769 tw_dev->srb[request_id] = SCpnt;
1770
1771 retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1772 switch (retval) {
1773 case SCSI_MLQUEUE_HOST_BUSY:
1774 if (twa_command_mapped(SCpnt))
1775 scsi_dma_unmap(SCpnt);
1776 twa_free_request_id(tw_dev, request_id);
1777 break;
1778 case 1:
1779 SCpnt->result = (DID_ERROR << 16);
1780 if (twa_command_mapped(SCpnt))
1781 scsi_dma_unmap(SCpnt);
1782 done(SCpnt);
1783 tw_dev->state[request_id] = TW_S_COMPLETED;
1784 twa_free_request_id(tw_dev, request_id);
1785 retval = 0;
1786 }
1787out:
1788 return retval;
1789} /* End twa_scsi_queue() */
1790
1791static DEF_SCSI_QCMD(twa_scsi_queue)
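/*
 * DEF_SCSI_QCMD() generates twa_scsi_queue(), which takes the SCSI host
 * lock and then hands the command and its completion callback to
 * twa_scsi_queue_lck() above.
 */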
1792
1793/* This function hands scsi cdb's to the firmware */
1794static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1795{
1796 TW_Command_Full *full_command_packet;
1797 TW_Command_Apache *command_packet;
1798 u32 num_sectors = 0x0;
1799 int i, sg_count;
1800 struct scsi_cmnd *srb = NULL;
1801 struct scatterlist *sglist = NULL, *sg;
1802 int retval = 1;
1803
1804 if (tw_dev->srb[request_id]) {
1805 srb = tw_dev->srb[request_id];
1806 if (scsi_sglist(srb))
1807 sglist = scsi_sglist(srb);
1808 }
1809
1810 /* Initialize command packet */
1811 full_command_packet = tw_dev->command_packet_virt[request_id];
1812 full_command_packet->header.header_desc.size_header = 128;
1813 full_command_packet->header.status_block.error = 0;
1814 full_command_packet->header.status_block.severity__reserved = 0;
1815
1816 command_packet = &full_command_packet->command.newcommand;
1817 command_packet->status = 0;
1818 command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1819
1820 /* We forced 16 byte cdb use earlier */
1821 if (!cdb)
1822 memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1823 else
1824 memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1825
1826 if (srb) {
1827 command_packet->unit = srb->device->id;
1828 command_packet->request_id__lunl =
1829 cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1830 } else {
1831 command_packet->request_id__lunl =
1832 cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1833 command_packet->unit = 0;
1834 }
1835
1836 command_packet->sgl_offset = 16;
1837
1838 if (!sglistarg) {
1839 /* Map sglist from scsi layer to cmd packet */
1840
1841 if (scsi_sg_count(srb)) {
1842 if (!twa_command_mapped(srb)) {
1843 if (srb->sc_data_direction == DMA_TO_DEVICE ||
1844 srb->sc_data_direction == DMA_BIDIRECTIONAL)
1845 scsi_sg_copy_to_buffer(srb,
1846 tw_dev->generic_buffer_virt[request_id],
1847 TW_SECTOR_SIZE);
1848 command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1849 command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1850 } else {
1851 sg_count = scsi_dma_map(srb);
1852 if (sg_count < 0)
1853 goto out;
1854
1855 scsi_for_each_sg(srb, sg, sg_count, i) {
1856 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1857 command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1858 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1859 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1860 goto out;
1861 }
1862 }
1863 }
1864 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1865 }
1866 } else {
1867 /* Internal cdb post */
1868 for (i = 0; i < use_sg; i++) {
1869 command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1870 command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1871 if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1872 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1873 goto out;
1874 }
1875 }
1876 command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1877 }
1878
1879 if (srb) {
1880 if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1881 num_sectors = (u32)srb->cmnd[4];
1882
1883 if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1884 num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1885 }
1886
1887 /* Update sector statistic */
1888 tw_dev->sector_count = num_sectors;
1889 if (tw_dev->sector_count > tw_dev->max_sector_count)
1890 tw_dev->max_sector_count = tw_dev->sector_count;
1891
1892 /* Update SG statistics */
1893 if (srb) {
1894 tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1895 if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1896 tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1897 }
1898
1899 /* Now post the command to the board */
1900 if (srb) {
1901 retval = twa_post_command_packet(tw_dev, request_id, 0);
1902 } else {
1903 twa_post_command_packet(tw_dev, request_id, 1);
1904 retval = 0;
1905 }
1906out:
1907 return retval;
1908} /* End twa_scsiop_execute_scsi() */
1909
1910/* This function completes an execute scsi operation */
1911static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1912{
1913 struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1914
1915 if (!twa_command_mapped(cmd) &&
1916 (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1917 cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1918 if (scsi_sg_count(cmd) == 1) {
1919 void *buf = tw_dev->generic_buffer_virt[request_id];
1920
1921 scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1922 }
1923 }
1924} /* End twa_scsiop_execute_scsi_complete() */
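/*
 * Commands that twa_command_mapped() declines to DMA-map are bounced
 * through the per-request generic buffer instead: twa_scsiop_execute_scsi()
 * copies write data into that buffer before posting, and the completion
 * handler above copies read data back into the single scatterlist entry.
 */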
1925
1926/* This function tells the controller to shut down */
1927static void __twa_shutdown(TW_Device_Extension *tw_dev)
1928{
1929 /* Disable interrupts */
1930 TW_DISABLE_INTERRUPTS(tw_dev);
1931
1932 /* Free up the IRQ */
1933 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1934
1935 printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1936
1937 /* Tell the card we are shutting down */
1938 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1939 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1940 } else {
1941 printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1942 }
1943
1944 /* Clear all interrupts just before exit */
1945 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1946} /* End __twa_shutdown() */
1947
1948/* Wrapper for __twa_shutdown */
1949static void twa_shutdown(struct pci_dev *pdev)
1950{
1951 struct Scsi_Host *host = pci_get_drvdata(pdev);
1952 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1953
1954 __twa_shutdown(tw_dev);
1955} /* End twa_shutdown() */
1956
1957/* This function will look up a string */
1958static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1959{
1960 int index;
1961
1962 for (index = 0; ((code != table[index].code) &&
1963 (table[index].text != (char *)0)); index++);
1964 return(table[index].text);
1965} /* End twa_string_lookup() */
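/*
 * The scan above relies on every message table ending with a sentinel
 * entry whose text pointer is NULL; that sentinel terminates the loop and
 * its text is what gets returned for codes that are not listed.
 */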
1966
1967/* This function gets called when a disk is coming on-line */
1968static int twa_slave_configure(struct scsi_device *sdev)
1969{
1970 /* Force 60 second timeout */
1971 blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1972
1973 return 0;
1974} /* End twa_slave_configure() */
1975
1976/* scsi_host_template initializer */
1977static struct scsi_host_template driver_template = {
1978 .module = THIS_MODULE,
1979 .name = "3ware 9000 Storage Controller",
1980 .queuecommand = twa_scsi_queue,
1981 .eh_host_reset_handler = twa_scsi_eh_reset,
1982 .bios_param = twa_scsi_biosparam,
1983 .change_queue_depth = scsi_change_queue_depth,
1984 .can_queue = TW_Q_LENGTH-2,
1985 .slave_configure = twa_slave_configure,
1986 .this_id = -1,
1987 .sg_tablesize = TW_APACHE_MAX_SGL_LENGTH,
1988 .max_sectors = TW_MAX_SECTORS,
1989 .cmd_per_lun = TW_MAX_CMDS_PER_LUN,
1990 .use_clustering = ENABLE_CLUSTERING,
1991 .shost_attrs = twa_host_attrs,
1992 .emulated = 1,
1993 .no_write_same = 1,
1994};
1995
1996/* This function will probe and initialize a card */
1997static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1998{
1999 struct Scsi_Host *host = NULL;
2000 TW_Device_Extension *tw_dev;
2001 unsigned long mem_addr, mem_len;
2002 int retval = -ENODEV;
2003
2004 retval = pci_enable_device(pdev);
2005 if (retval) {
2006 TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2007 goto out_disable_device;
2008 }
2009
2010 pci_set_master(pdev);
2011 pci_try_set_mwi(pdev);
2012
2013 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2014 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2015 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2016 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2017 TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2018 retval = -ENODEV;
2019 goto out_disable_device;
2020 }
2021
2022 host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2023 if (!host) {
2024 TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2025 retval = -ENOMEM;
2026 goto out_disable_device;
2027 }
2028 tw_dev = (TW_Device_Extension *)host->hostdata;
2029
2030 /* Save values to device extension */
2031 tw_dev->host = host;
2032 tw_dev->tw_pci_dev = pdev;
2033
2034 if (twa_initialize_device_extension(tw_dev)) {
2035 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2036 goto out_free_device_extension;
2037 }
2038
2039 /* Request IO regions */
2040 retval = pci_request_regions(pdev, "3w-9xxx");
2041 if (retval) {
2042 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2043 goto out_free_device_extension;
2044 }
2045
2046 if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2047 mem_addr = pci_resource_start(pdev, 1);
2048 mem_len = pci_resource_len(pdev, 1);
2049 } else {
2050 mem_addr = pci_resource_start(pdev, 2);
2051 mem_len = pci_resource_len(pdev, 2);
2052 }
2053
2054 /* Save base address */
2055 tw_dev->base_addr = ioremap(mem_addr, mem_len);
2056 if (!tw_dev->base_addr) {
2057 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2058 goto out_release_mem_region;
2059 }
2060
2061 /* Disable interrupts on the card */
2062 TW_DISABLE_INTERRUPTS(tw_dev);
2063
2064 /* Initialize the card */
2065 if (twa_reset_sequence(tw_dev, 0))
2066 goto out_iounmap;
2067
2068 /* Set host specific parameters */
2069 if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2070 (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2071 host->max_id = TW_MAX_UNITS_9650SE;
2072 else
2073 host->max_id = TW_MAX_UNITS;
2074
2075 host->max_cmd_len = TW_MAX_CDB_LEN;
2076
2077	/* Channels aren't supported by the adapter */
2078 host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2079 host->max_channel = 0;
2080
2081 /* Register the card with the kernel SCSI layer */
2082 retval = scsi_add_host(host, &pdev->dev);
2083 if (retval) {
2084 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2085 goto out_iounmap;
2086 }
2087
2088 pci_set_drvdata(pdev, host);
2089
2090 printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2091 host->host_no, mem_addr, pdev->irq);
2092 printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2093 host->host_no,
2094 (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2095 TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2096 (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2097 TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2098 le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2099 TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2100
2101 /* Try to enable MSI */
2102 if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2103 !pci_enable_msi(pdev))
2104 set_bit(TW_USING_MSI, &tw_dev->flags);
2105
2106 /* Now setup the interrupt handler */
2107 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2108 if (retval) {
2109 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2110 goto out_remove_host;
2111 }
2112
2113 twa_device_extension_list[twa_device_extension_count] = tw_dev;
2114 twa_device_extension_count++;
2115
2116 /* Re-enable interrupts on the card */
2117 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2118
2119 /* Finally, scan the host */
2120 scsi_scan_host(host);
2121
2122 if (twa_major == -1) {
2123 if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2124 TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2125 }
2126 return 0;
2127
2128out_remove_host:
2129 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2130 pci_disable_msi(pdev);
2131 scsi_remove_host(host);
2132out_iounmap:
2133 iounmap(tw_dev->base_addr);
2134out_release_mem_region:
2135 pci_release_regions(pdev);
2136out_free_device_extension:
2137 twa_free_device_extension(tw_dev);
2138 scsi_host_put(host);
2139out_disable_device:
2140 pci_disable_device(pdev);
2141
2142 return retval;
2143} /* End twa_probe() */
2144
2145/* This function is called to remove a device */
2146static void twa_remove(struct pci_dev *pdev)
2147{
2148 struct Scsi_Host *host = pci_get_drvdata(pdev);
2149 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2150
2151 scsi_remove_host(tw_dev->host);
2152
2153 /* Unregister character device */
2154 if (twa_major >= 0) {
2155 unregister_chrdev(twa_major, "twa");
2156 twa_major = -1;
2157 }
2158
2159 /* Shutdown the card */
2160 __twa_shutdown(tw_dev);
2161
2162 /* Disable MSI if enabled */
2163 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2164 pci_disable_msi(pdev);
2165
2166 /* Free IO remapping */
2167 iounmap(tw_dev->base_addr);
2168
2169 /* Free up the mem region */
2170 pci_release_regions(pdev);
2171
2172 /* Free up device extension resources */
2173 twa_free_device_extension(tw_dev);
2174
2175 scsi_host_put(tw_dev->host);
2176 pci_disable_device(pdev);
2177 twa_device_extension_count--;
2178} /* End twa_remove() */
2179
2180#ifdef CONFIG_PM
2181/* This function is called on PCI suspend */
2182static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2183{
2184 struct Scsi_Host *host = pci_get_drvdata(pdev);
2185 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2186
2187 printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2188
2189 TW_DISABLE_INTERRUPTS(tw_dev);
2190 free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2191
2192 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2193 pci_disable_msi(pdev);
2194
2195 /* Tell the card we are shutting down */
2196 if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2197 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2198 } else {
2199 printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2200 }
2201 TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2202
2203 pci_save_state(pdev);
2204 pci_disable_device(pdev);
2205 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2206
2207 return 0;
2208} /* End twa_suspend() */
2209
2210/* This function is called on PCI resume */
2211static int twa_resume(struct pci_dev *pdev)
2212{
2213 int retval = 0;
2214 struct Scsi_Host *host = pci_get_drvdata(pdev);
2215 TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2216
2217 printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2218 pci_set_power_state(pdev, PCI_D0);
2219 pci_enable_wake(pdev, PCI_D0, 0);
2220 pci_restore_state(pdev);
2221
2222 retval = pci_enable_device(pdev);
2223 if (retval) {
2224 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2225 return retval;
2226 }
2227
2228 pci_set_master(pdev);
2229 pci_try_set_mwi(pdev);
2230
2231 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2232 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2233 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2234 || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2235 TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2236 retval = -ENODEV;
2237 goto out_disable_device;
2238 }
2239
2240 /* Initialize the card */
2241 if (twa_reset_sequence(tw_dev, 0)) {
2242 retval = -ENODEV;
2243 goto out_disable_device;
2244 }
2245
2246 /* Now setup the interrupt handler */
2247 retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2248 if (retval) {
2249 TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2250 retval = -ENODEV;
2251 goto out_disable_device;
2252 }
2253
2254	/* Re-enable MSI if it was enabled before suspend */
2255 if (test_bit(TW_USING_MSI, &tw_dev->flags))
2256 pci_enable_msi(pdev);
2257
2258 /* Re-enable interrupts on the card */
2259 TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2260
2261 printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2262 return 0;
2263
2264out_disable_device:
2265 scsi_remove_host(host);
2266 pci_disable_device(pdev);
2267
2268 return retval;
2269} /* End twa_resume() */
2270#endif
2271
2272/* PCI Devices supported by this driver */
2273static struct pci_device_id twa_pci_tbl[] = {
2274 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2275 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2276 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2277 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2278 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2279 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2280 { PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2281 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2282 { }
2283};
2284MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2285
2286/* pci_driver initializer */
2287static struct pci_driver twa_driver = {
2288 .name = "3w-9xxx",
2289 .id_table = twa_pci_tbl,
2290 .probe = twa_probe,
2291 .remove = twa_remove,
2292#ifdef CONFIG_PM
2293 .suspend = twa_suspend,
2294 .resume = twa_resume,
2295#endif
2296 .shutdown = twa_shutdown
2297};
2298
2299/* This function is called on driver initialization */
2300static int __init twa_init(void)
2301{
2302 printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2303
2304 return pci_register_driver(&twa_driver);
2305} /* End twa_init() */
2306
2307/* This function is called on driver exit */
2308static void __exit twa_exit(void)
2309{
2310 pci_unregister_driver(&twa_driver);
2311} /* End twa_exit() */
2312
2313module_init(twa_init);
2314module_exit(twa_exit);
2315