   1/*
   2   3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
   3
   4   Written By: Adam Radford <linuxraid@lsi.com>
   5   Modifications By: Tom Couch <linuxraid@lsi.com>
   6
   7   Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
   8   Copyright (C) 2010 LSI Corporation.
   9
  10   This program is free software; you can redistribute it and/or modify
  11   it under the terms of the GNU General Public License as published by
  12   the Free Software Foundation; version 2 of the License.
  13
  14   This program is distributed in the hope that it will be useful,
  15   but WITHOUT ANY WARRANTY; without even the implied warranty of
  16   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17   GNU General Public License for more details.
  18
  19   NO WARRANTY
  20   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  21   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  22   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  23   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  24   solely responsible for determining the appropriateness of using and
  25   distributing the Program and assumes all risks associated with its
  26   exercise of rights under this Agreement, including but not limited to
  27   the risks and costs of program errors, damage to or loss of data,
  28   programs or equipment, and unavailability or interruption of operations.
  29
  30   DISCLAIMER OF LIABILITY
  31   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  32   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  33   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  34   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  35   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  36   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  37   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  38
  39   You should have received a copy of the GNU General Public License
  40   along with this program; if not, write to the Free Software
  41   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  42
  43   Bugs/Comments/Suggestions should be mailed to:
  44   linuxraid@lsi.com
  45
  46   For more information, go to:
  47   http://www.lsi.com
  48
  49   Note: This version of the driver does not contain a bundled firmware
  50         image.
  51
  52   History
  53   -------
  54   2.26.02.000 - Driver cleanup for kernel submission.
  55   2.26.02.001 - Replace schedule_timeout() calls with msleep().
  56   2.26.02.002 - Add support for PAE mode.
  57                 Add lun support.
  58                 Fix twa_remove() to free irq handler/unregister_chrdev()
  59                 before shutting down card.
  60                 Change to new 'change_queue_depth' api.
  61                 Fix 'handled=1' ISR usage, remove bogus IRQ check.
  62                 Remove un-needed eh_abort handler.
  63                 Add support for embedded firmware error strings.
  64   2.26.02.003 - Correctly handle single sgl's with use_sg=1.
  65   2.26.02.004 - Add support for 9550SX controllers.
  66   2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
  67   2.26.02.006 - Fix 9550SX pchip reset timeout.
  68                 Add big endian support.
  69   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
  70   2.26.02.008 - Free irq handler in __twa_shutdown().
  71                 Serialize reset code.
  72                 Add support for 9650SE controllers.
  73   2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
  74   2.26.02.010 - Add support for 9690SA controllers.
  75   2.26.02.011 - Increase max AENs drained to 256.
  76                 Add MSI support and "use_msi" module parameter.
  77                 Fix bug in twa_get_param() on 4GB+.
  78                 Use pci_resource_len() for ioremap().
  79   2.26.02.012 - Add power management support.
  80   2.26.02.013 - Fix bug in twa_load_sgl().
  81   2.26.02.014 - Force 60 second timeout default.
  82*/
  83
  84#include <linux/module.h>
  85#include <linux/reboot.h>
  86#include <linux/spinlock.h>
  87#include <linux/interrupt.h>
  88#include <linux/moduleparam.h>
  89#include <linux/errno.h>
  90#include <linux/types.h>
  91#include <linux/delay.h>
  92#include <linux/pci.h>
  93#include <linux/time.h>
  94#include <linux/mutex.h>
  95#include <linux/slab.h>
  96#include <asm/io.h>
  97#include <asm/irq.h>
  98#include <asm/uaccess.h>
  99#include <scsi/scsi.h>
 100#include <scsi/scsi_host.h>
 101#include <scsi/scsi_tcq.h>
 102#include <scsi/scsi_cmnd.h>
 103#include "3w-9xxx.h"
 104
 105/* Globals */
 106#define TW_DRIVER_VERSION "2.26.02.014"
 107static DEFINE_MUTEX(twa_chrdev_mutex);
 108static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 109static unsigned int twa_device_extension_count;
 110static int twa_major = -1;
 111extern struct timezone sys_tz;
 112
 113/* Module parameters */
 114MODULE_AUTHOR ("LSI");
 115MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
 116MODULE_LICENSE("GPL");
 117MODULE_VERSION(TW_DRIVER_VERSION);
 118
 119static int use_msi = 0;
 120module_param(use_msi, int, S_IRUGO);
 121MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts.  Default: 0");
 122
 123/* Function prototypes */
 124static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
 125static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
 126static char *twa_aen_severity_lookup(unsigned char severity_code);
 127static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
 128static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 129static int twa_chrdev_open(struct inode *inode, struct file *file);
 130static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
 131static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
 132static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
 133static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
 134 			      u32 set_features, unsigned short current_fw_srl, 
 135			      unsigned short current_fw_arch_id, 
 136			      unsigned short current_fw_branch, 
 137			      unsigned short current_fw_build, 
 138			      unsigned short *fw_on_ctlr_srl, 
 139			      unsigned short *fw_on_ctlr_arch_id, 
 140			      unsigned short *fw_on_ctlr_branch, 
 141			      unsigned short *fw_on_ctlr_build, 
 142			      u32 *init_connect_result);
 143static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
 144static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
 145static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
 146static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
 147static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
 148static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
 149static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
 150static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
 151static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
 152static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id);
 153
 154/* Functions */
 155
 156/* Show some statistics about the card */
 157static ssize_t twa_show_stats(struct device *dev,
 158			      struct device_attribute *attr, char *buf)
 159{
 160	struct Scsi_Host *host = class_to_shost(dev);
 161	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
 162	unsigned long flags = 0;
 163	ssize_t len;
 164
 165	spin_lock_irqsave(tw_dev->host->host_lock, flags);
 166	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
 167		       "Current commands posted:   %4d\n"
 168		       "Max commands posted:       %4d\n"
 169		       "Current pending commands:  %4d\n"
 170		       "Max pending commands:      %4d\n"
 171		       "Last sgl length:           %4d\n"
 172		       "Max sgl length:            %4d\n"
 173		       "Last sector count:         %4d\n"
 174		       "Max sector count:          %4d\n"
 175		       "SCSI Host Resets:          %4d\n"
 176		       "AEN's:                     %4d\n", 
 177		       TW_DRIVER_VERSION,
 178		       tw_dev->posted_request_count,
 179		       tw_dev->max_posted_request_count,
 180		       tw_dev->pending_request_count,
 181		       tw_dev->max_pending_request_count,
 182		       tw_dev->sgl_entries,
 183		       tw_dev->max_sgl_entries,
 184		       tw_dev->sector_count,
 185		       tw_dev->max_sector_count,
 186		       tw_dev->num_resets,
 187		       tw_dev->aen_count);
 188	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 189	return len;
 190} /* End twa_show_stats() */
 191
  192/* This function will set a device's queue depth */
 193static int twa_change_queue_depth(struct scsi_device *sdev, int queue_depth,
 194				  int reason)
 195{
 196	if (reason != SCSI_QDEPTH_DEFAULT)
 197		return -EOPNOTSUPP;
 198
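	/* Cap the depth at TW_Q_LENGTH-2; the remaining request ids are kept
	   back for the driver's own internal (AEN/ioctl) commands. */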
 199	if (queue_depth > TW_Q_LENGTH-2)
 200		queue_depth = TW_Q_LENGTH-2;
 201	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
 202	return queue_depth;
 203} /* End twa_change_queue_depth() */
 204
 205/* Create sysfs 'stats' entry */
 206static struct device_attribute twa_host_stats_attr = {
 207	.attr = {
 208		.name = 	"stats",
 209		.mode =		S_IRUGO,
 210	},
 211	.show = twa_show_stats
 212};
 213
 214/* Host attributes initializer */
 215static struct device_attribute *twa_host_attrs[] = {
 216	&twa_host_stats_attr,
 217	NULL,
 218};
 219
 220/* File operations struct for character device */
 221static const struct file_operations twa_fops = {
 222	.owner		= THIS_MODULE,
 223	.unlocked_ioctl	= twa_chrdev_ioctl,
 224	.open		= twa_chrdev_open,
 225	.release	= NULL,
 226	.llseek		= noop_llseek,
 227};
 228
 229/* This function will complete an aen request from the isr */
 230static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
 231{
 232	TW_Command_Full *full_command_packet;
 233	TW_Command *command_packet;
 234	TW_Command_Apache_Header *header;
 235	unsigned short aen;
 236	int retval = 1;
 237
 238	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
 239	tw_dev->posted_request_count--;
 240	aen = le16_to_cpu(header->status_block.error);
 241	full_command_packet = tw_dev->command_packet_virt[request_id];
 242	command_packet = &full_command_packet->command.oldcommand;
 243
 244	/* First check for internal completion of set param for time sync */
 245	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
 246		/* Keep reading the queue in case there are more aen's */
 247		if (twa_aen_read_queue(tw_dev, request_id))
 248			goto out2;
 249	        else {
 250			retval = 0;
 251			goto out;
 252		}
 253	}
 254
 255	switch (aen) {
 256	case TW_AEN_QUEUE_EMPTY:
 257		/* Quit reading the queue if this is the last one */
 258		break;
 259	case TW_AEN_SYNC_TIME_WITH_HOST:
 260		twa_aen_sync_time(tw_dev, request_id);
 261		retval = 0;
 262		goto out;
 263	default:
 264		twa_aen_queue_event(tw_dev, header);
 265
 266		/* If there are more aen's, keep reading the queue */
 267		if (twa_aen_read_queue(tw_dev, request_id))
 268			goto out2;
 269		else {
 270			retval = 0;
 271			goto out;
 272		}
 273	}
 274	retval = 0;
 275out2:
 276	tw_dev->state[request_id] = TW_S_COMPLETED;
 277	twa_free_request_id(tw_dev, request_id);
 278	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
 279out:
 280	return retval;
 281} /* End twa_aen_complete() */
 282
  283/* This function will drain the aen queue */
 284static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
 285{
 286	int request_id = 0;
 287	char cdb[TW_MAX_CDB_LEN];
 288	TW_SG_Entry sglist[1];
 289	int finished = 0, count = 0;
 290	TW_Command_Full *full_command_packet;
 291	TW_Command_Apache_Header *header;
 292	unsigned short aen;
 293	int first_reset = 0, queue = 0, retval = 1;
 294
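	/* first_reset tracks the soft-reset AEN that follows a controller
	   reset: when no_check_reset is set, the first TW_AEN_SOFT_RESET seen
	   is consumed without being queued and an empty queue only counts as
	   success once it has arrived; otherwise soft-reset AENs are queued
	   like any other event. */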
 295	if (no_check_reset)
 296		first_reset = 0;
 297	else
 298		first_reset = 1;
 299
 300	full_command_packet = tw_dev->command_packet_virt[request_id];
 301	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 302
 303	/* Initialize cdb */
 304	memset(&cdb, 0, TW_MAX_CDB_LEN);
 305	cdb[0] = REQUEST_SENSE; /* opcode */
 306	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
 307
 308	/* Initialize sglist */
 309	memset(&sglist, 0, sizeof(TW_SG_Entry));
 310	sglist[0].length = TW_SECTOR_SIZE;
 311	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
 312
 313	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
 314		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
 315		goto out;
 316	}
 317
 318	/* Mark internal command */
 319	tw_dev->srb[request_id] = NULL;
 320
 321	do {
 322		/* Send command to the board */
 323		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
 324			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
 325			goto out;
 326		}
 327
 328		/* Now poll for completion */
 329		if (twa_poll_response(tw_dev, request_id, 30)) {
 330			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
 331			tw_dev->posted_request_count--;
 332			goto out;
 333		}
 334
 335		tw_dev->posted_request_count--;
 336		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
 337		aen = le16_to_cpu(header->status_block.error);
 338		queue = 0;
 339		count++;
 340
 341		switch (aen) {
 342		case TW_AEN_QUEUE_EMPTY:
 343			if (first_reset != 1)
 344				goto out;
 345			else
 346				finished = 1;
 347			break;
 348		case TW_AEN_SOFT_RESET:
 349			if (first_reset == 0)
 350				first_reset = 1;
 351			else
 352				queue = 1;
 353			break;
 354		case TW_AEN_SYNC_TIME_WITH_HOST:
 355			break;
 356		default:
 357			queue = 1;
 358		}
 359
 360		/* Now queue an event info */
 361		if (queue)
 362			twa_aen_queue_event(tw_dev, header);
 363	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
 364
 365	if (count == TW_MAX_AEN_DRAIN)
 366		goto out;
 367
 368	retval = 0;
 369out:
 370	tw_dev->state[request_id] = TW_S_INITIAL;
 371	return retval;
 372} /* End twa_aen_drain_queue() */
 373
 374/* This function will queue an event */
 375static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 376{
 377	u32 local_time;
 378	struct timeval time;
 379	TW_Event *event;
 380	unsigned short aen;
 381	char host[16];
 382	char *error_str;
 383
 384	tw_dev->aen_count++;
 385
 386	/* Fill out event info */
 387	event = tw_dev->event_queue[tw_dev->error_index];
 388
 389	/* Check for clobber */
 390	host[0] = '\0';
 391	if (tw_dev->host) {
 392		sprintf(host, " scsi%d:", tw_dev->host->host_no);
 393		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
 394			tw_dev->aen_clobber = 1;
 395	}
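	/* aen_clobber records that the ring is about to overwrite an event
	   userspace never retrieved; the chrdev ioctls report this back as
	   TW_IOCTL_ERROR_STATUS_AEN_CLOBBER. */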
 396
 397	aen = le16_to_cpu(header->status_block.error);
 398	memset(event, 0, sizeof(TW_Event));
 399
 400	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
 401	do_gettimeofday(&time);
 402	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
 403	event->time_stamp_sec = local_time;
 404	event->aen_code = aen;
 405	event->retrieved = TW_AEN_NOT_RETRIEVED;
 406	event->sequence_id = tw_dev->error_sequence_id;
 407	tw_dev->error_sequence_id++;
 408
 409	/* Check for embedded error string */
 410	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
 411
 412	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
 413	event->parameter_len = strlen(header->err_specific_desc);
 414	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
 415	if (event->severity != TW_AEN_SEVERITY_DEBUG)
 416		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
 417		       host,
 418		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
 419		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
 420		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
 421		       header->err_specific_desc);
 422	else
 423		tw_dev->aen_count--;
 424
 425	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
 426		tw_dev->event_queue_wrapped = 1;
 427	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
 428} /* End twa_aen_queue_event() */
 429
 430/* This function will read the aen queue from the isr */
 431static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
 432{
 433	char cdb[TW_MAX_CDB_LEN];
 434	TW_SG_Entry sglist[1];
 435	TW_Command_Full *full_command_packet;
 436	int retval = 1;
 437
 438	full_command_packet = tw_dev->command_packet_virt[request_id];
 439	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 440
 441	/* Initialize cdb */
 442	memset(&cdb, 0, TW_MAX_CDB_LEN);
 443	cdb[0] = REQUEST_SENSE; /* opcode */
 444	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
 445
 446	/* Initialize sglist */
 447	memset(&sglist, 0, sizeof(TW_SG_Entry));
 448	sglist[0].length = TW_SECTOR_SIZE;
 449	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
 450
 451	/* Mark internal command */
 452	tw_dev->srb[request_id] = NULL;
 453
 454	/* Now post the command packet */
 455	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
 456		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
 457		goto out;
 458	}
 459	retval = 0;
 460out:
 461	return retval;
 462} /* End twa_aen_read_queue() */
 463
 464/* This function will look up an AEN severity string */
 465static char *twa_aen_severity_lookup(unsigned char severity_code)
 466{
 467	char *retval = NULL;
 468
 469	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
 470	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
 471		goto out;
 472
 473	retval = twa_aen_severity_table[severity_code];
 474out:
 475	return retval;
 476} /* End twa_aen_severity_lookup() */
 477
 478/* This function will sync firmware time with the host time */
 479static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 480{
 481	u32 schedulertime;
 482	struct timeval utc;
 483	TW_Command_Full *full_command_packet;
 484	TW_Command *command_packet;
 485	TW_Param_Apache *param;
 486	u32 local_time;
 487
 488	/* Fill out the command packet */
 489	full_command_packet = tw_dev->command_packet_virt[request_id];
 490	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 491	command_packet = &full_command_packet->command.oldcommand;
 492	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
 493	command_packet->request_id = request_id;
 494	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 495	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
 496	command_packet->size = TW_COMMAND_SIZE;
 497	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
 498
 499	/* Setup the param */
 500	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
 501	memset(param, 0, TW_SECTOR_SIZE);
 502	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
 503	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
 504	param->parameter_size_bytes = cpu_to_le16(4);
 505
 506	/* Convert system time in UTC to local time seconds since last 
 507           Sunday 12:00AM */
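	/* The Unix epoch fell on a Thursday, so backing the time up three days
	   aligns the weekly wrap to Sunday; 604800 is the number of seconds in
	   a week. */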
 508	do_gettimeofday(&utc);
 509	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
 510	schedulertime = local_time - (3 * 86400);
 511	schedulertime = cpu_to_le32(schedulertime % 604800);
 512
 513	memcpy(param->data, &schedulertime, sizeof(u32));
 514
 515	/* Mark internal command */
 516	tw_dev->srb[request_id] = NULL;
 517
 518	/* Now post the command */
 519	twa_post_command_packet(tw_dev, request_id, 1);
 520} /* End twa_aen_sync_time() */
 521
 522/* This function will allocate memory and check if it is correctly aligned */
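/* 'which' selects the buffer pool: 0 = per-request command packet buffers,
   1 = generic (AEN header / parameter) sector buffers */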
 523static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 524{
 525	int i;
 526	dma_addr_t dma_handle;
 527	unsigned long *cpu_addr;
 528	int retval = 1;
 529
 530	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
 531	if (!cpu_addr) {
 532		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
 533		goto out;
 534	}
 535
 536	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
 537		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
 538		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
 539		goto out;
 540	}
 541
 542	memset(cpu_addr, 0, size*TW_Q_LENGTH);
 543
 544	for (i = 0; i < TW_Q_LENGTH; i++) {
 545		switch(which) {
 546		case 0:
 547			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
 548			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
 549			break;
 550		case 1:
 551			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
 552			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
 553			break;
 554		}
 555	}
 556	retval = 0;
 557out:
 558	return retval;
 559} /* End twa_allocate_memory() */
 560
 561/* This function will check the status register for unexpected bits */
 562static int twa_check_bits(u32 status_reg_value)
 563{
 564	int retval = 1;
 565
 566	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
 567		goto out;
 568	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
 569		goto out;
 570
 571	retval = 0;
 572out:
 573	return retval;
 574} /* End twa_check_bits() */
 575
 576/* This function will check the srl and decide if we are compatible  */
 577static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
 578{
 579	int retval = 1;
 580	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
 581	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
 582	u32 init_connect_result = 0;
 583
 584	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
 585			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
 586			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
 587			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
 588			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
 589			       &fw_on_ctlr_build, &init_connect_result)) {
 590		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
 591		goto out;
 592	}
 593
 594	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
 595	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
 596	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
 597
 598	/* Try base mode compatibility */
 599	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
 600		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
 601				       TW_EXTENDED_INIT_CONNECT,
 602				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
 603				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
 604				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
 605				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
 606				       &init_connect_result)) {
 607			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
 608			goto out;
 609		}
 610		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
 611			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
 612				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
 613			} else {
 614				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
 615			}
 616			goto out;
 617		}
 618		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
 619		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
 620		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
 621	}
 622
 623	/* Load rest of compatibility struct */
 624	strncpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION, strlen(TW_DRIVER_VERSION));
 625	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
 626	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
 627	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
 628	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
 629	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
 630	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
 631	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
 632	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
 633	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
 634
 635	retval = 0;
 636out:
 637	return retval;
 638} /* End twa_check_srl() */
 639
 640/* This function handles ioctl for the character device */
 641static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 642{
 643	struct inode *inode = file->f_path.dentry->d_inode;
 644	long timeout;
 645	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
 646	dma_addr_t dma_handle;
 647	int request_id = 0;
 648	unsigned int sequence_id = 0;
 649	unsigned char event_index, start_index;
 650	TW_Ioctl_Driver_Command driver_command;
 651	TW_Ioctl_Buf_Apache *tw_ioctl;
 652	TW_Lock *tw_lock;
 653	TW_Command_Full *full_command_packet;
 654	TW_Compatibility_Info *tw_compat_info;
 655	TW_Event *event;
 656	struct timeval current_time;
 657	u32 current_time_ms;
 658	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
 659	int retval = TW_IOCTL_ERROR_OS_EFAULT;
 660	void __user *argp = (void __user *)arg;
 661
 662	mutex_lock(&twa_chrdev_mutex);
 663
 664	/* Only let one of these through at a time */
 665	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
 666		retval = TW_IOCTL_ERROR_OS_EINTR;
 667		goto out;
 668	}
 669
 670	/* First copy down the driver command */
 671	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
 672		goto out2;
 673
 674	/* Check data buffer size */
 675	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
 676		retval = TW_IOCTL_ERROR_OS_EINVAL;
 677		goto out2;
 678	}
 679
 680	/* Hardware can only do multiple of 512 byte transfers */
 681	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
 682
 683	/* Now allocate ioctl buf memory */
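	/* TW_Ioctl_Buf_Apache ends in a one-byte data_buffer placeholder, so
	   the total allocation is header + data length - 1 throughout this
	   routine. */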
 684	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
 685	if (!cpu_addr) {
 686		retval = TW_IOCTL_ERROR_OS_ENOMEM;
 687		goto out2;
 688	}
 689
 690	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
 691
 692	/* Now copy down the entire ioctl */
 693	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
 694		goto out3;
 695
 696	/* See which ioctl we are doing */
 697	switch (cmd) {
 698	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
 699		spin_lock_irqsave(tw_dev->host->host_lock, flags);
 700		twa_get_request_id(tw_dev, &request_id);
 701
 702		/* Flag internal command */
 703		tw_dev->srb[request_id] = NULL;
 704
 705		/* Flag chrdev ioctl */
 706		tw_dev->chrdev_request_id = request_id;
 707
 708		full_command_packet = &tw_ioctl->firmware_command;
 709
 710		/* Load request id and sglist for both command types */
 711		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
 712
 713		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
 714
 715		/* Now post the command packet to the controller */
 716		twa_post_command_packet(tw_dev, request_id, 1);
 717		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 718
 719		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
 720
 721		/* Now wait for command to complete */
 722		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
 723
 724		/* We timed out, and didn't get an interrupt */
 725		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
 726			/* Now we need to reset the board */
 727			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
 728			       tw_dev->host->host_no, TW_DRIVER, 0x37,
 729			       cmd);
 730			retval = TW_IOCTL_ERROR_OS_EIO;
 731			twa_reset_device_extension(tw_dev);
 732			goto out3;
 733		}
 734
 735		/* Now copy in the command packet response */
 736		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
 737		
 738		/* Now complete the io */
 739		spin_lock_irqsave(tw_dev->host->host_lock, flags);
 740		tw_dev->posted_request_count--;
 741		tw_dev->state[request_id] = TW_S_COMPLETED;
 742		twa_free_request_id(tw_dev, request_id);
 743		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 744		break;
 745	case TW_IOCTL_GET_COMPATIBILITY_INFO:
 746		tw_ioctl->driver_command.status = 0;
 747		/* Copy compatibility struct into ioctl data buffer */
 748		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
 749		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
 750		break;
 751	case TW_IOCTL_GET_LAST_EVENT:
 752		if (tw_dev->event_queue_wrapped) {
 753			if (tw_dev->aen_clobber) {
 754				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 755				tw_dev->aen_clobber = 0;
 756			} else
 757				tw_ioctl->driver_command.status = 0;
 758		} else {
 759			if (!tw_dev->error_index) {
 760				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 761				break;
 762			}
 763			tw_ioctl->driver_command.status = 0;
 764		}
 765		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
 766		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 767		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 768		break;
 769	case TW_IOCTL_GET_FIRST_EVENT:
 770		if (tw_dev->event_queue_wrapped) {
 771			if (tw_dev->aen_clobber) {
 772				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 773				tw_dev->aen_clobber = 0;
 774			} else 
 775				tw_ioctl->driver_command.status = 0;
 776			event_index = tw_dev->error_index;
 777		} else {
 778			if (!tw_dev->error_index) {
 779				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 780				break;
 781			}
 782			tw_ioctl->driver_command.status = 0;
 783			event_index = 0;
 784		}
 785		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 786		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 787		break;
 788	case TW_IOCTL_GET_NEXT_EVENT:
 789		event = (TW_Event *)tw_ioctl->data_buffer;
 790		sequence_id = event->sequence_id;
 791		tw_ioctl->driver_command.status = 0;
 792
 793		if (tw_dev->event_queue_wrapped) {
 794			if (tw_dev->aen_clobber) {
 795				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 796				tw_dev->aen_clobber = 0;
 797			}
 798			start_index = tw_dev->error_index;
 799		} else {
 800			if (!tw_dev->error_index) {
 801				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 802				break;
 803			}
 804			start_index = 0;
 805		}
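		/* Sequence ids are handed out consecutively as events are
		   queued, so the event following 'sequence_id' lives
		   (sequence_id - oldest sequence id + 1) slots past
		   start_index, modulo the ring size. */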
 806		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
 807
 808		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
 809			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
 810				tw_dev->aen_clobber = 1;
 811			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 812			break;
 813		}
 814		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 815		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 816		break;
 817	case TW_IOCTL_GET_PREVIOUS_EVENT:
 818		event = (TW_Event *)tw_ioctl->data_buffer;
 819		sequence_id = event->sequence_id;
 820		tw_ioctl->driver_command.status = 0;
 821
 822		if (tw_dev->event_queue_wrapped) {
 823			if (tw_dev->aen_clobber) {
 824				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 825				tw_dev->aen_clobber = 0;
 826			}
 827			start_index = tw_dev->error_index;
 828		} else {
 829			if (!tw_dev->error_index) {
 830				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 831				break;
 832			}
 833			start_index = 0;
 834		}
 835		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
 836
 837		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
 838			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
 839				tw_dev->aen_clobber = 1;
 840			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 841			break;
 842		}
 843		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 844		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 845		break;
 846	case TW_IOCTL_GET_LOCK:
 847		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
 848		do_gettimeofday(&current_time);
 849		current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
 850
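		/* Grant the lock if the caller forces it, if nobody currently
		   holds it, or if the previous holder's timeout has expired. */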
 851		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
 852			tw_dev->ioctl_sem_lock = 1;
 853			tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
 854			tw_ioctl->driver_command.status = 0;
 855			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
 856		} else {
 857			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
 858			tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
 859		}
 860		break;
 861	case TW_IOCTL_RELEASE_LOCK:
 862		if (tw_dev->ioctl_sem_lock == 1) {
 863			tw_dev->ioctl_sem_lock = 0;
 864			tw_ioctl->driver_command.status = 0;
 865		} else {
 866			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
 867		}
 868		break;
 869	default:
 870		retval = TW_IOCTL_ERROR_OS_ENOTTY;
 871		goto out3;
 872	}
 873
 874	/* Now copy the entire response to userspace */
 875	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
 876		retval = 0;
 877out3:
 878	/* Now free ioctl buf memory */
 879	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
 880out2:
 881	mutex_unlock(&tw_dev->ioctl_lock);
 882out:
 883	mutex_unlock(&twa_chrdev_mutex);
 884	return retval;
 885} /* End twa_chrdev_ioctl() */
 886
 887/* This function handles open for the character device */
 888/* NOTE that this function will race with remove. */
 889static int twa_chrdev_open(struct inode *inode, struct file *file)
 890{
 891	unsigned int minor_number;
 892	int retval = TW_IOCTL_ERROR_OS_ENODEV;
 893
 894	minor_number = iminor(inode);
 895	if (minor_number >= twa_device_extension_count)
 896		goto out;
 897	retval = 0;
 898out:
 899	return retval;
 900} /* End twa_chrdev_open() */
 901
 902/* This function will print readable messages from status register errors */
 903static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
 904{
 905	int retval = 1;
 906
 907	/* Check for various error conditions and handle them appropriately */
 908	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
 909		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
 910		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
 911	}
 912
 913	if (status_reg_value & TW_STATUS_PCI_ABORT) {
 914		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
 915		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
 916		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
 917	}
 918
 919	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
 920		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
 921		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
 922		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
 923			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
 924		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
 925	}
 926
 927	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
 928		if (tw_dev->reset_print == 0) {
 929			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
 930			tw_dev->reset_print = 1;
 931		}
 932		goto out;
 933	}
 934	retval = 0;
 935out:
 936	return retval;
 937} /* End twa_decode_bits() */
 938
 939/* This function will empty the response queue */
 940static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
 941{
 942	u32 status_reg_value, response_que_value;
 943	int count = 0, retval = 1;
 944
 945	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
 946
 947	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
 948		response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
 949		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
 950		count++;
 951	}
 952	if (count == TW_MAX_RESPONSE_DRAIN)
 953		goto out;
 954
 955	retval = 0;
 956out:
 957	return retval;
 958} /* End twa_empty_response_queue() */
 959
 960/* This function will clear the pchip/response queue on 9550SX */
 961static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
 962{
 963	u32 response_que_value = 0;
 964	unsigned long before;
 965	int retval = 1;
 966
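	/* Only the original 9000-series lacks the large-queue drain register;
	   on 9550SX and newer the response queue is read here until the pchip
	   reports the drain has completed. */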
 967	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
 968		before = jiffies;
 969		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
 970			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
 971			msleep(1);
 972			if (time_after(jiffies, before + HZ * 30))
 973				goto out;
 974		}
 975		/* P-chip settle time */
 976		msleep(500);
 977		retval = 0;
 978	} else
 979		retval = 0;
 980out:
 981	return retval;
 982} /* End twa_empty_response_queue_large() */
 983
 984/* This function passes sense keys from firmware to scsi layer */
 985static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
 986{
 987	TW_Command_Full *full_command_packet;
 988	unsigned short error;
 989	int retval = 1;
 990	char *error_str;
 991
 992	full_command_packet = tw_dev->command_packet_virt[request_id];
 993
 994	/* Check for embedded error string */
 995	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
 996
 997	/* Don't print error for Logical unit not supported during rollcall */
 998	error = le16_to_cpu(full_command_packet->header.status_block.error);
 999	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
1000		if (print_host)
1001			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1002			       tw_dev->host->host_no,
1003			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1004			       full_command_packet->header.status_block.error,
1005			       error_str[0] == '\0' ?
1006			       twa_string_lookup(twa_error_table,
1007						 full_command_packet->header.status_block.error) : error_str,
1008			       full_command_packet->header.err_specific_desc);
1009		else
1010			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1011			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1012			       full_command_packet->header.status_block.error,
1013			       error_str[0] == '\0' ?
1014			       twa_string_lookup(twa_error_table,
1015						 full_command_packet->header.status_block.error) : error_str,
1016			       full_command_packet->header.err_specific_desc);
1017	}
1018
1019	if (copy_sense) {
1020		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1021		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1022		retval = TW_ISR_DONT_RESULT;
1023		goto out;
1024	}
1025	retval = 0;
1026out:
1027	return retval;
1028} /* End twa_fill_sense() */
1029
1030/* This function will free up device extension resources */
1031static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1032{
1033	if (tw_dev->command_packet_virt[0])
1034		pci_free_consistent(tw_dev->tw_pci_dev,
1035				    sizeof(TW_Command_Full)*TW_Q_LENGTH,
1036				    tw_dev->command_packet_virt[0],
1037				    tw_dev->command_packet_phys[0]);
1038
1039	if (tw_dev->generic_buffer_virt[0])
1040		pci_free_consistent(tw_dev->tw_pci_dev,
1041				    TW_SECTOR_SIZE*TW_Q_LENGTH,
1042				    tw_dev->generic_buffer_virt[0],
1043				    tw_dev->generic_buffer_phys[0]);
1044
1045	kfree(tw_dev->event_queue[0]);
1046} /* End twa_free_device_extension() */
1047
1048/* This function will free a request id */
1049static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1050{
1051	tw_dev->free_queue[tw_dev->free_tail] = request_id;
1052	tw_dev->state[request_id] = TW_S_FINISHED;
1053	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1054} /* End twa_free_request_id() */
1055
1056/* This function will get parameter table entries from the firmware */
1057static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1058{
1059	TW_Command_Full *full_command_packet;
1060	TW_Command *command_packet;
1061	TW_Param_Apache *param;
1062	void *retval = NULL;
1063
1064	/* Setup the command packet */
1065	full_command_packet = tw_dev->command_packet_virt[request_id];
1066	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1067	command_packet = &full_command_packet->command.oldcommand;
1068
1069	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1070	command_packet->size              = TW_COMMAND_SIZE;
1071	command_packet->request_id        = request_id;
1072	command_packet->byte6_offset.block_count = cpu_to_le16(1);
1073
1074	/* Now setup the param */
1075	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1076	memset(param, 0, TW_SECTOR_SIZE);
1077	param->table_id = cpu_to_le16(table_id | 0x8000);
1078	param->parameter_id = cpu_to_le16(parameter_id);
1079	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1080
1081	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1082	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1083
1084	/* Post the command packet to the board */
1085	twa_post_command_packet(tw_dev, request_id, 1);
1086
1087	/* Poll for completion */
1088	if (twa_poll_response(tw_dev, request_id, 30))
1089		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1090	else
1091		retval = (void *)&(param->data[0]);
1092
1093	tw_dev->posted_request_count--;
1094	tw_dev->state[request_id] = TW_S_INITIAL;
1095
1096	return retval;
1097} /* End twa_get_param() */
1098
1099/* This function will assign an available request id */
1100static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1101{
1102	*request_id = tw_dev->free_queue[tw_dev->free_head];
1103	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1104	tw_dev->state[*request_id] = TW_S_STARTED;
1105} /* End twa_get_request_id() */
1106
1107/* This function will send an initconnection command to controller */
1108static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1109 			      u32 set_features, unsigned short current_fw_srl, 
1110			      unsigned short current_fw_arch_id, 
1111			      unsigned short current_fw_branch, 
1112			      unsigned short current_fw_build, 
1113			      unsigned short *fw_on_ctlr_srl, 
1114			      unsigned short *fw_on_ctlr_arch_id, 
1115			      unsigned short *fw_on_ctlr_branch, 
1116			      unsigned short *fw_on_ctlr_build, 
1117			      u32 *init_connect_result)
1118{
1119	TW_Command_Full *full_command_packet;
1120	TW_Initconnect *tw_initconnect;
1121	int request_id = 0, retval = 1;
1122
1123	/* Initialize InitConnection command packet */
1124	full_command_packet = tw_dev->command_packet_virt[request_id];
1125	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1126	full_command_packet->header.header_desc.size_header = 128;
1127	
1128	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1129	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1130	tw_initconnect->request_id = request_id;
1131	tw_initconnect->message_credits = cpu_to_le16(message_credits);
1132	tw_initconnect->features = set_features;
1133
1134	/* Turn on 64-bit sgl support if we need to */
1135	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1136
1137	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1138
1139	if (set_features & TW_EXTENDED_INIT_CONNECT) {
1140		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1141		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1142		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1143		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1144		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1145	} else 
1146		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1147
1148	/* Send command packet to the board */
1149	twa_post_command_packet(tw_dev, request_id, 1);
1150
1151	/* Poll for completion */
1152	if (twa_poll_response(tw_dev, request_id, 30)) {
1153		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1154	} else {
1155		if (set_features & TW_EXTENDED_INIT_CONNECT) {
1156			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1157			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1158			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1159			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1160			*init_connect_result = le32_to_cpu(tw_initconnect->result);
1161		}
1162		retval = 0;
1163	}
1164
1165	tw_dev->posted_request_count--;
1166	tw_dev->state[request_id] = TW_S_INITIAL;
1167
1168	return retval;
1169} /* End twa_initconnection() */
1170
1171/* This function will initialize the fields of a device extension */
1172static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1173{
1174	int i, retval = 1;
1175
1176	/* Initialize command packet buffers */
1177	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1178		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1179		goto out;
1180	}
1181
1182	/* Initialize generic buffer */
1183	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1184		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1185		goto out;
1186	}
1187
1188	/* Allocate event info space */
1189	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1190	if (!tw_dev->event_queue[0]) {
1191		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1192		goto out;
1193	}
1194
1195
1196	for (i = 0; i < TW_Q_LENGTH; i++) {
1197		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1198		tw_dev->free_queue[i] = i;
1199		tw_dev->state[i] = TW_S_INITIAL;
1200	}
1201
1202	tw_dev->pending_head = TW_Q_START;
1203	tw_dev->pending_tail = TW_Q_START;
1204	tw_dev->free_head = TW_Q_START;
1205	tw_dev->free_tail = TW_Q_START;
1206	tw_dev->error_sequence_id = 1;
1207	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1208
1209	mutex_init(&tw_dev->ioctl_lock);
1210	init_waitqueue_head(&tw_dev->ioctl_wqueue);
1211
1212	retval = 0;
1213out:
1214	return retval;
1215} /* End twa_initialize_device_extension() */
1216
1217/* This function is the interrupt service routine */
1218static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1219{
1220	int request_id, error = 0;
1221	u32 status_reg_value;
1222	TW_Response_Queue response_que;
1223	TW_Command_Full *full_command_packet;
1224	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1225	int handled = 0;
1226
1227	/* Get the per adapter lock */
1228	spin_lock(tw_dev->host->host_lock);
1229
1230	/* Read the registers */
1231	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1232
1233	/* Check if this is our interrupt, otherwise bail */
1234	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1235		goto twa_interrupt_bail;
1236
1237	handled = 1;
1238
1239	/* If we are resetting, bail */
1240	if (test_bit(TW_IN_RESET, &tw_dev->flags))
1241		goto twa_interrupt_bail;
1242
1243	/* Check controller for errors */
1244	if (twa_check_bits(status_reg_value)) {
1245		if (twa_decode_bits(tw_dev, status_reg_value)) {
1246			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1247			goto twa_interrupt_bail;
1248		}
1249	}
1250
1251	/* Handle host interrupt */
1252	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1253		TW_CLEAR_HOST_INTERRUPT(tw_dev);
1254
1255	/* Handle attention interrupt */
1256	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1257		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1258		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1259			twa_get_request_id(tw_dev, &request_id);
1260
1261			error = twa_aen_read_queue(tw_dev, request_id);
1262			if (error) {
1263				tw_dev->state[request_id] = TW_S_COMPLETED;
1264				twa_free_request_id(tw_dev, request_id);
1265				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1266			}
1267		}
1268	}
1269
1270	/* Handle command interrupt */
1271	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1272		TW_MASK_COMMAND_INTERRUPT(tw_dev);
1273		/* Drain as many pending commands as we can */
1274		while (tw_dev->pending_request_count > 0) {
1275			request_id = tw_dev->pending_queue[tw_dev->pending_head];
1276			if (tw_dev->state[request_id] != TW_S_PENDING) {
1277				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1278				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1279				goto twa_interrupt_bail;
1280			}
1281			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1282				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1283				tw_dev->pending_request_count--;
1284			} else {
1285				/* If we get here, we will continue re-posting on the next command interrupt */
1286				break;
1287			}
1288		}
1289	}
1290
1291	/* Handle response interrupt */
1292	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1293
1294		/* Drain the response queue from the board */
1295		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1296			/* Complete the response */
1297			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1298			request_id = TW_RESID_OUT(response_que.response_id);
1299			full_command_packet = tw_dev->command_packet_virt[request_id];
1300			error = 0;
1301			/* Check for command packet errors */
1302			if (full_command_packet->command.newcommand.status != 0) {
1303				if (tw_dev->srb[request_id] != NULL) {
1304					error = twa_fill_sense(tw_dev, request_id, 1, 1);
1305				} else {
1306					/* Skip ioctl error prints */
1307					if (request_id != tw_dev->chrdev_request_id) {
1308						error = twa_fill_sense(tw_dev, request_id, 0, 1);
1309					}
1310				}
1311			}
1312
1313			/* Check for correct state */
1314			if (tw_dev->state[request_id] != TW_S_POSTED) {
1315				if (tw_dev->srb[request_id] != NULL) {
1316					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1317					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1318					goto twa_interrupt_bail;
1319				}
1320			}
1321
1322			/* Check for internal command completion */
1323			if (tw_dev->srb[request_id] == NULL) {
1324				if (request_id != tw_dev->chrdev_request_id) {
1325					if (twa_aen_complete(tw_dev, request_id))
1326						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1327				} else {
1328					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1329					wake_up(&tw_dev->ioctl_wqueue);
1330				}
1331			} else {
1332				struct scsi_cmnd *cmd;
1333
1334				cmd = tw_dev->srb[request_id];
1335
1336				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
 1337				/* If no error, the command was a success */
1338				if (error == 0) {
1339					cmd->result = (DID_OK << 16);
1340				}
1341
1342				/* If error, command failed */
1343				if (error == 1) {
1344					/* Ask for a host reset */
1345					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1346				}
1347
1348				/* Report residual bytes for single sgl */
1349				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1350					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1351						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1352				}
1353
1354				/* Now complete the io */
1355				tw_dev->state[request_id] = TW_S_COMPLETED;
1356				twa_free_request_id(tw_dev, request_id);
1357				tw_dev->posted_request_count--;
1358				tw_dev->srb[request_id]->scsi_done(tw_dev->srb[request_id]);
1359				twa_unmap_scsi_data(tw_dev, request_id);
1360			}
1361
1362			/* Check for valid status after each drain */
1363			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1364			if (twa_check_bits(status_reg_value)) {
1365				if (twa_decode_bits(tw_dev, status_reg_value)) {
1366					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1367					goto twa_interrupt_bail;
1368				}
1369			}
1370		}
1371	}
1372
1373twa_interrupt_bail:
1374	spin_unlock(tw_dev->host->host_lock);
1375	return IRQ_RETVAL(handled);
1376} /* End twa_interrupt() */
1377
1378/* This function will load the request id and various sgls for ioctls */
1379static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1380{
1381	TW_Command *oldcommand;
1382	TW_Command_Apache *newcommand;
1383	TW_SG_Entry *sgl;
1384	unsigned int pae = 0;
1385
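	/* pae is set on 32-bit kernels built with a 64-bit dma_addr_t, where
	   each TW_SG_Entry takes one extra 32-bit word; the passthrough sgl
	   offset and command size below are adjusted to match. */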
1386	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1387		pae = 1;
1388
1389	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1390		newcommand = &full_command_packet->command.newcommand;
1391		newcommand->request_id__lunl =
1392			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1393		if (length) {
1394			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1395			newcommand->sg_list[0].length = cpu_to_le32(length);
1396		}
1397		newcommand->sgl_entries__lunh =
1398			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1399	} else {
1400		oldcommand = &full_command_packet->command.oldcommand;
1401		oldcommand->request_id = request_id;
1402
1403		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1404			/* Load the sg list */
1405			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1406				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1407			else
1408				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1409			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1410			sgl->length = cpu_to_le32(length);
1411
1412			oldcommand->size += pae;
1413		}
1414	}
1415} /* End twa_load_sgl() */
1416
1417/* This function will perform a pci-dma mapping for a scatter gather list */
1418static int twa_map_scsi_sg_data(TW_Device_Extension *tw_dev, int request_id)
1419{
1420	int use_sg;
1421	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1422
1423	use_sg = scsi_dma_map(cmd);
1424	if (!use_sg)
1425		return 0;
1426	else if (use_sg < 0) {
1427		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1c, "Failed to map scatter gather list");
1428		return 0;
1429	}
1430
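	/* Record the mapping in the scsi_pointer so the completion path can
	   tell later (via twa_unmap_scsi_data()) that this command was
	   dma-mapped. */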
1431	cmd->SCp.phase = TW_PHASE_SGLIST;
1432	cmd->SCp.have_data_in = use_sg;
1433
1434	return use_sg;
1435} /* End twa_map_scsi_sg_data() */
1436
1437/* This function will poll for a response interrupt of a request */
1438static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1439{
1440	int retval = 1, found = 0, response_request_id;
1441	TW_Response_Queue response_queue;
1442	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1443
1444	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1445		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1446		response_request_id = TW_RESID_OUT(response_queue.response_id);
1447		if (request_id != response_request_id) {
1448			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1449			goto out;
1450		}
1451		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1452			if (full_command_packet->command.newcommand.status != 0) {
1453				/* bad response */
1454				twa_fill_sense(tw_dev, request_id, 0, 0);
1455				goto out;
1456			}
1457			found = 1;
1458		} else {
1459			if (full_command_packet->command.oldcommand.status != 0) {
1460				/* bad response */
1461				twa_fill_sense(tw_dev, request_id, 0, 0);
1462				goto out;
1463			}
1464			found = 1;
1465		}
1466	}
1467
1468	if (found)
1469		retval = 0;
1470out:
1471	return retval;
1472} /* End twa_poll_response() */
1473
1474/* This function will poll the status register for a flag */
1475static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1476{
1477	u32 status_reg_value; 
1478	unsigned long before;
1479	int retval = 1;
1480
1481	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1482	before = jiffies;
1483
1484	if (twa_check_bits(status_reg_value))
1485		twa_decode_bits(tw_dev, status_reg_value);
1486
1487	while ((status_reg_value & flag) != flag) {
1488		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1489
1490		if (twa_check_bits(status_reg_value))
1491			twa_decode_bits(tw_dev, status_reg_value);
1492
1493		if (time_after(jiffies, before + HZ * seconds))
1494			goto out;
1495
1496		msleep(50);
1497	}
1498	retval = 0;
1499out:
1500	return retval;
1501} /* End twa_poll_status() */
1502
1503/* This function will poll the status register for disappearance of a flag */
1504static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1505{
1506	u32 status_reg_value;
1507	unsigned long before;
1508	int retval = 1;
1509
1510	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1511	before = jiffies;
1512
1513	if (twa_check_bits(status_reg_value))
1514		twa_decode_bits(tw_dev, status_reg_value);
1515
1516	while ((status_reg_value & flag) != 0) {
1517		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1518		if (twa_check_bits(status_reg_value))
1519			twa_decode_bits(tw_dev, status_reg_value);
1520
1521		if (time_after(jiffies, before + HZ * seconds))
1522			goto out;
1523
1524		msleep(50);
1525	}
1526	retval = 0;
1527out:
1528	return retval;
1529} /* End twa_poll_status_gone() */
1530
1531/* This function will attempt to post a command packet to the board */
1532static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1533{
1534	u32 status_reg_value;
1535	dma_addr_t command_que_value;
1536	int retval = 1;
1537
1538	command_que_value = tw_dev->command_packet_phys[request_id];
1539
1540	/* For 9650SE write low 4 bytes first */
1541	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1542	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1543		command_que_value += TW_COMMAND_OFFSET;
1544		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1545	}
1546
1547	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1548
1549	if (twa_check_bits(status_reg_value))
1550		twa_decode_bits(tw_dev, status_reg_value);
1551
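	/* Queue is busy: either requests are already pending or the controller command queue is full */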
1552	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1553
1554		/* Only pend internal driver commands */
1555		if (!internal) {
1556			retval = SCSI_MLQUEUE_HOST_BUSY;
1557			goto out;
1558		}
1559
1560		/* Couldn't post the command packet, so we do it later */
1561		if (tw_dev->state[request_id] != TW_S_PENDING) {
1562			tw_dev->state[request_id] = TW_S_PENDING;
1563			tw_dev->pending_request_count++;
1564			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1565				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1566			}
1567			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1568			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1569		}
1570		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1571		goto out;
1572	} else {
1573		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1574		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1575			/* Now write upper 4 bytes */
1576			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1577		} else {
1578			if (sizeof(dma_addr_t) > 4) {
1579				command_que_value += TW_COMMAND_OFFSET;
1580				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1581				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1582			} else {
1583				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1584			}
1585		}
1586		tw_dev->state[request_id] = TW_S_POSTED;
1587		tw_dev->posted_request_count++;
1588		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1589			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1590		}
1591	}
1592	retval = 0;
1593out:
1594	return retval;
1595} /* End twa_post_command_packet() */
1596
1597/* This function will reset a device extension */
1598static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1599{
1600	int i = 0;
1601	int retval = 1;
1602	unsigned long flags = 0;
1603
1604	set_bit(TW_IN_RESET, &tw_dev->flags);
1605	TW_DISABLE_INTERRUPTS(tw_dev);
1606	TW_MASK_COMMAND_INTERRUPT(tw_dev);
1607	spin_lock_irqsave(tw_dev->host->host_lock, flags);
1608
1609	/* Abort all requests that are in progress */
1610	for (i = 0; i < TW_Q_LENGTH; i++) {
1611		if ((tw_dev->state[i] != TW_S_FINISHED) &&
1612		    (tw_dev->state[i] != TW_S_INITIAL) &&
1613		    (tw_dev->state[i] != TW_S_COMPLETED)) {
1614			if (tw_dev->srb[i]) {
1615				tw_dev->srb[i]->result = (DID_RESET << 16);
1616				tw_dev->srb[i]->scsi_done(tw_dev->srb[i]);
1617				twa_unmap_scsi_data(tw_dev, i);
1618			}
1619		}
1620	}
1621
1622	/* Reset queues and counts */
1623	for (i = 0; i < TW_Q_LENGTH; i++) {
1624		tw_dev->free_queue[i] = i;
1625		tw_dev->state[i] = TW_S_INITIAL;
1626	}
1627	tw_dev->free_head = TW_Q_START;
1628	tw_dev->free_tail = TW_Q_START;
1629	tw_dev->posted_request_count = 0;
1630	tw_dev->pending_request_count = 0;
1631	tw_dev->pending_head = TW_Q_START;
1632	tw_dev->pending_tail = TW_Q_START;
1633	tw_dev->reset_print = 0;
1634
1635	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1636
1637	if (twa_reset_sequence(tw_dev, 1))
1638		goto out;
1639
1640	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1641	clear_bit(TW_IN_RESET, &tw_dev->flags);
1642	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1643
1644	retval = 0;
1645out:
1646	return retval;
1647} /* End twa_reset_device_extension() */
1648
1649/* This function will reset a controller */
1650static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1651{
1652	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1653
1654	while (tries < TW_MAX_RESET_TRIES) {
1655		if (do_soft_reset) {
1656			TW_SOFT_RESET(tw_dev);
1657			/* Clear pchip/response queue on 9550SX */
1658			if (twa_empty_response_queue_large(tw_dev)) {
1659				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1660				do_soft_reset = 1;
1661				tries++;
1662				continue;
1663			}
1664		}
1665
1666		/* Make sure controller is in a good state */
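		/* After a soft reset we additionally wait for the attention interrupt bit */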
1667		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1668			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1669			do_soft_reset = 1;
1670			tries++;
1671			continue;
1672		}
1673
1674		/* Empty response queue */
1675		if (twa_empty_response_queue(tw_dev)) {
1676			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1677			do_soft_reset = 1;
1678			tries++;
1679			continue;
1680		}
1681
1682		flashed = 0;
1683
1684		/* Check for compatibility/flash */
1685		if (twa_check_srl(tw_dev, &flashed)) {
1686			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1687			do_soft_reset = 1;
1688			tries++;
1689			continue;
1690		} else {
1691			if (flashed) {
1692				tries++;
1693				continue;
1694			}
1695		}
1696
1697		/* Drain the AEN queue */
1698		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1699			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1700			do_soft_reset = 1;
1701			tries++;
1702			continue;
1703		}
1704
1705		/* If we got here, controller is in a good state */
1706		retval = 0;
1707		goto out;
1708	}
1709out:
1710	return retval;
1711} /* End twa_reset_sequence() */
1712
1713/* This function returns unit geometry in cylinders/heads/sectors */
1714static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1715{
1716	int heads, sectors, cylinders;
1717	TW_Device_Extension *tw_dev;
1718
1719	tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1720
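	/* Report 255 heads / 63 sectors for units of 1GB (0x200000 512-byte sectors) and up, 64/32 otherwise */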
1721	if (capacity >= 0x200000) {
1722		heads = 255;
1723		sectors = 63;
1724		cylinders = sector_div(capacity, heads * sectors);
1725	} else {
1726		heads = 64;
1727		sectors = 32;
1728		cylinders = sector_div(capacity, heads * sectors);
1729	}
1730
1731	geom[0] = heads;
1732	geom[1] = sectors;
1733	geom[2] = cylinders;
1734
1735	return 0;
1736} /* End twa_scsi_biosparam() */
1737
1738/* This is the new scsi eh reset function */
1739static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1740{
1741	TW_Device_Extension *tw_dev = NULL;
1742	int retval = FAILED;
1743
1744	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1745
1746	tw_dev->num_resets++;
1747
1748	sdev_printk(KERN_WARNING, SCpnt->device,
1749		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1750		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1751
1752	/* Make sure we are not issuing an ioctl or resetting from ioctl */
1753	mutex_lock(&tw_dev->ioctl_lock);
1754
1755	/* Now reset the card and some of the device extension data */
1756	if (twa_reset_device_extension(tw_dev)) {
1757		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1758		goto out;
1759	}
1760
1761	retval = SUCCESS;
1762out:
1763	mutex_unlock(&tw_dev->ioctl_lock);
1764	return retval;
1765} /* End twa_scsi_eh_reset() */
1766
1767/* This is the main scsi queue function to handle scsi opcodes */
1768static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1769{
1770	int request_id, retval;
1771	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1772
1773	/* If we are resetting due to timed out ioctl, report as busy */
1774	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1775		retval = SCSI_MLQUEUE_HOST_BUSY;
1776		goto out;
1777	}
1778
1779	/* Check if this FW supports luns */
1780	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1781		SCpnt->result = (DID_BAD_TARGET << 16);
1782		done(SCpnt);
1783		retval = 0;
1784		goto out;
1785	}
1786
1787	/* Save done function into scsi_cmnd struct */
1788	SCpnt->scsi_done = done;
1789		
1790	/* Get a free request id */
1791	twa_get_request_id(tw_dev, &request_id);
1792
1793	/* Save the scsi command for use by the ISR */
1794	tw_dev->srb[request_id] = SCpnt;
1795
1796	/* Initialize phase to zero */
1797	SCpnt->SCp.phase = TW_PHASE_INITIAL;
1798
1799	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1800	switch (retval) {
1801	case SCSI_MLQUEUE_HOST_BUSY:
1802		twa_free_request_id(tw_dev, request_id);
1803		twa_unmap_scsi_data(tw_dev, request_id);
1804		break;
1805	case 1:
1806		tw_dev->state[request_id] = TW_S_COMPLETED;
1807		twa_free_request_id(tw_dev, request_id);
1808		twa_unmap_scsi_data(tw_dev, request_id);
1809		SCpnt->result = (DID_ERROR << 16);
1810		done(SCpnt);
1811		retval = 0;
1812	}
1813out:
1814	return retval;
1815} /* End twa_scsi_queue() */
1816
1817static DEF_SCSI_QCMD(twa_scsi_queue)
1818
1819/* This function hands scsi cdb's to the firmware */
1820static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1821{
1822	TW_Command_Full *full_command_packet;
1823	TW_Command_Apache *command_packet;
1824	u32 num_sectors = 0x0;
1825	int i, sg_count;
1826	struct scsi_cmnd *srb = NULL;
1827	struct scatterlist *sglist = NULL, *sg;
1828	int retval = 1;
1829
1830	if (tw_dev->srb[request_id]) {
1831		srb = tw_dev->srb[request_id];
1832		if (scsi_sglist(srb))
1833			sglist = scsi_sglist(srb);
1834	}
1835
1836	/* Initialize command packet */
1837	full_command_packet = tw_dev->command_packet_virt[request_id];
1838	full_command_packet->header.header_desc.size_header = 128;
1839	full_command_packet->header.status_block.error = 0;
1840	full_command_packet->header.status_block.severity__reserved = 0;
1841
1842	command_packet = &full_command_packet->command.newcommand;
1843	command_packet->status = 0;
1844	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1845
1846	/* We forced 16 byte cdb use earlier */
1847	if (!cdb)
1848		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1849	else
1850		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1851
1852	if (srb) {
1853		command_packet->unit = srb->device->id;
1854		command_packet->request_id__lunl =
1855			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1856	} else {
1857		command_packet->request_id__lunl =
1858			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1859		command_packet->unit = 0;
1860	}
1861
1862	command_packet->sgl_offset = 16;
1863
1864	if (!sglistarg) {
1865		/* Map sglist from scsi layer to cmd packet */
1866
1867		if (scsi_sg_count(srb)) {
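			/* Single-entry transfers smaller than TW_MIN_SGL_LENGTH are bounced through the per-request generic buffer instead of being DMA mapped */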
1868			if ((scsi_sg_count(srb) == 1) &&
1869			    (scsi_bufflen(srb) < TW_MIN_SGL_LENGTH)) {
1870				if (srb->sc_data_direction == DMA_TO_DEVICE ||
1871				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
1872					scsi_sg_copy_to_buffer(srb,
1873							       tw_dev->generic_buffer_virt[request_id],
1874							       TW_SECTOR_SIZE);
1875				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1876				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1877			} else {
1878				sg_count = twa_map_scsi_sg_data(tw_dev, request_id);
1879				if (sg_count == 0)
1880					goto out;
1881
1882				scsi_for_each_sg(srb, sg, sg_count, i) {
1883					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1884					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1885					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1886						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1887						goto out;
1888					}
1889				}
1890			}
1891			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1892		}
1893	} else {
1894		/* Internal cdb post */
1895		for (i = 0; i < use_sg; i++) {
1896			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1897			command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1898			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1899				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1900				goto out;
1901			}
1902		}
1903		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1904	}
1905
1906	if (srb) {
1907		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1908			num_sectors = (u32)srb->cmnd[4];
1909
1910		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1911			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1912	}
1913
1914	/* Update sector statistic */
1915	tw_dev->sector_count = num_sectors;
1916	if (tw_dev->sector_count > tw_dev->max_sector_count)
1917		tw_dev->max_sector_count = tw_dev->sector_count;
1918
1919	/* Update SG statistics */
1920	if (srb) {
1921		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1922		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1923			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1924	}
1925
1926	/* Now post the command to the board */
1927	if (srb) {
1928		retval = twa_post_command_packet(tw_dev, request_id, 0);
1929	} else {
1930		twa_post_command_packet(tw_dev, request_id, 1);
1931		retval = 0;
1932	}
1933out:
1934	return retval;
1935} /* End twa_scsiop_execute_scsi() */
1936
1937/* This function completes an execute scsi operation */
1938static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1939{
1940	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1941
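	/* Small single-entry reads were bounced through the generic buffer; copy the data back to the scatterlist here */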
1942	if (scsi_bufflen(cmd) < TW_MIN_SGL_LENGTH &&
1943	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1944	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1945		if (scsi_sg_count(cmd) == 1) {
1946			void *buf = tw_dev->generic_buffer_virt[request_id];
1947
1948			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1949		}
1950	}
1951} /* End twa_scsiop_execute_scsi_complete() */
1952
1953/* This function tells the controller to shut down */
1954static void __twa_shutdown(TW_Device_Extension *tw_dev)
1955{
1956	/* Disable interrupts */
1957	TW_DISABLE_INTERRUPTS(tw_dev);
1958
1959	/* Free up the IRQ */
1960	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1961
1962	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1963
1964	/* Tell the card we are shutting down */
1965	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1966		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1967	} else {
1968		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1969	}
1970
1971	/* Clear all interrupts just before exit */
1972	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1973} /* End __twa_shutdown() */
1974
1975/* Wrapper for __twa_shutdown */
1976static void twa_shutdown(struct pci_dev *pdev)
1977{
1978	struct Scsi_Host *host = pci_get_drvdata(pdev);
1979	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1980
1981	__twa_shutdown(tw_dev);
1982} /* End twa_shutdown() */
1983
1984/* This function will look up a string */
1985static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1986{
1987	int index;
1988
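	/* Walk the table until the code matches or the NULL-text terminator is hit, returning whichever entry stopped the scan */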
1989	for (index = 0; ((code != table[index].code) &&
1990		      (table[index].text != (char *)0)); index++);
1991	return(table[index].text);
1992} /* End twa_string_lookup() */
1993
1994/* This function will perform a pci-dma unmap */
1995static void twa_unmap_scsi_data(TW_Device_Extension *tw_dev, int request_id)
1996{
1997	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1998
1999	if (cmd->SCp.phase == TW_PHASE_SGLIST)
2000		scsi_dma_unmap(cmd);
2001} /* End twa_unmap_scsi_data() */
2002
2003/* This function gets called when a disk is coming on-line */
2004static int twa_slave_configure(struct scsi_device *sdev)
2005{
2006	/* Force 60 second timeout */
2007	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
2008
2009	return 0;
2010} /* End twa_slave_configure() */
2011
2012/* scsi_host_template initializer */
2013static struct scsi_host_template driver_template = {
2014	.module			= THIS_MODULE,
2015	.name			= "3ware 9000 Storage Controller",
2016	.queuecommand		= twa_scsi_queue,
2017	.eh_host_reset_handler	= twa_scsi_eh_reset,
2018	.bios_param		= twa_scsi_biosparam,
2019	.change_queue_depth	= twa_change_queue_depth,
2020	.can_queue		= TW_Q_LENGTH-2,
2021	.slave_configure	= twa_slave_configure,
2022	.this_id		= -1,
2023	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
2024	.max_sectors		= TW_MAX_SECTORS,
2025	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
2026	.use_clustering		= ENABLE_CLUSTERING,
2027	.shost_attrs		= twa_host_attrs,
2028	.emulated		= 1
2029};
2030
2031/* This function will probe and initialize a card */
2032static int __devinit twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2033{
2034	struct Scsi_Host *host = NULL;
2035	TW_Device_Extension *tw_dev;
2036	unsigned long mem_addr, mem_len;
2037	int retval = -ENODEV;
2038
2039	retval = pci_enable_device(pdev);
2040	if (retval) {
2041		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2042		goto out_disable_device;
2043	}
2044
2045	pci_set_master(pdev);
2046	pci_try_set_mwi(pdev);
2047
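	/* Prefer 64-bit DMA masks, falling back to 32-bit if they cannot be set */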
2048	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2049	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2050		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2051		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2052			TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2053			retval = -ENODEV;
2054			goto out_disable_device;
2055		}
2056
2057	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2058	if (!host) {
2059		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2060		retval = -ENOMEM;
2061		goto out_disable_device;
2062	}
2063	tw_dev = (TW_Device_Extension *)host->hostdata;
2064
2065	/* Save values to device extension */
2066	tw_dev->host = host;
2067	tw_dev->tw_pci_dev = pdev;
2068
2069	if (twa_initialize_device_extension(tw_dev)) {
2070		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2071		goto out_free_device_extension;
2072	}
2073
2074	/* Request IO regions */
2075	retval = pci_request_regions(pdev, "3w-9xxx");
2076	if (retval) {
2077		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2078		goto out_free_device_extension;
2079	}
2080
2081	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2082		mem_addr = pci_resource_start(pdev, 1);
2083		mem_len = pci_resource_len(pdev, 1);
2084	} else {
2085		mem_addr = pci_resource_start(pdev, 2);
2086		mem_len = pci_resource_len(pdev, 2);
2087	}
2088
2089	/* Save base address */
2090	tw_dev->base_addr = ioremap(mem_addr, mem_len);
2091	if (!tw_dev->base_addr) {
2092		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2093		goto out_release_mem_region;
2094	}
2095
2096	/* Disable interrupts on the card */
2097	TW_DISABLE_INTERRUPTS(tw_dev);
2098
2099	/* Initialize the card */
2100	if (twa_reset_sequence(tw_dev, 0))
2101		goto out_iounmap;
2102
2103	/* Set host specific parameters */
2104	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2105	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2106		host->max_id = TW_MAX_UNITS_9650SE;
2107	else
2108		host->max_id = TW_MAX_UNITS;
2109
2110	host->max_cmd_len = TW_MAX_CDB_LEN;
2111
2112	/* Channels aren't supported by adapter */
2113	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2114	host->max_channel = 0;
2115
2116	/* Register the card with the kernel SCSI layer */
2117	retval = scsi_add_host(host, &pdev->dev);
2118	if (retval) {
2119		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2120		goto out_iounmap;
2121	}
2122
2123	pci_set_drvdata(pdev, host);
2124
2125	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2126	       host->host_no, mem_addr, pdev->irq);
2127	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2128	       host->host_no,
2129	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2130				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2131	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2132				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2133	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2134				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2135
2136	/* Try to enable MSI */
2137	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2138	    !pci_enable_msi(pdev))
2139		set_bit(TW_USING_MSI, &tw_dev->flags);
2140
2141	/* Now setup the interrupt handler */
2142	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2143	if (retval) {
2144		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2145		goto out_remove_host;
2146	}
2147
2148	twa_device_extension_list[twa_device_extension_count] = tw_dev;
2149	twa_device_extension_count++;
2150
2151	/* Re-enable interrupts on the card */
2152	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2153
2154	/* Finally, scan the host */
2155	scsi_scan_host(host);
2156
2157	if (twa_major == -1) {
2158		if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2159			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2160	}
2161	return 0;
2162
2163out_remove_host:
2164	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2165		pci_disable_msi(pdev);
2166	scsi_remove_host(host);
2167out_iounmap:
2168	iounmap(tw_dev->base_addr);
2169out_release_mem_region:
2170	pci_release_regions(pdev);
2171out_free_device_extension:
2172	twa_free_device_extension(tw_dev);
2173	scsi_host_put(host);
2174out_disable_device:
2175	pci_disable_device(pdev);
2176
2177	return retval;
2178} /* End twa_probe() */
2179
2180/* This function is called to remove a device */
2181static void twa_remove(struct pci_dev *pdev)
2182{
2183	struct Scsi_Host *host = pci_get_drvdata(pdev);
2184	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2185
2186	scsi_remove_host(tw_dev->host);
2187
2188	/* Unregister character device */
2189	if (twa_major >= 0) {
2190		unregister_chrdev(twa_major, "twa");
2191		twa_major = -1;
2192	}
2193
2194	/* Shutdown the card */
2195	__twa_shutdown(tw_dev);
2196
2197	/* Disable MSI if enabled */
2198	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2199		pci_disable_msi(pdev);
2200
2201	/* Free IO remapping */
2202	iounmap(tw_dev->base_addr);
2203
2204	/* Free up the mem region */
2205	pci_release_regions(pdev);
2206
2207	/* Free up device extension resources */
2208	twa_free_device_extension(tw_dev);
2209
2210	scsi_host_put(tw_dev->host);
2211	pci_disable_device(pdev);
2212	twa_device_extension_count--;
2213} /* End twa_remove() */
2214
2215#ifdef CONFIG_PM
2216/* This function is called on PCI suspend */
2217static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2218{
2219	struct Scsi_Host *host = pci_get_drvdata(pdev);
2220	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2221
2222	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2223
2224	TW_DISABLE_INTERRUPTS(tw_dev);
2225	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2226
2227	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2228		pci_disable_msi(pdev);
2229
2230	/* Tell the card we are shutting down */
2231	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2232		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2233	} else {
2234		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2235	}
2236	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2237
2238	pci_save_state(pdev);
2239	pci_disable_device(pdev);
2240	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2241
2242	return 0;
2243} /* End twa_suspend() */
2244
2245/* This function is called on PCI resume */
2246static int twa_resume(struct pci_dev *pdev)
2247{
2248	int retval = 0;
2249	struct Scsi_Host *host = pci_get_drvdata(pdev);
2250	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2251
2252	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2253	pci_set_power_state(pdev, PCI_D0);
2254	pci_enable_wake(pdev, PCI_D0, 0);
2255	pci_restore_state(pdev);
2256
2257	retval = pci_enable_device(pdev);
2258	if (retval) {
2259		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2260		return retval;
2261	}
2262
2263	pci_set_master(pdev);
2264	pci_try_set_mwi(pdev);
2265
2266	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2267	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2268		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2269		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2270			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2271			retval = -ENODEV;
2272			goto out_disable_device;
2273		}
2274
2275	/* Initialize the card */
2276	if (twa_reset_sequence(tw_dev, 0)) {
2277		retval = -ENODEV;
2278		goto out_disable_device;
2279	}
2280
2281	/* Now setup the interrupt handler */
2282	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2283	if (retval) {
2284		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2285		retval = -ENODEV;
2286		goto out_disable_device;
2287	}
2288
2289	/* Now enable MSI if enabled */
2290	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2291		pci_enable_msi(pdev);
2292
2293	/* Re-enable interrupts on the card */
2294	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2295
2296	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2297	return 0;
2298
2299out_disable_device:
2300	scsi_remove_host(host);
2301	pci_disable_device(pdev);
2302
2303	return retval;
2304} /* End twa_resume() */
2305#endif
2306
2307/* PCI Devices supported by this driver */
2308static struct pci_device_id twa_pci_tbl[] __devinitdata = {
2309	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2310	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2311	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2312	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2313	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2314	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2315	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2316	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2317	{ }
2318};
2319MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2320
2321/* pci_driver initializer */
2322static struct pci_driver twa_driver = {
2323	.name		= "3w-9xxx",
2324	.id_table	= twa_pci_tbl,
2325	.probe		= twa_probe,
2326	.remove		= twa_remove,
2327#ifdef CONFIG_PM
2328	.suspend	= twa_suspend,
2329	.resume		= twa_resume,
2330#endif
2331	.shutdown	= twa_shutdown
2332};
2333
2334/* This function is called on driver initialization */
2335static int __init twa_init(void)
2336{
2337	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2338
2339	return pci_register_driver(&twa_driver);
2340} /* End twa_init() */
2341
2342/* This function is called on driver exit */
2343static void __exit twa_exit(void)
2344{
2345	pci_unregister_driver(&twa_driver);
2346} /* End twa_exit() */
2347
2348module_init(twa_init);
2349module_exit(twa_exit);
2350
v6.13.7
   1/*
   2   3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
   3
   4   Written By: Adam Radford <aradford@gmail.com>
   5   Modifications By: Tom Couch
   6
   7   Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
   8   Copyright (C) 2010 LSI Corporation.
   9
  10   This program is free software; you can redistribute it and/or modify
  11   it under the terms of the GNU General Public License as published by
  12   the Free Software Foundation; version 2 of the License.
  13
  14   This program is distributed in the hope that it will be useful,
  15   but WITHOUT ANY WARRANTY; without even the implied warranty of
  16   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17   GNU General Public License for more details.
  18
  19   NO WARRANTY
  20   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  21   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  22   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  23   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  24   solely responsible for determining the appropriateness of using and
  25   distributing the Program and assumes all risks associated with its
  26   exercise of rights under this Agreement, including but not limited to
  27   the risks and costs of program errors, damage to or loss of data,
  28   programs or equipment, and unavailability or interruption of operations.
  29
  30   DISCLAIMER OF LIABILITY
  31   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  32   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  33   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  34   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  35   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  36   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  37   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  38
  39   You should have received a copy of the GNU General Public License
  40   along with this program; if not, write to the Free Software
  41   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  42
  43   Bugs/Comments/Suggestions should be mailed to:
  44   aradford@gmail.com
  45
  46   Note: This version of the driver does not contain a bundled firmware
  47         image.
  48
  49   History
  50   -------
  51   2.26.02.000 - Driver cleanup for kernel submission.
  52   2.26.02.001 - Replace schedule_timeout() calls with msleep().
  53   2.26.02.002 - Add support for PAE mode.
  54                 Add lun support.
  55                 Fix twa_remove() to free irq handler/unregister_chrdev()
  56                 before shutting down card.
  57                 Change to new 'change_queue_depth' api.
  58                 Fix 'handled=1' ISR usage, remove bogus IRQ check.
  59                 Remove un-needed eh_abort handler.
  60                 Add support for embedded firmware error strings.
  61   2.26.02.003 - Correctly handle single sgl's with use_sg=1.
  62   2.26.02.004 - Add support for 9550SX controllers.
  63   2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
  64   2.26.02.006 - Fix 9550SX pchip reset timeout.
  65                 Add big endian support.
  66   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
  67   2.26.02.008 - Free irq handler in __twa_shutdown().
  68                 Serialize reset code.
  69                 Add support for 9650SE controllers.
  70   2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
  71   2.26.02.010 - Add support for 9690SA controllers.
  72   2.26.02.011 - Increase max AENs drained to 256.
  73                 Add MSI support and "use_msi" module parameter.
  74                 Fix bug in twa_get_param() on 4GB+.
  75                 Use pci_resource_len() for ioremap().
  76   2.26.02.012 - Add power management support.
  77   2.26.02.013 - Fix bug in twa_load_sgl().
  78   2.26.02.014 - Force 60 second timeout default.
  79*/
  80
  81#include <linux/module.h>
  82#include <linux/reboot.h>
  83#include <linux/spinlock.h>
  84#include <linux/interrupt.h>
  85#include <linux/moduleparam.h>
  86#include <linux/errno.h>
  87#include <linux/types.h>
  88#include <linux/delay.h>
  89#include <linux/pci.h>
  90#include <linux/time.h>
  91#include <linux/mutex.h>
  92#include <linux/slab.h>
  93#include <asm/io.h>
  94#include <asm/irq.h>
  95#include <linux/uaccess.h>
  96#include <scsi/scsi.h>
  97#include <scsi/scsi_host.h>
  98#include <scsi/scsi_tcq.h>
  99#include <scsi/scsi_cmnd.h>
 100#include "3w-9xxx.h"
 101
 102/* Globals */
 103#define TW_DRIVER_VERSION "2.26.02.014"
 104static DEFINE_MUTEX(twa_chrdev_mutex);
 105static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 106static unsigned int twa_device_extension_count;
 107static int twa_major = -1;
 108extern struct timezone sys_tz;
 109
 110/* Module parameters */
 111MODULE_AUTHOR ("LSI");
 112MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
 113MODULE_LICENSE("GPL");
 114MODULE_VERSION(TW_DRIVER_VERSION);
 115
 116static int use_msi = 0;
 117module_param(use_msi, int, S_IRUGO);
 118MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts.  Default: 0");
 119
 120/* Function prototypes */
 121static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
 122static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
 123static char *twa_aen_severity_lookup(unsigned char severity_code);
 124static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
 125static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 126static int twa_chrdev_open(struct inode *inode, struct file *file);
 127static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
 128static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
 129static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
 130static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
 131			      u32 set_features, unsigned short current_fw_srl,
 132			      unsigned short current_fw_arch_id,
 133			      unsigned short current_fw_branch,
 134			      unsigned short current_fw_build,
 135			      unsigned short *fw_on_ctlr_srl,
 136			      unsigned short *fw_on_ctlr_arch_id,
 137			      unsigned short *fw_on_ctlr_branch,
 138			      unsigned short *fw_on_ctlr_build,
 139			      u32 *init_connect_result);
 140static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
 141static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
 142static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
 143static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
 144static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
 145static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
 146static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
 147				   unsigned char *cdb, int use_sg,
 148				   TW_SG_Entry *sglistarg);
 149static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
  150static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
 151
 152/* Functions */
 153
 154/* Show some statistics about the card */
 155static ssize_t twa_show_stats(struct device *dev,
 156			      struct device_attribute *attr, char *buf)
 157{
 158	struct Scsi_Host *host = class_to_shost(dev);
 159	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
 160	unsigned long flags = 0;
 161	ssize_t len;
 162
 163	spin_lock_irqsave(tw_dev->host->host_lock, flags);
 164	len = sysfs_emit(buf, "3w-9xxx Driver version: %s\n"
 165			 "Current commands posted:   %4d\n"
 166			 "Max commands posted:       %4d\n"
 167			 "Current pending commands:  %4d\n"
 168			 "Max pending commands:      %4d\n"
 169			 "Last sgl length:           %4d\n"
 170			 "Max sgl length:            %4d\n"
 171			 "Last sector count:         %4d\n"
 172			 "Max sector count:          %4d\n"
 173			 "SCSI Host Resets:          %4d\n"
 174			 "AEN's:                     %4d\n",
 175			 TW_DRIVER_VERSION,
 176			 tw_dev->posted_request_count,
 177			 tw_dev->max_posted_request_count,
 178			 tw_dev->pending_request_count,
 179			 tw_dev->max_pending_request_count,
 180			 tw_dev->sgl_entries,
 181			 tw_dev->max_sgl_entries,
 182			 tw_dev->sector_count,
 183			 tw_dev->max_sector_count,
 184			 tw_dev->num_resets,
 185			 tw_dev->aen_count);
 186	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 187	return len;
 188} /* End twa_show_stats() */
  189
 190/* Create sysfs 'stats' entry */
 191static struct device_attribute twa_host_stats_attr = {
 192	.attr = {
 193		.name =		"stats",
 194		.mode =		S_IRUGO,
 195	},
 196	.show = twa_show_stats
 197};
 198
 199/* Host attributes initializer */
 200static struct attribute *twa_host_attrs[] = {
 201	&twa_host_stats_attr.attr,
 202	NULL,
 203};
 204
 205ATTRIBUTE_GROUPS(twa_host);
 206
 207/* File operations struct for character device */
 208static const struct file_operations twa_fops = {
 209	.owner		= THIS_MODULE,
 210	.unlocked_ioctl	= twa_chrdev_ioctl,
 211	.open		= twa_chrdev_open,
 212	.release	= NULL,
 213	.llseek		= noop_llseek,
 214};
 215
 216/*
 217 * The controllers use an inline buffer instead of a mapped SGL for small,
 218 * single entry buffers.  Note that we treat a zero-length transfer like
 219 * a mapped SGL.
 220 */
 221static bool twa_command_mapped(struct scsi_cmnd *cmd)
 222{
 223	return scsi_sg_count(cmd) != 1 ||
 224		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
 225}
 226
 227/* This function will complete an aen request from the isr */
 228static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
 229{
 230	TW_Command_Full *full_command_packet;
 231	TW_Command *command_packet;
 232	TW_Command_Apache_Header *header;
 233	unsigned short aen;
 234	int retval = 1;
 235
 236	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
 237	tw_dev->posted_request_count--;
 238	aen = le16_to_cpu(header->status_block.error);
 239	full_command_packet = tw_dev->command_packet_virt[request_id];
 240	command_packet = &full_command_packet->command.oldcommand;
 241
 242	/* First check for internal completion of set param for time sync */
 243	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
 244		/* Keep reading the queue in case there are more aen's */
 245		if (twa_aen_read_queue(tw_dev, request_id))
 246			goto out2;
 247		else {
 248			retval = 0;
 249			goto out;
 250		}
 251	}
 252
 253	switch (aen) {
 254	case TW_AEN_QUEUE_EMPTY:
 255		/* Quit reading the queue if this is the last one */
 256		break;
 257	case TW_AEN_SYNC_TIME_WITH_HOST:
 258		twa_aen_sync_time(tw_dev, request_id);
 259		retval = 0;
 260		goto out;
 261	default:
 262		twa_aen_queue_event(tw_dev, header);
 263
 264		/* If there are more aen's, keep reading the queue */
 265		if (twa_aen_read_queue(tw_dev, request_id))
 266			goto out2;
 267		else {
 268			retval = 0;
 269			goto out;
 270		}
 271	}
 272	retval = 0;
 273out2:
 274	tw_dev->state[request_id] = TW_S_COMPLETED;
 275	twa_free_request_id(tw_dev, request_id);
 276	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
 277out:
 278	return retval;
 279} /* End twa_aen_complete() */
 280
 281/* This function will drain aen queue */
 282static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
 283{
 284	int request_id = 0;
 285	unsigned char cdb[TW_MAX_CDB_LEN];
 286	TW_SG_Entry sglist[1];
 287	int finished = 0, count = 0;
 288	TW_Command_Full *full_command_packet;
 289	TW_Command_Apache_Header *header;
 290	unsigned short aen;
 291	int first_reset = 0, queue = 0, retval = 1;
 292
 293	if (no_check_reset)
 294		first_reset = 0;
 295	else
 296		first_reset = 1;
 297
 298	full_command_packet = tw_dev->command_packet_virt[request_id];
 299	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 300
 301	/* Initialize cdb */
 302	memset(&cdb, 0, TW_MAX_CDB_LEN);
 303	cdb[0] = REQUEST_SENSE; /* opcode */
 304	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
 305
 306	/* Initialize sglist */
 307	memset(&sglist, 0, sizeof(TW_SG_Entry));
 308	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
 309	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 310
 311	if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) {
 312		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
 313		goto out;
 314	}
 315
 316	/* Mark internal command */
 317	tw_dev->srb[request_id] = NULL;
 318
 319	do {
 320		/* Send command to the board */
 321		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
 322			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
 323			goto out;
 324		}
 325
 326		/* Now poll for completion */
 327		if (twa_poll_response(tw_dev, request_id, 30)) {
 328			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
 329			tw_dev->posted_request_count--;
 330			goto out;
 331		}
 332
 333		tw_dev->posted_request_count--;
 334		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
 335		aen = le16_to_cpu(header->status_block.error);
 336		queue = 0;
 337		count++;
 338
 339		switch (aen) {
 340		case TW_AEN_QUEUE_EMPTY:
 341			if (first_reset != 1)
 342				goto out;
 343			else
 344				finished = 1;
 345			break;
 346		case TW_AEN_SOFT_RESET:
 347			if (first_reset == 0)
 348				first_reset = 1;
 349			else
 350				queue = 1;
 351			break;
 352		case TW_AEN_SYNC_TIME_WITH_HOST:
 353			break;
 354		default:
 355			queue = 1;
 356		}
 357
 358		/* Now queue an event info */
 359		if (queue)
 360			twa_aen_queue_event(tw_dev, header);
 361	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
 362
 363	if (count == TW_MAX_AEN_DRAIN)
 364		goto out;
 365
 366	retval = 0;
 367out:
 368	tw_dev->state[request_id] = TW_S_INITIAL;
 369	return retval;
 370} /* End twa_aen_drain_queue() */
 371
 372/* This function will queue an event */
 373static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 374{
  375	u32 local_time;
 376	TW_Event *event;
 377	unsigned short aen;
 378	char host[16];
 379	char *error_str;
 380
 381	tw_dev->aen_count++;
 382
 383	/* Fill out event info */
 384	event = tw_dev->event_queue[tw_dev->error_index];
 385
 386	/* Check for clobber */
 387	host[0] = '\0';
 388	if (tw_dev->host) {
 389		sprintf(host, " scsi%d:", tw_dev->host->host_no);
 390		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
 391			tw_dev->aen_clobber = 1;
 392	}
 393
 394	aen = le16_to_cpu(header->status_block.error);
 395	memset(event, 0, sizeof(TW_Event));
 396
 397	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
 398	/* event->time_stamp_sec overflows in y2106 */
 399	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
 400	event->time_stamp_sec = local_time;
 401	event->aen_code = aen;
 402	event->retrieved = TW_AEN_NOT_RETRIEVED;
 403	event->sequence_id = tw_dev->error_sequence_id;
 404	tw_dev->error_sequence_id++;
 405
 406	/* Check for embedded error string */
 407	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
 408
 409	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
 410	event->parameter_len = strlen(header->err_specific_desc);
 411	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
 412	if (event->severity != TW_AEN_SEVERITY_DEBUG)
 413		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
 414		       host,
 415		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
 416		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
 417		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
 418		       header->err_specific_desc);
 419	else
 420		tw_dev->aen_count--;
 421
 422	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
 423		tw_dev->event_queue_wrapped = 1;
 424	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
 425} /* End twa_aen_queue_event() */
 426
 427/* This function will read the aen queue from the isr */
 428static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
 429{
 430	unsigned char cdb[TW_MAX_CDB_LEN];
 431	TW_SG_Entry sglist[1];
 432	TW_Command_Full *full_command_packet;
 433	int retval = 1;
 434
 435	full_command_packet = tw_dev->command_packet_virt[request_id];
 436	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 437
 438	/* Initialize cdb */
 439	memset(&cdb, 0, TW_MAX_CDB_LEN);
 440	cdb[0] = REQUEST_SENSE; /* opcode */
 441	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
 442
 443	/* Initialize sglist */
 444	memset(&sglist, 0, sizeof(TW_SG_Entry));
 445	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
 446	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 447
 448	/* Mark internal command */
 449	tw_dev->srb[request_id] = NULL;
 450
 451	/* Now post the command packet */
 452	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
 453		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
 454		goto out;
 455	}
 456	retval = 0;
 457out:
 458	return retval;
 459} /* End twa_aen_read_queue() */
 460
 461/* This function will look up an AEN severity string */
 462static char *twa_aen_severity_lookup(unsigned char severity_code)
 463{
 464	char *retval = NULL;
 465
 466	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
 467	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
 468		goto out;
 469
 470	retval = twa_aen_severity_table[severity_code];
 471out:
 472	return retval;
 473} /* End twa_aen_severity_lookup() */
 474
 475/* This function will sync firmware time with the host time */
 476static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 477{
  478	u32 schedulertime;
 479	TW_Command_Full *full_command_packet;
 480	TW_Command *command_packet;
 481	TW_Param_Apache *param;
 482	time64_t local_time;
 483
 484	/* Fill out the command packet */
 485	full_command_packet = tw_dev->command_packet_virt[request_id];
 486	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 487	command_packet = &full_command_packet->command.oldcommand;
 488	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
 489	command_packet->request_id = request_id;
 490	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 491	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
 492	command_packet->size = TW_COMMAND_SIZE;
 493	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
 494
 495	/* Setup the param */
 496	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
 497	memset(param, 0, TW_SECTOR_SIZE);
 498	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
 499	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
 500	param->parameter_size_bytes = cpu_to_le16(4);
 501
 502	/* Convert system time in UTC to local time seconds since last
 503           Sunday 12:00AM */
 504	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
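	/* 604800 is one week in seconds; the epoch was a Thursday, so shifting back 3 days (equivalent to +4 days mod a week) anchors the remainder at Sunday midnight */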
  505	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
 506
 507	memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32));
 508
 509	/* Mark internal command */
 510	tw_dev->srb[request_id] = NULL;
 511
 512	/* Now post the command */
 513	twa_post_command_packet(tw_dev, request_id, 1);
 514} /* End twa_aen_sync_time() */
 515
 516/* This function will allocate memory and check if it is correctly aligned */
 517static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 518{
 519	int i;
 520	dma_addr_t dma_handle;
 521	unsigned long *cpu_addr;
 522	int retval = 1;
 523
 524	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
 525			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
 526	if (!cpu_addr) {
 527		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
 528		goto out;
 529	}
 530
 531	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
 532		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
 533		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
 534				cpu_addr, dma_handle);
 535		goto out;
 536	}
 537
 538	memset(cpu_addr, 0, size*TW_Q_LENGTH);
 539
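	/* which == 0 carves the area into per-request command packets, which == 1 into per-request generic buffers */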
 540	for (i = 0; i < TW_Q_LENGTH; i++) {
 541		switch(which) {
 542		case 0:
 543			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
 544			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
 545			break;
 546		case 1:
 547			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
 548			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
 549			break;
 550		}
 551	}
 552	retval = 0;
 553out:
 554	return retval;
 555} /* End twa_allocate_memory() */
 556
 557/* This function will check the status register for unexpected bits */
 558static int twa_check_bits(u32 status_reg_value)
 559{
 560	int retval = 1;
 561
 562	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
 563		goto out;
 564	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
 565		goto out;
 566
 567	retval = 0;
 568out:
 569	return retval;
 570} /* End twa_check_bits() */
 571
 572/* This function will check the srl and decide if we are compatible  */
 573static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
 574{
 575	int retval = 1;
 576	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
 577	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
 578	u32 init_connect_result = 0;
 579
 580	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
 581			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
 582			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
 583			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
 584			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
 585			       &fw_on_ctlr_build, &init_connect_result)) {
 586		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
 587		goto out;
 588	}
 589
 590	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
 591	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
 592	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
 593
 594	/* Try base mode compatibility */
 595	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
 596		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
 597				       TW_EXTENDED_INIT_CONNECT,
 598				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
 599				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
 600				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
 601				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
 602				       &init_connect_result)) {
 603			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
 604			goto out;
 605		}
 606		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
 607			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
 608				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
 609			} else {
 610				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
 611			}
 612			goto out;
 613		}
 614		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
 615		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
 616		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
 617	}
 618
 619	/* Load rest of compatibility struct */
 620	strscpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
 621		sizeof(tw_dev->tw_compat_info.driver_version));
 622	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
 623	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
 624	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
 625	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
 626	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
 627	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
 628	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
 629	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
 630	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
 631
 632	retval = 0;
 633out:
 634	return retval;
 635} /* End twa_check_srl() */
 636
 637/* This function handles ioctl for the character device */
 638static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 639{
 640	struct inode *inode = file_inode(file);
 641	long timeout;
 642	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
 643	dma_addr_t dma_handle;
 644	int request_id = 0;
 645	unsigned int sequence_id = 0;
 646	unsigned char event_index, start_index;
 647	TW_Ioctl_Driver_Command driver_command;
 648	TW_Ioctl_Buf_Apache *tw_ioctl;
 649	TW_Lock *tw_lock;
 650	TW_Command_Full *full_command_packet;
 651	TW_Compatibility_Info *tw_compat_info;
 652	TW_Event *event;
  653	ktime_t current_time;
 654	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
 655	int retval = TW_IOCTL_ERROR_OS_EFAULT;
 656	void __user *argp = (void __user *)arg;
 657
 658	mutex_lock(&twa_chrdev_mutex);
 659
 660	/* Only let one of these through at a time */
 661	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
 662		retval = TW_IOCTL_ERROR_OS_EINTR;
 663		goto out;
 664	}
 665
 666	/* First copy down the driver command */
 667	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
 668		goto out2;
 669
 670	/* Check data buffer size */
 671	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
 672		retval = TW_IOCTL_ERROR_OS_EINVAL;
 673		goto out2;
 674	}
 675
 676	/* Hardware can only do transfers in multiples of 512 bytes */
 677	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
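	/* (len + 511) & ~511 rounds the user buffer length up to the next 512-byte boundary */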
 678
 679	/* Now allocate ioctl buf memory */
 680	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
 681				      sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
 682				      &dma_handle, GFP_KERNEL);
 683	if (!cpu_addr) {
 684		retval = TW_IOCTL_ERROR_OS_ENOMEM;
 685		goto out2;
 686	}
 687
 688	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
 689
 690	/* Now copy down the entire ioctl */
 691	if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length))
 692		goto out3;
 693
 694	/* See which ioctl we are doing */
 695	switch (cmd) {
 696	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
 697		spin_lock_irqsave(tw_dev->host->host_lock, flags);
 698		twa_get_request_id(tw_dev, &request_id);
 699
 700		/* Flag internal command */
 701		tw_dev->srb[request_id] = NULL;
 702
 703		/* Flag chrdev ioctl */
 704		tw_dev->chrdev_request_id = request_id;
 705
 706		full_command_packet = &tw_ioctl->firmware_command;
 707
 708		/* Load request id and sglist for both command types */
 709		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
 710
 711		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
 712
 713		/* Now post the command packet to the controller */
 714		twa_post_command_packet(tw_dev, request_id, 1);
 715		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 716
 717		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
 718
 719		/* Now wait for command to complete */
 720		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
 721
 722		/* We timed out, and didn't get an interrupt */
 723		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
 724			/* Now we need to reset the board */
 725			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
 726			       tw_dev->host->host_no, TW_DRIVER, 0x37,
 727			       cmd);
 728			retval = TW_IOCTL_ERROR_OS_EIO;
 729			twa_reset_device_extension(tw_dev);
 730			goto out3;
 731		}
 732
 733		/* Now copy in the command packet response */
 734		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
 735
 736		/* Now complete the io */
 737		spin_lock_irqsave(tw_dev->host->host_lock, flags);
 738		tw_dev->posted_request_count--;
 739		tw_dev->state[request_id] = TW_S_COMPLETED;
 740		twa_free_request_id(tw_dev, request_id);
 741		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 742		break;
 743	case TW_IOCTL_GET_COMPATIBILITY_INFO:
 744		tw_ioctl->driver_command.status = 0;
 745		/* Copy compatibility struct into ioctl data buffer */
 746		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
 747		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
 748		break;
 749	case TW_IOCTL_GET_LAST_EVENT:
 750		if (tw_dev->event_queue_wrapped) {
 751			if (tw_dev->aen_clobber) {
 752				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 753				tw_dev->aen_clobber = 0;
 754			} else
 755				tw_ioctl->driver_command.status = 0;
 756		} else {
 757			if (!tw_dev->error_index) {
 758				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 759				break;
 760			}
 761			tw_ioctl->driver_command.status = 0;
 762		}
 763		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
 764		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 765		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 766		break;
 767	case TW_IOCTL_GET_FIRST_EVENT:
 768		if (tw_dev->event_queue_wrapped) {
 769			if (tw_dev->aen_clobber) {
 770				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 771				tw_dev->aen_clobber = 0;
 772			} else
 773				tw_ioctl->driver_command.status = 0;
 774			event_index = tw_dev->error_index;
 775		} else {
 776			if (!tw_dev->error_index) {
 777				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 778				break;
 779			}
 780			tw_ioctl->driver_command.status = 0;
 781			event_index = 0;
 782		}
 783		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 784		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 785		break;
 786	case TW_IOCTL_GET_NEXT_EVENT:
 787		event = (TW_Event *)tw_ioctl->data_buffer;
 788		sequence_id = event->sequence_id;
 789		tw_ioctl->driver_command.status = 0;
 790
 791		if (tw_dev->event_queue_wrapped) {
 792			if (tw_dev->aen_clobber) {
 793				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 794				tw_dev->aen_clobber = 0;
 795			}
 796			start_index = tw_dev->error_index;
 797		} else {
 798			if (!tw_dev->error_index) {
 799				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 800				break;
 801			}
 802			start_index = 0;
 803		}
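		/*
		 * Find the slot of the event whose sequence number is one past
		 * the caller's, relative to the oldest queued entry, wrapping
		 * modulo TW_Q_LENGTH.
		 */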
 804		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
 805
 806		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
 807			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
 808				tw_dev->aen_clobber = 1;
 809			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 810			break;
 811		}
 812		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 813		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 814		break;
 815	case TW_IOCTL_GET_PREVIOUS_EVENT:
 816		event = (TW_Event *)tw_ioctl->data_buffer;
 817		sequence_id = event->sequence_id;
 818		tw_ioctl->driver_command.status = 0;
 819
 820		if (tw_dev->event_queue_wrapped) {
 821			if (tw_dev->aen_clobber) {
 822				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 823				tw_dev->aen_clobber = 0;
 824			}
 825			start_index = tw_dev->error_index;
 826		} else {
 827			if (!tw_dev->error_index) {
 828				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 829				break;
 830			}
 831			start_index = 0;
 832		}
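		/*
		 * Same mapping as GET_NEXT_EVENT, but step back to the event
		 * one before the caller's sequence number.
		 */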
 833		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
 834
 835		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
 836			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
 837				tw_dev->aen_clobber = 1;
 838			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 839			break;
 840		}
 841		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 842		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 843		break;
 844	case TW_IOCTL_GET_LOCK:
 845		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
 846		current_time = ktime_get();
 847
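		/*
		 * The ioctl lock is purely time based: grant it if the caller
		 * forces it, nobody holds it, or the previous holder's timeout
		 * has expired; otherwise report how long the caller must wait.
		 */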
 848		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
 849		    ktime_after(current_time, tw_dev->ioctl_time)) {
 850			tw_dev->ioctl_sem_lock = 1;
 851			tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
 852			tw_ioctl->driver_command.status = 0;
 853			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
 854		} else {
 855			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
 856			tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
 857		}
 858		break;
 859	case TW_IOCTL_RELEASE_LOCK:
 860		if (tw_dev->ioctl_sem_lock == 1) {
 861			tw_dev->ioctl_sem_lock = 0;
 862			tw_ioctl->driver_command.status = 0;
 863		} else {
 864			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
 865		}
 866		break;
 867	default:
 868		retval = TW_IOCTL_ERROR_OS_ENOTTY;
 869		goto out3;
 870	}
 871
 872	/* Now copy the entire response to userspace */
 873	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
 874		retval = 0;
 875out3:
 876	/* Now free ioctl buf memory */
 877	dma_free_coherent(&tw_dev->tw_pci_dev->dev,
 878			  sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
 879			  cpu_addr, dma_handle);
 880out2:
 881	mutex_unlock(&tw_dev->ioctl_lock);
 882out:
 883	mutex_unlock(&twa_chrdev_mutex);
 884	return retval;
 885} /* End twa_chrdev_ioctl() */
 886
 887/* This function handles open for the character device */
 888/* NOTE that this function will race with remove. */
 889static int twa_chrdev_open(struct inode *inode, struct file *file)
 890{
 891	unsigned int minor_number;
 892	int retval = TW_IOCTL_ERROR_OS_ENODEV;
 893
 894	if (!capable(CAP_SYS_ADMIN)) {
 895		retval = -EACCES;
 896		goto out;
 897	}
 898
 899	minor_number = iminor(inode);
 900	if (minor_number >= twa_device_extension_count)
 901		goto out;
 902	retval = 0;
 903out:
 904	return retval;
 905} /* End twa_chrdev_open() */
 906
 907/* This function will print readable messages from status register errors */
 908static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
 909{
 910	int retval = 1;
 911
 912	/* Check for various error conditions and handle them appropriately */
 913	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
 914		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
 915		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
 916	}
 917
 918	if (status_reg_value & TW_STATUS_PCI_ABORT) {
 919		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
 920		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
 921		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
 922	}
 923
 924	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
 925		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
 926		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
 927		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
 928			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
 929		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
 930	}
 931
 932	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
 933		if (tw_dev->reset_print == 0) {
 934			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
 935			tw_dev->reset_print = 1;
 936		}
 937		goto out;
 938	}
 939	retval = 0;
 940out:
 941	return retval;
 942} /* End twa_decode_bits() */
 943
 944/* This function will empty the response queue */
 945static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
 946{
 947	u32 status_reg_value;
 948	int count = 0, retval = 1;
 949
 950	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
 951
 952	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
 953		readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
 954		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
 955		count++;
 956	}
 957	if (count == TW_MAX_RESPONSE_DRAIN)
 958		goto out;
 959
 960	retval = 0;
 961out:
 962	return retval;
 963} /* End twa_empty_response_queue() */
 964
 965/* This function will clear the pchip/response queue on 9550SX */
 966static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
 967{
 968	u32 response_que_value = 0;
 969	unsigned long before;
 970	int retval = 1;
 971
 972	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
 973		before = jiffies;
 974		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
 975			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
 976			msleep(1);
 977			if (time_after(jiffies, before + HZ * 30))
 978				goto out;
 979		}
 980		/* P-chip settle time */
 981		msleep(500);
 982		retval = 0;
 983	} else
 984		retval = 0;
 985out:
 986	return retval;
 987} /* End twa_empty_response_queue_large() */
 988
 989/* This function passes sense keys from firmware to scsi layer */
 990static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
 991{
 992	TW_Command_Full *full_command_packet;
 993	unsigned short error;
 994	int retval = 1;
 995	char *error_str;
 996
 997	full_command_packet = tw_dev->command_packet_virt[request_id];
 998
 999	/* Check for embedded error string */
1000	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
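	/* If the firmware embedded a second string it sits just past the NUL of err_specific_desc; error_str[0] stays '\0' when there is none */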
1001
1002	/* Don't print error for Logical unit not supported during rollcall */
1003	error = le16_to_cpu(full_command_packet->header.status_block.error);
1004	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
1005		if (print_host)
1006			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1007			       tw_dev->host->host_no,
1008			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
1009			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
1010			       full_command_packet->header.err_specific_desc);
1011		else
1012			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1013			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
1014			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
1015			       full_command_packet->header.err_specific_desc);
1016	}
1017
1018	if (copy_sense) {
1019		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1020		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1021		retval = TW_ISR_DONT_RESULT;
1022		goto out;
1023	}
1024	retval = 0;
1025out:
1026	return retval;
1027} /* End twa_fill_sense() */
1028
1029/* This function will free up device extension resources */
1030static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1031{
1032	if (tw_dev->command_packet_virt[0])
1033		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
1034				sizeof(TW_Command_Full) * TW_Q_LENGTH,
1035				tw_dev->command_packet_virt[0],
1036				tw_dev->command_packet_phys[0]);
1037
1038	if (tw_dev->generic_buffer_virt[0])
1039		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
1040				TW_SECTOR_SIZE * TW_Q_LENGTH,
1041				tw_dev->generic_buffer_virt[0],
1042				tw_dev->generic_buffer_phys[0]);
1043
1044	kfree(tw_dev->event_queue[0]);
1045} /* End twa_free_device_extension() */
1046
1047/* This function will free a request id */
1048static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1049{
1050	tw_dev->free_queue[tw_dev->free_tail] = request_id;
1051	tw_dev->state[request_id] = TW_S_FINISHED;
1052	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1053} /* End twa_free_request_id() */
1054
1055/* This function will get parameter table entries from the firmware */
1056static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1057{
1058	TW_Command_Full *full_command_packet;
1059	TW_Command *command_packet;
1060	TW_Param_Apache *param;
1061	void *retval = NULL;
1062
1063	/* Setup the command packet */
1064	full_command_packet = tw_dev->command_packet_virt[request_id];
1065	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1066	command_packet = &full_command_packet->command.oldcommand;
1067
1068	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1069	command_packet->size		  = TW_COMMAND_SIZE;
1070	command_packet->request_id	  = request_id;
1071	command_packet->byte6_offset.block_count = cpu_to_le16(1);
1072
1073	/* Now setup the param */
1074	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1075	memset(param, 0, TW_SECTOR_SIZE);
1076	param->table_id = cpu_to_le16(table_id | 0x8000);
1077	param->parameter_id = cpu_to_le16(parameter_id);
1078	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1079
1080	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1081	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1082
1083	/* Post the command packet to the board */
1084	twa_post_command_packet(tw_dev, request_id, 1);
1085
1086	/* Poll for completion */
1087	if (twa_poll_response(tw_dev, request_id, 30))
1088		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1089	else
1090		retval = (void *)&(param->data[0]);
1091
1092	tw_dev->posted_request_count--;
1093	tw_dev->state[request_id] = TW_S_INITIAL;
1094
1095	return retval;
1096} /* End twa_get_param() */
1097
1098/* This function will assign an available request id */
1099static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1100{
1101	*request_id = tw_dev->free_queue[tw_dev->free_head];
1102	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1103	tw_dev->state[*request_id] = TW_S_STARTED;
1104} /* End twa_get_request_id() */
1105
1106/* This function will send an initconnection command to controller */
1107static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1108			      u32 set_features, unsigned short current_fw_srl,
1109			      unsigned short current_fw_arch_id,
1110			      unsigned short current_fw_branch,
1111			      unsigned short current_fw_build,
1112			      unsigned short *fw_on_ctlr_srl,
1113			      unsigned short *fw_on_ctlr_arch_id,
1114			      unsigned short *fw_on_ctlr_branch,
1115			      unsigned short *fw_on_ctlr_build,
1116			      u32 *init_connect_result)
1117{
1118	TW_Command_Full *full_command_packet;
1119	TW_Initconnect *tw_initconnect;
1120	int request_id = 0, retval = 1;
1121
1122	/* Initialize InitConnection command packet */
1123	full_command_packet = tw_dev->command_packet_virt[request_id];
1124	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1125	full_command_packet->header.header_desc.size_header = 128;
1126
1127	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1128	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1129	tw_initconnect->request_id = request_id;
1130	tw_initconnect->message_credits = cpu_to_le16(message_credits);
1131
1132	/* Turn on 64-bit sgl support if we need to */
1133	set_features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
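	/* Bit 0 of the feature word is the 64-bit SGL flag; set it whenever dma_addr_t is wider than 32 bits */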
1134
1135	tw_initconnect->features = cpu_to_le32(set_features);
1136
1137	if (set_features & TW_EXTENDED_INIT_CONNECT) {
1138		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1139		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1140		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1141		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1142		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1143	} else
1144		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1145
1146	/* Send command packet to the board */
1147	twa_post_command_packet(tw_dev, request_id, 1);
1148
1149	/* Poll for completion */
1150	if (twa_poll_response(tw_dev, request_id, 30)) {
1151		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1152	} else {
1153		if (set_features & TW_EXTENDED_INIT_CONNECT) {
1154			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1155			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1156			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1157			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1158			*init_connect_result = le32_to_cpu(tw_initconnect->result);
1159		}
1160		retval = 0;
1161	}
1162
1163	tw_dev->posted_request_count--;
1164	tw_dev->state[request_id] = TW_S_INITIAL;
1165
1166	return retval;
1167} /* End twa_initconnection() */
1168
1169/* This function will initialize the fields of a device extension */
1170static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1171{
1172	int i, retval = 1;
1173
1174	/* Initialize command packet buffers */
1175	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1176		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1177		goto out;
1178	}
1179
1180	/* Initialize generic buffer */
1181	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1182		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1183		goto out;
1184	}
1185
1186	/* Allocate event info space */
1187	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1188	if (!tw_dev->event_queue[0]) {
1189		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1190		goto out;
1191	}
1192
1193
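	/* The event entries were allocated as one contiguous block above; carve it into per-slot pointers and reset the free/state queues */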
1194	for (i = 0; i < TW_Q_LENGTH; i++) {
1195		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1196		tw_dev->free_queue[i] = i;
1197		tw_dev->state[i] = TW_S_INITIAL;
1198	}
1199
1200	tw_dev->pending_head = TW_Q_START;
1201	tw_dev->pending_tail = TW_Q_START;
1202	tw_dev->free_head = TW_Q_START;
1203	tw_dev->free_tail = TW_Q_START;
1204	tw_dev->error_sequence_id = 1;
1205	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1206
1207	mutex_init(&tw_dev->ioctl_lock);
1208	init_waitqueue_head(&tw_dev->ioctl_wqueue);
1209
1210	retval = 0;
1211out:
1212	return retval;
1213} /* End twa_initialize_device_extension() */
1214
1215/* This function is the interrupt service routine */
1216static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1217{
1218	int request_id, error = 0;
1219	u32 status_reg_value;
1220	TW_Response_Queue response_que;
1221	TW_Command_Full *full_command_packet;
1222	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1223	int handled = 0;
1224
1225	/* Get the per adapter lock */
1226	spin_lock(tw_dev->host->host_lock);
1227
1228	/* Read the registers */
1229	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1230
1231	/* Check if this is our interrupt, otherwise bail */
1232	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1233		goto twa_interrupt_bail;
1234
1235	handled = 1;
1236
1237	/* If we are resetting, bail */
1238	if (test_bit(TW_IN_RESET, &tw_dev->flags))
1239		goto twa_interrupt_bail;
1240
1241	/* Check controller for errors */
1242	if (twa_check_bits(status_reg_value)) {
1243		if (twa_decode_bits(tw_dev, status_reg_value)) {
1244			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1245			goto twa_interrupt_bail;
1246		}
1247	}
1248
1249	/* Handle host interrupt */
1250	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1251		TW_CLEAR_HOST_INTERRUPT(tw_dev);
1252
1253	/* Handle attention interrupt */
1254	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1255		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1256		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1257			twa_get_request_id(tw_dev, &request_id);
1258
1259			error = twa_aen_read_queue(tw_dev, request_id);
1260			if (error) {
1261				tw_dev->state[request_id] = TW_S_COMPLETED;
1262				twa_free_request_id(tw_dev, request_id);
1263				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1264			}
1265		}
1266	}
1267
1268	/* Handle command interrupt */
1269	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1270		TW_MASK_COMMAND_INTERRUPT(tw_dev);
1271		/* Drain as many pending commands as we can */
1272		while (tw_dev->pending_request_count > 0) {
1273			request_id = tw_dev->pending_queue[tw_dev->pending_head];
1274			if (tw_dev->state[request_id] != TW_S_PENDING) {
1275				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1276				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1277				goto twa_interrupt_bail;
1278			}
1279			if (twa_post_command_packet(tw_dev, request_id, 1) == 0) {
1280				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1281				tw_dev->pending_request_count--;
1282			} else {
1283				/* If we get here, we will continue re-posting on the next command interrupt */
1284				break;
1285			}
1286		}
1287	}
1288
1289	/* Handle response interrupt */
1290	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1291
1292		/* Drain the response queue from the board */
1293		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1294			/* Complete the response */
1295			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1296			request_id = TW_RESID_OUT(response_que.response_id);
1297			full_command_packet = tw_dev->command_packet_virt[request_id];
1298			error = 0;
1299			/* Check for command packet errors */
1300			if (full_command_packet->command.newcommand.status != 0) {
1301				if (tw_dev->srb[request_id] != NULL) {
1302					error = twa_fill_sense(tw_dev, request_id, 1, 1);
1303				} else {
1304					/* Skip ioctl error prints */
1305					if (request_id != tw_dev->chrdev_request_id) {
1306						error = twa_fill_sense(tw_dev, request_id, 0, 1);
1307					}
1308				}
1309			}
1310
1311			/* Check for correct state */
1312			if (tw_dev->state[request_id] != TW_S_POSTED) {
1313				if (tw_dev->srb[request_id] != NULL) {
1314					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1315					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1316					goto twa_interrupt_bail;
1317				}
1318			}
1319
1320			/* Check for internal command completion */
1321			if (tw_dev->srb[request_id] == NULL) {
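				/* A NULL srb means a driver-internal request: either an AEN read or the single outstanding chrdev ioctl */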
1322				if (request_id != tw_dev->chrdev_request_id) {
1323					if (twa_aen_complete(tw_dev, request_id))
1324						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1325				} else {
1326					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1327					wake_up(&tw_dev->ioctl_wqueue);
1328				}
1329			} else {
1330				struct scsi_cmnd *cmd;
1331
1332				cmd = tw_dev->srb[request_id];
1333
1334				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1335				/* If there was no error, the command was a success */
1336				if (error == 0) {
1337					cmd->result = (DID_OK << 16);
1338				}
1339
1340				/* If error, command failed */
1341				if (error == 1) {
1342					/* Ask for a host reset */
1343					cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
1344				}
1345
1346				/* Report residual bytes for single sgl */
1347				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1348					u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length);
1349
1350					if (length < scsi_bufflen(cmd))
1351						scsi_set_resid(cmd, scsi_bufflen(cmd) - length);
1352				}
1353
1354				/* Now complete the io */
1355				if (twa_command_mapped(cmd))
1356					scsi_dma_unmap(cmd);
1357				scsi_done(cmd);
1358				tw_dev->state[request_id] = TW_S_COMPLETED;
1359				twa_free_request_id(tw_dev, request_id);
1360				tw_dev->posted_request_count--;
1361			}
1362
1363			/* Check for valid status after each drain */
1364			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1365			if (twa_check_bits(status_reg_value)) {
1366				if (twa_decode_bits(tw_dev, status_reg_value)) {
1367					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1368					goto twa_interrupt_bail;
1369				}
1370			}
1371		}
1372	}
1373
1374twa_interrupt_bail:
1375	spin_unlock(tw_dev->host->host_lock);
1376	return IRQ_RETVAL(handled);
1377} /* End twa_interrupt() */
1378
1379/* This function will load the request id and various sgls for ioctls */
1380static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1381{
1382	TW_Command *oldcommand;
1383	TW_Command_Apache *newcommand;
1384	TW_SG_Entry *sgl;
1385	unsigned int pae = 0;
1386
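	/* pae is set on 32-bit kernels that use 64-bit DMA addresses; the old-style ioctl command is then one 32-bit word larger (size += pae below) to hold the wider SGL entry */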
1387	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1388		pae = 1;
1389
1390	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1391		newcommand = &full_command_packet->command.newcommand;
1392		newcommand->request_id__lunl =
1393			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id);
1394		if (length) {
1395			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
1396			newcommand->sg_list[0].length = cpu_to_le32(length);
1397		}
1398		newcommand->sgl_entries__lunh =
1399			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0);
1400	} else {
1401		oldcommand = &full_command_packet->command.oldcommand;
1402		oldcommand->request_id = request_id;
1403
1404		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1405			/* Load the sg list */
1406			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1407				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1408			else
1409				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1410			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
1411			sgl->length = cpu_to_le32(length);
1412
1413			oldcommand->size += pae;
1414		}
1415	}
1416} /* End twa_load_sgl() */
1417
1418/* This function will poll for a response interrupt of a request */
1419static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1420{
1421	int retval = 1, found = 0, response_request_id;
1422	TW_Response_Queue response_queue;
1423	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1424
1425	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1426		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1427		response_request_id = TW_RESID_OUT(response_queue.response_id);
1428		if (request_id != response_request_id) {
1429			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1430			goto out;
1431		}
1432		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1433			if (full_command_packet->command.newcommand.status != 0) {
1434				/* bad response */
1435				twa_fill_sense(tw_dev, request_id, 0, 0);
1436				goto out;
1437			}
1438			found = 1;
1439		} else {
1440			if (full_command_packet->command.oldcommand.status != 0) {
1441				/* bad response */
1442				twa_fill_sense(tw_dev, request_id, 0, 0);
1443				goto out;
1444			}
1445			found = 1;
1446		}
1447	}
1448
1449	if (found)
1450		retval = 0;
1451out:
1452	return retval;
1453} /* End twa_poll_response() */
1454
1455/* This function will poll the status register for a flag */
1456static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1457{
1458	u32 status_reg_value;
1459	unsigned long before;
1460	int retval = 1;
1461
1462	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1463	before = jiffies;
1464
1465	if (twa_check_bits(status_reg_value))
1466		twa_decode_bits(tw_dev, status_reg_value);
1467
1468	while ((status_reg_value & flag) != flag) {
1469		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1470
1471		if (twa_check_bits(status_reg_value))
1472			twa_decode_bits(tw_dev, status_reg_value);
1473
1474		if (time_after(jiffies, before + HZ * seconds))
1475			goto out;
1476
1477		msleep(50);
1478	}
1479	retval = 0;
1480out:
1481	return retval;
1482} /* End twa_poll_status() */
1483
1484/* This function will poll the status register for disappearance of a flag */
1485static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1486{
1487	u32 status_reg_value;
1488	unsigned long before;
1489	int retval = 1;
1490
1491	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1492	before = jiffies;
1493
1494	if (twa_check_bits(status_reg_value))
1495		twa_decode_bits(tw_dev, status_reg_value);
1496
1497	while ((status_reg_value & flag) != 0) {
1498		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1499		if (twa_check_bits(status_reg_value))
1500			twa_decode_bits(tw_dev, status_reg_value);
1501
1502		if (time_after(jiffies, before + HZ * seconds))
1503			goto out;
1504
1505		msleep(50);
1506	}
1507	retval = 0;
1508out:
1509	return retval;
1510} /* End twa_poll_status_gone() */
1511
1512/* This function will attempt to post a command packet to the board */
1513static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1514{
1515	u32 status_reg_value;
1516	dma_addr_t command_que_value;
1517	int retval = 1;
1518
1519	command_que_value = tw_dev->command_packet_phys[request_id];
1520
1521	/* For 9650SE write low 4 bytes first */
1522	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1523	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1524		command_que_value += TW_COMMAND_OFFSET;
1525		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1526	}
1527
1528	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1529
1530	if (twa_check_bits(status_reg_value))
1531		twa_decode_bits(tw_dev, status_reg_value);
1532
1533	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1534
1535		/* Only pend internal driver commands */
1536		if (!internal) {
1537			retval = SCSI_MLQUEUE_HOST_BUSY;
1538			goto out;
1539		}
1540
1541		/* Couldn't post the command packet, so we do it later */
1542		if (tw_dev->state[request_id] != TW_S_PENDING) {
1543			tw_dev->state[request_id] = TW_S_PENDING;
1544			tw_dev->pending_request_count++;
1545			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1546				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1547			}
1548			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1549			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1550		}
1551		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1552		goto out;
1553	} else {
1554		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1555		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1556			/* Now write upper 4 bytes */
1557			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1558		} else {
1559			if (sizeof(dma_addr_t) > 4) {
1560				command_que_value += TW_COMMAND_OFFSET;
1561				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1562				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1563			} else {
1564				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1565			}
1566		}
1567		tw_dev->state[request_id] = TW_S_POSTED;
1568		tw_dev->posted_request_count++;
1569		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1570			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1571		}
1572	}
1573	retval = 0;
1574out:
1575	return retval;
1576} /* End twa_post_command_packet() */
1577
1578/* This function will reset a device extension */
1579static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1580{
1581	int i = 0;
1582	int retval = 1;
1583	unsigned long flags = 0;
1584
1585	set_bit(TW_IN_RESET, &tw_dev->flags);
1586	TW_DISABLE_INTERRUPTS(tw_dev);
1587	TW_MASK_COMMAND_INTERRUPT(tw_dev);
1588	spin_lock_irqsave(tw_dev->host->host_lock, flags);
1589
1590	/* Abort all requests that are in progress */
1591	for (i = 0; i < TW_Q_LENGTH; i++) {
1592		if ((tw_dev->state[i] != TW_S_FINISHED) &&
1593		    (tw_dev->state[i] != TW_S_INITIAL) &&
1594		    (tw_dev->state[i] != TW_S_COMPLETED)) {
1595			if (tw_dev->srb[i]) {
1596				struct scsi_cmnd *cmd = tw_dev->srb[i];
1597
1598				cmd->result = (DID_RESET << 16);
1599				if (twa_command_mapped(cmd))
1600					scsi_dma_unmap(cmd);
1601				scsi_done(cmd);
1602			}
1603		}
1604	}
1605
1606	/* Reset queues and counts */
1607	for (i = 0; i < TW_Q_LENGTH; i++) {
1608		tw_dev->free_queue[i] = i;
1609		tw_dev->state[i] = TW_S_INITIAL;
1610	}
1611	tw_dev->free_head = TW_Q_START;
1612	tw_dev->free_tail = TW_Q_START;
1613	tw_dev->posted_request_count = 0;
1614	tw_dev->pending_request_count = 0;
1615	tw_dev->pending_head = TW_Q_START;
1616	tw_dev->pending_tail = TW_Q_START;
1617	tw_dev->reset_print = 0;
1618
1619	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1620
1621	if (twa_reset_sequence(tw_dev, 1))
1622		goto out;
1623
1624	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1625	clear_bit(TW_IN_RESET, &tw_dev->flags);
1626	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1627
1628	retval = 0;
1629out:
1630	return retval;
1631} /* End twa_reset_device_extension() */
1632
1633/* This function will reset a controller */
1634static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1635{
1636	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1637
1638	while (tries < TW_MAX_RESET_TRIES) {
1639		if (do_soft_reset) {
1640			TW_SOFT_RESET(tw_dev);
1641			/* Clear pchip/response queue on 9550SX */
1642			if (twa_empty_response_queue_large(tw_dev)) {
1643				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1644				do_soft_reset = 1;
1645				tries++;
1646				continue;
1647			}
1648		}
1649
1650		/* Make sure controller is in a good state */
1651		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1652			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1653			do_soft_reset = 1;
1654			tries++;
1655			continue;
1656		}
1657
1658		/* Empty response queue */
1659		if (twa_empty_response_queue(tw_dev)) {
1660			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1661			do_soft_reset = 1;
1662			tries++;
1663			continue;
1664		}
1665
1666		flashed = 0;
1667
1668		/* Check for compatibility/flash */
1669		if (twa_check_srl(tw_dev, &flashed)) {
1670			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1671			do_soft_reset = 1;
1672			tries++;
1673			continue;
1674		} else {
1675			if (flashed) {
1676				tries++;
1677				continue;
1678			}
1679		}
1680
1681		/* Drain the AEN queue */
1682		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1683			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1684			do_soft_reset = 1;
1685			tries++;
1686			continue;
1687		}
1688
1689		/* If we got here, controller is in a good state */
1690		retval = 0;
1691		goto out;
1692	}
1693out:
1694	return retval;
1695} /* End twa_reset_sequence() */
1696
1697/* This function returns unit geometry in cylinders/heads/sectors */
1698static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1699{
1700	int heads, sectors, cylinders;
1701
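	/* Heuristic geometry: 255 heads/63 sectors for units of 1GB (0x200000 512-byte sectors) and larger, otherwise 64/32; sector_div() leaves the cylinder count */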
1702	if (capacity >= 0x200000) {
1703		heads = 255;
1704		sectors = 63;
1705		cylinders = sector_div(capacity, heads * sectors);
1706	} else {
1707		heads = 64;
1708		sectors = 32;
1709		cylinders = sector_div(capacity, heads * sectors);
1710	}
1711
1712	geom[0] = heads;
1713	geom[1] = sectors;
1714	geom[2] = cylinders;
1715
1716	return 0;
1717} /* End twa_scsi_biosparam() */
1718
1719/* This is the new scsi eh reset function */
1720static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1721{
1722	TW_Device_Extension *tw_dev = NULL;
1723	int retval = FAILED;
1724
1725	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1726
1727	tw_dev->num_resets++;
1728
1729	sdev_printk(KERN_WARNING, SCpnt->device,
1730		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1731		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1732
1733	/* Make sure we are not issuing an ioctl or resetting from ioctl */
1734	mutex_lock(&tw_dev->ioctl_lock);
1735
1736	/* Now reset the card and some of the device extension data */
1737	if (twa_reset_device_extension(tw_dev)) {
1738		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1739		goto out;
1740	}
1741
1742	retval = SUCCESS;
1743out:
1744	mutex_unlock(&tw_dev->ioctl_lock);
1745	return retval;
1746} /* End twa_scsi_eh_reset() */
1747
1748/* This is the main scsi queue function to handle scsi opcodes */
1749static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt)
1750{
1751	void (*done)(struct scsi_cmnd *) = scsi_done;
1752	int request_id, retval;
1753	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1754
1755	/* If we are resetting due to timed out ioctl, report as busy */
1756	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1757		retval = SCSI_MLQUEUE_HOST_BUSY;
1758		goto out;
1759	}
1760
1761	/* Check if this FW supports luns */
1762	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1763		SCpnt->result = (DID_BAD_TARGET << 16);
1764		done(SCpnt);
1765		retval = 0;
1766		goto out;
1767	}
1768
1769	/* Get a free request id */
1770	twa_get_request_id(tw_dev, &request_id);
1771
1772	/* Save the scsi command for use by the ISR */
1773	tw_dev->srb[request_id] = SCpnt;
1774
1775	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1776	switch (retval) {
1777	case SCSI_MLQUEUE_HOST_BUSY:
1778		if (twa_command_mapped(SCpnt))
1779			scsi_dma_unmap(SCpnt);
1780		twa_free_request_id(tw_dev, request_id);
1781		break;
1782	case 1:
1783		SCpnt->result = (DID_ERROR << 16);
1784		if (twa_command_mapped(SCpnt))
1785			scsi_dma_unmap(SCpnt);
1786		done(SCpnt);
1787		tw_dev->state[request_id] = TW_S_COMPLETED;
1788		twa_free_request_id(tw_dev, request_id);
1789		retval = 0;
1790	}
1791out:
1792	return retval;
1793} /* End twa_scsi_queue() */
1794
1795static DEF_SCSI_QCMD(twa_scsi_queue)
1796
1797/* This function hands scsi cdb's to the firmware */
1798static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1799				   unsigned char *cdb, int use_sg,
1800				   TW_SG_Entry *sglistarg)
1801{
1802	TW_Command_Full *full_command_packet;
1803	TW_Command_Apache *command_packet;
1804	u32 num_sectors = 0x0;
1805	int i, sg_count;
1806	struct scsi_cmnd *srb = NULL;
1807	struct scatterlist *sg;
1808	int retval = 1;
1809
1810	if (tw_dev->srb[request_id])
1811		srb = tw_dev->srb[request_id];
1812
1813	/* Initialize command packet */
1814	full_command_packet = tw_dev->command_packet_virt[request_id];
1815	full_command_packet->header.header_desc.size_header = 128;
1816	full_command_packet->header.status_block.error = 0;
1817	full_command_packet->header.status_block.severity__reserved = 0;
1818
1819	command_packet = &full_command_packet->command.newcommand;
1820	command_packet->status = 0;
1821	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1822
1823	/* We forced 16-byte cdb use earlier */
1824	if (!cdb)
1825		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1826	else
1827		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1828
1829	if (srb) {
1830		command_packet->unit = srb->device->id;
1831		command_packet->request_id__lunl =
1832			TW_REQ_LUN_IN(srb->device->lun, request_id);
1833	} else {
1834		command_packet->request_id__lunl =
1835			TW_REQ_LUN_IN(0, request_id);
1836		command_packet->unit = 0;
1837	}
1838
1839	command_packet->sgl_offset = 16;
1840
1841	if (!sglistarg) {
1842		/* Map sglist from scsi layer to cmd packet */
1843
1844		if (scsi_sg_count(srb)) {
1845			if (!twa_command_mapped(srb)) {
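				/* Commands that were not DMA-mapped are bounced through the per-request generic buffer instead of the scatterlist */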
1846				if (srb->sc_data_direction == DMA_TO_DEVICE ||
1847				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
1848					scsi_sg_copy_to_buffer(srb,
1849							       tw_dev->generic_buffer_virt[request_id],
1850							       TW_SECTOR_SIZE);
1851				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1852				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1853			} else {
1854				sg_count = scsi_dma_map(srb);
1855				if (sg_count < 0)
1856					goto out;
1857
1858				scsi_for_each_sg(srb, sg, sg_count, i) {
1859					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1860					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1861					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1862						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1863						goto out;
1864					}
1865				}
1866			}
1867			command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]));
1868		}
1869	} else {
1870		/* Internal cdb post */
1871		for (i = 0; i < use_sg; i++) {
1872			command_packet->sg_list[i].address = sglistarg[i].address;
1873			command_packet->sg_list[i].length = sglistarg[i].length;
1874			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1875				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1876				goto out;
1877			}
1878		}
1879		command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg);
1880	}
1881
1882	if (srb) {
1883		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1884			num_sectors = (u32)srb->cmnd[4];
1885
1886		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1887			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1888	}
1889
1890	/* Update sector statistic */
1891	tw_dev->sector_count = num_sectors;
1892	if (tw_dev->sector_count > tw_dev->max_sector_count)
1893		tw_dev->max_sector_count = tw_dev->sector_count;
1894
1895	/* Update SG statistics */
1896	if (srb) {
1897		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1898		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1899			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1900	}
1901
1902	/* Now post the command to the board */
1903	if (srb) {
1904		retval = twa_post_command_packet(tw_dev, request_id, 0);
1905	} else {
1906		twa_post_command_packet(tw_dev, request_id, 1);
1907		retval = 0;
1908	}
1909out:
1910	return retval;
1911} /* End twa_scsiop_execute_scsi() */
1912
1913/* This function completes an execute scsi operation */
1914static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1915{
1916	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1917
1918	if (!twa_command_mapped(cmd) &&
1919	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1920	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1921		if (scsi_sg_count(cmd) == 1) {
1922			void *buf = tw_dev->generic_buffer_virt[request_id];
1923
1924			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1925		}
1926	}
1927} /* End twa_scsiop_execute_scsi_complete() */
1928
1929/* This function tells the controller to shut down */
1930static void __twa_shutdown(TW_Device_Extension *tw_dev)
1931{
1932	/* Disable interrupts */
1933	TW_DISABLE_INTERRUPTS(tw_dev);
1934
1935	/* Free up the IRQ */
1936	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1937
1938	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1939
1940	/* Tell the card we are shutting down */
1941	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1942		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1943	} else {
1944		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1945	}
1946
1947	/* Clear all interrupts just before exit */
1948	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1949} /* End __twa_shutdown() */
1950
1951/* Wrapper for __twa_shutdown */
1952static void twa_shutdown(struct pci_dev *pdev)
1953{
1954	struct Scsi_Host *host = pci_get_drvdata(pdev);
1955	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1956
1957	__twa_shutdown(tw_dev);
1958} /* End twa_shutdown() */
1959
1960/* This function will look up a string */
1961static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1962{
1963	int index;
1964
1965	for (index = 0; ((code != table[index].code) &&
1966		      (table[index].text != (char *)0)); index++);
1967	return(table[index].text);
1968} /* End twa_string_lookup() */
1969
1970/* This function gets called when a disk is coming on-line */
1971static int twa_slave_configure(struct scsi_device *sdev)
1972{
1973	/* Force 60 second timeout */
1974	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1975
1976	return 0;
1977} /* End twa_slave_configure() */
1978
1979static const struct scsi_host_template driver_template = {
1980	.module			= THIS_MODULE,
1981	.name			= "3ware 9000 Storage Controller",
1982	.queuecommand		= twa_scsi_queue,
1983	.eh_host_reset_handler	= twa_scsi_eh_reset,
1984	.bios_param		= twa_scsi_biosparam,
1985	.change_queue_depth	= scsi_change_queue_depth,
1986	.can_queue		= TW_Q_LENGTH-2,
1987	.slave_configure	= twa_slave_configure,
1988	.this_id		= -1,
1989	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
1990	.max_sectors		= TW_MAX_SECTORS,
1991	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
1992	.shost_groups		= twa_host_groups,
1993	.emulated		= 1,
1994	.no_write_same		= 1,
1995};
1996
1997/* This function will probe and initialize a card */
1998static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1999{
2000	struct Scsi_Host *host = NULL;
2001	TW_Device_Extension *tw_dev;
2002	unsigned long mem_addr, mem_len;
2003	int retval;
2004
2005	retval = pci_enable_device(pdev);
2006	if (retval) {
2007		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2008		return -ENODEV;
2009	}
2010
2011	pci_set_master(pdev);
2012	pci_try_set_mwi(pdev);
2013
2014	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2015	if (retval)
2016		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2017	if (retval) {
2018		TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2019		retval = -ENODEV;
2020		goto out_disable_device;
2021	}
2022
2023	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2024	if (!host) {
2025		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2026		retval = -ENOMEM;
2027		goto out_disable_device;
2028	}
2029	tw_dev = (TW_Device_Extension *)host->hostdata;
2030
2031	/* Save values to device extension */
2032	tw_dev->host = host;
2033	tw_dev->tw_pci_dev = pdev;
2034
2035	if (twa_initialize_device_extension(tw_dev)) {
2036		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2037		retval = -ENOMEM;
2038		goto out_free_device_extension;
2039	}
2040
2041	/* Request IO regions */
2042	retval = pci_request_regions(pdev, "3w-9xxx");
2043	if (retval) {
2044		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2045		goto out_free_device_extension;
2046	}
2047
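	/* The original 9000 controller exposes its registers in BAR 1; the 9550SX/9650SE/9690SA use BAR 2 */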
2048	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2049		mem_addr = pci_resource_start(pdev, 1);
2050		mem_len = pci_resource_len(pdev, 1);
2051	} else {
2052		mem_addr = pci_resource_start(pdev, 2);
2053		mem_len = pci_resource_len(pdev, 2);
2054	}
2055
2056	/* Save base address */
2057	tw_dev->base_addr = ioremap(mem_addr, mem_len);
2058	if (!tw_dev->base_addr) {
2059		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2060		retval = -ENOMEM;
2061		goto out_release_mem_region;
2062	}
2063
2064	/* Disable interrupts on the card */
2065	TW_DISABLE_INTERRUPTS(tw_dev);
2066
2067	/* Initialize the card */
2068	if (twa_reset_sequence(tw_dev, 0)) {
2069		retval = -ENOMEM;
2070		goto out_iounmap;
2071	}
2072
2073	/* Set host specific parameters */
2074	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2075	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2076		host->max_id = TW_MAX_UNITS_9650SE;
2077	else
2078		host->max_id = TW_MAX_UNITS;
2079
2080	host->max_cmd_len = TW_MAX_CDB_LEN;
2081
2082	/* Channels aren't supported by adapter */
2083	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2084	host->max_channel = 0;
2085
2086	/* Register the card with the kernel SCSI layer */
2087	retval = scsi_add_host(host, &pdev->dev);
2088	if (retval) {
2089		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2090		goto out_iounmap;
2091	}
2092
2093	pci_set_drvdata(pdev, host);
2094
2095	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2096	       host->host_no, mem_addr, pdev->irq);
2097	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2098	       host->host_no,
2099	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2100				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2101	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2102				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2103	       le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2104				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2105
2106	/* Try to enable MSI */
2107	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2108	    !pci_enable_msi(pdev))
2109		set_bit(TW_USING_MSI, &tw_dev->flags);
2110
2111	/* Now setup the interrupt handler */
2112	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2113	if (retval) {
2114		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2115		goto out_remove_host;
2116	}
2117
2118	twa_device_extension_list[twa_device_extension_count] = tw_dev;
2119	twa_device_extension_count++;
2120
2121	/* Re-enable interrupts on the card */
2122	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2123
2124	/* Finally, scan the host */
2125	scsi_scan_host(host);
2126
2127	if (twa_major == -1) {
2128		if ((twa_major = register_chrdev(0, "twa", &twa_fops)) < 0)
2129			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2130	}
2131	return 0;
2132
2133out_remove_host:
2134	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2135		pci_disable_msi(pdev);
2136	scsi_remove_host(host);
2137out_iounmap:
2138	iounmap(tw_dev->base_addr);
2139out_release_mem_region:
2140	pci_release_regions(pdev);
2141out_free_device_extension:
2142	twa_free_device_extension(tw_dev);
2143	scsi_host_put(host);
2144out_disable_device:
2145	pci_disable_device(pdev);
2146
2147	return retval;
2148} /* End twa_probe() */
2149
2150/* This function is called to remove a device */
2151static void twa_remove(struct pci_dev *pdev)
2152{
2153	struct Scsi_Host *host = pci_get_drvdata(pdev);
2154	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2155
2156	scsi_remove_host(tw_dev->host);
2157
2158	/* Unregister character device */
2159	if (twa_major >= 0) {
2160		unregister_chrdev(twa_major, "twa");
2161		twa_major = -1;
2162	}
2163
2164	/* Shutdown the card */
2165	__twa_shutdown(tw_dev);
2166
2167	/* Disable MSI if enabled */
2168	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2169		pci_disable_msi(pdev);
2170
2171	/* Free IO remapping */
2172	iounmap(tw_dev->base_addr);
2173
2174	/* Free up the mem region */
2175	pci_release_regions(pdev);
2176
2177	/* Free up device extension resources */
2178	twa_free_device_extension(tw_dev);
2179
2180	scsi_host_put(tw_dev->host);
2181	pci_disable_device(pdev);
2182	twa_device_extension_count--;
2183} /* End twa_remove() */
2184
2185/* This function is called on PCI suspend */
2186static int __maybe_unused twa_suspend(struct device *dev)
2187{
2188	struct pci_dev *pdev = to_pci_dev(dev);
2189	struct Scsi_Host *host = pci_get_drvdata(pdev);
2190	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2191
2192	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2193
2194	TW_DISABLE_INTERRUPTS(tw_dev);
2195	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2196
2197	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2198		pci_disable_msi(pdev);
2199
2200	/* Tell the card we are shutting down */
2201	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2202		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2203	} else {
2204		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2205	}
2206	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2207
2208	return 0;
2209} /* End twa_suspend() */
2210
2211/* This function is called on PCI resume */
2212static int __maybe_unused twa_resume(struct device *dev)
2213{
2214	int retval = 0;
2215	struct pci_dev *pdev = to_pci_dev(dev);
2216	struct Scsi_Host *host = pci_get_drvdata(pdev);
2217	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2218
2219	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2220
2221	pci_try_set_mwi(pdev);
2222
2223	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2224	if (retval)
2225		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2226	if (retval) {
2227		TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2228		retval = -ENODEV;
2229		goto out_disable_device;
2230	}
2231
2232	/* Initialize the card */
2233	if (twa_reset_sequence(tw_dev, 0)) {
2234		retval = -ENODEV;
2235		goto out_disable_device;
2236	}
2237
2238	/* Now setup the interrupt handler */
2239	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2240	if (retval) {
2241		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2242		retval = -ENODEV;
2243		goto out_disable_device;
2244	}
2245
2246	/* Re-enable MSI if it was in use before suspend */
2247	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2248		pci_enable_msi(pdev);
2249
2250	/* Re-enable interrupts on the card */
2251	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2252
2253	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2254	return 0;
2255
2256out_disable_device:
2257	scsi_remove_host(host);
2258
2259	return retval;
2260} /* End twa_resume() */
2261
2262/* PCI Devices supported by this driver */
2263static struct pci_device_id twa_pci_tbl[] = {
2264	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2265	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2266	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2267	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2268	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2269	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2270	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2271	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2272	{ }
2273};
2274MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2275
2276static SIMPLE_DEV_PM_OPS(twa_pm_ops, twa_suspend, twa_resume);
2277
2278/* pci_driver initializer */
2279static struct pci_driver twa_driver = {
2280	.name		= "3w-9xxx",
2281	.id_table	= twa_pci_tbl,
2282	.probe		= twa_probe,
2283	.remove		= twa_remove,
2284	.driver.pm	= &twa_pm_ops,
2285	.shutdown	= twa_shutdown
2286};
2287
2288/* This function is called on driver initialization */
2289static int __init twa_init(void)
2290{
2291	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2292
2293	return pci_register_driver(&twa_driver);
2294} /* End twa_init() */
2295
2296/* This function is called on driver exit */
2297static void __exit twa_exit(void)
2298{
2299	pci_unregister_driver(&twa_driver);
2300} /* End twa_exit() */
2301
2302module_init(twa_init);
2303module_exit(twa_exit);
2304