   1/*
   2   3w-9xxx.c -- 3ware 9000 Storage Controller device driver for Linux.
   3
   4   Written By: Adam Radford <aradford@gmail.com>
   5   Modifications By: Tom Couch
   6
   7   Copyright (C) 2004-2009 Applied Micro Circuits Corporation.
   8   Copyright (C) 2010 LSI Corporation.
   9
  10   This program is free software; you can redistribute it and/or modify
  11   it under the terms of the GNU General Public License as published by
  12   the Free Software Foundation; version 2 of the License.
  13
  14   This program is distributed in the hope that it will be useful,
  15   but WITHOUT ANY WARRANTY; without even the implied warranty of
  16   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  17   GNU General Public License for more details.
  18
  19   NO WARRANTY
  20   THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
  21   CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
  22   LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
  23   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
  24   solely responsible for determining the appropriateness of using and
  25   distributing the Program and assumes all risks associated with its
  26   exercise of rights under this Agreement, including but not limited to
  27   the risks and costs of program errors, damage to or loss of data,
  28   programs or equipment, and unavailability or interruption of operations.
  29
  30   DISCLAIMER OF LIABILITY
  31   NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
  32   DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
  33   DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
  34   ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
  35   TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
  36   USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
  37   HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
  38
  39   You should have received a copy of the GNU General Public License
  40   along with this program; if not, write to the Free Software
  41   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
  42
  43   Bugs/Comments/Suggestions should be mailed to:
  44   aradford@gmail.com
  45
  46   Note: This version of the driver does not contain a bundled firmware
  47         image.
  48
  49   History
  50   -------
  51   2.26.02.000 - Driver cleanup for kernel submission.
  52   2.26.02.001 - Replace schedule_timeout() calls with msleep().
  53   2.26.02.002 - Add support for PAE mode.
  54                 Add lun support.
  55                 Fix twa_remove() to free irq handler/unregister_chrdev()
  56                 before shutting down card.
  57                 Change to new 'change_queue_depth' api.
  58                 Fix 'handled=1' ISR usage, remove bogus IRQ check.
  59                 Remove un-needed eh_abort handler.
  60                 Add support for embedded firmware error strings.
  61   2.26.02.003 - Correctly handle single sgl's with use_sg=1.
  62   2.26.02.004 - Add support for 9550SX controllers.
  63   2.26.02.005 - Fix use_sg == 0 mapping on systems with 4GB or higher.
  64   2.26.02.006 - Fix 9550SX pchip reset timeout.
  65                 Add big endian support.
  66   2.26.02.007 - Disable local interrupts during kmap/unmap_atomic().
  67   2.26.02.008 - Free irq handler in __twa_shutdown().
  68                 Serialize reset code.
  69                 Add support for 9650SE controllers.
  70   2.26.02.009 - Fix dma mask setting to fallback to 32-bit if 64-bit fails.
  71   2.26.02.010 - Add support for 9690SA controllers.
  72   2.26.02.011 - Increase max AENs drained to 256.
  73                 Add MSI support and "use_msi" module parameter.
  74                 Fix bug in twa_get_param() on 4GB+.
  75                 Use pci_resource_len() for ioremap().
  76   2.26.02.012 - Add power management support.
  77   2.26.02.013 - Fix bug in twa_load_sgl().
  78   2.26.02.014 - Force 60 second timeout default.
  79*/
  80
  81#include <linux/module.h>
  82#include <linux/reboot.h>
  83#include <linux/spinlock.h>
  84#include <linux/interrupt.h>
  85#include <linux/moduleparam.h>
  86#include <linux/errno.h>
  87#include <linux/types.h>
  88#include <linux/delay.h>
  89#include <linux/pci.h>
  90#include <linux/time.h>
  91#include <linux/mutex.h>
  92#include <linux/slab.h>
  93#include <asm/io.h>
  94#include <asm/irq.h>
  95#include <linux/uaccess.h>
  96#include <scsi/scsi.h>
  97#include <scsi/scsi_host.h>
  98#include <scsi/scsi_tcq.h>
  99#include <scsi/scsi_cmnd.h>
 100#include "3w-9xxx.h"
 101
 102/* Globals */
 103#define TW_DRIVER_VERSION "2.26.02.014"
 104static DEFINE_MUTEX(twa_chrdev_mutex);
 105static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 106static unsigned int twa_device_extension_count;
 107static int twa_major = -1;
 108extern struct timezone sys_tz;
 109
 110/* Module parameters */
 111MODULE_AUTHOR ("LSI");
 112MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
 113MODULE_LICENSE("GPL");
 114MODULE_VERSION(TW_DRIVER_VERSION);
 115
 116static int use_msi = 0;
 117module_param(use_msi, int, S_IRUGO);
 118MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts.  Default: 0");
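/* Example usage (module and parameter names as defined above): loading with
   "modprobe 3w-9xxx use_msi=1" should request MSI instead of legacy INTx. */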
 119
 120/* Function prototypes */
 121static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
 122static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
 123static char *twa_aen_severity_lookup(unsigned char severity_code);
 124static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
 125static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 126static int twa_chrdev_open(struct inode *inode, struct file *file);
 127static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
 128static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
 129static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
 130static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
 131 			      u32 set_features, unsigned short current_fw_srl, 
 132			      unsigned short current_fw_arch_id, 
 133			      unsigned short current_fw_branch, 
 134			      unsigned short current_fw_build, 
 135			      unsigned short *fw_on_ctlr_srl, 
 136			      unsigned short *fw_on_ctlr_arch_id, 
 137			      unsigned short *fw_on_ctlr_branch, 
 138			      unsigned short *fw_on_ctlr_build, 
 139			      u32 *init_connect_result);
 140static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
 141static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
 142static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
 143static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
 144static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
 145static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
 146static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg);
 147static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
 148static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
 149
 150/* Functions */
 151
 152/* Show some statistics about the card */
 153static ssize_t twa_show_stats(struct device *dev,
 154			      struct device_attribute *attr, char *buf)
 155{
 156	struct Scsi_Host *host = class_to_shost(dev);
 157	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
 158	unsigned long flags = 0;
 159	ssize_t len;
 160
 161	spin_lock_irqsave(tw_dev->host->host_lock, flags);
 162	len = snprintf(buf, PAGE_SIZE, "3w-9xxx Driver version: %s\n"
 163		       "Current commands posted:   %4d\n"
 164		       "Max commands posted:       %4d\n"
 165		       "Current pending commands:  %4d\n"
 166		       "Max pending commands:      %4d\n"
 167		       "Last sgl length:           %4d\n"
 168		       "Max sgl length:            %4d\n"
 169		       "Last sector count:         %4d\n"
 170		       "Max sector count:          %4d\n"
 171		       "SCSI Host Resets:          %4d\n"
 172		       "AEN's:                     %4d\n", 
 173		       TW_DRIVER_VERSION,
 174		       tw_dev->posted_request_count,
 175		       tw_dev->max_posted_request_count,
 176		       tw_dev->pending_request_count,
 177		       tw_dev->max_pending_request_count,
 178		       tw_dev->sgl_entries,
 179		       tw_dev->max_sgl_entries,
 180		       tw_dev->sector_count,
 181		       tw_dev->max_sector_count,
 182		       tw_dev->num_resets,
 183		       tw_dev->aen_count);
 184	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 185	return len;
 186} /* End twa_show_stats() */
 187
 188/* Create sysfs 'stats' entry */
 189static struct device_attribute twa_host_stats_attr = {
 190	.attr = {
 191		.name = 	"stats",
 192		.mode =		S_IRUGO,
 193	},
 194	.show = twa_show_stats
 195};
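/* Registered through twa_host_attrs below; the counters are typically
   readable from sysfs at /sys/class/scsi_host/host<N>/stats. */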
 196
 197/* Host attributes initializer */
 198static struct device_attribute *twa_host_attrs[] = {
 199	&twa_host_stats_attr,
 200	NULL,
 201};
 202
 203/* File operations struct for character device */
 204static const struct file_operations twa_fops = {
 205	.owner		= THIS_MODULE,
 206	.unlocked_ioctl	= twa_chrdev_ioctl,
 207	.open		= twa_chrdev_open,
 208	.release	= NULL,
 209	.llseek		= noop_llseek,
 210};
 211
 212/*
 213 * The controllers use an inline buffer instead of a mapped SGL for small,
 214 * single entry buffers.  Note that we treat a zero-length transfer like
 215 * a mapped SGL.
 216 */
 217static bool twa_command_mapped(struct scsi_cmnd *cmd)
 218{
 219	return scsi_sg_count(cmd) != 1 ||
 220		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
 221}
 222
 223/* This function will complete an aen request from the isr */
 224static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
 225{
 226	TW_Command_Full *full_command_packet;
 227	TW_Command *command_packet;
 228	TW_Command_Apache_Header *header;
 229	unsigned short aen;
 230	int retval = 1;
 231
 232	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
 233	tw_dev->posted_request_count--;
 234	aen = le16_to_cpu(header->status_block.error);
 235	full_command_packet = tw_dev->command_packet_virt[request_id];
 236	command_packet = &full_command_packet->command.oldcommand;
 237
 238	/* First check for internal completion of set param for time sync */
 239	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
 240		/* Keep reading the queue in case there are more aen's */
 241		if (twa_aen_read_queue(tw_dev, request_id))
 242			goto out2;
 243	        else {
 244			retval = 0;
 245			goto out;
 246		}
 247	}
 248
 249	switch (aen) {
 250	case TW_AEN_QUEUE_EMPTY:
 251		/* Quit reading the queue if this is the last one */
 252		break;
 253	case TW_AEN_SYNC_TIME_WITH_HOST:
 254		twa_aen_sync_time(tw_dev, request_id);
 255		retval = 0;
 256		goto out;
 257	default:
 258		twa_aen_queue_event(tw_dev, header);
 259
 260		/* If there are more aen's, keep reading the queue */
 261		if (twa_aen_read_queue(tw_dev, request_id))
 262			goto out2;
 263		else {
 264			retval = 0;
 265			goto out;
 266		}
 267	}
 268	retval = 0;
 269out2:
 270	tw_dev->state[request_id] = TW_S_COMPLETED;
 271	twa_free_request_id(tw_dev, request_id);
 272	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
 273out:
 274	return retval;
 275} /* End twa_aen_complete() */
 276
 277/* This function will drain aen queue */
 278static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
 279{
 280	int request_id = 0;
 281	char cdb[TW_MAX_CDB_LEN];
 282	TW_SG_Entry sglist[1];
 283	int finished = 0, count = 0;
 284	TW_Command_Full *full_command_packet;
 285	TW_Command_Apache_Header *header;
 286	unsigned short aen;
 287	int first_reset = 0, queue = 0, retval = 1;
 288
 289	if (no_check_reset)
 290		first_reset = 0;
 291	else
 292		first_reset = 1;
 293
 294	full_command_packet = tw_dev->command_packet_virt[request_id];
 295	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 296
 297	/* Initialize cdb */
 298	memset(&cdb, 0, TW_MAX_CDB_LEN);
 299	cdb[0] = REQUEST_SENSE; /* opcode */
 300	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
 301
 302	/* Initialize sglist */
 303	memset(&sglist, 0, sizeof(TW_SG_Entry));
 304	sglist[0].length = TW_SECTOR_SIZE;
 305	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
 306
 307	if (sglist[0].address & TW_ALIGNMENT_9000_SGL) {
 308		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
 309		goto out;
 310	}
 311
 312	/* Mark internal command */
 313	tw_dev->srb[request_id] = NULL;
 314
 315	do {
 316		/* Send command to the board */
 317		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
 318			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
 319			goto out;
 320		}
 321
 322		/* Now poll for completion */
 323		if (twa_poll_response(tw_dev, request_id, 30)) {
 324			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
 325			tw_dev->posted_request_count--;
 326			goto out;
 327		}
 328
 329		tw_dev->posted_request_count--;
 330		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
 331		aen = le16_to_cpu(header->status_block.error);
 332		queue = 0;
 333		count++;
 334
 335		switch (aen) {
 336		case TW_AEN_QUEUE_EMPTY:
 337			if (first_reset != 1)
 338				goto out;
 339			else
 340				finished = 1;
 341			break;
 342		case TW_AEN_SOFT_RESET:
 343			if (first_reset == 0)
 344				first_reset = 1;
 345			else
 346				queue = 1;
 347			break;
 348		case TW_AEN_SYNC_TIME_WITH_HOST:
 349			break;
 350		default:
 351			queue = 1;
 352		}
 353
 354		/* Now queue an event info */
 355		if (queue)
 356			twa_aen_queue_event(tw_dev, header);
 357	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
 358
 359	if (count == TW_MAX_AEN_DRAIN)
 360		goto out;
 361
 362	retval = 0;
 363out:
 364	tw_dev->state[request_id] = TW_S_INITIAL;
 365	return retval;
 366} /* End twa_aen_drain_queue() */
 367
 368/* This function will queue an event */
 369static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 370{
 371	u32 local_time;
 372	struct timeval time;
 373	TW_Event *event;
 374	unsigned short aen;
 375	char host[16];
 376	char *error_str;
 377
 378	tw_dev->aen_count++;
 379
 380	/* Fill out event info */
 381	event = tw_dev->event_queue[tw_dev->error_index];
 382
 383	/* Check for clobber */
 384	host[0] = '\0';
 385	if (tw_dev->host) {
 386		sprintf(host, " scsi%d:", tw_dev->host->host_no);
 387		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
 388			tw_dev->aen_clobber = 1;
 389	}
 390
 391	aen = le16_to_cpu(header->status_block.error);
 392	memset(event, 0, sizeof(TW_Event));
 393
 394	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
 395	do_gettimeofday(&time);
 396	local_time = (u32)(time.tv_sec - (sys_tz.tz_minuteswest * 60));
 397	event->time_stamp_sec = local_time;
 398	event->aen_code = aen;
 399	event->retrieved = TW_AEN_NOT_RETRIEVED;
 400	event->sequence_id = tw_dev->error_sequence_id;
 401	tw_dev->error_sequence_id++;
 402
 403	/* Check for embedded error string */
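	/* Firmware may append a second NUL-terminated string immediately after
	   err_specific_desc; error_str points just past the first terminator
	   and is used below only when it is non-empty. */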
 404	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
 405
 406	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
 407	event->parameter_len = strlen(header->err_specific_desc);
 408	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
 409	if (event->severity != TW_AEN_SEVERITY_DEBUG)
 410		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
 411		       host,
 412		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
 413		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
 414		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
 415		       header->err_specific_desc);
 416	else
 417		tw_dev->aen_count--;
 418
 419	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
 420		tw_dev->event_queue_wrapped = 1;
 421	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
 422} /* End twa_aen_queue_event() */
 423
 424/* This function will read the aen queue from the isr */
 425static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
 426{
 427	char cdb[TW_MAX_CDB_LEN];
 428	TW_SG_Entry sglist[1];
 429	TW_Command_Full *full_command_packet;
 430	int retval = 1;
 431
 432	full_command_packet = tw_dev->command_packet_virt[request_id];
 433	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 434
 435	/* Initialize cdb */
 436	memset(&cdb, 0, TW_MAX_CDB_LEN);
 437	cdb[0] = REQUEST_SENSE; /* opcode */
 438	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
 439
 440	/* Initialize sglist */
 441	memset(&sglist, 0, sizeof(TW_SG_Entry));
 442	sglist[0].length = TW_SECTOR_SIZE;
 443	sglist[0].address = tw_dev->generic_buffer_phys[request_id];
 444
 445	/* Mark internal command */
 446	tw_dev->srb[request_id] = NULL;
 447
 448	/* Now post the command packet */
 449	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
 450		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
 451		goto out;
 452	}
 453	retval = 0;
 454out:
 455	return retval;
 456} /* End twa_aen_read_queue() */
 457
 458/* This function will look up an AEN severity string */
 459static char *twa_aen_severity_lookup(unsigned char severity_code)
 460{
 461	char *retval = NULL;
 462
 463	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
 464	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
 465		goto out;
 466
 467	retval = twa_aen_severity_table[severity_code];
 468out:
 469	return retval;
 470} /* End twa_aen_severity_lookup() */
 471
 472/* This function will sync firmware time with the host time */
 473static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 474{
 475	u32 schedulertime;
 476	struct timeval utc;
 477	TW_Command_Full *full_command_packet;
 478	TW_Command *command_packet;
 479	TW_Param_Apache *param;
 480	u32 local_time;
 481
 482	/* Fill out the command packet */
 483	full_command_packet = tw_dev->command_packet_virt[request_id];
 484	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 485	command_packet = &full_command_packet->command.oldcommand;
 486	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
 487	command_packet->request_id = request_id;
 488	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 489	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
 490	command_packet->size = TW_COMMAND_SIZE;
 491	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
 492
 493	/* Setup the param */
 494	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
 495	memset(param, 0, TW_SECTOR_SIZE);
 496	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
 497	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
 498	param->parameter_size_bytes = cpu_to_le16(4);
 499
 500	/* Convert system time in UTC to local time seconds since last 
 501           Sunday 12:00AM */
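	/* The Unix epoch fell on a Thursday, so subtracting three days aligns
	   the count to a Sunday boundary before taking the remainder modulo
	   604800 (the number of seconds in a week). */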
 502	do_gettimeofday(&utc);
 503	local_time = (u32)(utc.tv_sec - (sys_tz.tz_minuteswest * 60));
 504	schedulertime = local_time - (3 * 86400);
 505	schedulertime = cpu_to_le32(schedulertime % 604800);
 506
 507	memcpy(param->data, &schedulertime, sizeof(u32));
 508
 509	/* Mark internal command */
 510	tw_dev->srb[request_id] = NULL;
 511
 512	/* Now post the command */
 513	twa_post_command_packet(tw_dev, request_id, 1);
 514} /* End twa_aen_sync_time() */
 515
 516/* This function will allocate memory and check if it is correctly aligned */
 517static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 518{
 519	int i;
 520	dma_addr_t dma_handle;
 521	unsigned long *cpu_addr;
 522	int retval = 1;
 523
 524	cpu_addr = pci_alloc_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, &dma_handle);
 525	if (!cpu_addr) {
 526		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
 527		goto out;
 528	}
 529
 530	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
 531		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
 532		pci_free_consistent(tw_dev->tw_pci_dev, size*TW_Q_LENGTH, cpu_addr, dma_handle);
 533		goto out;
 534	}
 535
 536	memset(cpu_addr, 0, size*TW_Q_LENGTH);
 537
 538	for (i = 0; i < TW_Q_LENGTH; i++) {
 539		switch(which) {
 540		case 0:
 541			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
 542			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
 543			break;
 544		case 1:
 545			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
 546			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
 547			break;
 548		}
 549	}
 550	retval = 0;
 551out:
 552	return retval;
 553} /* End twa_allocate_memory() */
 554
 555/* This function will check the status register for unexpected bits */
 556static int twa_check_bits(u32 status_reg_value)
 557{
 558	int retval = 1;
 559
 560	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
 561		goto out;
 562	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
 563		goto out;
 564
 565	retval = 0;
 566out:
 567	return retval;
 568} /* End twa_check_bits() */
 569
 570/* This function will check the srl and decide if we are compatible  */
 571static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
 572{
 573	int retval = 1;
 574	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
 575	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
 576	u32 init_connect_result = 0;
 577
 578	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
 579			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
 580			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
 581			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
 582			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
 583			       &fw_on_ctlr_build, &init_connect_result)) {
 584		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
 585		goto out;
 586	}
 587
 588	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
 589	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
 590	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
 591
 592	/* Try base mode compatibility */
 593	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
 594		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
 595				       TW_EXTENDED_INIT_CONNECT,
 596				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
 597				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
 598				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
 599				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
 600				       &init_connect_result)) {
 601			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
 602			goto out;
 603		}
 604		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
 605			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
 606				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
 607			} else {
 608				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
 609			}
 610			goto out;
 611		}
 612		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
 613		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
 614		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
 615	}
 616
 617	/* Load rest of compatibility struct */
 618	strlcpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
 619		sizeof(tw_dev->tw_compat_info.driver_version));
 620	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
 621	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
 622	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
 623	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
 624	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
 625	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
 626	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
 627	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
 628	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
 629
 630	retval = 0;
 631out:
 632	return retval;
 633} /* End twa_check_srl() */
 634
 635/* This function handles ioctl for the character device */
 636static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 637{
 638	struct inode *inode = file_inode(file);
 639	long timeout;
 640	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
 641	dma_addr_t dma_handle;
 642	int request_id = 0;
 643	unsigned int sequence_id = 0;
 644	unsigned char event_index, start_index;
 645	TW_Ioctl_Driver_Command driver_command;
 646	TW_Ioctl_Buf_Apache *tw_ioctl;
 647	TW_Lock *tw_lock;
 648	TW_Command_Full *full_command_packet;
 649	TW_Compatibility_Info *tw_compat_info;
 650	TW_Event *event;
 651	struct timeval current_time;
 652	u32 current_time_ms;
 653	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
 654	int retval = TW_IOCTL_ERROR_OS_EFAULT;
 655	void __user *argp = (void __user *)arg;
 656
 657	mutex_lock(&twa_chrdev_mutex);
 658
 659	/* Only let one of these through at a time */
 660	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
 661		retval = TW_IOCTL_ERROR_OS_EINTR;
 662		goto out;
 663	}
 664
 665	/* First copy down the driver command */
 666	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
 667		goto out2;
 668
 669	/* Check data buffer size */
 670	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
 671		retval = TW_IOCTL_ERROR_OS_EINVAL;
 672		goto out2;
 673	}
 674
 675	/* Hardware can only do multiple of 512 byte transfers */
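	/* (x + 511) & ~511 rounds up to the next multiple of 512; for example,
	   a 100-byte buffer becomes 512 and a 513-byte buffer becomes 1024. */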
 676	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
 677
 678	/* Now allocate ioctl buf memory */
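	/* The allocation covers the ioctl header plus the adjusted data area;
	   the "- 1" presumably overlaps the data area with the one-byte
	   data_buffer placeholder at the end of TW_Ioctl_Buf_Apache. */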
 679	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, &dma_handle, GFP_KERNEL);
 680	if (!cpu_addr) {
 681		retval = TW_IOCTL_ERROR_OS_ENOMEM;
 682		goto out2;
 683	}
 684
 685	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
 686
 687	/* Now copy down the entire ioctl */
 688	if (copy_from_user(tw_ioctl, argp, driver_command.buffer_length + sizeof(TW_Ioctl_Buf_Apache) - 1))
 689		goto out3;
 690
 691	/* See which ioctl we are doing */
 692	switch (cmd) {
 693	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
 694		spin_lock_irqsave(tw_dev->host->host_lock, flags);
 695		twa_get_request_id(tw_dev, &request_id);
 696
 697		/* Flag internal command */
 698		tw_dev->srb[request_id] = NULL;
 699
 700		/* Flag chrdev ioctl */
 701		tw_dev->chrdev_request_id = request_id;
 702
 703		full_command_packet = &tw_ioctl->firmware_command;
 704
 705		/* Load request id and sglist for both command types */
 706		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
 707
 708		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
 709
 710		/* Now post the command packet to the controller */
 711		twa_post_command_packet(tw_dev, request_id, 1);
 712		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 713
 714		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
 715
 716		/* Now wait for command to complete */
 717		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
 718
 719		/* We timed out, and didn't get an interrupt */
 720		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
 721			/* Now we need to reset the board */
 722			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
 723			       tw_dev->host->host_no, TW_DRIVER, 0x37,
 724			       cmd);
 725			retval = TW_IOCTL_ERROR_OS_EIO;
 726			twa_reset_device_extension(tw_dev);
 727			goto out3;
 728		}
 729
 730		/* Now copy in the command packet response */
 731		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
 732		
 733		/* Now complete the io */
 734		spin_lock_irqsave(tw_dev->host->host_lock, flags);
 735		tw_dev->posted_request_count--;
 736		tw_dev->state[request_id] = TW_S_COMPLETED;
 737		twa_free_request_id(tw_dev, request_id);
 738		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 739		break;
 740	case TW_IOCTL_GET_COMPATIBILITY_INFO:
 741		tw_ioctl->driver_command.status = 0;
 742		/* Copy compatibility struct into ioctl data buffer */
 743		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
 744		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
 745		break;
 746	case TW_IOCTL_GET_LAST_EVENT:
 747		if (tw_dev->event_queue_wrapped) {
 748			if (tw_dev->aen_clobber) {
 749				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 750				tw_dev->aen_clobber = 0;
 751			} else
 752				tw_ioctl->driver_command.status = 0;
 753		} else {
 754			if (!tw_dev->error_index) {
 755				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 756				break;
 757			}
 758			tw_ioctl->driver_command.status = 0;
 759		}
 760		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
 761		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 762		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 763		break;
 764	case TW_IOCTL_GET_FIRST_EVENT:
 765		if (tw_dev->event_queue_wrapped) {
 766			if (tw_dev->aen_clobber) {
 767				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 768				tw_dev->aen_clobber = 0;
 769			} else 
 770				tw_ioctl->driver_command.status = 0;
 771			event_index = tw_dev->error_index;
 772		} else {
 773			if (!tw_dev->error_index) {
 774				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 775				break;
 776			}
 777			tw_ioctl->driver_command.status = 0;
 778			event_index = 0;
 779		}
 780		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 781		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 782		break;
 783	case TW_IOCTL_GET_NEXT_EVENT:
 784		event = (TW_Event *)tw_ioctl->data_buffer;
 785		sequence_id = event->sequence_id;
 786		tw_ioctl->driver_command.status = 0;
 787
 788		if (tw_dev->event_queue_wrapped) {
 789			if (tw_dev->aen_clobber) {
 790				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 791				tw_dev->aen_clobber = 0;
 792			}
 793			start_index = tw_dev->error_index;
 794		} else {
 795			if (!tw_dev->error_index) {
 796				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 797				break;
 798			}
 799			start_index = 0;
 800		}
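		/* Sequence ids grow by one per queued event and the circular
		   queue stores them contiguously, so offsetting start_index by
		   (sequence_id + 1) minus the oldest entry's sequence id lands
		   on the event following the one the caller last retrieved. */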
 801		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
 802
 803		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
 804			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
 805				tw_dev->aen_clobber = 1;
 806			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 807			break;
 808		}
 809		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 810		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 811		break;
 812	case TW_IOCTL_GET_PREVIOUS_EVENT:
 813		event = (TW_Event *)tw_ioctl->data_buffer;
 814		sequence_id = event->sequence_id;
 815		tw_ioctl->driver_command.status = 0;
 816
 817		if (tw_dev->event_queue_wrapped) {
 818			if (tw_dev->aen_clobber) {
 819				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 820				tw_dev->aen_clobber = 0;
 821			}
 822			start_index = tw_dev->error_index;
 823		} else {
 824			if (!tw_dev->error_index) {
 825				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 826				break;
 827			}
 828			start_index = 0;
 829		}
 830		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
 831
 832		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
 833			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
 834				tw_dev->aen_clobber = 1;
 835			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 836			break;
 837		}
 838		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 839		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 840		break;
 841	case TW_IOCTL_GET_LOCK:
 842		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
 843		do_gettimeofday(&current_time);
 844		current_time_ms = (current_time.tv_sec * 1000) + (current_time.tv_usec / 1000);
 845
 846		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) || (current_time_ms >= tw_dev->ioctl_msec)) {
 847			tw_dev->ioctl_sem_lock = 1;
 848			tw_dev->ioctl_msec = current_time_ms + tw_lock->timeout_msec;
 849			tw_ioctl->driver_command.status = 0;
 850			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
 851		} else {
 852			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
 853			tw_lock->time_remaining_msec = tw_dev->ioctl_msec - current_time_ms;
 854		}
 855		break;
 856	case TW_IOCTL_RELEASE_LOCK:
 857		if (tw_dev->ioctl_sem_lock == 1) {
 858			tw_dev->ioctl_sem_lock = 0;
 859			tw_ioctl->driver_command.status = 0;
 860		} else {
 861			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
 862		}
 863		break;
 864	default:
 865		retval = TW_IOCTL_ERROR_OS_ENOTTY;
 866		goto out3;
 867	}
 868
 869	/* Now copy the entire response to userspace */
 870	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length - 1) == 0)
 871		retval = 0;
 872out3:
 873	/* Now free ioctl buf memory */
 874	dma_free_coherent(&tw_dev->tw_pci_dev->dev, data_buffer_length_adjusted+sizeof(TW_Ioctl_Buf_Apache) - 1, cpu_addr, dma_handle);
 875out2:
 876	mutex_unlock(&tw_dev->ioctl_lock);
 877out:
 878	mutex_unlock(&twa_chrdev_mutex);
 879	return retval;
 880} /* End twa_chrdev_ioctl() */
 881
 882/* This function handles open for the character device */
 883/* NOTE that this function will race with remove. */
 884static int twa_chrdev_open(struct inode *inode, struct file *file)
 885{
 886	unsigned int minor_number;
 887	int retval = TW_IOCTL_ERROR_OS_ENODEV;
 888
 889	minor_number = iminor(inode);
 890	if (minor_number >= twa_device_extension_count)
 891		goto out;
 892	retval = 0;
 893out:
 894	return retval;
 895} /* End twa_chrdev_open() */
 896
 897/* This function will print readable messages from status register errors */
 898static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
 899{
 900	int retval = 1;
 901
 902	/* Check for various error conditions and handle them appropriately */
 903	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
 904		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
 905		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
 906	}
 907
 908	if (status_reg_value & TW_STATUS_PCI_ABORT) {
 909		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
 910		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
 911		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
 912	}
 913
 914	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
 915		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
 916		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
 917		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
 918			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
 919		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
 920	}
 921
 922	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
 923		if (tw_dev->reset_print == 0) {
 924			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
 925			tw_dev->reset_print = 1;
 926		}
 927		goto out;
 928	}
 929	retval = 0;
 930out:
 931	return retval;
 932} /* End twa_decode_bits() */
 933
 934/* This function will empty the response queue */
 935static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
 936{
 937	u32 status_reg_value, response_que_value;
 938	int count = 0, retval = 1;
 939
 940	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
 941
 942	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
 943		response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
 944		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
 945		count++;
 946	}
 947	if (count == TW_MAX_RESPONSE_DRAIN)
 948		goto out;
 949
 950	retval = 0;
 951out:
 952	return retval;
 953} /* End twa_empty_response_queue() */
 954
 955/* This function will clear the pchip/response queue on 9550SX */
 956static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
 957{
 958	u32 response_que_value = 0;
 959	unsigned long before;
 960	int retval = 1;
 961
 962	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
 963		before = jiffies;
 964		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
 965			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
 966			msleep(1);
 967			if (time_after(jiffies, before + HZ * 30))
 968				goto out;
 969		}
 970		/* P-chip settle time */
 971		msleep(500);
 972		retval = 0;
 973	} else
 974		retval = 0;
 975out:
 976	return retval;
 977} /* End twa_empty_response_queue_large() */
 978
 979/* This function passes sense keys from firmware to scsi layer */
 980static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
 981{
 982	TW_Command_Full *full_command_packet;
 983	unsigned short error;
 984	int retval = 1;
 985	char *error_str;
 986
 987	full_command_packet = tw_dev->command_packet_virt[request_id];
 988
 989	/* Check for embedded error string */
 990	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
 991
 992	/* Don't print error for Logical unit not supported during rollcall */
 993	error = le16_to_cpu(full_command_packet->header.status_block.error);
 994	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
 995		if (print_host)
 996			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
 997			       tw_dev->host->host_no,
 998			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
 999			       full_command_packet->header.status_block.error,
1000			       error_str[0] == '\0' ?
1001			       twa_string_lookup(twa_error_table,
1002						 full_command_packet->header.status_block.error) : error_str,
1003			       full_command_packet->header.err_specific_desc);
1004		else
1005			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1006			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR,
1007			       full_command_packet->header.status_block.error,
1008			       error_str[0] == '\0' ?
1009			       twa_string_lookup(twa_error_table,
1010						 full_command_packet->header.status_block.error) : error_str,
1011			       full_command_packet->header.err_specific_desc);
1012	}
1013
1014	if (copy_sense) {
1015		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1016		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1017		retval = TW_ISR_DONT_RESULT;
1018		goto out;
1019	}
1020	retval = 0;
1021out:
1022	return retval;
1023} /* End twa_fill_sense() */
1024
1025/* This function will free up device extension resources */
1026static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1027{
1028	if (tw_dev->command_packet_virt[0])
1029		pci_free_consistent(tw_dev->tw_pci_dev,
1030				    sizeof(TW_Command_Full)*TW_Q_LENGTH,
1031				    tw_dev->command_packet_virt[0],
1032				    tw_dev->command_packet_phys[0]);
1033
1034	if (tw_dev->generic_buffer_virt[0])
1035		pci_free_consistent(tw_dev->tw_pci_dev,
1036				    TW_SECTOR_SIZE*TW_Q_LENGTH,
1037				    tw_dev->generic_buffer_virt[0],
1038				    tw_dev->generic_buffer_phys[0]);
1039
1040	kfree(tw_dev->event_queue[0]);
1041} /* End twa_free_device_extension() */
1042
1043/* This function will free a request id */
1044static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1045{
1046	tw_dev->free_queue[tw_dev->free_tail] = request_id;
1047	tw_dev->state[request_id] = TW_S_FINISHED;
1048	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1049} /* End twa_free_request_id() */
1050
1051/* This function will get parameter table entries from the firmware */
1052static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1053{
1054	TW_Command_Full *full_command_packet;
1055	TW_Command *command_packet;
1056	TW_Param_Apache *param;
1057	void *retval = NULL;
1058
1059	/* Setup the command packet */
1060	full_command_packet = tw_dev->command_packet_virt[request_id];
1061	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1062	command_packet = &full_command_packet->command.oldcommand;
1063
1064	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1065	command_packet->size              = TW_COMMAND_SIZE;
1066	command_packet->request_id        = request_id;
1067	command_packet->byte6_offset.block_count = cpu_to_le16(1);
1068
1069	/* Now setup the param */
1070	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1071	memset(param, 0, TW_SECTOR_SIZE);
1072	param->table_id = cpu_to_le16(table_id | 0x8000);
1073	param->parameter_id = cpu_to_le16(parameter_id);
1074	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1075
1076	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1077	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1078
1079	/* Post the command packet to the board */
1080	twa_post_command_packet(tw_dev, request_id, 1);
1081
1082	/* Poll for completion */
1083	if (twa_poll_response(tw_dev, request_id, 30))
1084		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1085	else
1086		retval = (void *)&(param->data[0]);
1087
1088	tw_dev->posted_request_count--;
1089	tw_dev->state[request_id] = TW_S_INITIAL;
1090
1091	return retval;
1092} /* End twa_get_param() */
1093
1094/* This function will assign an available request id */
1095static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1096{
1097	*request_id = tw_dev->free_queue[tw_dev->free_head];
1098	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1099	tw_dev->state[*request_id] = TW_S_STARTED;
1100} /* End twa_get_request_id() */
1101
1102/* This function will send an initconnection command to controller */
1103static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1104 			      u32 set_features, unsigned short current_fw_srl, 
1105			      unsigned short current_fw_arch_id, 
1106			      unsigned short current_fw_branch, 
1107			      unsigned short current_fw_build, 
1108			      unsigned short *fw_on_ctlr_srl, 
1109			      unsigned short *fw_on_ctlr_arch_id, 
1110			      unsigned short *fw_on_ctlr_branch, 
1111			      unsigned short *fw_on_ctlr_build, 
1112			      u32 *init_connect_result)
1113{
1114	TW_Command_Full *full_command_packet;
1115	TW_Initconnect *tw_initconnect;
1116	int request_id = 0, retval = 1;
1117
1118	/* Initialize InitConnection command packet */
1119	full_command_packet = tw_dev->command_packet_virt[request_id];
1120	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1121	full_command_packet->header.header_desc.size_header = 128;
1122	
1123	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1124	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1125	tw_initconnect->request_id = request_id;
1126	tw_initconnect->message_credits = cpu_to_le16(message_credits);
1127	tw_initconnect->features = set_features;
1128
1129	/* Turn on 64-bit sgl support if we need to */
1130	tw_initconnect->features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1131
1132	tw_initconnect->features = cpu_to_le32(tw_initconnect->features);
1133
1134	if (set_features & TW_EXTENDED_INIT_CONNECT) {
1135		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1136		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1137		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1138		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1139		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1140	} else 
1141		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1142
1143	/* Send command packet to the board */
1144	twa_post_command_packet(tw_dev, request_id, 1);
1145
1146	/* Poll for completion */
1147	if (twa_poll_response(tw_dev, request_id, 30)) {
1148		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1149	} else {
1150		if (set_features & TW_EXTENDED_INIT_CONNECT) {
1151			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1152			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1153			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1154			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1155			*init_connect_result = le32_to_cpu(tw_initconnect->result);
1156		}
1157		retval = 0;
1158	}
1159
1160	tw_dev->posted_request_count--;
1161	tw_dev->state[request_id] = TW_S_INITIAL;
1162
1163	return retval;
1164} /* End twa_initconnection() */
1165
1166/* This function will initialize the fields of a device extension */
1167static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1168{
1169	int i, retval = 1;
1170
1171	/* Initialize command packet buffers */
1172	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1173		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1174		goto out;
1175	}
1176
1177	/* Initialize generic buffer */
1178	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1179		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1180		goto out;
1181	}
1182
1183	/* Allocate event info space */
1184	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1185	if (!tw_dev->event_queue[0]) {
1186		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1187		goto out;
1188	}
1189
1190
1191	for (i = 0; i < TW_Q_LENGTH; i++) {
1192		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1193		tw_dev->free_queue[i] = i;
1194		tw_dev->state[i] = TW_S_INITIAL;
1195	}
1196
1197	tw_dev->pending_head = TW_Q_START;
1198	tw_dev->pending_tail = TW_Q_START;
1199	tw_dev->free_head = TW_Q_START;
1200	tw_dev->free_tail = TW_Q_START;
1201	tw_dev->error_sequence_id = 1;
1202	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1203
1204	mutex_init(&tw_dev->ioctl_lock);
1205	init_waitqueue_head(&tw_dev->ioctl_wqueue);
1206
1207	retval = 0;
1208out:
1209	return retval;
1210} /* End twa_initialize_device_extension() */
1211
1212/* This function is the interrupt service routine */
1213static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1214{
1215	int request_id, error = 0;
1216	u32 status_reg_value;
1217	TW_Response_Queue response_que;
1218	TW_Command_Full *full_command_packet;
1219	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1220	int handled = 0;
1221
1222	/* Get the per adapter lock */
1223	spin_lock(tw_dev->host->host_lock);
1224
1225	/* Read the registers */
1226	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1227
1228	/* Check if this is our interrupt, otherwise bail */
1229	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1230		goto twa_interrupt_bail;
1231
1232	handled = 1;
1233
1234	/* If we are resetting, bail */
1235	if (test_bit(TW_IN_RESET, &tw_dev->flags))
1236		goto twa_interrupt_bail;
1237
1238	/* Check controller for errors */
1239	if (twa_check_bits(status_reg_value)) {
1240		if (twa_decode_bits(tw_dev, status_reg_value)) {
1241			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1242			goto twa_interrupt_bail;
1243		}
1244	}
1245
1246	/* Handle host interrupt */
1247	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1248		TW_CLEAR_HOST_INTERRUPT(tw_dev);
1249
1250	/* Handle attention interrupt */
1251	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1252		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1253		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1254			twa_get_request_id(tw_dev, &request_id);
1255
1256			error = twa_aen_read_queue(tw_dev, request_id);
1257			if (error) {
1258				tw_dev->state[request_id] = TW_S_COMPLETED;
1259				twa_free_request_id(tw_dev, request_id);
1260				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1261			}
1262		}
1263	}
1264
1265	/* Handle command interrupt */
1266	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1267		TW_MASK_COMMAND_INTERRUPT(tw_dev);
1268		/* Drain as many pending commands as we can */
1269		while (tw_dev->pending_request_count > 0) {
1270			request_id = tw_dev->pending_queue[tw_dev->pending_head];
1271			if (tw_dev->state[request_id] != TW_S_PENDING) {
1272				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1273				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1274				goto twa_interrupt_bail;
1275			}
1276			if (twa_post_command_packet(tw_dev, request_id, 1)==0) {
1277				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1278				tw_dev->pending_request_count--;
1279			} else {
1280				/* If we get here, we will continue re-posting on the next command interrupt */
1281				break;
1282			}
1283		}
1284	}
1285
1286	/* Handle response interrupt */
1287	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1288
1289		/* Drain the response queue from the board */
1290		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1291			/* Complete the response */
1292			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1293			request_id = TW_RESID_OUT(response_que.response_id);
1294			full_command_packet = tw_dev->command_packet_virt[request_id];
1295			error = 0;
1296			/* Check for command packet errors */
1297			if (full_command_packet->command.newcommand.status != 0) {
1298				if (tw_dev->srb[request_id] != NULL) {
1299					error = twa_fill_sense(tw_dev, request_id, 1, 1);
1300				} else {
1301					/* Skip ioctl error prints */
1302					if (request_id != tw_dev->chrdev_request_id) {
1303						error = twa_fill_sense(tw_dev, request_id, 0, 1);
1304					}
1305				}
1306			}
1307
1308			/* Check for correct state */
1309			if (tw_dev->state[request_id] != TW_S_POSTED) {
1310				if (tw_dev->srb[request_id] != NULL) {
1311					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1312					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1313					goto twa_interrupt_bail;
1314				}
1315			}
1316
1317			/* Check for internal command completion */
1318			if (tw_dev->srb[request_id] == NULL) {
1319				if (request_id != tw_dev->chrdev_request_id) {
1320					if (twa_aen_complete(tw_dev, request_id))
1321						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1322				} else {
1323					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1324					wake_up(&tw_dev->ioctl_wqueue);
1325				}
1326			} else {
1327				struct scsi_cmnd *cmd;
1328
1329				cmd = tw_dev->srb[request_id];
1330
1331				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1332				/* If no error command was a success */
1333				if (error == 0) {
1334					cmd->result = (DID_OK << 16);
1335				}
1336
1337				/* If error, command failed */
1338				if (error == 1) {
1339					/* Ask for a host reset */
1340					cmd->result = (DID_OK << 16) | (CHECK_CONDITION << 1);
1341				}
1342
1343				/* Report residual bytes for single sgl */
1344				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1345					if (full_command_packet->command.newcommand.sg_list[0].length < scsi_bufflen(tw_dev->srb[request_id]))
1346						scsi_set_resid(cmd, scsi_bufflen(cmd) - full_command_packet->command.newcommand.sg_list[0].length);
1347				}
1348
1349				/* Now complete the io */
1350				if (twa_command_mapped(cmd))
1351					scsi_dma_unmap(cmd);
1352				cmd->scsi_done(cmd);
1353				tw_dev->state[request_id] = TW_S_COMPLETED;
1354				twa_free_request_id(tw_dev, request_id);
1355				tw_dev->posted_request_count--;
1356			}
1357
1358			/* Check for valid status after each drain */
1359			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1360			if (twa_check_bits(status_reg_value)) {
1361				if (twa_decode_bits(tw_dev, status_reg_value)) {
1362					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1363					goto twa_interrupt_bail;
1364				}
1365			}
1366		}
1367	}
1368
1369twa_interrupt_bail:
1370	spin_unlock(tw_dev->host->host_lock);
1371	return IRQ_RETVAL(handled);
1372} /* End twa_interrupt() */
1373
1374/* This function will load the request id and various sgls for ioctls */
1375static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1376{
1377	TW_Command *oldcommand;
1378	TW_Command_Apache *newcommand;
1379	TW_SG_Entry *sgl;
1380	unsigned int pae = 0;
1381
1382	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1383		pae = 1;
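	/* pae is set on a 32-bit kernel built with a 64-bit dma_addr_t, where
	   each SG address carries an extra 32-bit word; the command size and
	   the 9690SA SGL offset below are widened by one dword to match. */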
1384
1385	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1386		newcommand = &full_command_packet->command.newcommand;
1387		newcommand->request_id__lunl =
1388			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id));
1389		if (length) {
1390			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1391			newcommand->sg_list[0].length = cpu_to_le32(length);
1392		}
1393		newcommand->sgl_entries__lunh =
1394			cpu_to_le16(TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0));
1395	} else {
1396		oldcommand = &full_command_packet->command.oldcommand;
1397		oldcommand->request_id = request_id;
1398
1399		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1400			/* Load the sg list */
1401			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1402				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1403			else
1404				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1405			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache) - 1);
1406			sgl->length = cpu_to_le32(length);
1407
1408			oldcommand->size += pae;
1409		}
1410	}
1411} /* End twa_load_sgl() */
1412
1413/* This function will poll for a response interrupt of a request */
1414static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1415{
1416	int retval = 1, found = 0, response_request_id;
1417	TW_Response_Queue response_queue;
1418	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1419
1420	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1421		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1422		response_request_id = TW_RESID_OUT(response_queue.response_id);
1423		if (request_id != response_request_id) {
1424			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1425			goto out;
1426		}
1427		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1428			if (full_command_packet->command.newcommand.status != 0) {
1429				/* bad response */
1430				twa_fill_sense(tw_dev, request_id, 0, 0);
1431				goto out;
1432			}
1433			found = 1;
1434		} else {
1435			if (full_command_packet->command.oldcommand.status != 0) {
1436				/* bad response */
1437				twa_fill_sense(tw_dev, request_id, 0, 0);
1438				goto out;
1439			}
1440			found = 1;
1441		}
1442	}
1443
1444	if (found)
1445		retval = 0;
1446out:
1447	return retval;
1448} /* End twa_poll_response() */
1449
1450/* This function will poll the status register for a flag */
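/*
 * Note: this helper sleeps (msleep) between status reads, so it may only be
 * called from process context (e.g. the reset sequence), never from the ISR.
 */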
1451static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1452{
1453	u32 status_reg_value; 
1454	unsigned long before;
1455	int retval = 1;
1456
1457	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1458	before = jiffies;
1459
1460	if (twa_check_bits(status_reg_value))
1461		twa_decode_bits(tw_dev, status_reg_value);
1462
1463	while ((status_reg_value & flag) != flag) {
1464		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1465
1466		if (twa_check_bits(status_reg_value))
1467			twa_decode_bits(tw_dev, status_reg_value);
1468
1469		if (time_after(jiffies, before + HZ * seconds))
1470			goto out;
1471
1472		msleep(50);
1473	}
1474	retval = 0;
1475out:
1476	return retval;
1477} /* End twa_poll_status() */
1478
1479/* This function will poll the status register for disappearance of a flag */
1480static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1481{
1482	u32 status_reg_value;
1483	unsigned long before;
1484	int retval = 1;
1485
1486	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1487	before = jiffies;
1488
1489	if (twa_check_bits(status_reg_value))
1490		twa_decode_bits(tw_dev, status_reg_value);
1491
1492	while ((status_reg_value & flag) != 0) {
1493		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1494		if (twa_check_bits(status_reg_value))
1495			twa_decode_bits(tw_dev, status_reg_value);
1496
1497		if (time_after(jiffies, before + HZ * seconds))
1498			goto out;
1499
1500		msleep(50);
1501	}
1502	retval = 0;
1503out:
1504	return retval;
1505} /* End twa_poll_status_gone() */
1506
1507/* This function will attempt to post a command packet to the board */
1508static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1509{
1510	u32 status_reg_value;
1511	dma_addr_t command_que_value;
1512	int retval = 1;
1513
1514	command_que_value = tw_dev->command_packet_phys[request_id];
1515
1516	/* For 9650SE write low 4 bytes first */
1517	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1518	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1519		command_que_value += TW_COMMAND_OFFSET;
1520		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
1521	}
1522
1523	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1524
1525	if (twa_check_bits(status_reg_value))
1526		twa_decode_bits(tw_dev, status_reg_value);
1527
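	/*
	 * If the controller's command queue is full, or other requests are
	 * already pending, SCSI-layer commands are bounced back with
	 * SCSI_MLQUEUE_HOST_BUSY, while internal commands are parked on the
	 * driver's pending queue and the command interrupt is unmasked so
	 * the pending queue can be drained later.
	 */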
1528	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1529
1530		/* Only pend internal driver commands */
1531		if (!internal) {
1532			retval = SCSI_MLQUEUE_HOST_BUSY;
1533			goto out;
1534		}
1535
1536		/* Couldn't post the command packet, so we do it later */
1537		if (tw_dev->state[request_id] != TW_S_PENDING) {
1538			tw_dev->state[request_id] = TW_S_PENDING;
1539			tw_dev->pending_request_count++;
1540			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1541				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1542			}
1543			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1544			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1545		}
1546		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1547		goto out;
1548	} else {
1549		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1550		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1551			/* Now write upper 4 bytes */
1552			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1553		} else {
1554			if (sizeof(dma_addr_t) > 4) {
1555				command_que_value += TW_COMMAND_OFFSET;
1556				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1557				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1558			} else {
1559				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1560			}
1561		}
1562		tw_dev->state[request_id] = TW_S_POSTED;
1563		tw_dev->posted_request_count++;
1564		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1565			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1566		}
1567	}
1568	retval = 0;
1569out:
1570	return retval;
1571} /* End twa_post_command_packet() */
1572
1573/* This function will reset a device extension */
1574static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1575{
1576	int i = 0;
1577	int retval = 1;
1578	unsigned long flags = 0;
1579
1580	set_bit(TW_IN_RESET, &tw_dev->flags);
1581	TW_DISABLE_INTERRUPTS(tw_dev);
1582	TW_MASK_COMMAND_INTERRUPT(tw_dev);
1583	spin_lock_irqsave(tw_dev->host->host_lock, flags);
1584
1585	/* Abort all requests that are in progress */
1586	for (i = 0; i < TW_Q_LENGTH; i++) {
1587		if ((tw_dev->state[i] != TW_S_FINISHED) &&
1588		    (tw_dev->state[i] != TW_S_INITIAL) &&
1589		    (tw_dev->state[i] != TW_S_COMPLETED)) {
1590			if (tw_dev->srb[i]) {
1591				struct scsi_cmnd *cmd = tw_dev->srb[i];
1592
1593				cmd->result = (DID_RESET << 16);
1594				if (twa_command_mapped(cmd))
1595					scsi_dma_unmap(cmd);
1596				cmd->scsi_done(cmd);
1597			}
1598		}
1599	}
1600
1601	/* Reset queues and counts */
1602	for (i = 0; i < TW_Q_LENGTH; i++) {
1603		tw_dev->free_queue[i] = i;
1604		tw_dev->state[i] = TW_S_INITIAL;
1605	}
1606	tw_dev->free_head = TW_Q_START;
1607	tw_dev->free_tail = TW_Q_START;
1608	tw_dev->posted_request_count = 0;
1609	tw_dev->pending_request_count = 0;
1610	tw_dev->pending_head = TW_Q_START;
1611	tw_dev->pending_tail = TW_Q_START;
1612	tw_dev->reset_print = 0;
1613
1614	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1615
1616	if (twa_reset_sequence(tw_dev, 1))
1617		goto out;
1618
1619	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1620	clear_bit(TW_IN_RESET, &tw_dev->flags);
1621	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1622
1623	retval = 0;
1624out:
1625	return retval;
1626} /* End twa_reset_device_extension() */
1627
1628/* This function will reset a controller */
1629static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1630{
1631	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1632
1633	while (tries < TW_MAX_RESET_TRIES) {
1634		if (do_soft_reset) {
1635			TW_SOFT_RESET(tw_dev);
1636			/* Clear pchip/response queue on 9550SX */
1637			if (twa_empty_response_queue_large(tw_dev)) {
1638				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1639				do_soft_reset = 1;
1640				tries++;
1641				continue;
1642			}
1643		}
1644
1645		/* Make sure controller is in a good state */
1646		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1647			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1648			do_soft_reset = 1;
1649			tries++;
1650			continue;
1651		}
1652
1653		/* Empty response queue */
1654		if (twa_empty_response_queue(tw_dev)) {
1655			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1656			do_soft_reset = 1;
1657			tries++;
1658			continue;
1659		}
1660
1661		flashed = 0;
1662
1663		/* Check for compatibility/flash */
1664		if (twa_check_srl(tw_dev, &flashed)) {
1665			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1666			do_soft_reset = 1;
1667			tries++;
1668			continue;
1669		} else {
1670			if (flashed) {
1671				tries++;
1672				continue;
1673			}
1674		}
1675
1676		/* Drain the AEN queue */
1677		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1678			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1679			do_soft_reset = 1;
1680			tries++;
1681			continue;
1682		}
1683
1684		/* If we got here, controller is in a good state */
1685		retval = 0;
1686		goto out;
1687	}
1688out:
1689	return retval;
1690} /* End twa_reset_sequence() */
1691
1692/* This function returns unit geometry in cylinders/heads/sectors */
1693static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1694{
1695	int heads, sectors, cylinders;
1696	TW_Device_Extension *tw_dev;
1697
1698	tw_dev = (TW_Device_Extension *)sdev->host->hostdata;
1699
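	/* 0x200000 sectors is 1 GiB at 512 bytes/sector: larger units get the
	   conventional 255-head/63-sector translation, smaller ones 64/32. */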
1700	if (capacity >= 0x200000) {
1701		heads = 255;
1702		sectors = 63;
1703		cylinders = sector_div(capacity, heads * sectors);
1704	} else {
1705		heads = 64;
1706		sectors = 32;
1707		cylinders = sector_div(capacity, heads * sectors);
1708	}
1709
1710	geom[0] = heads;
1711	geom[1] = sectors;
1712	geom[2] = cylinders;
1713
1714	return 0;
1715} /* End twa_scsi_biosparam() */
1716
1717/* This is the new scsi eh reset function */
1718static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1719{
1720	TW_Device_Extension *tw_dev = NULL;
1721	int retval = FAILED;
1722
1723	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1724
1725	tw_dev->num_resets++;
1726
1727	sdev_printk(KERN_WARNING, SCpnt->device,
1728		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1729		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1730
1731	/* Make sure we are not issuing an ioctl or resetting from ioctl */
1732	mutex_lock(&tw_dev->ioctl_lock);
1733
1734	/* Now reset the card and some of the device extension data */
1735	if (twa_reset_device_extension(tw_dev)) {
1736		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1737		goto out;
1738	}
1739
1740	retval = SUCCESS;
1741out:
1742	mutex_unlock(&tw_dev->ioctl_lock);
1743	return retval;
1744} /* End twa_scsi_eh_reset() */
1745
1746/* This is the main scsi queue function to handle scsi opcodes */
1747static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
1748{
1749	int request_id, retval;
1750	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1751
1752	/* If we are resetting due to timed out ioctl, report as busy */
1753	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1754		retval = SCSI_MLQUEUE_HOST_BUSY;
1755		goto out;
1756	}
1757
1758	/* Check if this FW supports luns */
1759	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1760		SCpnt->result = (DID_BAD_TARGET << 16);
1761		done(SCpnt);
1762		retval = 0;
1763		goto out;
1764	}
1765
1766	/* Save done function into scsi_cmnd struct */
1767	SCpnt->scsi_done = done;
1768		
1769	/* Get a free request id */
1770	twa_get_request_id(tw_dev, &request_id);
1771
1772	/* Save the scsi command for use by the ISR */
1773	tw_dev->srb[request_id] = SCpnt;
1774
1775	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1776	switch (retval) {
1777	case SCSI_MLQUEUE_HOST_BUSY:
1778		if (twa_command_mapped(SCpnt))
1779			scsi_dma_unmap(SCpnt);
1780		twa_free_request_id(tw_dev, request_id);
1781		break;
1782	case 1:
1783		SCpnt->result = (DID_ERROR << 16);
1784		if (twa_command_mapped(SCpnt))
1785			scsi_dma_unmap(SCpnt);
1786		done(SCpnt);
1787		tw_dev->state[request_id] = TW_S_COMPLETED;
1788		twa_free_request_id(tw_dev, request_id);
1789		retval = 0;
1790	}
1791out:
1792	return retval;
1793} /* End twa_scsi_queue() */
1794
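/*
 * DEF_SCSI_QCMD() expands to twa_scsi_queue(), a wrapper that takes the
 * Scsi_Host lock around twa_scsi_queue_lck() for the SCSI midlayer.
 */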
1795static DEF_SCSI_QCMD(twa_scsi_queue)
1796
1797/* This function hands scsi cdb's to the firmware */
1798static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id, char *cdb, int use_sg, TW_SG_Entry *sglistarg)
1799{
1800	TW_Command_Full *full_command_packet;
1801	TW_Command_Apache *command_packet;
1802	u32 num_sectors = 0x0;
1803	int i, sg_count;
1804	struct scsi_cmnd *srb = NULL;
1805	struct scatterlist *sglist = NULL, *sg;
1806	int retval = 1;
1807
1808	if (tw_dev->srb[request_id]) {
1809		srb = tw_dev->srb[request_id];
1810		if (scsi_sglist(srb))
1811			sglist = scsi_sglist(srb);
1812	}
1813
1814	/* Initialize command packet */
1815	full_command_packet = tw_dev->command_packet_virt[request_id];
1816	full_command_packet->header.header_desc.size_header = 128;
1817	full_command_packet->header.status_block.error = 0;
1818	full_command_packet->header.status_block.severity__reserved = 0;
1819
1820	command_packet = &full_command_packet->command.newcommand;
1821	command_packet->status = 0;
1822	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1823
1824	/* We forced 16 byte cdb use earlier */
1825	if (!cdb)
1826		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1827	else
1828		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1829
1830	if (srb) {
1831		command_packet->unit = srb->device->id;
1832		command_packet->request_id__lunl =
1833			cpu_to_le16(TW_REQ_LUN_IN(srb->device->lun, request_id));
1834	} else {
1835		command_packet->request_id__lunl =
1836			cpu_to_le16(TW_REQ_LUN_IN(0, request_id));
1837		command_packet->unit = 0;
1838	}
1839
1840	command_packet->sgl_offset = 16;
1841
1842	if (!sglistarg) {
1843		/* Map sglist from scsi layer to cmd packet */
1844
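		/*
		 * Commands that twa_command_mapped() declined to map (a
		 * single SG entry shorter than TW_MIN_SGL_LENGTH) are bounced
		 * through the per-request generic buffer rather than
		 * DMA-mapped: writes are copied in here, reads are copied
		 * back in twa_scsiop_execute_scsi_complete().
		 */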
1845		if (scsi_sg_count(srb)) {
1846			if (!twa_command_mapped(srb)) {
1847				if (srb->sc_data_direction == DMA_TO_DEVICE ||
1848				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
1849					scsi_sg_copy_to_buffer(srb,
1850							       tw_dev->generic_buffer_virt[request_id],
1851							       TW_SECTOR_SIZE);
1852				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1853				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1854			} else {
1855				sg_count = scsi_dma_map(srb);
1856				if (sg_count < 0)
1857					goto out;
1858
1859				scsi_for_each_sg(srb, sg, sg_count, i) {
1860					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1861					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1862					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1863						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1864						goto out;
1865					}
1866				}
1867			}
1868			command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id])));
1869		}
1870	} else {
1871		/* Internal cdb post */
1872		for (i = 0; i < use_sg; i++) {
1873			command_packet->sg_list[i].address = TW_CPU_TO_SGL(sglistarg[i].address);
1874			command_packet->sg_list[i].length = cpu_to_le32(sglistarg[i].length);
1875			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1876				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1877				goto out;
1878			}
1879		}
1880		command_packet->sgl_entries__lunh = cpu_to_le16(TW_REQ_LUN_IN(0, use_sg));
1881	}
1882
1883	if (srb) {
1884		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1885			num_sectors = (u32)srb->cmnd[4];
1886
1887		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1888			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1889	}
1890
1891	/* Update sector statistic */
1892	tw_dev->sector_count = num_sectors;
1893	if (tw_dev->sector_count > tw_dev->max_sector_count)
1894		tw_dev->max_sector_count = tw_dev->sector_count;
1895
1896	/* Update SG statistics */
1897	if (srb) {
1898		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1899		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1900			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1901	}
1902
1903	/* Now post the command to the board */
1904	if (srb) {
1905		retval = twa_post_command_packet(tw_dev, request_id, 0);
1906	} else {
1907		twa_post_command_packet(tw_dev, request_id, 1);
1908		retval = 0;
1909	}
1910out:
1911	return retval;
1912} /* End twa_scsiop_execute_scsi() */
1913
1914/* This function completes an execute scsi operation */
1915static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1916{
1917	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1918
1919	if (!twa_command_mapped(cmd) &&
1920	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1921	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1922		if (scsi_sg_count(cmd) == 1) {
1923			void *buf = tw_dev->generic_buffer_virt[request_id];
1924
1925			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1926		}
1927	}
1928} /* End twa_scsiop_execute_scsi_complete() */
1929
1930/* This function tells the controller to shut down */
1931static void __twa_shutdown(TW_Device_Extension *tw_dev)
1932{
1933	/* Disable interrupts */
1934	TW_DISABLE_INTERRUPTS(tw_dev);
1935
1936	/* Free up the IRQ */
1937	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1938
1939	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1940
1941	/* Tell the card we are shutting down */
1942	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1943		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1944	} else {
1945		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1946	}
1947
1948	/* Clear all interrupts just before exit */
1949	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1950} /* End __twa_shutdown() */
1951
1952/* Wrapper for __twa_shutdown */
1953static void twa_shutdown(struct pci_dev *pdev)
1954{
1955	struct Scsi_Host *host = pci_get_drvdata(pdev);
1956	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1957
1958	__twa_shutdown(tw_dev);
1959} /* End twa_shutdown() */
1960
1961/* This function will look up a message string by its code */
1962static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1963{
1964	int index;
1965
1966	for (index = 0; ((code != table[index].code) &&
1967		      (table[index].text != (char *)0)); index++);
1968	return(table[index].text);
1969} /* End twa_string_lookup() */
1970
1971/* This function gets called when a disk is coming on-line */
1972static int twa_slave_configure(struct scsi_device *sdev)
1973{
1974	/* Force 60 second timeout */
1975	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1976
1977	return 0;
1978} /* End twa_slave_configure() */
1979
1980/* scsi_host_template initializer */
1981static struct scsi_host_template driver_template = {
1982	.module			= THIS_MODULE,
1983	.name			= "3ware 9000 Storage Controller",
1984	.queuecommand		= twa_scsi_queue,
1985	.eh_host_reset_handler	= twa_scsi_eh_reset,
1986	.bios_param		= twa_scsi_biosparam,
1987	.change_queue_depth	= scsi_change_queue_depth,
1988	.can_queue		= TW_Q_LENGTH-2,
1989	.slave_configure	= twa_slave_configure,
1990	.this_id		= -1,
1991	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
1992	.max_sectors		= TW_MAX_SECTORS,
1993	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
1994	.use_clustering		= ENABLE_CLUSTERING,
1995	.shost_attrs		= twa_host_attrs,
1996	.emulated		= 1,
1997	.no_write_same		= 1,
1998};
1999
2000/* This function will probe and initialize a card */
2001static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
2002{
2003	struct Scsi_Host *host = NULL;
2004	TW_Device_Extension *tw_dev;
2005	unsigned long mem_addr, mem_len;
2006	int retval = -ENODEV;
2007
2008	retval = pci_enable_device(pdev);
2009	if (retval) {
2010		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2011		goto out_disable_device;
2012	}
2013
2014	pci_set_master(pdev);
2015	pci_try_set_mwi(pdev);
2016
2017	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2018	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2019		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2020		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2021			TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2022			retval = -ENODEV;
2023			goto out_disable_device;
2024		}
2025
2026	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2027	if (!host) {
2028		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2029		retval = -ENOMEM;
2030		goto out_disable_device;
2031	}
2032	tw_dev = (TW_Device_Extension *)host->hostdata;
2033
2034	/* Save values to device extension */
2035	tw_dev->host = host;
2036	tw_dev->tw_pci_dev = pdev;
2037
2038	if (twa_initialize_device_extension(tw_dev)) {
2039		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2040		goto out_free_device_extension;
2041	}
2042
2043	/* Request IO regions */
2044	retval = pci_request_regions(pdev, "3w-9xxx");
2045	if (retval) {
2046		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2047		goto out_free_device_extension;
2048	}
2049
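	/* Boards with the original 9000 device id map their registers in
	   BAR 1; the 9550SX/9650SE/9690SA use BAR 2. */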
2050	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2051		mem_addr = pci_resource_start(pdev, 1);
2052		mem_len = pci_resource_len(pdev, 1);
2053	} else {
2054		mem_addr = pci_resource_start(pdev, 2);
2055		mem_len = pci_resource_len(pdev, 2);
2056	}
2057
2058	/* Save base address */
2059	tw_dev->base_addr = ioremap(mem_addr, mem_len);
2060	if (!tw_dev->base_addr) {
2061		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2062		goto out_release_mem_region;
2063	}
2064
2065	/* Disable interrupts on the card */
2066	TW_DISABLE_INTERRUPTS(tw_dev);
2067
2068	/* Initialize the card */
2069	if (twa_reset_sequence(tw_dev, 0))
2070		goto out_iounmap;
2071
2072	/* Set host specific parameters */
2073	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2074	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2075		host->max_id = TW_MAX_UNITS_9650SE;
2076	else
2077		host->max_id = TW_MAX_UNITS;
2078
2079	host->max_cmd_len = TW_MAX_CDB_LEN;
2080
2081	/* Channels aren't supported by the adapter */
2082	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2083	host->max_channel = 0;
2084
2085	/* Register the card with the kernel SCSI layer */
2086	retval = scsi_add_host(host, &pdev->dev);
2087	if (retval) {
2088		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2089		goto out_iounmap;
2090	}
2091
2092	pci_set_drvdata(pdev, host);
2093
2094	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2095	       host->host_no, mem_addr, pdev->irq);
2096	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2097	       host->host_no,
2098	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2099				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2100	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2101				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2102	       le32_to_cpu(*(int *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2103				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2104
2105	/* Try to enable MSI */
2106	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2107	    !pci_enable_msi(pdev))
2108		set_bit(TW_USING_MSI, &tw_dev->flags);
2109
2110	/* Now setup the interrupt handler */
2111	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2112	if (retval) {
2113		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2114		goto out_remove_host;
2115	}
2116
2117	twa_device_extension_list[twa_device_extension_count] = tw_dev;
2118	twa_device_extension_count++;
2119
2120	/* Re-enable interrupts on the card */
2121	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2122
2123	/* Finally, scan the host */
2124	scsi_scan_host(host);
2125
2126	if (twa_major == -1) {
2127		if ((twa_major = register_chrdev (0, "twa", &twa_fops)) < 0)
2128			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2129	}
2130	return 0;
2131
2132out_remove_host:
2133	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2134		pci_disable_msi(pdev);
2135	scsi_remove_host(host);
2136out_iounmap:
2137	iounmap(tw_dev->base_addr);
2138out_release_mem_region:
2139	pci_release_regions(pdev);
2140out_free_device_extension:
2141	twa_free_device_extension(tw_dev);
2142	scsi_host_put(host);
2143out_disable_device:
2144	pci_disable_device(pdev);
2145
2146	return retval;
2147} /* End twa_probe() */
2148
2149/* This function is called to remove a device */
2150static void twa_remove(struct pci_dev *pdev)
2151{
2152	struct Scsi_Host *host = pci_get_drvdata(pdev);
2153	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2154
2155	scsi_remove_host(tw_dev->host);
2156
2157	/* Unregister character device */
2158	if (twa_major >= 0) {
2159		unregister_chrdev(twa_major, "twa");
2160		twa_major = -1;
2161	}
2162
2163	/* Shutdown the card */
2164	__twa_shutdown(tw_dev);
2165
2166	/* Disable MSI if enabled */
2167	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2168		pci_disable_msi(pdev);
2169
2170	/* Free IO remapping */
2171	iounmap(tw_dev->base_addr);
2172
2173	/* Free up the mem region */
2174	pci_release_regions(pdev);
2175
2176	/* Free up device extension resources */
2177	twa_free_device_extension(tw_dev);
2178
2179	scsi_host_put(tw_dev->host);
2180	pci_disable_device(pdev);
2181	twa_device_extension_count--;
2182} /* End twa_remove() */
2183
2184#ifdef CONFIG_PM
2185/* This function is called on PCI suspend */
2186static int twa_suspend(struct pci_dev *pdev, pm_message_t state)
2187{
2188	struct Scsi_Host *host = pci_get_drvdata(pdev);
2189	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2190
2191	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2192
2193	TW_DISABLE_INTERRUPTS(tw_dev);
2194	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2195
2196	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2197		pci_disable_msi(pdev);
2198
2199	/* Tell the card we are shutting down */
2200	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2201		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2202	} else {
2203		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2204	}
2205	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2206
2207	pci_save_state(pdev);
2208	pci_disable_device(pdev);
2209	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2210
2211	return 0;
2212} /* End twa_suspend() */
2213
2214/* This function is called on PCI resume */
2215static int twa_resume(struct pci_dev *pdev)
2216{
2217	int retval = 0;
2218	struct Scsi_Host *host = pci_get_drvdata(pdev);
2219	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2220
2221	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2222	pci_set_power_state(pdev, PCI_D0);
2223	pci_enable_wake(pdev, PCI_D0, 0);
2224	pci_restore_state(pdev);
2225
2226	retval = pci_enable_device(pdev);
2227	if (retval) {
2228		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x39, "Enable device failed during resume");
2229		return retval;
2230	}
2231
2232	pci_set_master(pdev);
2233	pci_try_set_mwi(pdev);
2234
2235	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
2236	    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
2237		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))
2238		    || pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
2239			TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2240			retval = -ENODEV;
2241			goto out_disable_device;
2242		}
2243
2244	/* Initialize the card */
2245	if (twa_reset_sequence(tw_dev, 0)) {
2246		retval = -ENODEV;
2247		goto out_disable_device;
2248	}
2249
2250	/* Now setup the interrupt handler */
2251	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2252	if (retval) {
2253		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2254		retval = -ENODEV;
2255		goto out_disable_device;
2256	}
2257
2258	/* Re-enable MSI if it was in use before suspend */
2259	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2260		pci_enable_msi(pdev);
2261
2262	/* Re-enable interrupts on the card */
2263	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2264
2265	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2266	return 0;
2267
2268out_disable_device:
2269	scsi_remove_host(host);
2270	pci_disable_device(pdev);
2271
2272	return retval;
2273} /* End twa_resume() */
2274#endif
2275
2276/* PCI Devices supported by this driver */
2277static struct pci_device_id twa_pci_tbl[] = {
2278	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2279	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2280	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2281	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2282	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2283	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2284	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2285	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2286	{ }
2287};
2288MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2289
2290/* pci_driver initializer */
2291static struct pci_driver twa_driver = {
2292	.name		= "3w-9xxx",
2293	.id_table	= twa_pci_tbl,
2294	.probe		= twa_probe,
2295	.remove		= twa_remove,
2296#ifdef CONFIG_PM
2297	.suspend	= twa_suspend,
2298	.resume		= twa_resume,
2299#endif
2300	.shutdown	= twa_shutdown
2301};
2302
2303/* This function is called on driver initialization */
2304static int __init twa_init(void)
2305{
2306	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2307
2308	return pci_register_driver(&twa_driver);
2309} /* End twa_init() */
2310
2311/* This function is called on driver exit */
2312static void __exit twa_exit(void)
2313{
2314	pci_unregister_driver(&twa_driver);
2315} /* End twa_exit() */
2316
2317module_init(twa_init);
2318module_exit(twa_exit);
2319
  80
  81#include <linux/module.h>
  82#include <linux/reboot.h>
  83#include <linux/spinlock.h>
  84#include <linux/interrupt.h>
  85#include <linux/moduleparam.h>
  86#include <linux/errno.h>
  87#include <linux/types.h>
  88#include <linux/delay.h>
  89#include <linux/pci.h>
  90#include <linux/time.h>
  91#include <linux/mutex.h>
  92#include <linux/slab.h>
  93#include <asm/io.h>
  94#include <asm/irq.h>
  95#include <linux/uaccess.h>
  96#include <scsi/scsi.h>
  97#include <scsi/scsi_host.h>
  98#include <scsi/scsi_tcq.h>
  99#include <scsi/scsi_cmnd.h>
 100#include "3w-9xxx.h"
 101
 102/* Globals */
 103#define TW_DRIVER_VERSION "2.26.02.014"
 104static DEFINE_MUTEX(twa_chrdev_mutex);
 105static TW_Device_Extension *twa_device_extension_list[TW_MAX_SLOT];
 106static unsigned int twa_device_extension_count;
 107static int twa_major = -1;
 108extern struct timezone sys_tz;
 109
 110/* Module parameters */
 111MODULE_AUTHOR ("LSI");
 112MODULE_DESCRIPTION ("3ware 9000 Storage Controller Linux Driver");
 113MODULE_LICENSE("GPL");
 114MODULE_VERSION(TW_DRIVER_VERSION);
 115
 116static int use_msi = 0;
 117module_param(use_msi, int, S_IRUGO);
 118MODULE_PARM_DESC(use_msi, "Use Message Signaled Interrupts.  Default: 0");
 119
 120/* Function prototypes */
 121static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header);
 122static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id);
 123static char *twa_aen_severity_lookup(unsigned char severity_code);
 124static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id);
 125static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 126static int twa_chrdev_open(struct inode *inode, struct file *file);
 127static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host);
 128static void twa_free_request_id(TW_Device_Extension *tw_dev,int request_id);
 129static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id);
 130static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
 131			      u32 set_features, unsigned short current_fw_srl,
 132			      unsigned short current_fw_arch_id,
 133			      unsigned short current_fw_branch,
 134			      unsigned short current_fw_build,
 135			      unsigned short *fw_on_ctlr_srl,
 136			      unsigned short *fw_on_ctlr_arch_id,
 137			      unsigned short *fw_on_ctlr_branch,
 138			      unsigned short *fw_on_ctlr_build,
 139			      u32 *init_connect_result);
 140static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length);
 141static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds);
 142static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds);
 143static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal);
 144static int twa_reset_device_extension(TW_Device_Extension *tw_dev);
 145static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset);
 146static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
 147				   unsigned char *cdb, int use_sg,
 148				   TW_SG_Entry *sglistarg);
 149static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id);
 150static char *twa_string_lookup(twa_message_type *table, unsigned int aen_code);
 151
 152/* Functions */
 153
 154/* Show some statistics about the card */
 155static ssize_t twa_show_stats(struct device *dev,
 156			      struct device_attribute *attr, char *buf)
 157{
 158	struct Scsi_Host *host = class_to_shost(dev);
 159	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
 160	unsigned long flags = 0;
 161	ssize_t len;
 162
 163	spin_lock_irqsave(tw_dev->host->host_lock, flags);
 164	len = sysfs_emit(buf, "3w-9xxx Driver version: %s\n"
 165			 "Current commands posted:   %4d\n"
 166			 "Max commands posted:       %4d\n"
 167			 "Current pending commands:  %4d\n"
 168			 "Max pending commands:      %4d\n"
 169			 "Last sgl length:           %4d\n"
 170			 "Max sgl length:            %4d\n"
 171			 "Last sector count:         %4d\n"
 172			 "Max sector count:          %4d\n"
 173			 "SCSI Host Resets:          %4d\n"
 174			 "AEN's:                     %4d\n",
 175			 TW_DRIVER_VERSION,
 176			 tw_dev->posted_request_count,
 177			 tw_dev->max_posted_request_count,
 178			 tw_dev->pending_request_count,
 179			 tw_dev->max_pending_request_count,
 180			 tw_dev->sgl_entries,
 181			 tw_dev->max_sgl_entries,
 182			 tw_dev->sector_count,
 183			 tw_dev->max_sector_count,
 184			 tw_dev->num_resets,
 185			 tw_dev->aen_count);
 186	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 187	return len;
 188} /* End twa_show_stats() */
 189
 190/* Create sysfs 'stats' entry */
 191static struct device_attribute twa_host_stats_attr = {
 192	.attr = {
 193		.name =		"stats",
 194		.mode =		S_IRUGO,
 195	},
 196	.show = twa_show_stats
 197};
 198
 199/* Host attributes initializer */
 200static struct attribute *twa_host_attrs[] = {
 201	&twa_host_stats_attr.attr,
 202	NULL,
 203};
 204
 205ATTRIBUTE_GROUPS(twa_host);
 206
 207/* File operations struct for character device */
 208static const struct file_operations twa_fops = {
 209	.owner		= THIS_MODULE,
 210	.unlocked_ioctl	= twa_chrdev_ioctl,
 211	.open		= twa_chrdev_open,
 212	.release	= NULL,
 213	.llseek		= noop_llseek,
 214};
 215
 216/*
 217 * The controllers use an inline buffer instead of a mapped SGL for small,
 218 * single entry buffers.  Note that we treat a zero-length transfer like
 219 * a mapped SGL.
 220 */
 221static bool twa_command_mapped(struct scsi_cmnd *cmd)
 222{
 223	return scsi_sg_count(cmd) != 1 ||
 224		scsi_bufflen(cmd) >= TW_MIN_SGL_LENGTH;
 225}
 226
 227/* This function will complete an aen request from the isr */
 228static int twa_aen_complete(TW_Device_Extension *tw_dev, int request_id)
 229{
 230	TW_Command_Full *full_command_packet;
 231	TW_Command *command_packet;
 232	TW_Command_Apache_Header *header;
 233	unsigned short aen;
 234	int retval = 1;
 235
 236	header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
 237	tw_dev->posted_request_count--;
 238	aen = le16_to_cpu(header->status_block.error);
 239	full_command_packet = tw_dev->command_packet_virt[request_id];
 240	command_packet = &full_command_packet->command.oldcommand;
 241
 242	/* First check for internal completion of set param for time sync */
 243	if (TW_OP_OUT(command_packet->opcode__sgloffset) == TW_OP_SET_PARAM) {
 244		/* Keep reading the queue in case there are more aen's */
 245		if (twa_aen_read_queue(tw_dev, request_id))
 246			goto out2;
 247		else {
 248			retval = 0;
 249			goto out;
 250		}
 251	}
 252
 253	switch (aen) {
 254	case TW_AEN_QUEUE_EMPTY:
 255		/* Quit reading the queue if this is the last one */
 256		break;
 257	case TW_AEN_SYNC_TIME_WITH_HOST:
 258		twa_aen_sync_time(tw_dev, request_id);
 259		retval = 0;
 260		goto out;
 261	default:
 262		twa_aen_queue_event(tw_dev, header);
 263
 264		/* If there are more aen's, keep reading the queue */
 265		if (twa_aen_read_queue(tw_dev, request_id))
 266			goto out2;
 267		else {
 268			retval = 0;
 269			goto out;
 270		}
 271	}
 272	retval = 0;
 273out2:
 274	tw_dev->state[request_id] = TW_S_COMPLETED;
 275	twa_free_request_id(tw_dev, request_id);
 276	clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
 277out:
 278	return retval;
 279} /* End twa_aen_complete() */
 280
 281/* This function will drain aen queue */
 282static int twa_aen_drain_queue(TW_Device_Extension *tw_dev, int no_check_reset)
 283{
 284	int request_id = 0;
 285	unsigned char cdb[TW_MAX_CDB_LEN];
 286	TW_SG_Entry sglist[1];
 287	int finished = 0, count = 0;
 288	TW_Command_Full *full_command_packet;
 289	TW_Command_Apache_Header *header;
 290	unsigned short aen;
 291	int first_reset = 0, queue = 0, retval = 1;
 292
 293	if (no_check_reset)
 294		first_reset = 0;
 295	else
 296		first_reset = 1;
 297
 298	full_command_packet = tw_dev->command_packet_virt[request_id];
 299	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 300
 301	/* Initialize cdb */
 302	memset(&cdb, 0, TW_MAX_CDB_LEN);
 303	cdb[0] = REQUEST_SENSE; /* opcode */
 304	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
 305
 306	/* Initialize sglist */
 307	memset(&sglist, 0, sizeof(TW_SG_Entry));
 308	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
 309	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 310
 311	if (tw_dev->generic_buffer_phys[request_id] & TW_ALIGNMENT_9000_SGL) {
 312		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1, "Found unaligned address during AEN drain");
 313		goto out;
 314	}
 315
 316	/* Mark internal command */
 317	tw_dev->srb[request_id] = NULL;
 318
 319	do {
 320		/* Send command to the board */
 321		if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
 322			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2, "Error posting request sense");
 323			goto out;
 324		}
 325
 326		/* Now poll for completion */
 327		if (twa_poll_response(tw_dev, request_id, 30)) {
 328			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x3, "No valid response while draining AEN queue");
 329			tw_dev->posted_request_count--;
 330			goto out;
 331		}
 332
 333		tw_dev->posted_request_count--;
 334		header = (TW_Command_Apache_Header *)tw_dev->generic_buffer_virt[request_id];
 335		aen = le16_to_cpu(header->status_block.error);
 336		queue = 0;
 337		count++;
 338
 339		switch (aen) {
 340		case TW_AEN_QUEUE_EMPTY:
 341			if (first_reset != 1)
 342				goto out;
 343			else
 344				finished = 1;
 345			break;
 346		case TW_AEN_SOFT_RESET:
 347			if (first_reset == 0)
 348				first_reset = 1;
 349			else
 350				queue = 1;
 351			break;
 352		case TW_AEN_SYNC_TIME_WITH_HOST:
 353			break;
 354		default:
 355			queue = 1;
 356		}
 357
 358		/* Now queue an event info */
 359		if (queue)
 360			twa_aen_queue_event(tw_dev, header);
 361	} while ((finished == 0) && (count < TW_MAX_AEN_DRAIN));
 362
 363	if (count == TW_MAX_AEN_DRAIN)
 364		goto out;
 365
 366	retval = 0;
 367out:
 368	tw_dev->state[request_id] = TW_S_INITIAL;
 369	return retval;
 370} /* End twa_aen_drain_queue() */
 371
 372/* This function will queue an event */
 373static void twa_aen_queue_event(TW_Device_Extension *tw_dev, TW_Command_Apache_Header *header)
 374{
 375	u32 local_time;
 
 376	TW_Event *event;
 377	unsigned short aen;
 378	char host[16];
 379	char *error_str;
 380
 381	tw_dev->aen_count++;
 382
 383	/* Fill out event info */
 384	event = tw_dev->event_queue[tw_dev->error_index];
 385
 386	/* Check for clobber */
 387	host[0] = '\0';
 388	if (tw_dev->host) {
 389		sprintf(host, " scsi%d:", tw_dev->host->host_no);
 390		if (event->retrieved == TW_AEN_NOT_RETRIEVED)
 391			tw_dev->aen_clobber = 1;
 392	}
 393
 394	aen = le16_to_cpu(header->status_block.error);
 395	memset(event, 0, sizeof(TW_Event));
 396
 397	event->severity = TW_SEV_OUT(header->status_block.severity__reserved);
 398	/* event->time_stamp_sec overflows in y2106 */
 399	local_time = (u32)(ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
 400	event->time_stamp_sec = local_time;
 401	event->aen_code = aen;
 402	event->retrieved = TW_AEN_NOT_RETRIEVED;
 403	event->sequence_id = tw_dev->error_sequence_id;
 404	tw_dev->error_sequence_id++;
 405
 406	/* Check for embedded error string */
 407	error_str = &(header->err_specific_desc[strlen(header->err_specific_desc)+1]);
 408
 409	header->err_specific_desc[sizeof(header->err_specific_desc) - 1] = '\0';
 410	event->parameter_len = strlen(header->err_specific_desc);
 411	memcpy(event->parameter_data, header->err_specific_desc, event->parameter_len + (error_str[0] == '\0' ? 0 : (1 + strlen(error_str))));
 412	if (event->severity != TW_AEN_SEVERITY_DEBUG)
 413		printk(KERN_WARNING "3w-9xxx:%s AEN: %s (0x%02X:0x%04X): %s:%s.\n",
 414		       host,
 415		       twa_aen_severity_lookup(TW_SEV_OUT(header->status_block.severity__reserved)),
 416		       TW_MESSAGE_SOURCE_CONTROLLER_EVENT, aen,
 417		       error_str[0] == '\0' ? twa_string_lookup(twa_aen_table, aen) : error_str,
 418		       header->err_specific_desc);
 419	else
 420		tw_dev->aen_count--;
 421
 422	if ((tw_dev->error_index + 1) == TW_Q_LENGTH)
 423		tw_dev->event_queue_wrapped = 1;
 424	tw_dev->error_index = (tw_dev->error_index + 1 ) % TW_Q_LENGTH;
 425} /* End twa_aen_queue_event() */
 426
 427/* This function will read the aen queue from the isr */
 428static int twa_aen_read_queue(TW_Device_Extension *tw_dev, int request_id)
 429{
 430	unsigned char cdb[TW_MAX_CDB_LEN];
 431	TW_SG_Entry sglist[1];
 432	TW_Command_Full *full_command_packet;
 433	int retval = 1;
 434
 435	full_command_packet = tw_dev->command_packet_virt[request_id];
 436	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 437
 438	/* Initialize cdb */
 439	memset(&cdb, 0, TW_MAX_CDB_LEN);
 440	cdb[0] = REQUEST_SENSE; /* opcode */
 441	cdb[4] = TW_ALLOCATION_LENGTH; /* allocation length */
 442
 443	/* Initialize sglist */
 444	memset(&sglist, 0, sizeof(TW_SG_Entry));
 445	sglist[0].length = cpu_to_le32(TW_SECTOR_SIZE);
 446	sglist[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 447
 448	/* Mark internal command */
 449	tw_dev->srb[request_id] = NULL;
 450
 451	/* Now post the command packet */
 452	if (twa_scsiop_execute_scsi(tw_dev, request_id, cdb, 1, sglist)) {
 453		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x4, "Post failed while reading AEN queue");
 454		goto out;
 455	}
 456	retval = 0;
 457out:
 458	return retval;
 459} /* End twa_aen_read_queue() */
 460
 461/* This function will look up an AEN severity string */
 462static char *twa_aen_severity_lookup(unsigned char severity_code)
 463{
 464	char *retval = NULL;
 465
 466	if ((severity_code < (unsigned char) TW_AEN_SEVERITY_ERROR) ||
 467	    (severity_code > (unsigned char) TW_AEN_SEVERITY_DEBUG))
 468		goto out;
 469
 470	retval = twa_aen_severity_table[severity_code];
 471out:
 472	return retval;
 473} /* End twa_aen_severity_lookup() */
 474
 475/* This function will sync firmware time with the host time */
 476static void twa_aen_sync_time(TW_Device_Extension *tw_dev, int request_id)
 477{
 478	u32 schedulertime;
 
 479	TW_Command_Full *full_command_packet;
 480	TW_Command *command_packet;
 481	TW_Param_Apache *param;
 482	time64_t local_time;
 483
 484	/* Fill out the command packet */
 485	full_command_packet = tw_dev->command_packet_virt[request_id];
 486	memset(full_command_packet, 0, sizeof(TW_Command_Full));
 487	command_packet = &full_command_packet->command.oldcommand;
 488	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_SET_PARAM);
 489	command_packet->request_id = request_id;
 490	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
 491	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
 492	command_packet->size = TW_COMMAND_SIZE;
 493	command_packet->byte6_offset.parameter_count = cpu_to_le16(1);
 494
 495	/* Setup the param */
 496	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
 497	memset(param, 0, TW_SECTOR_SIZE);
 498	param->table_id = cpu_to_le16(TW_TIMEKEEP_TABLE | 0x8000); /* Controller time keep table */
 499	param->parameter_id = cpu_to_le16(0x3); /* SchedulerTime */
 500	param->parameter_size_bytes = cpu_to_le16(4);
 501
 502	/* Convert system time in UTC to local time seconds since last
 503           Sunday 12:00AM */
 504	local_time = (ktime_get_real_seconds() - (sys_tz.tz_minuteswest * 60));
 505	div_u64_rem(local_time - (3 * 86400), 604800, &schedulertime);
 
 
 506
 507	memcpy(param->data, &(__le32){cpu_to_le32(schedulertime)}, sizeof(__le32));
 508
 509	/* Mark internal command */
 510	tw_dev->srb[request_id] = NULL;
 511
 512	/* Now post the command */
 513	twa_post_command_packet(tw_dev, request_id, 1);
 514} /* End twa_aen_sync_time() */
 515
 516/* This function will allocate memory and check if it is correctly aligned */
 517static int twa_allocate_memory(TW_Device_Extension *tw_dev, int size, int which)
 518{
 519	int i;
 520	dma_addr_t dma_handle;
 521	unsigned long *cpu_addr;
 522	int retval = 1;
 523
 524	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
 525			size * TW_Q_LENGTH, &dma_handle, GFP_KERNEL);
 526	if (!cpu_addr) {
 527		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x5, "Memory allocation failed");
 528		goto out;
 529	}
 530
 531	if ((unsigned long)cpu_addr % (TW_ALIGNMENT_9000)) {
 532		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x6, "Failed to allocate correctly aligned memory");
 533		dma_free_coherent(&tw_dev->tw_pci_dev->dev, size * TW_Q_LENGTH,
 534				cpu_addr, dma_handle);
 535		goto out;
 536	}
 537
 538	memset(cpu_addr, 0, size*TW_Q_LENGTH);
 539
 540	for (i = 0; i < TW_Q_LENGTH; i++) {
 541		switch(which) {
 542		case 0:
 543			tw_dev->command_packet_phys[i] = dma_handle+(i*size);
 544			tw_dev->command_packet_virt[i] = (TW_Command_Full *)((unsigned char *)cpu_addr + (i*size));
 545			break;
 546		case 1:
 547			tw_dev->generic_buffer_phys[i] = dma_handle+(i*size);
 548			tw_dev->generic_buffer_virt[i] = (unsigned long *)((unsigned char *)cpu_addr + (i*size));
 549			break;
 550		}
 551	}
 552	retval = 0;
 553out:
 554	return retval;
 555} /* End twa_allocate_memory() */
 556
 557/* This function will check the status register for unexpected bits */
 558static int twa_check_bits(u32 status_reg_value)
 559{
 560	int retval = 1;
 561
 562	if ((status_reg_value & TW_STATUS_EXPECTED_BITS) != TW_STATUS_EXPECTED_BITS)
 563		goto out;
 564	if ((status_reg_value & TW_STATUS_UNEXPECTED_BITS) != 0)
 565		goto out;
 566
 567	retval = 0;
 568out:
 569	return retval;
 570} /* End twa_check_bits() */
 571
 572/* This function will check the srl and decide if we are compatible  */
 573static int twa_check_srl(TW_Device_Extension *tw_dev, int *flashed)
 574{
 575	int retval = 1;
 576	unsigned short fw_on_ctlr_srl = 0, fw_on_ctlr_arch_id = 0;
 577	unsigned short fw_on_ctlr_branch = 0, fw_on_ctlr_build = 0;
 578	u32 init_connect_result = 0;
 579
 580	if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
 581			       TW_EXTENDED_INIT_CONNECT, TW_CURRENT_DRIVER_SRL,
 582			       TW_9000_ARCH_ID, TW_CURRENT_DRIVER_BRANCH,
 583			       TW_CURRENT_DRIVER_BUILD, &fw_on_ctlr_srl,
 584			       &fw_on_ctlr_arch_id, &fw_on_ctlr_branch,
 585			       &fw_on_ctlr_build, &init_connect_result)) {
 586		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x7, "Initconnection failed while checking SRL");
 587		goto out;
 588	}
 589
 590	tw_dev->tw_compat_info.working_srl = fw_on_ctlr_srl;
 591	tw_dev->tw_compat_info.working_branch = fw_on_ctlr_branch;
 592	tw_dev->tw_compat_info.working_build = fw_on_ctlr_build;
 593
 594	/* Try base mode compatibility */
 595	if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
 596		if (twa_initconnection(tw_dev, TW_INIT_MESSAGE_CREDITS,
 597				       TW_EXTENDED_INIT_CONNECT,
 598				       TW_BASE_FW_SRL, TW_9000_ARCH_ID,
 599				       TW_BASE_FW_BRANCH, TW_BASE_FW_BUILD,
 600				       &fw_on_ctlr_srl, &fw_on_ctlr_arch_id,
 601				       &fw_on_ctlr_branch, &fw_on_ctlr_build,
 602				       &init_connect_result)) {
 603			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xa, "Initconnection (base mode) failed while checking SRL");
 604			goto out;
 605		}
 606		if (!(init_connect_result & TW_CTLR_FW_COMPATIBLE)) {
 607			if (TW_CURRENT_DRIVER_SRL > fw_on_ctlr_srl) {
 608				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x32, "Firmware and driver incompatibility: please upgrade firmware");
 609			} else {
 610				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x33, "Firmware and driver incompatibility: please upgrade driver");
 611			}
 612			goto out;
 613		}
 614		tw_dev->tw_compat_info.working_srl = TW_BASE_FW_SRL;
 615		tw_dev->tw_compat_info.working_branch = TW_BASE_FW_BRANCH;
 616		tw_dev->tw_compat_info.working_build = TW_BASE_FW_BUILD;
 617	}
 618
 619	/* Load rest of compatibility struct */
 620	strscpy(tw_dev->tw_compat_info.driver_version, TW_DRIVER_VERSION,
 621		sizeof(tw_dev->tw_compat_info.driver_version));
 622	tw_dev->tw_compat_info.driver_srl_high = TW_CURRENT_DRIVER_SRL;
 623	tw_dev->tw_compat_info.driver_branch_high = TW_CURRENT_DRIVER_BRANCH;
 624	tw_dev->tw_compat_info.driver_build_high = TW_CURRENT_DRIVER_BUILD;
 625	tw_dev->tw_compat_info.driver_srl_low = TW_BASE_FW_SRL;
 626	tw_dev->tw_compat_info.driver_branch_low = TW_BASE_FW_BRANCH;
 627	tw_dev->tw_compat_info.driver_build_low = TW_BASE_FW_BUILD;
 628	tw_dev->tw_compat_info.fw_on_ctlr_srl = fw_on_ctlr_srl;
 629	tw_dev->tw_compat_info.fw_on_ctlr_branch = fw_on_ctlr_branch;
 630	tw_dev->tw_compat_info.fw_on_ctlr_build = fw_on_ctlr_build;
 631
 632	retval = 0;
 633out:
 634	return retval;
 635} /* End twa_check_srl() */
 636
 637/* This function handles ioctl for the character device */
 638static long twa_chrdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 639{
 640	struct inode *inode = file_inode(file);
 641	long timeout;
 642	unsigned long *cpu_addr, data_buffer_length_adjusted = 0, flags = 0;
 643	dma_addr_t dma_handle;
 644	int request_id = 0;
 645	unsigned int sequence_id = 0;
 646	unsigned char event_index, start_index;
 647	TW_Ioctl_Driver_Command driver_command;
 648	TW_Ioctl_Buf_Apache *tw_ioctl;
 649	TW_Lock *tw_lock;
 650	TW_Command_Full *full_command_packet;
 651	TW_Compatibility_Info *tw_compat_info;
 652	TW_Event *event;
 653	ktime_t current_time;
 
 654	TW_Device_Extension *tw_dev = twa_device_extension_list[iminor(inode)];
 655	int retval = TW_IOCTL_ERROR_OS_EFAULT;
 656	void __user *argp = (void __user *)arg;
 657
 658	mutex_lock(&twa_chrdev_mutex);
 659
 660	/* Only let one of these through at a time */
 661	if (mutex_lock_interruptible(&tw_dev->ioctl_lock)) {
 662		retval = TW_IOCTL_ERROR_OS_EINTR;
 663		goto out;
 664	}
 665
 666	/* First copy down the driver command */
 667	if (copy_from_user(&driver_command, argp, sizeof(TW_Ioctl_Driver_Command)))
 668		goto out2;
 669
 670	/* Check data buffer size */
 671	if (driver_command.buffer_length > TW_MAX_SECTORS * 2048) {
 672		retval = TW_IOCTL_ERROR_OS_EINVAL;
 673		goto out2;
 674	}
 675
 676	/* Hardware can only do multiple of 512 byte transfers */
 677	data_buffer_length_adjusted = (driver_command.buffer_length + 511) & ~511;
 678
 679	/* Now allocate ioctl buf memory */
 680	cpu_addr = dma_alloc_coherent(&tw_dev->tw_pci_dev->dev,
 681				      sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
 682				      &dma_handle, GFP_KERNEL);
 683	if (!cpu_addr) {
 684		retval = TW_IOCTL_ERROR_OS_ENOMEM;
 685		goto out2;
 686	}
 687
 688	tw_ioctl = (TW_Ioctl_Buf_Apache *)cpu_addr;
 689
 690	/* Now copy down the entire ioctl */
 691	if (copy_from_user(tw_ioctl, argp, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length))
 692		goto out3;
 693
 694	/* See which ioctl we are doing */
 695	switch (cmd) {
 696	case TW_IOCTL_FIRMWARE_PASS_THROUGH:
 697		spin_lock_irqsave(tw_dev->host->host_lock, flags);
 698		twa_get_request_id(tw_dev, &request_id);
 699
 700		/* Flag internal command */
 701		tw_dev->srb[request_id] = NULL;
 702
 703		/* Flag chrdev ioctl */
 704		tw_dev->chrdev_request_id = request_id;
 705
 706		full_command_packet = &tw_ioctl->firmware_command;
 707
 708		/* Load request id and sglist for both command types */
 709		twa_load_sgl(tw_dev, full_command_packet, request_id, dma_handle, data_buffer_length_adjusted);
 710
 711		memcpy(tw_dev->command_packet_virt[request_id], &(tw_ioctl->firmware_command), sizeof(TW_Command_Full));
 712
 713		/* Now post the command packet to the controller */
 714		twa_post_command_packet(tw_dev, request_id, 1);
 715		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 716
 717		timeout = TW_IOCTL_CHRDEV_TIMEOUT*HZ;
 718
 719		/* Now wait for command to complete */
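    		/* The ISR sets chrdev_request_id back to TW_IOCTL_CHRDEV_FREE and wakes this queue once the firmware response arrives */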
 720		timeout = wait_event_timeout(tw_dev->ioctl_wqueue, tw_dev->chrdev_request_id == TW_IOCTL_CHRDEV_FREE, timeout);
 721
 722		/* We timed out, and didn't get an interrupt */
 723		if (tw_dev->chrdev_request_id != TW_IOCTL_CHRDEV_FREE) {
 724			/* Now we need to reset the board */
 725			printk(KERN_WARNING "3w-9xxx: scsi%d: WARNING: (0x%02X:0x%04X): Character ioctl (0x%x) timed out, resetting card.\n",
 726			       tw_dev->host->host_no, TW_DRIVER, 0x37,
 727			       cmd);
 728			retval = TW_IOCTL_ERROR_OS_EIO;
 729			twa_reset_device_extension(tw_dev);
 730			goto out3;
 731		}
 732
 733		/* Now copy in the command packet response */
 734		memcpy(&(tw_ioctl->firmware_command), tw_dev->command_packet_virt[request_id], sizeof(TW_Command_Full));
 735
 736		/* Now complete the io */
 737		spin_lock_irqsave(tw_dev->host->host_lock, flags);
 738		tw_dev->posted_request_count--;
 739		tw_dev->state[request_id] = TW_S_COMPLETED;
 740		twa_free_request_id(tw_dev, request_id);
 741		spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
 742		break;
 743	case TW_IOCTL_GET_COMPATIBILITY_INFO:
 744		tw_ioctl->driver_command.status = 0;
 745		/* Copy compatibility struct into ioctl data buffer */
 746		tw_compat_info = (TW_Compatibility_Info *)tw_ioctl->data_buffer;
 747		memcpy(tw_compat_info, &tw_dev->tw_compat_info, sizeof(TW_Compatibility_Info));
 748		break;
 749	case TW_IOCTL_GET_LAST_EVENT:
 750		if (tw_dev->event_queue_wrapped) {
 751			if (tw_dev->aen_clobber) {
 752				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 753				tw_dev->aen_clobber = 0;
 754			} else
 755				tw_ioctl->driver_command.status = 0;
 756		} else {
 757			if (!tw_dev->error_index) {
 758				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 759				break;
 760			}
 761			tw_ioctl->driver_command.status = 0;
 762		}
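    		/* error_index is the slot the next event will use, so the most recent entry sits one behind it; adding TW_Q_LENGTH keeps the index non-negative before the modulo */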
 763		event_index = (tw_dev->error_index - 1 + TW_Q_LENGTH) % TW_Q_LENGTH;
 764		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 765		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 766		break;
 767	case TW_IOCTL_GET_FIRST_EVENT:
 768		if (tw_dev->event_queue_wrapped) {
 769			if (tw_dev->aen_clobber) {
 770				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 771				tw_dev->aen_clobber = 0;
 772			} else
 773				tw_ioctl->driver_command.status = 0;
 774			event_index = tw_dev->error_index;
 775		} else {
 776			if (!tw_dev->error_index) {
 777				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 778				break;
 779			}
 780			tw_ioctl->driver_command.status = 0;
 781			event_index = 0;
 782		}
 783		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 784		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 785		break;
 786	case TW_IOCTL_GET_NEXT_EVENT:
 787		event = (TW_Event *)tw_ioctl->data_buffer;
 788		sequence_id = event->sequence_id;
 789		tw_ioctl->driver_command.status = 0;
 790
 791		if (tw_dev->event_queue_wrapped) {
 792			if (tw_dev->aen_clobber) {
 793				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 794				tw_dev->aen_clobber = 0;
 795			}
 796			start_index = tw_dev->error_index;
 797		} else {
 798			if (!tw_dev->error_index) {
 799				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 800				break;
 801			}
 802			start_index = 0;
 803		}
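    		/* Sequence ids are assigned consecutively, so the event following 'sequence_id' sits (sequence_id - oldest sequence id + 1) slots past the oldest entry at start_index */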
 804		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id + 1) % TW_Q_LENGTH;
 805
 806		if (!(tw_dev->event_queue[event_index]->sequence_id > sequence_id)) {
 807			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
 808				tw_dev->aen_clobber = 1;
 809			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 810			break;
 811		}
 812		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 813		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 814		break;
 815	case TW_IOCTL_GET_PREVIOUS_EVENT:
 816		event = (TW_Event *)tw_ioctl->data_buffer;
 817		sequence_id = event->sequence_id;
 818		tw_ioctl->driver_command.status = 0;
 819
 820		if (tw_dev->event_queue_wrapped) {
 821			if (tw_dev->aen_clobber) {
 822				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_AEN_CLOBBER;
 823				tw_dev->aen_clobber = 0;
 824			}
 825			start_index = tw_dev->error_index;
 826		} else {
 827			if (!tw_dev->error_index) {
 828				tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 829				break;
 830			}
 831			start_index = 0;
 832		}
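    		/* Same walk as GET_NEXT_EVENT, but step one sequence id backwards from the caller's event */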
 833		event_index = (start_index + sequence_id - tw_dev->event_queue[start_index]->sequence_id - 1) % TW_Q_LENGTH;
 834
 835		if (!(tw_dev->event_queue[event_index]->sequence_id < sequence_id)) {
 836			if (tw_ioctl->driver_command.status == TW_IOCTL_ERROR_STATUS_AEN_CLOBBER)
 837				tw_dev->aen_clobber = 1;
 838			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NO_MORE_EVENTS;
 839			break;
 840		}
 841		memcpy(tw_ioctl->data_buffer, tw_dev->event_queue[event_index], sizeof(TW_Event));
 842		tw_dev->event_queue[event_index]->retrieved = TW_AEN_RETRIEVED;
 843		break;
 844	case TW_IOCTL_GET_LOCK:
 845		tw_lock = (TW_Lock *)tw_ioctl->data_buffer;
 846		current_time = ktime_get();
 847
 848		if ((tw_lock->force_flag == 1) || (tw_dev->ioctl_sem_lock == 0) ||
 849		    ktime_after(current_time, tw_dev->ioctl_time)) {
 850			tw_dev->ioctl_sem_lock = 1;
 851			tw_dev->ioctl_time = ktime_add_ms(current_time, tw_lock->timeout_msec);
 852			tw_ioctl->driver_command.status = 0;
 853			tw_lock->time_remaining_msec = tw_lock->timeout_msec;
 854		} else {
 855			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_LOCKED;
 856			tw_lock->time_remaining_msec = ktime_ms_delta(tw_dev->ioctl_time, current_time);
 857		}
 858		break;
 859	case TW_IOCTL_RELEASE_LOCK:
 860		if (tw_dev->ioctl_sem_lock == 1) {
 861			tw_dev->ioctl_sem_lock = 0;
 862			tw_ioctl->driver_command.status = 0;
 863		} else {
 864			tw_ioctl->driver_command.status = TW_IOCTL_ERROR_STATUS_NOT_LOCKED;
 865		}
 866		break;
 867	default:
 868		retval = TW_IOCTL_ERROR_OS_ENOTTY;
 869		goto out3;
 870	}
 871
 872	/* Now copy the entire response to userspace */
 873	if (copy_to_user(argp, tw_ioctl, sizeof(TW_Ioctl_Buf_Apache) + driver_command.buffer_length) == 0)
 874		retval = 0;
 875out3:
 876	/* Now free ioctl buf memory */
 877	dma_free_coherent(&tw_dev->tw_pci_dev->dev,
 878			  sizeof(TW_Ioctl_Buf_Apache) + data_buffer_length_adjusted,
 879			  cpu_addr, dma_handle);
 880out2:
 881	mutex_unlock(&tw_dev->ioctl_lock);
 882out:
 883	mutex_unlock(&twa_chrdev_mutex);
 884	return retval;
 885} /* End twa_chrdev_ioctl() */
 886
 887/* This function handles open for the character device */
 888/* NOTE that this function will race with remove. */
 889static int twa_chrdev_open(struct inode *inode, struct file *file)
 890{
 891	unsigned int minor_number;
 892	int retval = TW_IOCTL_ERROR_OS_ENODEV;
 893
 894	if (!capable(CAP_SYS_ADMIN)) {
 895		retval = -EACCES;
 896		goto out;
 897	}
 898
 899	minor_number = iminor(inode);
 900	if (minor_number >= twa_device_extension_count)
 901		goto out;
 902	retval = 0;
 903out:
 904	return retval;
 905} /* End twa_chrdev_open() */
 906
 907/* This function will print readable messages from status register errors */
 908static int twa_decode_bits(TW_Device_Extension *tw_dev, u32 status_reg_value)
 909{
 910	int retval = 1;
 911
 912	/* Check for various error conditions and handle them appropriately */
 913	if (status_reg_value & TW_STATUS_PCI_PARITY_ERROR) {
 914		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xc, "PCI Parity Error: clearing");
 915		writel(TW_CONTROL_CLEAR_PARITY_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
 916	}
 917
 918	if (status_reg_value & TW_STATUS_PCI_ABORT) {
 919		TW_PRINTK(tw_dev->host, TW_DRIVER, 0xd, "PCI Abort: clearing");
 920		writel(TW_CONTROL_CLEAR_PCI_ABORT, TW_CONTROL_REG_ADDR(tw_dev));
 921		pci_write_config_word(tw_dev->tw_pci_dev, PCI_STATUS, TW_PCI_CLEAR_PCI_ABORT);
 922	}
 923
 924	if (status_reg_value & TW_STATUS_QUEUE_ERROR) {
 925		if (((tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9650SE) &&
 926		     (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9690SA)) ||
 927		    (!test_bit(TW_IN_RESET, &tw_dev->flags)))
 928			TW_PRINTK(tw_dev->host, TW_DRIVER, 0xe, "Controller Queue Error: clearing");
 929		writel(TW_CONTROL_CLEAR_QUEUE_ERROR, TW_CONTROL_REG_ADDR(tw_dev));
 930	}
 931
 932	if (status_reg_value & TW_STATUS_MICROCONTROLLER_ERROR) {
 933		if (tw_dev->reset_print == 0) {
 934			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x10, "Microcontroller Error: clearing");
 935			tw_dev->reset_print = 1;
 936		}
 937		goto out;
 938	}
 939	retval = 0;
 940out:
 941	return retval;
 942} /* End twa_decode_bits() */
 943
 944/* This function will empty the response queue */
 945static int twa_empty_response_queue(TW_Device_Extension *tw_dev)
 946{
 947	u32 status_reg_value;
 948	int count = 0, retval = 1;
 949
 950	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
 951
 952	while (((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) && (count < TW_MAX_RESPONSE_DRAIN)) {
 953		readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
 954		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
 955		count++;
 956	}
 957	if (count == TW_MAX_RESPONSE_DRAIN)
 958		goto out;
 959
 960	retval = 0;
 961out:
 962	return retval;
 963} /* End twa_empty_response_queue() */
 964
 965/* This function will clear the pchip/response queue on 9550SX */
 966static int twa_empty_response_queue_large(TW_Device_Extension *tw_dev)
 967{
 968	u32 response_que_value = 0;
 969	unsigned long before;
 970	int retval = 1;
 971
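    	/* Everything except the original 9000-series controller has a large response queue to drain; poll its drain-completed flag for up to 30 seconds */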
 972	if (tw_dev->tw_pci_dev->device != PCI_DEVICE_ID_3WARE_9000) {
 973		before = jiffies;
 974		while ((response_que_value & TW_9550SX_DRAIN_COMPLETED) != TW_9550SX_DRAIN_COMPLETED) {
 975			response_que_value = readl(TW_RESPONSE_QUEUE_REG_ADDR_LARGE(tw_dev));
 976			msleep(1);
 977			if (time_after(jiffies, before + HZ * 30))
 978				goto out;
 979		}
 980		/* P-chip settle time */
 981		msleep(500);
 982		retval = 0;
 983	} else
 984		retval = 0;
 985out:
 986	return retval;
 987} /* End twa_empty_response_queue_large() */
 988
 989/* This function passes sense keys from firmware to scsi layer */
 990static int twa_fill_sense(TW_Device_Extension *tw_dev, int request_id, int copy_sense, int print_host)
 991{
 992	TW_Command_Full *full_command_packet;
 993	unsigned short error;
 994	int retval = 1;
 995	char *error_str;
 996
 997	full_command_packet = tw_dev->command_packet_virt[request_id];
 998
 999	/* Check for embedded error string */
1000	error_str = &(full_command_packet->header.err_specific_desc[strlen(full_command_packet->header.err_specific_desc) + 1]);
1001
1002	/* Don't print error for Logical unit not supported during rollcall */
1003	error = le16_to_cpu(full_command_packet->header.status_block.error);
1004	if ((error != TW_ERROR_LOGICAL_UNIT_NOT_SUPPORTED) && (error != TW_ERROR_UNIT_OFFLINE)) {
1005		if (print_host)
1006			printk(KERN_WARNING "3w-9xxx: scsi%d: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1007			       tw_dev->host->host_no,
1008			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
1009			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
1010			       full_command_packet->header.err_specific_desc);
1011		else
1012			printk(KERN_WARNING "3w-9xxx: ERROR: (0x%02X:0x%04X): %s:%s.\n",
1013			       TW_MESSAGE_SOURCE_CONTROLLER_ERROR, error,
1014			       error_str[0] ? error_str : twa_string_lookup(twa_error_table, error),
1015			       full_command_packet->header.err_specific_desc);
1016	}
1017
1018	if (copy_sense) {
1019		memcpy(tw_dev->srb[request_id]->sense_buffer, full_command_packet->header.sense_data, TW_SENSE_DATA_LENGTH);
1020		tw_dev->srb[request_id]->result = (full_command_packet->command.newcommand.status << 1);
1021		retval = TW_ISR_DONT_RESULT;
1022		goto out;
1023	}
1024	retval = 0;
1025out:
1026	return retval;
1027} /* End twa_fill_sense() */
1028
1029/* This function will free up device extension resources */
1030static void twa_free_device_extension(TW_Device_Extension *tw_dev)
1031{
1032	if (tw_dev->command_packet_virt[0])
1033		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
1034				sizeof(TW_Command_Full) * TW_Q_LENGTH,
1035				tw_dev->command_packet_virt[0],
1036				tw_dev->command_packet_phys[0]);
1037
1038	if (tw_dev->generic_buffer_virt[0])
1039		dma_free_coherent(&tw_dev->tw_pci_dev->dev,
1040				TW_SECTOR_SIZE * TW_Q_LENGTH,
1041				tw_dev->generic_buffer_virt[0],
1042				tw_dev->generic_buffer_phys[0]);
1043
1044	kfree(tw_dev->event_queue[0]);
1045} /* End twa_free_device_extension() */
1046
1047/* This function will free a request id */
1048static void twa_free_request_id(TW_Device_Extension *tw_dev, int request_id)
1049{
1050	tw_dev->free_queue[tw_dev->free_tail] = request_id;
1051	tw_dev->state[request_id] = TW_S_FINISHED;
1052	tw_dev->free_tail = (tw_dev->free_tail + 1) % TW_Q_LENGTH;
1053} /* End twa_free_request_id() */
1054
1055/* This function will get parameter table entries from the firmware */
1056static void *twa_get_param(TW_Device_Extension *tw_dev, int request_id, int table_id, int parameter_id, int parameter_size_bytes)
1057{
1058	TW_Command_Full *full_command_packet;
1059	TW_Command *command_packet;
1060	TW_Param_Apache *param;
1061	void *retval = NULL;
1062
1063	/* Setup the command packet */
1064	full_command_packet = tw_dev->command_packet_virt[request_id];
1065	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1066	command_packet = &full_command_packet->command.oldcommand;
1067
1068	command_packet->opcode__sgloffset = TW_OPSGL_IN(2, TW_OP_GET_PARAM);
1069	command_packet->size		  = TW_COMMAND_SIZE;
1070	command_packet->request_id	  = request_id;
1071	command_packet->byte6_offset.block_count = cpu_to_le16(1);
1072
1073	/* Now setup the param */
1074	param = (TW_Param_Apache *)tw_dev->generic_buffer_virt[request_id];
1075	memset(param, 0, TW_SECTOR_SIZE);
1076	param->table_id = cpu_to_le16(table_id | 0x8000);
1077	param->parameter_id = cpu_to_le16(parameter_id);
1078	param->parameter_size_bytes = cpu_to_le16(parameter_size_bytes);
1079
1080	command_packet->byte8_offset.param.sgl[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1081	command_packet->byte8_offset.param.sgl[0].length = cpu_to_le32(TW_SECTOR_SIZE);
1082
1083	/* Post the command packet to the board */
1084	twa_post_command_packet(tw_dev, request_id, 1);
1085
1086	/* Poll for completion */
1087	if (twa_poll_response(tw_dev, request_id, 30))
1088		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x13, "No valid response during get param")
1089	else
1090		retval = (void *)&(param->data[0]);
1091
1092	tw_dev->posted_request_count--;
1093	tw_dev->state[request_id] = TW_S_INITIAL;
1094
1095	return retval;
1096} /* End twa_get_param() */
1097
1098/* This function will assign an available request id */
1099static void twa_get_request_id(TW_Device_Extension *tw_dev, int *request_id)
1100{
1101	*request_id = tw_dev->free_queue[tw_dev->free_head];
1102	tw_dev->free_head = (tw_dev->free_head + 1) % TW_Q_LENGTH;
1103	tw_dev->state[*request_id] = TW_S_STARTED;
1104} /* End twa_get_request_id() */
1105
1106/* This function will send an initconnection command to controller */
1107static int twa_initconnection(TW_Device_Extension *tw_dev, int message_credits,
1108			      u32 set_features, unsigned short current_fw_srl,
1109			      unsigned short current_fw_arch_id,
1110			      unsigned short current_fw_branch,
1111			      unsigned short current_fw_build,
1112			      unsigned short *fw_on_ctlr_srl,
1113			      unsigned short *fw_on_ctlr_arch_id,
1114			      unsigned short *fw_on_ctlr_branch,
1115			      unsigned short *fw_on_ctlr_build,
1116			      u32 *init_connect_result)
1117{
1118	TW_Command_Full *full_command_packet;
1119	TW_Initconnect *tw_initconnect;
1120	int request_id = 0, retval = 1;
1121
1122	/* Initialize InitConnection command packet */
1123	full_command_packet = tw_dev->command_packet_virt[request_id];
1124	memset(full_command_packet, 0, sizeof(TW_Command_Full));
1125	full_command_packet->header.header_desc.size_header = 128;
1126
1127	tw_initconnect = (TW_Initconnect *)&full_command_packet->command.oldcommand;
1128	tw_initconnect->opcode__reserved = TW_OPRES_IN(0, TW_OP_INIT_CONNECTION);
1129	tw_initconnect->request_id = request_id;
1130	tw_initconnect->message_credits = cpu_to_le16(message_credits);
1131
1132	/* Turn on 64-bit sgl support if we need to */
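    	/* A dma_addr_t wider than 32 bits means SG addresses can land above 4GB, so advertise 64-bit SGL addressing to the firmware */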
1133	set_features |= sizeof(dma_addr_t) > 4 ? 1 : 0;
1134
1135	tw_initconnect->features = cpu_to_le32(set_features);
1136
1137	if (set_features & TW_EXTENDED_INIT_CONNECT) {
1138		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE_EXTENDED;
1139		tw_initconnect->fw_srl = cpu_to_le16(current_fw_srl);
1140		tw_initconnect->fw_arch_id = cpu_to_le16(current_fw_arch_id);
1141		tw_initconnect->fw_branch = cpu_to_le16(current_fw_branch);
1142		tw_initconnect->fw_build = cpu_to_le16(current_fw_build);
1143	} else
1144		tw_initconnect->size = TW_INIT_COMMAND_PACKET_SIZE;
1145
1146	/* Send command packet to the board */
1147	twa_post_command_packet(tw_dev, request_id, 1);
1148
1149	/* Poll for completion */
1150	if (twa_poll_response(tw_dev, request_id, 30)) {
1151		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x15, "No valid response during init connection");
1152	} else {
1153		if (set_features & TW_EXTENDED_INIT_CONNECT) {
1154			*fw_on_ctlr_srl = le16_to_cpu(tw_initconnect->fw_srl);
1155			*fw_on_ctlr_arch_id = le16_to_cpu(tw_initconnect->fw_arch_id);
1156			*fw_on_ctlr_branch = le16_to_cpu(tw_initconnect->fw_branch);
1157			*fw_on_ctlr_build = le16_to_cpu(tw_initconnect->fw_build);
1158			*init_connect_result = le32_to_cpu(tw_initconnect->result);
1159		}
1160		retval = 0;
1161	}
1162
1163	tw_dev->posted_request_count--;
1164	tw_dev->state[request_id] = TW_S_INITIAL;
1165
1166	return retval;
1167} /* End twa_initconnection() */
1168
1169/* This function will initialize the fields of a device extension */
1170static int twa_initialize_device_extension(TW_Device_Extension *tw_dev)
1171{
1172	int i, retval = 1;
1173
1174	/* Initialize command packet buffers */
1175	if (twa_allocate_memory(tw_dev, sizeof(TW_Command_Full), 0)) {
1176		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x16, "Command packet memory allocation failed");
1177		goto out;
1178	}
1179
1180	/* Initialize generic buffer */
1181	if (twa_allocate_memory(tw_dev, TW_SECTOR_SIZE, 1)) {
1182		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x17, "Generic memory allocation failed");
1183		goto out;
1184	}
1185
1186	/* Allocate event info space */
1187	tw_dev->event_queue[0] = kcalloc(TW_Q_LENGTH, sizeof(TW_Event), GFP_KERNEL);
1188	if (!tw_dev->event_queue[0]) {
1189		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x18, "Event info memory allocation failed");
1190		goto out;
1191	}
1192
1193
1194	for (i = 0; i < TW_Q_LENGTH; i++) {
1195		tw_dev->event_queue[i] = (TW_Event *)((unsigned char *)tw_dev->event_queue[0] + (i * sizeof(TW_Event)));
1196		tw_dev->free_queue[i] = i;
1197		tw_dev->state[i] = TW_S_INITIAL;
1198	}
1199
1200	tw_dev->pending_head = TW_Q_START;
1201	tw_dev->pending_tail = TW_Q_START;
1202	tw_dev->free_head = TW_Q_START;
1203	tw_dev->free_tail = TW_Q_START;
1204	tw_dev->error_sequence_id = 1;
1205	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1206
1207	mutex_init(&tw_dev->ioctl_lock);
1208	init_waitqueue_head(&tw_dev->ioctl_wqueue);
1209
1210	retval = 0;
1211out:
1212	return retval;
1213} /* End twa_initialize_device_extension() */
1214
1215/* This function is the interrupt service routine */
1216static irqreturn_t twa_interrupt(int irq, void *dev_instance)
1217{
1218	int request_id, error = 0;
1219	u32 status_reg_value;
1220	TW_Response_Queue response_que;
1221	TW_Command_Full *full_command_packet;
1222	TW_Device_Extension *tw_dev = (TW_Device_Extension *)dev_instance;
1223	int handled = 0;
1224
1225	/* Get the per adapter lock */
1226	spin_lock(tw_dev->host->host_lock);
1227
1228	/* Read the registers */
1229	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1230
1231	/* Check if this is our interrupt, otherwise bail */
1232	if (!(status_reg_value & TW_STATUS_VALID_INTERRUPT))
1233		goto twa_interrupt_bail;
1234
1235	handled = 1;
1236
1237	/* If we are resetting, bail */
1238	if (test_bit(TW_IN_RESET, &tw_dev->flags))
1239		goto twa_interrupt_bail;
1240
1241	/* Check controller for errors */
1242	if (twa_check_bits(status_reg_value)) {
1243		if (twa_decode_bits(tw_dev, status_reg_value)) {
1244			TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1245			goto twa_interrupt_bail;
1246		}
1247	}
1248
1249	/* Handle host interrupt */
1250	if (status_reg_value & TW_STATUS_HOST_INTERRUPT)
1251		TW_CLEAR_HOST_INTERRUPT(tw_dev);
1252
1253	/* Handle attention interrupt */
1254	if (status_reg_value & TW_STATUS_ATTENTION_INTERRUPT) {
1255		TW_CLEAR_ATTENTION_INTERRUPT(tw_dev);
1256		if (!(test_and_set_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags))) {
1257			twa_get_request_id(tw_dev, &request_id);
1258
1259			error = twa_aen_read_queue(tw_dev, request_id);
1260			if (error) {
1261				tw_dev->state[request_id] = TW_S_COMPLETED;
1262				twa_free_request_id(tw_dev, request_id);
1263				clear_bit(TW_IN_ATTENTION_LOOP, &tw_dev->flags);
1264			}
1265		}
1266	}
1267
1268	/* Handle command interrupt */
1269	if (status_reg_value & TW_STATUS_COMMAND_INTERRUPT) {
1270		TW_MASK_COMMAND_INTERRUPT(tw_dev);
1271		/* Drain as many pending commands as we can */
1272		while (tw_dev->pending_request_count > 0) {
1273			request_id = tw_dev->pending_queue[tw_dev->pending_head];
1274			if (tw_dev->state[request_id] != TW_S_PENDING) {
1275				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x19, "Found request id that wasn't pending");
1276				TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1277				goto twa_interrupt_bail;
1278			}
1279			if (twa_post_command_packet(tw_dev, request_id, 1) == 0) {
1280				tw_dev->pending_head = (tw_dev->pending_head + 1) % TW_Q_LENGTH;
1281				tw_dev->pending_request_count--;
1282			} else {
1283				/* If we get here, we will continue re-posting on the next command interrupt */
1284				break;
1285			}
1286		}
1287	}
1288
1289	/* Handle response interrupt */
1290	if (status_reg_value & TW_STATUS_RESPONSE_INTERRUPT) {
1291
1292		/* Drain the response queue from the board */
1293		while ((status_reg_value & TW_STATUS_RESPONSE_QUEUE_EMPTY) == 0) {
1294			/* Complete the response */
1295			response_que.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1296			request_id = TW_RESID_OUT(response_que.response_id);
1297			full_command_packet = tw_dev->command_packet_virt[request_id];
1298			error = 0;
1299			/* Check for command packet errors */
1300			if (full_command_packet->command.newcommand.status != 0) {
1301				if (tw_dev->srb[request_id] != NULL) {
1302					error = twa_fill_sense(tw_dev, request_id, 1, 1);
1303				} else {
1304					/* Skip ioctl error prints */
1305					if (request_id != tw_dev->chrdev_request_id) {
1306						error = twa_fill_sense(tw_dev, request_id, 0, 1);
1307					}
1308				}
1309			}
1310
1311			/* Check for correct state */
1312			if (tw_dev->state[request_id] != TW_S_POSTED) {
1313				if (tw_dev->srb[request_id] != NULL) {
1314					TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1a, "Received a request id that wasn't posted");
1315					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1316					goto twa_interrupt_bail;
1317				}
1318			}
1319
1320			/* Check for internal command completion */
1321			if (tw_dev->srb[request_id] == NULL) {
1322				if (request_id != tw_dev->chrdev_request_id) {
1323					if (twa_aen_complete(tw_dev, request_id))
1324						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1b, "Error completing AEN during attention interrupt");
1325				} else {
1326					tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1327					wake_up(&tw_dev->ioctl_wqueue);
1328				}
1329			} else {
1330				struct scsi_cmnd *cmd;
1331
1332				cmd = tw_dev->srb[request_id];
1333
1334				twa_scsiop_execute_scsi_complete(tw_dev, request_id);
1335				/* If there was no error, the command was a success */
1336				if (error == 0) {
1337					cmd->result = (DID_OK << 16);
1338				}
1339
1340				/* If error, command failed */
1341				if (error == 1) {
1342					/* Ask for a host reset */
1343					cmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
1344				}
1345
1346				/* Report residual bytes for single sgl */
1347				if ((scsi_sg_count(cmd) <= 1) && (full_command_packet->command.newcommand.status == 0)) {
1348					u32 length = le32_to_cpu(full_command_packet->command.newcommand.sg_list[0].length);
1349
1350					if (length < scsi_bufflen(cmd))
1351						scsi_set_resid(cmd, scsi_bufflen(cmd) - length);
1352				}
1353
1354				/* Now complete the io */
1355				if (twa_command_mapped(cmd))
1356					scsi_dma_unmap(cmd);
1357				scsi_done(cmd);
1358				tw_dev->state[request_id] = TW_S_COMPLETED;
1359				twa_free_request_id(tw_dev, request_id);
1360				tw_dev->posted_request_count--;
1361			}
1362
1363			/* Check for valid status after each drain */
1364			status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1365			if (twa_check_bits(status_reg_value)) {
1366				if (twa_decode_bits(tw_dev, status_reg_value)) {
1367					TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1368					goto twa_interrupt_bail;
1369				}
1370			}
1371		}
1372	}
1373
1374twa_interrupt_bail:
1375	spin_unlock(tw_dev->host->host_lock);
1376	return IRQ_RETVAL(handled);
1377} /* End twa_interrupt() */
1378
1379/* This function will load the request id and various sgls for ioctls */
1380static void twa_load_sgl(TW_Device_Extension *tw_dev, TW_Command_Full *full_command_packet, int request_id, dma_addr_t dma_handle, int length)
1381{
1382	TW_Command *oldcommand;
1383	TW_Command_Apache *newcommand;
1384	TW_SG_Entry *sgl;
1385	unsigned int pae = 0;
1386
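    	/* pae marks a 32-bit kernel built with a 64-bit dma_addr_t; the old-style command below then grows by one 32-bit word to carry the wider SG address */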
1387	if ((sizeof(long) < 8) && (sizeof(dma_addr_t) > 4))
1388		pae = 1;
1389
1390	if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1391		newcommand = &full_command_packet->command.newcommand;
1392		newcommand->request_id__lunl =
1393			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->request_id__lunl), request_id);
1394		if (length) {
1395			newcommand->sg_list[0].address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
1396			newcommand->sg_list[0].length = cpu_to_le32(length);
1397		}
1398		newcommand->sgl_entries__lunh =
1399			TW_REQ_LUN_IN(TW_LUN_OUT(newcommand->sgl_entries__lunh), length ? 1 : 0);
1400	} else {
1401		oldcommand = &full_command_packet->command.oldcommand;
1402		oldcommand->request_id = request_id;
1403
1404		if (TW_SGL_OUT(oldcommand->opcode__sgloffset)) {
1405			/* Load the sg list */
1406			if (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)
1407				sgl = (TW_SG_Entry *)((u32 *)oldcommand+oldcommand->size - (sizeof(TW_SG_Entry)/4) + pae);
1408			else
1409				sgl = (TW_SG_Entry *)((u32 *)oldcommand+TW_SGL_OUT(oldcommand->opcode__sgloffset));
1410			sgl->address = TW_CPU_TO_SGL(dma_handle + sizeof(TW_Ioctl_Buf_Apache));
1411			sgl->length = cpu_to_le32(length);
1412
1413			oldcommand->size += pae;
1414		}
1415	}
1416} /* End twa_load_sgl() */
1417
1418/* This function will poll for a response interrupt of a request */
1419static int twa_poll_response(TW_Device_Extension *tw_dev, int request_id, int seconds)
1420{
1421	int retval = 1, found = 0, response_request_id;
1422	TW_Response_Queue response_queue;
1423	TW_Command_Full *full_command_packet = tw_dev->command_packet_virt[request_id];
1424
1425	if (twa_poll_status_gone(tw_dev, TW_STATUS_RESPONSE_QUEUE_EMPTY, seconds) == 0) {
1426		response_queue.value = readl(TW_RESPONSE_QUEUE_REG_ADDR(tw_dev));
1427		response_request_id = TW_RESID_OUT(response_queue.response_id);
1428		if (request_id != response_request_id) {
1429			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1e, "Found unexpected request id while polling for response");
1430			goto out;
1431		}
1432		if (TW_OP_OUT(full_command_packet->command.newcommand.opcode__reserved) == TW_OP_EXECUTE_SCSI) {
1433			if (full_command_packet->command.newcommand.status != 0) {
1434				/* bad response */
1435				twa_fill_sense(tw_dev, request_id, 0, 0);
1436				goto out;
1437			}
1438			found = 1;
1439		} else {
1440			if (full_command_packet->command.oldcommand.status != 0) {
1441				/* bad response */
1442				twa_fill_sense(tw_dev, request_id, 0, 0);
1443				goto out;
1444			}
1445			found = 1;
1446		}
1447	}
1448
1449	if (found)
1450		retval = 0;
1451out:
1452	return retval;
1453} /* End twa_poll_response() */
1454
1455/* This function will poll the status register for a flag */
1456static int twa_poll_status(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1457{
1458	u32 status_reg_value;
1459	unsigned long before;
1460	int retval = 1;
1461
1462	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1463	before = jiffies;
1464
1465	if (twa_check_bits(status_reg_value))
1466		twa_decode_bits(tw_dev, status_reg_value);
1467
1468	while ((status_reg_value & flag) != flag) {
1469		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1470
1471		if (twa_check_bits(status_reg_value))
1472			twa_decode_bits(tw_dev, status_reg_value);
1473
1474		if (time_after(jiffies, before + HZ * seconds))
1475			goto out;
1476
1477		msleep(50);
1478	}
1479	retval = 0;
1480out:
1481	return retval;
1482} /* End twa_poll_status() */
1483
1484/* This function will poll the status register for disappearance of a flag */
1485static int twa_poll_status_gone(TW_Device_Extension *tw_dev, u32 flag, int seconds)
1486{
1487	u32 status_reg_value;
1488	unsigned long before;
1489	int retval = 1;
1490
1491	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1492	before = jiffies;
1493
1494	if (twa_check_bits(status_reg_value))
1495		twa_decode_bits(tw_dev, status_reg_value);
1496
1497	while ((status_reg_value & flag) != 0) {
1498		status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1499		if (twa_check_bits(status_reg_value))
1500			twa_decode_bits(tw_dev, status_reg_value);
1501
1502		if (time_after(jiffies, before + HZ * seconds))
1503			goto out;
1504
1505		msleep(50);
1506	}
1507	retval = 0;
1508out:
1509	return retval;
1510} /* End twa_poll_status_gone() */
1511
1512/* This function will attempt to post a command packet to the board */
1513static int twa_post_command_packet(TW_Device_Extension *tw_dev, int request_id, char internal)
1514{
1515	u32 status_reg_value;
1516	dma_addr_t command_que_value;
1517	int retval = 1;
1518
1519	command_que_value = tw_dev->command_packet_phys[request_id];
1520
1521	/* For 9650SE write low 4 bytes first */
1522	if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1523	    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1524		command_que_value += TW_COMMAND_OFFSET;
1525		writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev));
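    		/* The upper 32 bits are written further down, once we know the queue can accept the command */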
1526	}
1527
1528	status_reg_value = readl(TW_STATUS_REG_ADDR(tw_dev));
1529
1530	if (twa_check_bits(status_reg_value))
1531		twa_decode_bits(tw_dev, status_reg_value);
1532
1533	if (((tw_dev->pending_request_count > 0) && (tw_dev->state[request_id] != TW_S_PENDING)) || (status_reg_value & TW_STATUS_COMMAND_QUEUE_FULL)) {
1534
1535		/* Only pend internal driver commands */
1536		if (!internal) {
1537			retval = SCSI_MLQUEUE_HOST_BUSY;
1538			goto out;
1539		}
1540
1541		/* Couldn't post the command packet, so we do it later */
1542		if (tw_dev->state[request_id] != TW_S_PENDING) {
1543			tw_dev->state[request_id] = TW_S_PENDING;
1544			tw_dev->pending_request_count++;
1545			if (tw_dev->pending_request_count > tw_dev->max_pending_request_count) {
1546				tw_dev->max_pending_request_count = tw_dev->pending_request_count;
1547			}
1548			tw_dev->pending_queue[tw_dev->pending_tail] = request_id;
1549			tw_dev->pending_tail = (tw_dev->pending_tail + 1) % TW_Q_LENGTH;
1550		}
1551		TW_UNMASK_COMMAND_INTERRUPT(tw_dev);
1552		goto out;
1553	} else {
1554		if ((tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
1555		    (tw_dev->tw_pci_dev->device == PCI_DEVICE_ID_3WARE_9690SA)) {
1556			/* Now write upper 4 bytes */
1557			writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR_LARGE(tw_dev) + 0x4);
1558		} else {
1559			if (sizeof(dma_addr_t) > 4) {
1560				command_que_value += TW_COMMAND_OFFSET;
1561				writel((u32)command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1562				writel((u32)((u64)command_que_value >> 32), TW_COMMAND_QUEUE_REG_ADDR(tw_dev) + 0x4);
1563			} else {
1564				writel(TW_COMMAND_OFFSET + command_que_value, TW_COMMAND_QUEUE_REG_ADDR(tw_dev));
1565			}
1566		}
1567		tw_dev->state[request_id] = TW_S_POSTED;
1568		tw_dev->posted_request_count++;
1569		if (tw_dev->posted_request_count > tw_dev->max_posted_request_count) {
1570			tw_dev->max_posted_request_count = tw_dev->posted_request_count;
1571		}
1572	}
1573	retval = 0;
1574out:
1575	return retval;
1576} /* End twa_post_command_packet() */
1577
1578/* This function will reset a device extension */
1579static int twa_reset_device_extension(TW_Device_Extension *tw_dev)
1580{
1581	int i = 0;
1582	int retval = 1;
1583	unsigned long flags = 0;
1584
1585	set_bit(TW_IN_RESET, &tw_dev->flags);
1586	TW_DISABLE_INTERRUPTS(tw_dev);
1587	TW_MASK_COMMAND_INTERRUPT(tw_dev);
1588	spin_lock_irqsave(tw_dev->host->host_lock, flags);
1589
1590	/* Abort all requests that are in progress */
1591	for (i = 0; i < TW_Q_LENGTH; i++) {
1592		if ((tw_dev->state[i] != TW_S_FINISHED) &&
1593		    (tw_dev->state[i] != TW_S_INITIAL) &&
1594		    (tw_dev->state[i] != TW_S_COMPLETED)) {
1595			if (tw_dev->srb[i]) {
1596				struct scsi_cmnd *cmd = tw_dev->srb[i];
1597
1598				cmd->result = (DID_RESET << 16);
1599				if (twa_command_mapped(cmd))
1600					scsi_dma_unmap(cmd);
1601				scsi_done(cmd);
1602			}
1603		}
1604	}
1605
1606	/* Reset queues and counts */
1607	for (i = 0; i < TW_Q_LENGTH; i++) {
1608		tw_dev->free_queue[i] = i;
1609		tw_dev->state[i] = TW_S_INITIAL;
1610	}
1611	tw_dev->free_head = TW_Q_START;
1612	tw_dev->free_tail = TW_Q_START;
1613	tw_dev->posted_request_count = 0;
1614	tw_dev->pending_request_count = 0;
1615	tw_dev->pending_head = TW_Q_START;
1616	tw_dev->pending_tail = TW_Q_START;
1617	tw_dev->reset_print = 0;
1618
1619	spin_unlock_irqrestore(tw_dev->host->host_lock, flags);
1620
1621	if (twa_reset_sequence(tw_dev, 1))
1622		goto out;
1623
1624	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
1625	clear_bit(TW_IN_RESET, &tw_dev->flags);
1626	tw_dev->chrdev_request_id = TW_IOCTL_CHRDEV_FREE;
1627
1628	retval = 0;
1629out:
1630	return retval;
1631} /* End twa_reset_device_extension() */
1632
1633/* This function will reset a controller */
1634static int twa_reset_sequence(TW_Device_Extension *tw_dev, int soft_reset)
1635{
1636	int tries = 0, retval = 1, flashed = 0, do_soft_reset = soft_reset;
1637
1638	while (tries < TW_MAX_RESET_TRIES) {
1639		if (do_soft_reset) {
1640			TW_SOFT_RESET(tw_dev);
1641			/* Clear pchip/response queue on 9550SX */
1642			if (twa_empty_response_queue_large(tw_dev)) {
1643				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x36, "Response queue (large) empty failed during reset sequence");
1644				do_soft_reset = 1;
1645				tries++;
1646				continue;
1647			}
1648		}
1649
1650		/* Make sure controller is in a good state */
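    		/* When a soft reset was issued, also wait for the attention interrupt bit to come up */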
1651		if (twa_poll_status(tw_dev, TW_STATUS_MICROCONTROLLER_READY | (do_soft_reset == 1 ? TW_STATUS_ATTENTION_INTERRUPT : 0), 60)) {
1652			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x1f, "Microcontroller not ready during reset sequence");
1653			do_soft_reset = 1;
1654			tries++;
1655			continue;
1656		}
1657
1658		/* Empty response queue */
1659		if (twa_empty_response_queue(tw_dev)) {
1660			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x20, "Response queue empty failed during reset sequence");
1661			do_soft_reset = 1;
1662			tries++;
1663			continue;
1664		}
1665
1666		flashed = 0;
1667
1668		/* Check for compatibility/flash */
1669		if (twa_check_srl(tw_dev, &flashed)) {
1670			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x21, "Compatibility check failed during reset sequence");
1671			do_soft_reset = 1;
1672			tries++;
1673			continue;
1674		} else {
1675			if (flashed) {
1676				tries++;
1677				continue;
1678			}
1679		}
1680
1681		/* Drain the AEN queue */
1682		if (twa_aen_drain_queue(tw_dev, soft_reset)) {
1683			TW_PRINTK(tw_dev->host, TW_DRIVER, 0x22, "AEN drain failed during reset sequence");
1684			do_soft_reset = 1;
1685			tries++;
1686			continue;
1687		}
1688
1689		/* If we got here, controller is in a good state */
1690		retval = 0;
1691		goto out;
1692	}
1693out:
1694	return retval;
1695} /* End twa_reset_sequence() */
1696
1697/* This function returns unit geometry in cylinders/heads/sectors */
1698static int twa_scsi_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[])
1699{
1700	int heads, sectors, cylinders;
1701
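    	/* Units of 1GB (0x200000 512-byte sectors) and larger report the usual 255-head/63-sector translated geometry; smaller units use 64/32 */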
1702	if (capacity >= 0x200000) {
1703		heads = 255;
1704		sectors = 63;
1705		cylinders = sector_div(capacity, heads * sectors);
1706	} else {
1707		heads = 64;
1708		sectors = 32;
1709		cylinders = sector_div(capacity, heads * sectors);
1710	}
1711
1712	geom[0] = heads;
1713	geom[1] = sectors;
1714	geom[2] = cylinders;
1715
1716	return 0;
1717} /* End twa_scsi_biosparam() */
1718
1719/* This is the new scsi eh reset function */
1720static int twa_scsi_eh_reset(struct scsi_cmnd *SCpnt)
1721{
1722	TW_Device_Extension *tw_dev = NULL;
1723	int retval = FAILED;
1724
1725	tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1726
1727	tw_dev->num_resets++;
1728
1729	sdev_printk(KERN_WARNING, SCpnt->device,
1730		"WARNING: (0x%02X:0x%04X): Command (0x%x) timed out, resetting card.\n",
1731		TW_DRIVER, 0x2c, SCpnt->cmnd[0]);
1732
1733	/* Make sure we are not issuing an ioctl or resetting from ioctl */
1734	mutex_lock(&tw_dev->ioctl_lock);
1735
1736	/* Now reset the card and some of the device extension data */
1737	if (twa_reset_device_extension(tw_dev)) {
1738		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2b, "Controller reset failed during scsi host reset");
1739		goto out;
1740	}
1741
1742	retval = SUCCESS;
1743out:
1744	mutex_unlock(&tw_dev->ioctl_lock);
1745	return retval;
1746} /* End twa_scsi_eh_reset() */
1747
1748/* This is the main scsi queue function to handle scsi opcodes */
1749static int twa_scsi_queue_lck(struct scsi_cmnd *SCpnt)
1750{
1751	void (*done)(struct scsi_cmnd *) = scsi_done;
1752	int request_id, retval;
1753	TW_Device_Extension *tw_dev = (TW_Device_Extension *)SCpnt->device->host->hostdata;
1754
1755	/* If we are resetting due to a timed-out ioctl, report as busy */
1756	if (test_bit(TW_IN_RESET, &tw_dev->flags)) {
1757		retval = SCSI_MLQUEUE_HOST_BUSY;
1758		goto out;
1759	}
1760
1761	/* Check if this FW supports luns */
1762	if ((SCpnt->device->lun != 0) && (tw_dev->tw_compat_info.working_srl < TW_FW_SRL_LUNS_SUPPORTED)) {
1763		SCpnt->result = (DID_BAD_TARGET << 16);
1764		done(SCpnt);
1765		retval = 0;
1766		goto out;
1767	}
1768
1769	/* Get a free request id */
1770	twa_get_request_id(tw_dev, &request_id);
1771
1772	/* Save the scsi command for use by the ISR */
1773	tw_dev->srb[request_id] = SCpnt;
1774
1775	retval = twa_scsiop_execute_scsi(tw_dev, request_id, NULL, 0, NULL);
1776	switch (retval) {
1777	case SCSI_MLQUEUE_HOST_BUSY:
1778		if (twa_command_mapped(SCpnt))
1779			scsi_dma_unmap(SCpnt);
1780		twa_free_request_id(tw_dev, request_id);
1781		break;
1782	case 1:
1783		SCpnt->result = (DID_ERROR << 16);
1784		if (twa_command_mapped(SCpnt))
1785			scsi_dma_unmap(SCpnt);
1786		done(SCpnt);
1787		tw_dev->state[request_id] = TW_S_COMPLETED;
1788		twa_free_request_id(tw_dev, request_id);
1789		retval = 0;
1790	}
1791out:
1792	return retval;
1793} /* End twa_scsi_queue() */
1794
1795static DEF_SCSI_QCMD(twa_scsi_queue)
1796
1797/* This function hands scsi cdb's to the firmware */
1798static int twa_scsiop_execute_scsi(TW_Device_Extension *tw_dev, int request_id,
1799				   unsigned char *cdb, int use_sg,
1800				   TW_SG_Entry *sglistarg)
1801{
1802	TW_Command_Full *full_command_packet;
1803	TW_Command_Apache *command_packet;
1804	u32 num_sectors = 0x0;
1805	int i, sg_count;
1806	struct scsi_cmnd *srb = NULL;
1807	struct scatterlist *sg;
1808	int retval = 1;
1809
1810	if (tw_dev->srb[request_id])
1811		srb = tw_dev->srb[request_id];
1812
1813	/* Initialize command packet */
1814	full_command_packet = tw_dev->command_packet_virt[request_id];
1815	full_command_packet->header.header_desc.size_header = 128;
1816	full_command_packet->header.status_block.error = 0;
1817	full_command_packet->header.status_block.severity__reserved = 0;
1818
1819	command_packet = &full_command_packet->command.newcommand;
1820	command_packet->status = 0;
1821	command_packet->opcode__reserved = TW_OPRES_IN(0, TW_OP_EXECUTE_SCSI);
1822
1823	/* We forced 16 byte cdb use earlier */
1824	if (!cdb)
1825		memcpy(command_packet->cdb, srb->cmnd, TW_MAX_CDB_LEN);
1826	else
1827		memcpy(command_packet->cdb, cdb, TW_MAX_CDB_LEN);
1828
1829	if (srb) {
1830		command_packet->unit = srb->device->id;
1831		command_packet->request_id__lunl =
1832			TW_REQ_LUN_IN(srb->device->lun, request_id);
1833	} else {
1834		command_packet->request_id__lunl =
1835			TW_REQ_LUN_IN(0, request_id);
1836		command_packet->unit = 0;
1837	}
1838
1839	command_packet->sgl_offset = 16;
1840
1841	if (!sglistarg) {
1842		/* Map sglist from scsi layer to cmd packet */
1843
1844		if (scsi_sg_count(srb)) {
1845			if (!twa_command_mapped(srb)) {
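    				/* Unmapped commands are bounced through the per-request generic buffer rather than DMA-mapping the sglist */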
1846				if (srb->sc_data_direction == DMA_TO_DEVICE ||
1847				    srb->sc_data_direction == DMA_BIDIRECTIONAL)
1848					scsi_sg_copy_to_buffer(srb,
1849							       tw_dev->generic_buffer_virt[request_id],
1850							       TW_SECTOR_SIZE);
1851				command_packet->sg_list[0].address = TW_CPU_TO_SGL(tw_dev->generic_buffer_phys[request_id]);
1852				command_packet->sg_list[0].length = cpu_to_le32(TW_MIN_SGL_LENGTH);
1853			} else {
1854				sg_count = scsi_dma_map(srb);
1855				if (sg_count < 0)
1856					goto out;
1857
1858				scsi_for_each_sg(srb, sg, sg_count, i) {
1859					command_packet->sg_list[i].address = TW_CPU_TO_SGL(sg_dma_address(sg));
1860					command_packet->sg_list[i].length = cpu_to_le32(sg_dma_len(sg));
1861					if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1862						TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2e, "Found unaligned sgl address during execute scsi");
1863						goto out;
1864					}
1865				}
1866			}
1867			command_packet->sgl_entries__lunh = TW_REQ_LUN_IN((srb->device->lun >> 4), scsi_sg_count(tw_dev->srb[request_id]));
1868		}
1869	} else {
1870		/* Internal cdb post */
1871		for (i = 0; i < use_sg; i++) {
1872			command_packet->sg_list[i].address = sglistarg[i].address;
1873			command_packet->sg_list[i].length = sglistarg[i].length;
1874			if (command_packet->sg_list[i].address & TW_CPU_TO_SGL(TW_ALIGNMENT_9000_SGL)) {
1875				TW_PRINTK(tw_dev->host, TW_DRIVER, 0x2f, "Found unaligned sgl address during internal post");
1876				goto out;
1877			}
1878		}
1879		command_packet->sgl_entries__lunh = TW_REQ_LUN_IN(0, use_sg);
1880	}
1881
1882	if (srb) {
1883		if (srb->cmnd[0] == READ_6 || srb->cmnd[0] == WRITE_6)
1884			num_sectors = (u32)srb->cmnd[4];
1885
1886		if (srb->cmnd[0] == READ_10 || srb->cmnd[0] == WRITE_10)
1887			num_sectors = (u32)srb->cmnd[8] | ((u32)srb->cmnd[7] << 8);
1888	}
1889
1890	/* Update sector statistic */
1891	tw_dev->sector_count = num_sectors;
1892	if (tw_dev->sector_count > tw_dev->max_sector_count)
1893		tw_dev->max_sector_count = tw_dev->sector_count;
1894
1895	/* Update SG statistics */
1896	if (srb) {
1897		tw_dev->sgl_entries = scsi_sg_count(tw_dev->srb[request_id]);
1898		if (tw_dev->sgl_entries > tw_dev->max_sgl_entries)
1899			tw_dev->max_sgl_entries = tw_dev->sgl_entries;
1900	}
1901
1902	/* Now post the command to the board */
1903	if (srb) {
1904		retval = twa_post_command_packet(tw_dev, request_id, 0);
1905	} else {
1906		twa_post_command_packet(tw_dev, request_id, 1);
1907		retval = 0;
1908	}
1909out:
1910	return retval;
1911} /* End twa_scsiop_execute_scsi() */
1912
1913/* This function completes an execute scsi operation */
1914static void twa_scsiop_execute_scsi_complete(TW_Device_Extension *tw_dev, int request_id)
1915{
1916	struct scsi_cmnd *cmd = tw_dev->srb[request_id];
1917
1918	if (!twa_command_mapped(cmd) &&
1919	    (cmd->sc_data_direction == DMA_FROM_DEVICE ||
1920	     cmd->sc_data_direction == DMA_BIDIRECTIONAL)) {
1921		if (scsi_sg_count(cmd) == 1) {
1922			void *buf = tw_dev->generic_buffer_virt[request_id];
1923
1924			scsi_sg_copy_from_buffer(cmd, buf, TW_SECTOR_SIZE);
1925		}
1926	}
1927} /* End twa_scsiop_execute_scsi_complete() */
1928
1929/* This function tells the controller to shut down */
1930static void __twa_shutdown(TW_Device_Extension *tw_dev)
1931{
1932	/* Disable interrupts */
1933	TW_DISABLE_INTERRUPTS(tw_dev);
1934
1935	/* Free up the IRQ */
1936	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
1937
1938	printk(KERN_WARNING "3w-9xxx: Shutting down host %d.\n", tw_dev->host->host_no);
1939
1940	/* Tell the card we are shutting down */
1941	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
1942		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x31, "Connection shutdown failed");
1943	} else {
1944		printk(KERN_WARNING "3w-9xxx: Shutdown complete.\n");
1945	}
1946
1947	/* Clear all interrupts just before exit */
1948	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
1949} /* End __twa_shutdown() */
1950
1951/* Wrapper for __twa_shutdown */
1952static void twa_shutdown(struct pci_dev *pdev)
1953{
1954	struct Scsi_Host *host = pci_get_drvdata(pdev);
1955	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
1956
1957	__twa_shutdown(tw_dev);
1958} /* End twa_shutdown() */
1959
1960/* This function will look up a string */
1961static char *twa_string_lookup(twa_message_type *table, unsigned int code)
1962{
1963	int index;
1964
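    	/* Linear scan: stop on a matching code or on the NULL text entry that terminates the table */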
1965	for (index = 0; ((code != table[index].code) &&
1966		      (table[index].text != (char *)0)); index++);
1967	return(table[index].text);
1968} /* End twa_string_lookup() */
1969
1970/* This function gets called when a disk is coming on-line */
1971static int twa_slave_configure(struct scsi_device *sdev)
1972{
1973	/* Force 60 second timeout */
1974	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
1975
1976	return 0;
1977} /* End twa_slave_configure() */
1978
1979static const struct scsi_host_template driver_template = {
1980	.module			= THIS_MODULE,
1981	.name			= "3ware 9000 Storage Controller",
1982	.queuecommand		= twa_scsi_queue,
1983	.eh_host_reset_handler	= twa_scsi_eh_reset,
1984	.bios_param		= twa_scsi_biosparam,
1985	.change_queue_depth	= scsi_change_queue_depth,
1986	.can_queue		= TW_Q_LENGTH-2,
1987	.slave_configure	= twa_slave_configure,
1988	.this_id		= -1,
1989	.sg_tablesize		= TW_APACHE_MAX_SGL_LENGTH,
1990	.max_sectors		= TW_MAX_SECTORS,
1991	.cmd_per_lun		= TW_MAX_CMDS_PER_LUN,
1992	.shost_groups		= twa_host_groups,
1993	.emulated		= 1,
1994	.no_write_same		= 1,
1995};
1996
1997/* This function will probe and initialize a card */
1998static int twa_probe(struct pci_dev *pdev, const struct pci_device_id *dev_id)
1999{
2000	struct Scsi_Host *host = NULL;
2001	TW_Device_Extension *tw_dev;
2002	unsigned long mem_addr, mem_len;
2003	int retval;
2004
2005	retval = pci_enable_device(pdev);
2006	if (retval) {
2007		TW_PRINTK(host, TW_DRIVER, 0x34, "Failed to enable pci device");
2008		return -ENODEV;
2009	}
2010
2011	pci_set_master(pdev);
2012	pci_try_set_mwi(pdev);
2013
2014	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2015	if (retval)
2016		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2017	if (retval) {
2018		TW_PRINTK(host, TW_DRIVER, 0x23, "Failed to set dma mask");
2019		retval = -ENODEV;
2020		goto out_disable_device;
2021	}
2022
2023	host = scsi_host_alloc(&driver_template, sizeof(TW_Device_Extension));
2024	if (!host) {
2025		TW_PRINTK(host, TW_DRIVER, 0x24, "Failed to allocate memory for device extension");
2026		retval = -ENOMEM;
2027		goto out_disable_device;
2028	}
2029	tw_dev = (TW_Device_Extension *)host->hostdata;
2030
2031	/* Save values to device extension */
2032	tw_dev->host = host;
2033	tw_dev->tw_pci_dev = pdev;
2034
2035	if (twa_initialize_device_extension(tw_dev)) {
2036		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x25, "Failed to initialize device extension");
2037		retval = -ENOMEM;
2038		goto out_free_device_extension;
2039	}
2040
2041	/* Request IO regions */
2042	retval = pci_request_regions(pdev, "3w-9xxx");
2043	if (retval) {
2044		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x26, "Failed to get mem region");
2045		goto out_free_device_extension;
2046	}
2047
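    	/* The original 9000-series controller exposes its registers through BAR 1; the other supported models use BAR 2 */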
2048	if (pdev->device == PCI_DEVICE_ID_3WARE_9000) {
2049		mem_addr = pci_resource_start(pdev, 1);
2050		mem_len = pci_resource_len(pdev, 1);
2051	} else {
2052		mem_addr = pci_resource_start(pdev, 2);
2053		mem_len = pci_resource_len(pdev, 2);
2054	}
2055
2056	/* Save base address */
2057	tw_dev->base_addr = ioremap(mem_addr, mem_len);
2058	if (!tw_dev->base_addr) {
2059		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x35, "Failed to ioremap");
2060		retval = -ENOMEM;
2061		goto out_release_mem_region;
2062	}
2063
2064	/* Disable interrupts on the card */
2065	TW_DISABLE_INTERRUPTS(tw_dev);
2066
2067	/* Initialize the card */
2068	if (twa_reset_sequence(tw_dev, 0)) {
2069		retval = -ENOMEM;
2070		goto out_iounmap;
2071	}
2072
2073	/* Set host specific parameters */
2074	if ((pdev->device == PCI_DEVICE_ID_3WARE_9650SE) ||
2075	    (pdev->device == PCI_DEVICE_ID_3WARE_9690SA))
2076		host->max_id = TW_MAX_UNITS_9650SE;
2077	else
2078		host->max_id = TW_MAX_UNITS;
2079
2080	host->max_cmd_len = TW_MAX_CDB_LEN;
2081
2082	/* Channels aren't supported by adapter */
2083	host->max_lun = TW_MAX_LUNS(tw_dev->tw_compat_info.working_srl);
2084	host->max_channel = 0;
2085
2086	/* Register the card with the kernel SCSI layer */
2087	retval = scsi_add_host(host, &pdev->dev);
2088	if (retval) {
2089		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x27, "scsi add host failed");
2090		goto out_iounmap;
2091	}
2092
2093	pci_set_drvdata(pdev, host);
2094
2095	printk(KERN_WARNING "3w-9xxx: scsi%d: Found a 3ware 9000 Storage Controller at 0x%lx, IRQ: %d.\n",
2096	       host->host_no, mem_addr, pdev->irq);
2097	printk(KERN_WARNING "3w-9xxx: scsi%d: Firmware %s, BIOS %s, Ports: %d.\n",
2098	       host->host_no,
2099	       (char *)twa_get_param(tw_dev, 0, TW_VERSION_TABLE,
2100				     TW_PARAM_FWVER, TW_PARAM_FWVER_LENGTH),
2101	       (char *)twa_get_param(tw_dev, 1, TW_VERSION_TABLE,
2102				     TW_PARAM_BIOSVER, TW_PARAM_BIOSVER_LENGTH),
2103	       le32_to_cpu(*(__le32 *)twa_get_param(tw_dev, 2, TW_INFORMATION_TABLE,
2104				     TW_PARAM_PORTCOUNT, TW_PARAM_PORTCOUNT_LENGTH)));
2105
2106	/* Try to enable MSI */
2107	if (use_msi && (pdev->device != PCI_DEVICE_ID_3WARE_9000) &&
2108	    !pci_enable_msi(pdev))
2109		set_bit(TW_USING_MSI, &tw_dev->flags);
2110
2111	/* Now setup the interrupt handler */
2112	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2113	if (retval) {
2114		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x30, "Error requesting IRQ");
2115		goto out_remove_host;
2116	}
2117
2118	twa_device_extension_list[twa_device_extension_count] = tw_dev;
2119	twa_device_extension_count++;
2120
2121	/* Re-enable interrupts on the card */
2122	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2123
2124	/* Finally, scan the host */
2125	scsi_scan_host(host);
2126
2127	if (twa_major == -1) {
2128		if ((twa_major = register_chrdev(0, "twa", &twa_fops)) < 0)
2129			TW_PRINTK(host, TW_DRIVER, 0x29, "Failed to register character device");
2130	}
2131	return 0;
2132
2133out_remove_host:
2134	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2135		pci_disable_msi(pdev);
2136	scsi_remove_host(host);
2137out_iounmap:
2138	iounmap(tw_dev->base_addr);
2139out_release_mem_region:
2140	pci_release_regions(pdev);
2141out_free_device_extension:
2142	twa_free_device_extension(tw_dev);
2143	scsi_host_put(host);
2144out_disable_device:
2145	pci_disable_device(pdev);
2146
2147	return retval;
2148} /* End twa_probe() */
2149
2150/* This function is called to remove a device */
2151static void twa_remove(struct pci_dev *pdev)
2152{
2153	struct Scsi_Host *host = pci_get_drvdata(pdev);
2154	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2155
2156	scsi_remove_host(tw_dev->host);
2157
2158	/* Unregister character device */
2159	if (twa_major >= 0) {
2160		unregister_chrdev(twa_major, "twa");
2161		twa_major = -1;
2162	}
2163
2164	/* Shutdown the card */
2165	__twa_shutdown(tw_dev);
2166
2167	/* Disable MSI if enabled */
2168	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2169		pci_disable_msi(pdev);
2170
2171	/* Free IO remapping */
2172	iounmap(tw_dev->base_addr);
2173
2174	/* Free up the mem region */
2175	pci_release_regions(pdev);
2176
2177	/* Free up device extension resources */
2178	twa_free_device_extension(tw_dev);
2179
2180	scsi_host_put(tw_dev->host);
2181	pci_disable_device(pdev);
2182	twa_device_extension_count--;
2183} /* End twa_remove() */
2184
2185/* This function is called on PCI suspend */
2186static int __maybe_unused twa_suspend(struct device *dev)
2187{
2188	struct pci_dev *pdev = to_pci_dev(dev);
2189	struct Scsi_Host *host = pci_get_drvdata(pdev);
2190	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2191
2192	printk(KERN_WARNING "3w-9xxx: Suspending host %d.\n", tw_dev->host->host_no);
2193
2194	TW_DISABLE_INTERRUPTS(tw_dev);
2195	free_irq(tw_dev->tw_pci_dev->irq, tw_dev);
2196
2197	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2198		pci_disable_msi(pdev);
2199
2200	/* Tell the card we are shutting down */
2201	if (twa_initconnection(tw_dev, 1, 0, 0, 0, 0, 0, NULL, NULL, NULL, NULL, NULL)) {
2202		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x38, "Connection shutdown failed during suspend");
2203	} else {
2204		printk(KERN_WARNING "3w-9xxx: Suspend complete.\n");
2205	}
2206	TW_CLEAR_ALL_INTERRUPTS(tw_dev);
2207
2208	return 0;
2209} /* End twa_suspend() */
2210
2211/* This function is called on PCI resume */
2212static int __maybe_unused twa_resume(struct device *dev)
2213{
2214	int retval = 0;
2215	struct pci_dev *pdev = to_pci_dev(dev);
2216	struct Scsi_Host *host = pci_get_drvdata(pdev);
2217	TW_Device_Extension *tw_dev = (TW_Device_Extension *)host->hostdata;
2218
2219	printk(KERN_WARNING "3w-9xxx: Resuming host %d.\n", tw_dev->host->host_no);
2220
2221	pci_try_set_mwi(pdev);
2222
2223	retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
2224	if (retval)
2225		retval = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
2226	if (retval) {
2227		TW_PRINTK(host, TW_DRIVER, 0x40, "Failed to set dma mask during resume");
2228		retval = -ENODEV;
2229		goto out_disable_device;
2230	}
2231
2232	/* Initialize the card */
2233	if (twa_reset_sequence(tw_dev, 0)) {
2234		retval = -ENODEV;
2235		goto out_disable_device;
2236	}
2237
2238	/* Now setup the interrupt handler */
2239	retval = request_irq(pdev->irq, twa_interrupt, IRQF_SHARED, "3w-9xxx", tw_dev);
2240	if (retval) {
2241		TW_PRINTK(tw_dev->host, TW_DRIVER, 0x42, "Error requesting IRQ during resume");
2242		retval = -ENODEV;
2243		goto out_disable_device;
2244	}
2245
2246	/* Now enable MSI if enabled */
2247	if (test_bit(TW_USING_MSI, &tw_dev->flags))
2248		pci_enable_msi(pdev);
2249
2250	/* Re-enable interrupts on the card */
2251	TW_ENABLE_AND_CLEAR_INTERRUPTS(tw_dev);
2252
2253	printk(KERN_WARNING "3w-9xxx: Resume complete.\n");
2254	return 0;
2255
2256out_disable_device:
2257	scsi_remove_host(host);
2258
2259	return retval;
2260} /* End twa_resume() */
2261
2262/* PCI Devices supported by this driver */
2263static struct pci_device_id twa_pci_tbl[] = {
2264	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9000,
2265	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2266	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9550SX,
2267	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2268	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9650SE,
2269	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2270	{ PCI_VENDOR_ID_3WARE, PCI_DEVICE_ID_3WARE_9690SA,
2271	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
2272	{ }
2273};
2274MODULE_DEVICE_TABLE(pci, twa_pci_tbl);
2275
2276static SIMPLE_DEV_PM_OPS(twa_pm_ops, twa_suspend, twa_resume);
2277
2278/* pci_driver initializer */
2279static struct pci_driver twa_driver = {
2280	.name		= "3w-9xxx",
2281	.id_table	= twa_pci_tbl,
2282	.probe		= twa_probe,
2283	.remove		= twa_remove,
2284	.driver.pm	= &twa_pm_ops,
2285	.shutdown	= twa_shutdown
2286};
2287
2288/* This function is called on driver initialization */
2289static int __init twa_init(void)
2290{
2291	printk(KERN_WARNING "3ware 9000 Storage Controller device driver for Linux v%s.\n", TW_DRIVER_VERSION);
2292
2293	return pci_register_driver(&twa_driver);
2294} /* End twa_init() */
2295
2296/* This function is called on driver exit */
2297static void __exit twa_exit(void)
2298{
2299	pci_unregister_driver(&twa_driver);
2300} /* End twa_exit() */
2301
2302module_init(twa_init);
2303module_exit(twa_exit);
2304