// SPDX-License-Identifier: GPL-2.0
/*
 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
 *
 * This driver supports the newer, SCSI-based firmware interface only.
 *
 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
 *
 * Based on the original DAC960 driver, which has
 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/raid_class.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include "myrs.h"

static struct raid_template *myrs_raid_template;

static struct myrs_devstate_name_entry {
	enum myrs_devstate state;
	char *name;
} myrs_devstate_name_list[] = {
	{ MYRS_DEVICE_UNCONFIGURED, "Unconfigured" },
	{ MYRS_DEVICE_ONLINE, "Online" },
	{ MYRS_DEVICE_REBUILD, "Rebuild" },
	{ MYRS_DEVICE_MISSING, "Missing" },
	{ MYRS_DEVICE_SUSPECTED_CRITICAL, "SuspectedCritical" },
	{ MYRS_DEVICE_OFFLINE, "Offline" },
	{ MYRS_DEVICE_CRITICAL, "Critical" },
	{ MYRS_DEVICE_SUSPECTED_DEAD, "SuspectedDead" },
	{ MYRS_DEVICE_COMMANDED_OFFLINE, "CommandedOffline" },
	{ MYRS_DEVICE_STANDBY, "Standby" },
	{ MYRS_DEVICE_INVALID_STATE, "Invalid" },
};

static char *myrs_devstate_name(enum myrs_devstate state)
{
	struct myrs_devstate_name_entry *entry = myrs_devstate_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_devstate_name_list); i++) {
		if (entry[i].state == state)
			return entry[i].name;
	}
	return NULL;
}

static struct myrs_raid_level_name_entry {
	enum myrs_raid_level level;
	char *name;
} myrs_raid_level_name_list[] = {
	{ MYRS_RAID_LEVEL0, "RAID0" },
	{ MYRS_RAID_LEVEL1, "RAID1" },
	{ MYRS_RAID_LEVEL3, "RAID3 right asymmetric parity" },
	{ MYRS_RAID_LEVEL5, "RAID5 right asymmetric parity" },
	{ MYRS_RAID_LEVEL6, "RAID6" },
	{ MYRS_RAID_JBOD, "JBOD" },
	{ MYRS_RAID_NEWSPAN, "New Mylex SPAN" },
	{ MYRS_RAID_LEVEL3F, "RAID3 fixed parity" },
	{ MYRS_RAID_LEVEL3L, "RAID3 left symmetric parity" },
	{ MYRS_RAID_SPAN, "Mylex SPAN" },
	{ MYRS_RAID_LEVEL5L, "RAID5 left symmetric parity" },
	{ MYRS_RAID_LEVELE, "RAIDE (concatenation)" },
	{ MYRS_RAID_PHYSICAL, "Physical device" },
};

static char *myrs_raid_level_name(enum myrs_raid_level level)
{
	struct myrs_raid_level_name_entry *entry = myrs_raid_level_name_list;
	int i;

	for (i = 0; i < ARRAY_SIZE(myrs_raid_level_name_list); i++) {
		if (entry[i].level == level)
			return entry[i].name;
	}
	return NULL;
}

/*
 * myrs_reset_cmd - clears critical fields in struct myrs_cmdblk
 */
static inline void myrs_reset_cmd(struct myrs_cmdblk *cmd_blk)
{
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;

	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	cmd_blk->status = 0;
}

/*
 * myrs_qcmd - queues Command for DAC960 V2 Series Controllers.
 */
static void myrs_qcmd(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
{
	void __iomem *base = cs->io_base;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_cmd_mbox *next_mbox = cs->next_cmd_mbox;

	cs->write_cmd_mbox(next_mbox, mbox);

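	/*
	 * The controller is expected to clear words[0] of a mailbox
	 * once it has fetched it. If either of the two most recently
	 * submitted mailboxes has already been consumed, the
	 * controller may have gone idle, so signal it via
	 * get_cmd_mbox() to make it rescan the mailbox ring.
	 */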
	if (cs->prev_cmd_mbox1->words[0] == 0 ||
	    cs->prev_cmd_mbox2->words[0] == 0)
		cs->get_cmd_mbox(base);

	cs->prev_cmd_mbox2 = cs->prev_cmd_mbox1;
	cs->prev_cmd_mbox1 = next_mbox;

	if (++next_mbox > cs->last_cmd_mbox)
		next_mbox = cs->first_cmd_mbox;

	cs->next_cmd_mbox = next_mbox;
}

/*
 * myrs_exec_cmd - executes V2 Command and waits for completion.
 */
static void myrs_exec_cmd(struct myrs_hba *cs,
		struct myrs_cmdblk *cmd_blk)
{
	DECLARE_COMPLETION_ONSTACK(complete);
	unsigned long flags;

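	/*
	 * The interrupt handler signals this completion when the
	 * controller posts the status mailbox for this command.
	 */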
	cmd_blk->complete = &complete;
	spin_lock_irqsave(&cs->queue_lock, flags);
	myrs_qcmd(cs, cmd_blk);
	spin_unlock_irqrestore(&cs->queue_lock, flags);

	wait_for_completion(&complete);
}

/*
 * myrs_report_progress - prints progress message
 */
static void myrs_report_progress(struct myrs_hba *cs, unsigned short ldev_num,
		unsigned char *msg, unsigned long blocks,
		unsigned long size)
{
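	/*
	 * Scale both block counts down by 128 (>> 7) so that the
	 * multiplication by 100 cannot overflow the 32-bit int math.
	 */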
	shost_printk(KERN_INFO, cs->host,
		     "Logical Drive %d: %s in Progress: %d%% completed\n",
		     ldev_num, msg,
		     (100 * (int)(blocks >> 7)) / (int)(size >> 7));
}

/*
 * myrs_get_ctlr_info - executes a Controller Information IOCTL Command
 */
static unsigned char myrs_get_ctlr_info(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ctlr_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;
	unsigned short ldev_present, ldev_critical, ldev_offline;

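	/*
	 * Snapshot the current logical drive counts so that any change
	 * can be reported once the controller info has been refreshed.
	 */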
	ldev_present = cs->ctlr_info->ldev_present;
	ldev_critical = cs->ctlr_info->ldev_critical;
	ldev_offline = cs->ctlr_info->ldev_offline;

	ctlr_info_addr = dma_map_single(&cs->pdev->dev, cs->ctlr_info,
					sizeof(struct myrs_ctlr_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ctlr_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ctlr_info.id = MYRS_DCMD_TAG;
	mbox->ctlr_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ctlr_info.control.dma_ctrl_to_host = true;
	mbox->ctlr_info.control.no_autosense = true;
	mbox->ctlr_info.dma_size = sizeof(struct myrs_ctlr_info);
	mbox->ctlr_info.ctlr_num = 0;
	mbox->ctlr_info.ioctl_opcode = MYRS_IOCTL_GET_CTLR_INFO;
	sgl = &mbox->ctlr_info.dma_addr;
	sgl->sge[0].sge_addr = ctlr_info_addr;
	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetControllerInfo\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ctlr_info_addr,
			 sizeof(struct myrs_ctlr_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		if (cs->ctlr_info->bg_init_active +
		    cs->ctlr_info->ldev_init_active +
		    cs->ctlr_info->pdev_init_active +
		    cs->ctlr_info->cc_active +
		    cs->ctlr_info->rbld_active +
		    cs->ctlr_info->exp_active != 0)
			cs->needs_update = true;
		if (cs->ctlr_info->ldev_present != ldev_present ||
		    cs->ctlr_info->ldev_critical != ldev_critical ||
		    cs->ctlr_info->ldev_offline != ldev_offline)
			shost_printk(KERN_INFO, cs->host,
				     "Logical drive count changes (%d/%d/%d)\n",
				     cs->ctlr_info->ldev_critical,
				     cs->ctlr_info->ldev_offline,
				     cs->ctlr_info->ldev_present);
	}

	return status;
}

/*
 * myrs_get_ldev_info - executes a Logical Device Information IOCTL Command
 */
static unsigned char myrs_get_ldev_info(struct myrs_hba *cs,
		unsigned short ldev_num, struct myrs_ldev_info *ldev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t ldev_info_addr;
	struct myrs_ldev_info ldev_info_orig;
	union myrs_sgl *sgl;
	unsigned char status;

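	/*
	 * Keep a copy of the previous logical device info so that
	 * state transitions and new error counts can be logged below.
	 */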
	memcpy(&ldev_info_orig, ldev_info, sizeof(struct myrs_ldev_info));
	ldev_info_addr = dma_map_single(&cs->pdev->dev, ldev_info,
					sizeof(struct myrs_ldev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, ldev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->ldev_info.id = MYRS_DCMD_TAG;
	mbox->ldev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->ldev_info.control.dma_ctrl_to_host = true;
	mbox->ldev_info.control.no_autosense = true;
	mbox->ldev_info.dma_size = sizeof(struct myrs_ldev_info);
	mbox->ldev_info.ldev.ldev_num = ldev_num;
	mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_GET_LDEV_INFO_VALID;
	sgl = &mbox->ldev_info.dma_addr;
	sgl->sge[0].sge_addr = ldev_info_addr;
	sgl->sge[0].sge_count = mbox->ldev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetLogicalDeviceInfoValid for ldev %d\n", ldev_num);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, ldev_info_addr,
			 sizeof(struct myrs_ldev_info), DMA_FROM_DEVICE);
	if (status == MYRS_STATUS_SUCCESS) {
		unsigned short ldev_num = ldev_info->ldev_num;
		struct myrs_ldev_info *new = ldev_info;
		struct myrs_ldev_info *old = &ldev_info_orig;
		unsigned long ldev_size = new->cfg_devsize;

		if (new->dev_state != old->dev_state) {
			const char *name;

			name = myrs_devstate_name(new->dev_state);
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d is now %s\n",
				     ldev_num, name ? name : "Invalid");
		}
		if ((new->soft_errs != old->soft_errs) ||
		    (new->cmds_failed != old->cmds_failed) ||
		    (new->deferred_write_errs != old->deferred_write_errs))
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d Errors: Soft = %d, Failed = %d, Deferred Write = %d\n",
				     ldev_num, new->soft_errs,
				     new->cmds_failed,
				     new->deferred_write_errs);
		if (new->bg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Background Initialization",
					     new->bg_init_lba, ldev_size);
		else if (new->fg_init_active)
			myrs_report_progress(cs, ldev_num,
					     "Foreground Initialization",
					     new->fg_init_lba, ldev_size);
		else if (new->migration_active)
			myrs_report_progress(cs, ldev_num,
					     "Data Migration",
					     new->migration_lba, ldev_size);
		else if (new->patrol_active)
			myrs_report_progress(cs, ldev_num,
					     "Patrol Operation",
					     new->patrol_lba, ldev_size);
		if (old->bg_init_active && !new->bg_init_active)
			shost_printk(KERN_INFO, cs->host,
				     "Logical Drive %d: Background Initialization %s\n",
				     ldev_num,
				     (new->ldev_control.ldev_init_done ?
				      "Completed" : "Failed"));
	}
	return status;
}

/*
 * myrs_get_pdev_info - executes a "Read Physical Device Information" Command
 */
static unsigned char myrs_get_pdev_info(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_pdev_info *pdev_info)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	dma_addr_t pdev_info_addr;
	union myrs_sgl *sgl;
	unsigned char status;

	pdev_info_addr = dma_map_single(&cs->pdev->dev, pdev_info,
					sizeof(struct myrs_pdev_info),
					DMA_FROM_DEVICE);
	if (dma_mapping_error(&cs->pdev->dev, pdev_info_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.id = MYRS_DCMD_TAG;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_pdev_info);
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_GET_PDEV_INFO_VALID;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = pdev_info_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;
	dev_dbg(&cs->host->shost_gendev,
		"Sending GetPhysicalDeviceInfoValid for pdev %d:%d:%d\n",
		channel, target, lun);
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&cs->pdev->dev, pdev_info_addr,
			 sizeof(struct myrs_pdev_info), DMA_FROM_DEVICE);
	return status;
}

/*
 * myrs_dev_op - executes a "Device Operation" Command
 */
static unsigned char myrs_dev_op(struct myrs_hba *cs,
		enum myrs_ioctl_opcode opcode, enum myrs_opdev opdev)
{
	struct myrs_cmdblk *cmd_blk = &cs->dcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	unsigned char status;

	mutex_lock(&cs->dcmd_mutex);
	myrs_reset_cmd(cmd_blk);
	mbox->dev_op.opcode = MYRS_CMD_OP_IOCTL;
	mbox->dev_op.id = MYRS_DCMD_TAG;
	mbox->dev_op.control.dma_ctrl_to_host = true;
	mbox->dev_op.control.no_autosense = true;
	mbox->dev_op.ioctl_opcode = opcode;
	mbox->dev_op.opdev = opdev;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	return status;
}

/*
 * myrs_translate_pdev - translates a Physical Device Channel and
 * TargetID into a Logical Device.
 */
static unsigned char myrs_translate_pdev(struct myrs_hba *cs,
		unsigned char channel, unsigned char target, unsigned char lun,
		struct myrs_devmap *devmap)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t devmap_addr;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	memset(devmap, 0x0, sizeof(struct myrs_devmap));
	devmap_addr = dma_map_single(&pdev->dev, devmap,
				     sizeof(struct myrs_devmap),
				     DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, devmap_addr))
		return MYRS_STATUS_FAILED;

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	mbox = &cmd_blk->mbox;
	mbox->pdev_info.opcode = MYRS_CMD_OP_IOCTL;
	mbox->pdev_info.control.dma_ctrl_to_host = true;
	mbox->pdev_info.control.no_autosense = true;
	mbox->pdev_info.dma_size = sizeof(struct myrs_devmap);
	mbox->pdev_info.pdev.target = target;
	mbox->pdev_info.pdev.channel = channel;
	mbox->pdev_info.pdev.lun = lun;
	mbox->pdev_info.ioctl_opcode = MYRS_IOCTL_XLATE_PDEV_TO_LDEV;
	sgl = &mbox->pdev_info.dma_addr;
	sgl->sge[0].sge_addr = devmap_addr;
	sgl->sge[0].sge_count = mbox->pdev_info.dma_size;

	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	dma_unmap_single(&pdev->dev, devmap_addr,
			 sizeof(struct myrs_devmap), DMA_FROM_DEVICE);
	return status;
}

/*
 * myrs_get_event - executes a Get Event Command
 */
static unsigned char myrs_get_event(struct myrs_hba *cs,
		unsigned int event_num, struct myrs_event *event_buf)
{
	struct pci_dev *pdev = cs->pdev;
	dma_addr_t event_addr;
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status;

	event_addr = dma_map_single(&pdev->dev, event_buf,
				    sizeof(struct myrs_event), DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, event_addr))
		return MYRS_STATUS_FAILED;

	mbox->get_event.opcode = MYRS_CMD_OP_IOCTL;
	mbox->get_event.dma_size = sizeof(struct myrs_event);
	mbox->get_event.evnum_upper = event_num >> 16;
	mbox->get_event.ctlr_num = 0;
	mbox->get_event.ioctl_opcode = MYRS_IOCTL_GET_EVENT;
	mbox->get_event.evnum_lower = event_num & 0xFFFF;
	sgl = &mbox->get_event.dma_addr;
	sgl->sge[0].sge_addr = event_addr;
	sgl->sge[0].sge_count = mbox->get_event.dma_size;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	dma_unmap_single(&pdev->dev, event_addr,
			 sizeof(struct myrs_event), DMA_FROM_DEVICE);

	return status;
}

/*
 * myrs_get_fwstatus - executes a Get Health Status Command
 */
static unsigned char myrs_get_fwstatus(struct myrs_hba *cs)
{
	struct myrs_cmdblk *cmd_blk = &cs->mcmd_blk;
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	union myrs_sgl *sgl;
	unsigned char status = cmd_blk->status;

	myrs_reset_cmd(cmd_blk);
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_MCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->common.dma_size = sizeof(struct myrs_fwstat);
	mbox->common.ioctl_opcode = MYRS_IOCTL_GET_HEALTH_STATUS;
	sgl = &mbox->common.dma_addr;
	sgl->sge[0].sge_addr = cs->fwstat_addr;
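	/*
	 * All mailbox formats in the union share the common header
	 * layout, so ctlr_info.dma_size below aliases the
	 * common.dma_size set above.
	 */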
	sgl->sge[0].sge_count = mbox->ctlr_info.dma_size;
	dev_dbg(&cs->host->shost_gendev, "Sending GetHealthStatus\n");
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;

	return status;
}

/*
 * myrs_enable_mmio_mbox - enables the Memory Mailbox Interface
 */
static bool myrs_enable_mmio_mbox(struct myrs_hba *cs,
		enable_mbox_t enable_mbox_fn)
{
	void __iomem *base = cs->io_base;
	struct pci_dev *pdev = cs->pdev;
	union myrs_cmd_mbox *cmd_mbox;
	struct myrs_stat_mbox *stat_mbox;
	union myrs_cmd_mbox *mbox;
	dma_addr_t mbox_addr;
	unsigned char status = MYRS_STATUS_FAILED;

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
			dev_err(&pdev->dev, "DMA mask out of range\n");
			return false;
		}

	/* Temporary dma mapping, used only in the scope of this function */
	mbox = dma_alloc_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
				  &mbox_addr, GFP_KERNEL);
	if (!mbox)
		return false;

	/* These are the base addresses for the command memory mailbox array */
	cs->cmd_mbox_size = MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox);
	cmd_mbox = dma_alloc_coherent(&pdev->dev, cs->cmd_mbox_size,
				      &cs->cmd_mbox_addr, GFP_KERNEL);
	if (!cmd_mbox) {
		dev_err(&pdev->dev, "Failed to allocate command mailbox\n");
		goto out_free;
	}
	cs->first_cmd_mbox = cmd_mbox;
	cmd_mbox += MYRS_MAX_CMD_MBOX - 1;
	cs->last_cmd_mbox = cmd_mbox;
	cs->next_cmd_mbox = cs->first_cmd_mbox;
	cs->prev_cmd_mbox1 = cs->last_cmd_mbox;
	cs->prev_cmd_mbox2 = cs->last_cmd_mbox - 1;
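	/*
	 * dma_alloc_coherent() returns zeroed memory, so priming the
	 * "previous" mailbox pointers with the (still empty) last two
	 * slots makes the idle check in myrs_qcmd() signal the
	 * controller for the very first commands.
	 */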

	/* These are the base addresses for the status memory mailbox array */
	cs->stat_mbox_size = MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox);
	stat_mbox = dma_alloc_coherent(&pdev->dev, cs->stat_mbox_size,
				       &cs->stat_mbox_addr, GFP_KERNEL);
	if (!stat_mbox) {
		dev_err(&pdev->dev, "Failed to allocate status mailbox\n");
		goto out_free;
	}

	cs->first_stat_mbox = stat_mbox;
	stat_mbox += MYRS_MAX_STAT_MBOX - 1;
	cs->last_stat_mbox = stat_mbox;
	cs->next_stat_mbox = cs->first_stat_mbox;

	cs->fwstat_buf = dma_alloc_coherent(&pdev->dev,
					    sizeof(struct myrs_fwstat),
					    &cs->fwstat_addr, GFP_KERNEL);
	if (!cs->fwstat_buf) {
		dev_err(&pdev->dev,
			"Failed to allocate firmware health buffer\n");
		goto out_free;
	}
	cs->ctlr_info = kzalloc(sizeof(struct myrs_ctlr_info),
				GFP_KERNEL | GFP_DMA);
	if (!cs->ctlr_info)
		goto out_free;

	cs->event_buf = kzalloc(sizeof(struct myrs_event),
				GFP_KERNEL | GFP_DMA);
	if (!cs->event_buf)
		goto out_free;

	/* Enable the Memory Mailbox Interface. */
	memset(mbox, 0, sizeof(union myrs_cmd_mbox));
	mbox->set_mbox.id = 1;
	mbox->set_mbox.opcode = MYRS_CMD_OP_IOCTL;
	mbox->set_mbox.control.no_autosense = true;
	mbox->set_mbox.first_cmd_mbox_size_kb =
		(MYRS_MAX_CMD_MBOX * sizeof(union myrs_cmd_mbox)) >> 10;
	mbox->set_mbox.first_stat_mbox_size_kb =
		(MYRS_MAX_STAT_MBOX * sizeof(struct myrs_stat_mbox)) >> 10;
	mbox->set_mbox.second_cmd_mbox_size_kb = 0;
	mbox->set_mbox.second_stat_mbox_size_kb = 0;
	mbox->set_mbox.sense_len = 0;
	mbox->set_mbox.ioctl_opcode = MYRS_IOCTL_SET_MEM_MBOX;
	mbox->set_mbox.fwstat_buf_size_kb = 1;
	mbox->set_mbox.fwstat_buf_addr = cs->fwstat_addr;
	mbox->set_mbox.first_cmd_mbox_addr = cs->cmd_mbox_addr;
	mbox->set_mbox.first_stat_mbox_addr = cs->stat_mbox_addr;
	status = enable_mbox_fn(base, mbox_addr);

out_free:
	dma_free_coherent(&pdev->dev, sizeof(union myrs_cmd_mbox),
			  mbox, mbox_addr);
	if (status != MYRS_STATUS_SUCCESS)
		dev_err(&pdev->dev, "Failed to enable mailbox, status %X\n",
			status);
	return (status == MYRS_STATUS_SUCCESS);
}

/*
 * myrs_get_config - reads the Configuration Information
 */
static int myrs_get_config(struct myrs_hba *cs)
{
	struct myrs_ctlr_info *info = cs->ctlr_info;
	struct Scsi_Host *shost = cs->host;
	unsigned char status;
	unsigned char model[20];
	unsigned char fw_version[12];
	int i, model_len;

	/* Get data into dma-able area, then copy into permanent location */
	mutex_lock(&cs->cinfo_mutex);
	status = myrs_get_ctlr_info(cs);
	mutex_unlock(&cs->cinfo_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_ERR, shost,
			     "Failed to get controller information\n");
		return -ENODEV;
	}

	/* Initialize the Controller Model Name and Full Model Name fields. */
	model_len = sizeof(info->ctlr_name);
	if (model_len > sizeof(model)-1)
		model_len = sizeof(model)-1;
	memcpy(model, info->ctlr_name, model_len);
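	/* Trim trailing blanks and NUL padding from the controller name. */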
	model_len--;
	while (model[model_len] == ' ' || model[model_len] == '\0')
		model_len--;
	model[++model_len] = '\0';
	strcpy(cs->model_name, "DAC960 ");
	strcat(cs->model_name, model);
	/* Initialize the Controller Firmware Version field. */
	sprintf(fw_version, "%d.%02d-%02d",
		info->fw_major_version, info->fw_minor_version,
		info->fw_turn_number);
	if (info->fw_major_version == 6 &&
	    info->fw_minor_version == 0 &&
	    info->fw_turn_number < 1) {
		shost_printk(KERN_WARNING, shost,
			"FIRMWARE VERSION %s DOES NOT PROVIDE THE CONTROLLER\n"
			"STATUS MONITORING FUNCTIONALITY NEEDED BY THIS DRIVER.\n"
			"PLEASE UPGRADE TO VERSION 6.00-01 OR ABOVE.\n",
			fw_version);
		return -ENODEV;
	}
	/* Initialize the Controller Channels and Targets. */
	shost->max_channel = info->physchan_present + info->virtchan_present;
	shost->max_id = info->max_targets[0];
	for (i = 1; i < 16; i++) {
		if (!info->max_targets[i])
			continue;
		if (shost->max_id < info->max_targets[i])
			shost->max_id = info->max_targets[i];
	}

	/*
	 * Initialize the Controller Queue Depth, Driver Queue Depth,
	 * Logical Drive Count, Maximum Blocks per Command, Controller
	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
	 * The Driver Queue Depth must be at most three less than
	 * the Controller Queue Depth; tag '1' is reserved for
	 * direct commands, and tag '2' for monitoring commands.
	 */
	shost->can_queue = info->max_tcq - 3;
	if (shost->can_queue > MYRS_MAX_CMD_MBOX - 3)
		shost->can_queue = MYRS_MAX_CMD_MBOX - 3;
	shost->max_sectors = info->max_transfer_size;
	shost->sg_tablesize = info->max_sge;
	if (shost->sg_tablesize > MYRS_SG_LIMIT)
		shost->sg_tablesize = MYRS_SG_LIMIT;

	shost_printk(KERN_INFO, shost,
		"Configuring %s PCI RAID Controller\n", model);
	shost_printk(KERN_INFO, shost,
		"  Firmware Version: %s, Channels: %d, Memory Size: %dMB\n",
		fw_version, info->physchan_present, info->mem_size_mb);

	shost_printk(KERN_INFO, shost,
		     "  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
		     shost->can_queue, shost->max_sectors);

	shost_printk(KERN_INFO, shost,
		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
		     shost->can_queue, shost->sg_tablesize, MYRS_SG_LIMIT);
	for (i = 0; i < info->physchan_max; i++) {
		if (!info->max_targets[i])
			continue;
		shost_printk(KERN_INFO, shost,
			     "  Device Channel %d: max %d devices\n",
			     i, info->max_targets[i]);
	}
	shost_printk(KERN_INFO, shost,
		     "  Physical: %d/%d channels, %d disks, %d devices\n",
		     info->physchan_present, info->physchan_max,
		     info->pdisk_present, info->pdev_present);

	shost_printk(KERN_INFO, shost,
		     "  Logical: %d/%d channels, %d disks\n",
		     info->virtchan_present, info->virtchan_max,
		     info->ldev_present);
	return 0;
}

/*
 * myrs_log_event - prints a Controller Event message
 */
static struct {
	int ev_code;
	unsigned char *ev_msg;
} myrs_ev_list[] = {
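	/*
	 * The first character of each message encodes the event source
	 * and is used by myrs_log_event() to dispatch: 'P' physical
	 * device, 'S' physical device sense data, 'L' logical drive,
	 * 'M' maintenance operation on a logical drive, 'E' enclosure,
	 * 'C' controller.
	 */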
	/* Physical Device Events (0x0000 - 0x007F) */
	{ 0x0001, "P Online" },
	{ 0x0002, "P Standby" },
	{ 0x0005, "P Automatic Rebuild Started" },
	{ 0x0006, "P Manual Rebuild Started" },
	{ 0x0007, "P Rebuild Completed" },
	{ 0x0008, "P Rebuild Cancelled" },
	{ 0x0009, "P Rebuild Failed for Unknown Reasons" },
	{ 0x000A, "P Rebuild Failed due to New Physical Device" },
	{ 0x000B, "P Rebuild Failed due to Logical Drive Failure" },
	{ 0x000C, "S Offline" },
	{ 0x000D, "P Found" },
	{ 0x000E, "P Removed" },
	{ 0x000F, "P Unconfigured" },
	{ 0x0010, "P Expand Capacity Started" },
	{ 0x0011, "P Expand Capacity Completed" },
	{ 0x0012, "P Expand Capacity Failed" },
	{ 0x0013, "P Command Timed Out" },
	{ 0x0014, "P Command Aborted" },
	{ 0x0015, "P Command Retried" },
	{ 0x0016, "P Parity Error" },
	{ 0x0017, "P Soft Error" },
	{ 0x0018, "P Miscellaneous Error" },
	{ 0x0019, "P Reset" },
	{ 0x001A, "P Active Spare Found" },
	{ 0x001B, "P Warm Spare Found" },
	{ 0x001C, "S Sense Data Received" },
	{ 0x001D, "P Initialization Started" },
	{ 0x001E, "P Initialization Completed" },
	{ 0x001F, "P Initialization Failed" },
	{ 0x0020, "P Initialization Cancelled" },
	{ 0x0021, "P Failed because Write Recovery Failed" },
	{ 0x0022, "P Failed because SCSI Bus Reset Failed" },
	{ 0x0023, "P Failed because of Double Check Condition" },
	{ 0x0024, "P Failed because Device Cannot Be Accessed" },
	{ 0x0025, "P Failed because of Gross Error on SCSI Processor" },
	{ 0x0026, "P Failed because of Bad Tag from Device" },
	{ 0x0027, "P Failed because of Command Timeout" },
	{ 0x0028, "P Failed because of System Reset" },
	{ 0x0029, "P Failed because of Busy Status or Parity Error" },
	{ 0x002A, "P Failed because Host Set Device to Failed State" },
	{ 0x002B, "P Failed because of Selection Timeout" },
	{ 0x002C, "P Failed because of SCSI Bus Phase Error" },
	{ 0x002D, "P Failed because Device Returned Unknown Status" },
	{ 0x002E, "P Failed because Device Not Ready" },
	{ 0x002F, "P Failed because Device Not Found at Startup" },
	{ 0x0030, "P Failed because COD Write Operation Failed" },
	{ 0x0031, "P Failed because BDT Write Operation Failed" },
	{ 0x0039, "P Missing at Startup" },
	{ 0x003A, "P Start Rebuild Failed due to Physical Drive Too Small" },
	{ 0x003C, "P Temporarily Offline Device Automatically Made Online" },
	{ 0x003D, "P Standby Rebuild Started" },
	/* Logical Device Events (0x0080 - 0x00FF) */
	{ 0x0080, "M Consistency Check Started" },
	{ 0x0081, "M Consistency Check Completed" },
	{ 0x0082, "M Consistency Check Cancelled" },
	{ 0x0083, "M Consistency Check Completed With Errors" },
	{ 0x0084, "M Consistency Check Failed due to Logical Drive Failure" },
	{ 0x0085, "M Consistency Check Failed due to Physical Device Failure" },
	{ 0x0086, "L Offline" },
	{ 0x0087, "L Critical" },
	{ 0x0088, "L Online" },
	{ 0x0089, "M Automatic Rebuild Started" },
	{ 0x008A, "M Manual Rebuild Started" },
	{ 0x008B, "M Rebuild Completed" },
	{ 0x008C, "M Rebuild Cancelled" },
	{ 0x008D, "M Rebuild Failed for Unknown Reasons" },
	{ 0x008E, "M Rebuild Failed due to New Physical Device" },
	{ 0x008F, "M Rebuild Failed due to Logical Drive Failure" },
	{ 0x0090, "M Initialization Started" },
	{ 0x0091, "M Initialization Completed" },
	{ 0x0092, "M Initialization Cancelled" },
	{ 0x0093, "M Initialization Failed" },
	{ 0x0094, "L Found" },
	{ 0x0095, "L Deleted" },
	{ 0x0096, "M Expand Capacity Started" },
	{ 0x0097, "M Expand Capacity Completed" },
	{ 0x0098, "M Expand Capacity Failed" },
	{ 0x0099, "L Bad Block Found" },
	{ 0x009A, "L Size Changed" },
	{ 0x009B, "L Type Changed" },
	{ 0x009C, "L Bad Data Block Found" },
	{ 0x009E, "L Read of Data Block in BDT" },
	{ 0x009F, "L Write Back Data for Disk Block Lost" },
	{ 0x00A0, "L Temporarily Offline RAID-5/3 Drive Made Online" },
	{ 0x00A1, "L Temporarily Offline RAID-6/1/0/7 Drive Made Online" },
	{ 0x00A2, "L Standby Rebuild Started" },
	/* Fault Management Events (0x0100 - 0x017F) */
	{ 0x0140, "E Fan %d Failed" },
	{ 0x0141, "E Fan %d OK" },
	{ 0x0142, "E Fan %d Not Present" },
	{ 0x0143, "E Power Supply %d Failed" },
	{ 0x0144, "E Power Supply %d OK" },
	{ 0x0145, "E Power Supply %d Not Present" },
	{ 0x0146, "E Temperature Sensor %d Temperature Exceeds Safe Limit" },
	{ 0x0147, "E Temperature Sensor %d Temperature Exceeds Working Limit" },
	{ 0x0148, "E Temperature Sensor %d Temperature Normal" },
	{ 0x0149, "E Temperature Sensor %d Not Present" },
	{ 0x014A, "E Enclosure Management Unit %d Access Critical" },
	{ 0x014B, "E Enclosure Management Unit %d Access OK" },
	{ 0x014C, "E Enclosure Management Unit %d Access Offline" },
	/* Controller Events (0x0180 - 0x01FF) */
	{ 0x0181, "C Cache Write Back Error" },
	{ 0x0188, "C Battery Backup Unit Found" },
	{ 0x0189, "C Battery Backup Unit Charge Level Low" },
	{ 0x018A, "C Battery Backup Unit Charge Level OK" },
	{ 0x0193, "C Installation Aborted" },
	{ 0x0195, "C Battery Backup Unit Physically Removed" },
	{ 0x0196, "C Memory Error During Warm Boot" },
	{ 0x019E, "C Memory Soft ECC Error Corrected" },
	{ 0x019F, "C Memory Hard ECC Error Corrected" },
	{ 0x01A2, "C Battery Backup Unit Failed" },
	{ 0x01AB, "C Mirror Race Recovery Failed" },
	{ 0x01AC, "C Mirror Race on Critical Drive" },
	/* Controller Internal Processor Events */
	{ 0x0380, "C Internal Controller Hung" },
	{ 0x0381, "C Internal Controller Firmware Breakpoint" },
	{ 0x0390, "C Internal Controller i960 Processor Specific Error" },
	{ 0x03A0, "C Internal Controller StrongARM Processor Specific Error" },
	{ 0, "" }
};

static void myrs_log_event(struct myrs_hba *cs, struct myrs_event *ev)
{
	unsigned char msg_buf[MYRS_LINE_BUFFER_SIZE];
	int ev_idx = 0, ev_code;
	unsigned char ev_type, *ev_msg;
	struct Scsi_Host *shost = cs->host;
	struct scsi_device *sdev;
	struct scsi_sense_hdr sshdr = {0};
	unsigned char sense_info[4];
	unsigned char cmd_specific[4];

	if (ev->ev_code == 0x1C) {
		if (!scsi_normalize_sense(ev->sense_data, 40, &sshdr)) {
			memset(&sshdr, 0x0, sizeof(sshdr));
			memset(sense_info, 0x0, sizeof(sense_info));
			memset(cmd_specific, 0x0, sizeof(cmd_specific));
		} else {
			memcpy(sense_info, &ev->sense_data[3], 4);
			memcpy(cmd_specific, &ev->sense_data[7], 4);
		}
	}
	if (sshdr.sense_key == VENDOR_SPECIFIC &&
	    (sshdr.asc == 0x80 || sshdr.asc == 0x81))
		ev->ev_code = ((sshdr.asc - 0x80) << 8 | sshdr.ascq);
	while (true) {
		ev_code = myrs_ev_list[ev_idx].ev_code;
		if (ev_code == ev->ev_code || ev_code == 0)
			break;
		ev_idx++;
	}
	ev_type = myrs_ev_list[ev_idx].ev_msg[0];
	ev_msg = &myrs_ev_list[ev_idx].ev_msg[2];
	if (ev_code == 0) {
		shost_printk(KERN_WARNING, shost,
			     "Unknown Controller Event Code %04X\n",
			     ev->ev_code);
		return;
	}
	switch (ev_type) {
	case 'P':
		sdev = scsi_device_lookup(shost, ev->channel,
					  ev->target, 0);
		if (!sdev)
			break;
		sdev_printk(KERN_INFO, sdev, "event %d: Physical Device %s\n",
			    ev->ev_seq, ev_msg);
		if (sdev->hostdata &&
		    sdev->channel < cs->ctlr_info->physchan_present) {
			struct myrs_pdev_info *pdev_info = sdev->hostdata;

			switch (ev->ev_code) {
			case 0x0001:
			case 0x0007:
				pdev_info->dev_state = MYRS_DEVICE_ONLINE;
				break;
			case 0x0002:
				pdev_info->dev_state = MYRS_DEVICE_STANDBY;
				break;
			case 0x000C:
				pdev_info->dev_state = MYRS_DEVICE_OFFLINE;
				break;
			case 0x000E:
				pdev_info->dev_state = MYRS_DEVICE_MISSING;
				break;
			case 0x000F:
				pdev_info->dev_state = MYRS_DEVICE_UNCONFIGURED;
				break;
			}
		}
		scsi_device_put(sdev);
		break;
	case 'L':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'M':
		shost_printk(KERN_INFO, shost,
			     "event %d: Logical Drive %d %s\n",
			     ev->ev_seq, ev->lun, ev_msg);
		cs->needs_update = true;
		break;
	case 'S':
		if (sshdr.sense_key == NO_SENSE ||
		    (sshdr.sense_key == NOT_READY &&
		     sshdr.asc == 0x04 && (sshdr.ascq == 0x01 ||
					    sshdr.ascq == 0x02)))
			break;
		shost_printk(KERN_INFO, shost,
			     "event %d: Physical Device %d:%d %s\n",
			     ev->ev_seq, ev->channel, ev->target, ev_msg);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Key = %X, ASC = %02X, ASCQ = %02X\n",
			     ev->channel, ev->target,
			     sshdr.sense_key, sshdr.asc, sshdr.ascq);
		shost_printk(KERN_INFO, shost,
			     "Physical Device %d:%d Sense Information = %02X%02X%02X%02X %02X%02X%02X%02X\n",
			     ev->channel, ev->target,
			     sense_info[0], sense_info[1],
			     sense_info[2], sense_info[3],
			     cmd_specific[0], cmd_specific[1],
			     cmd_specific[2], cmd_specific[3]);
		break;
	case 'E':
		if (cs->disable_enc_msg)
			break;
		sprintf(msg_buf, ev_msg, ev->lun);
		shost_printk(KERN_INFO, shost, "event %d: Enclosure %d %s\n",
			     ev->ev_seq, ev->target, msg_buf);
		break;
	case 'C':
		shost_printk(KERN_INFO, shost, "event %d: Controller %s\n",
			     ev->ev_seq, ev_msg);
		break;
	default:
		shost_printk(KERN_INFO, shost,
			     "event %d: Unknown Event Code %04X\n",
			     ev->ev_seq, ev->ev_code);
		break;
	}
}

/*
 * SCSI sysfs interface functions
 */
static ssize_t raid_state_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	int ret;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info = sdev->hostdata;
		const char *name;

		name = myrs_devstate_name(ldev_info->dev_state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       ldev_info->dev_state);
	} else {
		struct myrs_pdev_info *pdev_info;
		const char *name;

		pdev_info = sdev->hostdata;
		name = myrs_devstate_name(pdev_info->dev_state);
		if (name)
			ret = snprintf(buf, 32, "%s\n", name);
		else
			ret = snprintf(buf, 32, "Invalid (%02X)\n",
				       pdev_info->dev_state);
	}
	return ret;
}

static ssize_t raid_state_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	enum myrs_devstate new_state;
	unsigned short ldev_num;
	unsigned char status;

	if (!strncmp(buf, "offline", 7) ||
	    !strncmp(buf, "kill", 4))
		new_state = MYRS_DEVICE_OFFLINE;
	else if (!strncmp(buf, "online", 6))
		new_state = MYRS_DEVICE_ONLINE;
	else if (!strncmp(buf, "standby", 7))
		new_state = MYRS_DEVICE_STANDBY;
	else
		return -EINVAL;

	if (sdev->channel < cs->ctlr_info->physchan_present) {
		struct myrs_pdev_info *pdev_info = sdev->hostdata;
		struct myrs_devmap *pdev_devmap =
			(struct myrs_devmap *)&pdev_info->rsvd13;

		if (pdev_info->dev_state == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    myrs_devstate_name(new_state));
			return count;
		}
		status = myrs_translate_pdev(cs, sdev->channel, sdev->id,
					     sdev->lun, pdev_devmap);
		if (status != MYRS_STATUS_SUCCESS)
			return -ENXIO;
		ldev_num = pdev_devmap->ldev_num;
	} else {
		struct myrs_ldev_info *ldev_info = sdev->hostdata;

		if (ldev_info->dev_state == new_state) {
			sdev_printk(KERN_INFO, sdev,
				    "Device already in %s\n",
				    myrs_devstate_name(new_state));
			return count;
		}
		ldev_num = ldev_info->ldev_num;
	}
	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->set_devstate.ioctl_opcode = MYRS_IOCTL_SET_DEVICE_STATE;
	mbox->set_devstate.state = new_state;
	mbox->set_devstate.ldev.ldev_num = ldev_num;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status == MYRS_STATUS_SUCCESS) {
		if (sdev->channel < cs->ctlr_info->physchan_present) {
			struct myrs_pdev_info *pdev_info = sdev->hostdata;

			pdev_info->dev_state = new_state;
		} else {
			struct myrs_ldev_info *ldev_info = sdev->hostdata;

			ldev_info->dev_state = new_state;
		}
		sdev_printk(KERN_INFO, sdev,
			    "Set device state to %s\n",
			    myrs_devstate_name(new_state));
		return count;
	}
	sdev_printk(KERN_INFO, sdev,
		    "Failed to set device state to %s, status 0x%02x\n",
		    myrs_devstate_name(new_state), status);
	return -EINVAL;
}
static DEVICE_ATTR_RW(raid_state);

static ssize_t raid_level_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	const char *name = NULL;

	if (!sdev->hostdata)
		return snprintf(buf, 16, "Unknown\n");

	if (sdev->channel >= cs->ctlr_info->physchan_present) {
		struct myrs_ldev_info *ldev_info;

		ldev_info = sdev->hostdata;
		name = myrs_raid_level_name(ldev_info->raid_level);
		if (!name)
			return snprintf(buf, 32, "Invalid (%02X)\n",
					ldev_info->raid_level);

	} else
		name = myrs_raid_level_name(MYRS_RAID_PHYSICAL);

	return snprintf(buf, 32, "%s\n", name);
}
static DEVICE_ATTR_RO(raid_level);

static ssize_t rebuild_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	unsigned short ldev_num;
	unsigned char status;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return snprintf(buf, 32, "physical device - not rebuilding\n");

	ldev_info = sdev->hostdata;
	ldev_num = ldev_info->ldev_num;
	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (ldev_info->rbld_active) {
		return snprintf(buf, 32, "rebuilding block %zu of %zu\n",
				(size_t)ldev_info->rbld_lba,
				(size_t)ldev_info->cfg_devsize);
	} else
		return snprintf(buf, 32, "not rebuilding\n");
}

static ssize_t rebuild_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned short ldev_num;
	unsigned char status;
	int rebuild, ret;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return -EINVAL;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;

	ret = kstrtoint(buf, 0, &rebuild);
	if (ret)
		return ret;

	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}

	if (rebuild && ldev_info->rbld_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Initiated; already in progress\n");
		return -EALREADY;
	}
	if (!rebuild && !ldev_info->rbld_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not Cancelled; no rebuild in progress\n");
		return count;
	}

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	if (rebuild) {
		mbox->ldev_info.ldev.ldev_num = ldev_num;
		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_START;
	} else {
		mbox->ldev_info.ldev.ldev_num = ldev_num;
		mbox->ldev_info.ioctl_opcode = MYRS_IOCTL_RBLD_DEVICE_STOP;
	}
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status) {
		sdev_printk(KERN_INFO, sdev,
			    "Rebuild Not %s, status 0x%02x\n",
			    rebuild ? "Initiated" : "Cancelled", status);
		ret = -EIO;
	} else {
		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
			    rebuild ? "Initiated" : "Cancelled");
		ret = count;
	}

	return ret;
}
static DEVICE_ATTR_RW(rebuild);

static ssize_t consistency_check_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	unsigned short ldev_num;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return snprintf(buf, 32, "physical device - not checking\n");

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;
	myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (ldev_info->cc_active)
		return snprintf(buf, 32, "checking block %zu of %zu\n",
				(size_t)ldev_info->cc_lba,
				(size_t)ldev_info->cfg_devsize);
	else
		return snprintf(buf, 32, "not checking\n");
}

static ssize_t consistency_check_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	struct myrs_ldev_info *ldev_info;
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned short ldev_num;
	unsigned char status;
	int check, ret;

	if (sdev->channel < cs->ctlr_info->physchan_present)
		return -EINVAL;

	ldev_info = sdev->hostdata;
	if (!ldev_info)
		return -ENXIO;
	ldev_num = ldev_info->ldev_num;

	ret = kstrtoint(buf, 0, &check);
	if (ret)
		return ret;

	status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Failed to get device information, status 0x%02x\n",
			    status);
		return -EIO;
	}
	if (check && ldev_info->cc_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not Initiated; "
			    "already in progress\n");
		return -EALREADY;
	}
	if (!check && !ldev_info->cc_active) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not Cancelled; "
			    "check not in progress\n");
		return count;
	}

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	if (check) {
		mbox->cc.ldev.ldev_num = ldev_num;
		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_START;
		mbox->cc.restore_consistency = true;
		mbox->cc.initialized_area_only = false;
	} else {
		mbox->cc.ldev.ldev_num = ldev_num;
		mbox->cc.ioctl_opcode = MYRS_IOCTL_CC_STOP;
	}
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		sdev_printk(KERN_INFO, sdev,
			    "Consistency Check Not %s, status 0x%02x\n",
			    check ? "Initiated" : "Cancelled", status);
		ret = -EIO;
	} else {
		sdev_printk(KERN_INFO, sdev, "Consistency Check %s\n",
			    check ? "Initiated" : "Cancelled");
		ret = count;
	}

	return ret;
}
static DEVICE_ATTR_RW(consistency_check);

static struct device_attribute *myrs_sdev_attrs[] = {
	&dev_attr_consistency_check,
	&dev_attr_rebuild,
	&dev_attr_raid_state,
	&dev_attr_raid_level,
	NULL,
};

static ssize_t serial_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	char serial[17];

	memcpy(serial, cs->ctlr_info->serial_number, 16);
	serial[16] = '\0';
	return snprintf(buf, 16, "%s\n", serial);
}
static DEVICE_ATTR_RO(serial);

static ssize_t ctlr_num_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 20, "%d\n", cs->host->host_no);
}
static DEVICE_ATTR_RO(ctlr_num);

static struct myrs_cpu_type_tbl {
	enum myrs_cpu_type type;
	char *name;
} myrs_cpu_type_names[] = {
	{ MYRS_CPUTYPE_i960CA, "i960CA" },
	{ MYRS_CPUTYPE_i960RD, "i960RD" },
	{ MYRS_CPUTYPE_i960RN, "i960RN" },
	{ MYRS_CPUTYPE_i960RP, "i960RP" },
	{ MYRS_CPUTYPE_NorthBay, "NorthBay" },
	{ MYRS_CPUTYPE_StrongArm, "StrongARM" },
	{ MYRS_CPUTYPE_i960RM, "i960RM" },
};

static ssize_t processor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cpu_type_tbl *tbl;
	const char *first_processor = NULL;
	const char *second_processor = NULL;
	struct myrs_ctlr_info *info = cs->ctlr_info;
	ssize_t ret;
	int i;

	if (info->cpu[0].cpu_count) {
		tbl = myrs_cpu_type_names;
		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
			if (tbl[i].type == info->cpu[0].cpu_type) {
				first_processor = tbl[i].name;
				break;
			}
		}
	}
	if (info->cpu[1].cpu_count) {
		tbl = myrs_cpu_type_names;
		for (i = 0; i < ARRAY_SIZE(myrs_cpu_type_names); i++) {
			if (tbl[i].type == info->cpu[1].cpu_type) {
				second_processor = tbl[i].name;
				break;
			}
		}
	}
	if (first_processor && second_processor)
		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n"
			       "2: %s (%s, %d cpus)\n",
			       info->cpu[0].cpu_name,
			       first_processor, info->cpu[0].cpu_count,
			       info->cpu[1].cpu_name,
			       second_processor, info->cpu[1].cpu_count);
	else if (first_processor && !second_processor)
		ret = snprintf(buf, 64, "1: %s (%s, %d cpus)\n2: absent\n",
			       info->cpu[0].cpu_name,
			       first_processor, info->cpu[0].cpu_count);
	else if (!first_processor && second_processor)
		ret = snprintf(buf, 64, "1: absent\n2: %s (%s, %d cpus)\n",
			       info->cpu[1].cpu_name,
			       second_processor, info->cpu[1].cpu_count);
	else
		ret = snprintf(buf, 64, "1: absent\n2: absent\n");

	return ret;
}
static DEVICE_ATTR_RO(processor);

static ssize_t model_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 28, "%s\n", cs->model_name);
}
static DEVICE_ATTR_RO(model);

static ssize_t ctlr_type_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 4, "%d\n", cs->ctlr_info->ctlr_type);
}
static DEVICE_ATTR_RO(ctlr_type);

static ssize_t cache_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 8, "%d MB\n", cs->ctlr_info->cache_size_mb);
}
static DEVICE_ATTR_RO(cache_size);

static ssize_t firmware_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 16, "%d.%02d-%02d\n",
			cs->ctlr_info->fw_major_version,
			cs->ctlr_info->fw_minor_version,
			cs->ctlr_info->fw_turn_number);
}
static DEVICE_ATTR_RO(firmware);

static ssize_t discovery_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cmdblk *cmd_blk;
	union myrs_cmd_mbox *mbox;
	unsigned char status;

	mutex_lock(&cs->dcmd_mutex);
	cmd_blk = &cs->dcmd_blk;
	myrs_reset_cmd(cmd_blk);
	mbox = &cmd_blk->mbox;
	mbox->common.opcode = MYRS_CMD_OP_IOCTL;
	mbox->common.id = MYRS_DCMD_TAG;
	mbox->common.control.dma_ctrl_to_host = true;
	mbox->common.control.no_autosense = true;
	mbox->common.ioctl_opcode = MYRS_IOCTL_START_DISCOVERY;
	myrs_exec_cmd(cs, cmd_blk);
	status = cmd_blk->status;
	mutex_unlock(&cs->dcmd_mutex);
	if (status != MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost,
			     "Discovery Not Initiated, status %02X\n",
			     status);
		return -EINVAL;
	}
	shost_printk(KERN_INFO, shost, "Discovery Initiated\n");
	cs->next_evseq = 0;
	cs->needs_update = true;
	queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
	flush_delayed_work(&cs->monitor_work);
	shost_printk(KERN_INFO, shost, "Discovery Completed\n");

	return count;
}
static DEVICE_ATTR_WO(discovery);

static ssize_t flush_cache_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);
	unsigned char status;

	status = myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA,
			     MYRS_RAID_CONTROLLER);
	if (status == MYRS_STATUS_SUCCESS) {
		shost_printk(KERN_INFO, shost, "Cache Flush Completed\n");
		return count;
	}
	shost_printk(KERN_INFO, shost,
		     "Cache Flush failed, status 0x%02x\n", status);
	return -EIO;
}
static DEVICE_ATTR_WO(flush_cache);

static ssize_t disable_enclosure_messages_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct Scsi_Host *shost = class_to_shost(dev);
	struct myrs_hba *cs = shost_priv(shost);

	return snprintf(buf, 3, "%d\n", cs->disable_enc_msg);
}

static ssize_t disable_enclosure_messages_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct scsi_device *sdev = to_scsi_device(dev);
	struct myrs_hba *cs = shost_priv(sdev->host);
	int value, ret;

	ret = kstrtoint(buf, 0, &value);
	if (ret)
		return ret;

	if (value > 2)
		return -EINVAL;

	cs->disable_enc_msg = value;
	return count;
}
static DEVICE_ATTR_RW(disable_enclosure_messages);

static struct device_attribute *myrs_shost_attrs[] = {
	&dev_attr_serial,
	&dev_attr_ctlr_num,
	&dev_attr_processor,
	&dev_attr_model,
	&dev_attr_ctlr_type,
	&dev_attr_cache_size,
	&dev_attr_firmware,
	&dev_attr_discovery,
	&dev_attr_flush_cache,
	&dev_attr_disable_enclosure_messages,
	NULL,
};

/*
 * SCSI midlayer interface
 */
static int myrs_host_reset(struct scsi_cmnd *scmd)
{
	struct Scsi_Host *shost = scmd->device->host;
	struct myrs_hba *cs = shost_priv(shost);

	cs->reset(cs->io_base);
	return SUCCESS;
}

static void myrs_mode_sense(struct myrs_hba *cs, struct scsi_cmnd *scmd,
		struct myrs_ldev_info *ldev_info)
{
	unsigned char modes[32], *mode_pg;
	bool dbd;
	size_t mode_len;

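	/*
	 * Synthesize a MODE SENSE response for logical drives: a mode
	 * parameter header, an optional block descriptor (unless DBD
	 * is set in the CDB), and a Caching mode page (0x08) built
	 * from the logical device's cache settings.
	 */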
	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
	if (dbd) {
		mode_len = 24;
		mode_pg = &modes[4];
	} else {
		mode_len = 32;
		mode_pg = &modes[12];
	}
	memset(modes, 0, sizeof(modes));
	modes[0] = mode_len - 1;
	modes[2] = 0x10; /* Enable FUA */
	if (ldev_info->ldev_control.wce == MYRS_LOGICALDEVICE_RO)
		modes[2] |= 0x80;
	if (!dbd) {
		unsigned char *block_desc = &modes[4];

		modes[3] = 8;
		put_unaligned_be32(ldev_info->cfg_devsize, &block_desc[0]);
		put_unaligned_be32(ldev_info->devsize_bytes, &block_desc[5]);
	}
	mode_pg[0] = 0x08;
	mode_pg[1] = 0x12;
	if (ldev_info->ldev_control.rce == MYRS_READCACHE_DISABLED)
		mode_pg[2] |= 0x01;
	if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
	    ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
		mode_pg[2] |= 0x04;
	if (ldev_info->cacheline_size) {
		mode_pg[2] |= 0x08;
		put_unaligned_be16(1 << ldev_info->cacheline_size,
				   &mode_pg[14]);
	}

	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
}

static int myrs_queuecommand(struct Scsi_Host *shost,
		struct scsi_cmnd *scmd)
{
	struct myrs_hba *cs = shost_priv(shost);
	struct myrs_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
	union myrs_cmd_mbox *mbox = &cmd_blk->mbox;
	struct scsi_device *sdev = scmd->device;
	union myrs_sgl *hw_sge;
	dma_addr_t sense_addr;
	struct scatterlist *sgl;
	unsigned long flags, timeout;
	int nsge;

	if (!scmd->device->hostdata) {
		scmd->result = (DID_NO_CONNECT << 16);
		scmd->scsi_done(scmd);
		return 0;
	}

1601	switch (scmd->cmnd[0]) {
1602	case REPORT_LUNS:
1603		scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x0);
1604		scmd->scsi_done(scmd);
1605		return 0;
1606	case MODE_SENSE:
1607		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1608			struct myrs_ldev_info *ldev_info = sdev->hostdata;
1609
1610			if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1611			    (scmd->cmnd[2] & 0x3F) != 0x08) {
1612				/* Illegal request, invalid field in CDB */
1613				scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
1614			} else {
1615				myrs_mode_sense(cs, scmd, ldev_info);
1616				scmd->result = (DID_OK << 16);
1617			}
1618			scmd->scsi_done(scmd);
1619			return 0;
1620		}
1621		break;
1622	}
1623
1624	myrs_reset_cmd(cmd_blk);
1625	cmd_blk->sense = dma_pool_alloc(cs->sense_pool, GFP_ATOMIC,
1626					&sense_addr);
1627	if (!cmd_blk->sense)
1628		return SCSI_MLQUEUE_HOST_BUSY;
1629	cmd_blk->sense_addr = sense_addr;
1630
	/* rq->timeout is in jiffies; the firmware expects seconds. */
1631	timeout = scmd->request->timeout / HZ;
1632	if (scmd->cmd_len <= 10) {
1633		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1634			struct myrs_ldev_info *ldev_info = sdev->hostdata;
1635
1636			mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10;
1637			mbox->SCSI_10.pdev.lun = ldev_info->lun;
1638			mbox->SCSI_10.pdev.target = ldev_info->target;
1639			mbox->SCSI_10.pdev.channel = ldev_info->channel;
1640			mbox->SCSI_10.pdev.ctlr = 0;
1641		} else {
1642			mbox->SCSI_10.opcode = MYRS_CMD_OP_SCSI_10_PASSTHRU;
1643			mbox->SCSI_10.pdev.lun = sdev->lun;
1644			mbox->SCSI_10.pdev.target = sdev->id;
1645			mbox->SCSI_10.pdev.channel = sdev->channel;
1646		}
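		/*
		 * Command IDs 1 and 2 (MYRS_DCMD_TAG / MYRS_MCMD_TAG) are
		 * reserved for driver-internal commands, so SCSI commands
		 * are tagged from 3 upwards; the interrupt handler reverses
		 * this with scsi_host_find_tag(shost, id - 3).
		 */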
1647		mbox->SCSI_10.id = scmd->request->tag + 3;
1648		mbox->SCSI_10.control.dma_ctrl_to_host =
1649			(scmd->sc_data_direction == DMA_FROM_DEVICE);
1650		if (scmd->request->cmd_flags & REQ_FUA)
1651			mbox->SCSI_10.control.fua = true;
1652		mbox->SCSI_10.dma_size = scsi_bufflen(scmd);
1653		mbox->SCSI_10.sense_addr = cmd_blk->sense_addr;
1654		mbox->SCSI_10.sense_len = MYRS_SENSE_SIZE;
1655		mbox->SCSI_10.cdb_len = scmd->cmd_len;
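		/* The firmware takes a timeout value plus a scale selector;
		 * anything over 60 seconds is expressed in minutes.
		 */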
1656		if (timeout > 60) {
1657			mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
1658			mbox->SCSI_10.tmo.tmo_val = timeout / 60;
1659		} else {
1660			mbox->SCSI_10.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
1661			mbox->SCSI_10.tmo.tmo_val = timeout;
1662		}
1663		memcpy(&mbox->SCSI_10.cdb, scmd->cmnd, scmd->cmd_len);
1664		hw_sge = &mbox->SCSI_10.dma_addr;
1665		cmd_blk->dcdb = NULL;
1666	} else {
1667		dma_addr_t dcdb_dma;
1668
1669		cmd_blk->dcdb = dma_pool_alloc(cs->dcdb_pool, GFP_ATOMIC,
1670					       &dcdb_dma);
1671		if (!cmd_blk->dcdb) {
1672			dma_pool_free(cs->sense_pool, cmd_blk->sense,
1673				      cmd_blk->sense_addr);
1674			cmd_blk->sense = NULL;
1675			cmd_blk->sense_addr = 0;
1676			return SCSI_MLQUEUE_HOST_BUSY;
1677		}
1678		cmd_blk->dcdb_dma = dcdb_dma;
1679		if (scmd->device->channel >= cs->ctlr_info->physchan_present) {
1680			struct myrs_ldev_info *ldev_info = sdev->hostdata;
1681
1682			mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_256;
1683			mbox->SCSI_255.pdev.lun = ldev_info->lun;
1684			mbox->SCSI_255.pdev.target = ldev_info->target;
1685			mbox->SCSI_255.pdev.channel = ldev_info->channel;
1686			mbox->SCSI_255.pdev.ctlr = 0;
1687		} else {
1688			mbox->SCSI_255.opcode = MYRS_CMD_OP_SCSI_255_PASSTHRU;
1689			mbox->SCSI_255.pdev.lun = sdev->lun;
1690			mbox->SCSI_255.pdev.target = sdev->id;
1691			mbox->SCSI_255.pdev.channel = sdev->channel;
1692		}
1693		mbox->SCSI_255.id = scmd->request->tag + 3;
1694		mbox->SCSI_255.control.dma_ctrl_to_host =
1695			(scmd->sc_data_direction == DMA_FROM_DEVICE);
1696		if (scmd->request->cmd_flags & REQ_FUA)
1697			mbox->SCSI_255.control.fua = true;
1698		mbox->SCSI_255.dma_size = scsi_bufflen(scmd);
1699		mbox->SCSI_255.sense_addr = cmd_blk->sense_addr;
1700		mbox->SCSI_255.sense_len = MYRS_SENSE_SIZE;
1701		mbox->SCSI_255.cdb_len = scmd->cmd_len;
1702		mbox->SCSI_255.cdb_addr = cmd_blk->dcdb_dma;
1703		if (timeout > 60) {
1704			mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_MINUTES;
1705			mbox->SCSI_255.tmo.tmo_val = timeout / 60;
1706		} else {
1707			mbox->SCSI_255.tmo.tmo_scale = MYRS_TMO_SCALE_SECONDS;
1708			mbox->SCSI_255.tmo.tmo_val = timeout;
1709		}
1710		memcpy(cmd_blk->dcdb, scmd->cmnd, scmd->cmd_len);
1711		hw_sge = &mbox->SCSI_255.dma_addr;
1712	}
1713	if (scmd->sc_data_direction == DMA_NONE)
1714		goto submit;
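	/*
	 * One or two scatter/gather segments fit directly in the mailbox;
	 * longer lists spill into a dma_pool buffer that the controller
	 * reaches indirectly via the add_sge_mem control bit.
	 */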
1715	nsge = scsi_dma_map(scmd);
1716	if (nsge == 1) {
1717		sgl = scsi_sglist(scmd);
1718		hw_sge->sge[0].sge_addr = (u64)sg_dma_address(sgl);
1719		hw_sge->sge[0].sge_count = (u64)sg_dma_len(sgl);
1720	} else {
1721		struct myrs_sge *hw_sgl;
1722		dma_addr_t hw_sgl_addr;
1723		int i;
1724
1725		if (nsge > 2) {
1726			hw_sgl = dma_pool_alloc(cs->sg_pool, GFP_ATOMIC,
1727						&hw_sgl_addr);
1728			if (WARN_ON(!hw_sgl)) {
1729				if (cmd_blk->dcdb) {
1730					dma_pool_free(cs->dcdb_pool,
1731						      cmd_blk->dcdb,
1732						      cmd_blk->dcdb_dma);
1733					cmd_blk->dcdb = NULL;
1734					cmd_blk->dcdb_dma = 0;
1735				}
1736				dma_pool_free(cs->sense_pool,
1737					      cmd_blk->sense,
1738					      cmd_blk->sense_addr);
1739				cmd_blk->sense = NULL;
1740				cmd_blk->sense_addr = 0;
1741				return SCSI_MLQUEUE_HOST_BUSY;
1742			}
1743			cmd_blk->sgl = hw_sgl;
1744			cmd_blk->sgl_addr = hw_sgl_addr;
1745			if (scmd->cmd_len <= 10)
1746				mbox->SCSI_10.control.add_sge_mem = true;
1747			else
1748				mbox->SCSI_255.control.add_sge_mem = true;
1749			hw_sge->ext.sge0_len = nsge;
1750			hw_sge->ext.sge0_addr = cmd_blk->sgl_addr;
1751		} else {
1752			hw_sgl = hw_sge->sge;
		}
1753
1754		scsi_for_each_sg(scmd, sgl, nsge, i) {
1755			if (WARN_ON(!hw_sgl)) {
1756				scsi_dma_unmap(scmd);
1757				scmd->result = (DID_ERROR << 16);
1758				scmd->scsi_done(scmd);
1759				return 0;
1760			}
1761			hw_sgl->sge_addr = (u64)sg_dma_address(sgl);
1762			hw_sgl->sge_count = (u64)sg_dma_len(sgl);
1763			hw_sgl++;
1764		}
1765	}
1766submit:
1767	spin_lock_irqsave(&cs->queue_lock, flags);
1768	myrs_qcmd(cs, cmd_blk);
1769	spin_unlock_irqrestore(&cs->queue_lock, flags);
1770
1771	return 0;
1772}
1773
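/*
 * Logical device channels start after the physical channels, so the
 * logical device number is derived from the position past that
 * boundary. As a purely illustrative example: with physchan_present
 * == 2 and max_id == 16, a device at channel 3, target 5 maps to
 * ldev_num = 5 + (3 - 2) * 16 = 21.
 */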
1774static unsigned short myrs_translate_ldev(struct myrs_hba *cs,
1775		struct scsi_device *sdev)
1776{
1777	unsigned short ldev_num;
1778	unsigned int chan_offset =
1779		sdev->channel - cs->ctlr_info->physchan_present;
1780
1781	ldev_num = sdev->id + chan_offset * sdev->host->max_id;
1782
1783	return ldev_num;
1784}
1785
1786static int myrs_slave_alloc(struct scsi_device *sdev)
1787{
1788	struct myrs_hba *cs = shost_priv(sdev->host);
1789	unsigned char status;
1790
1791	if (sdev->channel > sdev->host->max_channel)
1792		return 0;
1793
1794	if (sdev->channel >= cs->ctlr_info->physchan_present) {
1795		struct myrs_ldev_info *ldev_info;
1796		unsigned short ldev_num;
1797
1798		if (sdev->lun > 0)
1799			return -ENXIO;
1800
1801		ldev_num = myrs_translate_ldev(cs, sdev);
1802
1803		ldev_info = kzalloc(sizeof(*ldev_info), GFP_KERNEL|GFP_DMA);
1804		if (!ldev_info)
1805			return -ENOMEM;
1806
1807		status = myrs_get_ldev_info(cs, ldev_num, ldev_info);
1808		if (status != MYRS_STATUS_SUCCESS) {
1809			sdev->hostdata = NULL;
1810			kfree(ldev_info);
1811		} else {
1812			enum raid_level level;
1813
1814			dev_dbg(&sdev->sdev_gendev,
1815				"Logical device mapping %d:%d:%d -> %d\n",
1816				ldev_info->channel, ldev_info->target,
1817				ldev_info->lun, ldev_info->ldev_num);
1818
1819			sdev->hostdata = ldev_info;
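			/* Translate the controller's RAID level into the
			 * generic raid_class level for sysfs reporting.
			 */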
1820			switch (ldev_info->raid_level) {
1821			case MYRS_RAID_LEVEL0:
1822				level = RAID_LEVEL_LINEAR;
1823				break;
1824			case MYRS_RAID_LEVEL1:
1825				level = RAID_LEVEL_1;
1826				break;
1827			case MYRS_RAID_LEVEL3:
1828			case MYRS_RAID_LEVEL3F:
1829			case MYRS_RAID_LEVEL3L:
1830				level = RAID_LEVEL_3;
1831				break;
1832			case MYRS_RAID_LEVEL5:
1833			case MYRS_RAID_LEVEL5L:
1834				level = RAID_LEVEL_5;
1835				break;
1836			case MYRS_RAID_LEVEL6:
1837				level = RAID_LEVEL_6;
1838				break;
1839			case MYRS_RAID_LEVELE:
1840			case MYRS_RAID_NEWSPAN:
1841			case MYRS_RAID_SPAN:
1842				level = RAID_LEVEL_LINEAR;
1843				break;
1844			case MYRS_RAID_JBOD:
1845				level = RAID_LEVEL_JBOD;
1846				break;
1847			default:
1848				level = RAID_LEVEL_UNKNOWN;
1849				break;
1850			}
1851			raid_set_level(myrs_raid_template,
1852				       &sdev->sdev_gendev, level);
1853			if (ldev_info->dev_state != MYRS_DEVICE_ONLINE) {
1854				const char *name;
1855
1856				name = myrs_devstate_name(ldev_info->dev_state);
1857				sdev_printk(KERN_DEBUG, sdev,
1858					    "logical device in state %s\n",
1859					    name ? name : "Invalid");
1860			}
1861		}
1862	} else {
1863		struct myrs_pdev_info *pdev_info;
1864
1865		pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1866		if (!pdev_info)
1867			return -ENOMEM;
1868
1869		status = myrs_get_pdev_info(cs, sdev->channel,
1870					    sdev->id, sdev->lun,
1871					    pdev_info);
1872		if (status != MYRS_STATUS_SUCCESS) {
1873			sdev->hostdata = NULL;
1874			kfree(pdev_info);
1875			return -ENXIO;
1876		}
1877		sdev->hostdata = pdev_info;
1878	}
1879	return 0;
1880}
1881
1882static int myrs_slave_configure(struct scsi_device *sdev)
1883{
1884	struct myrs_hba *cs = shost_priv(sdev->host);
1885	struct myrs_ldev_info *ldev_info;
1886
1887	if (sdev->channel > sdev->host->max_channel)
1888		return -ENXIO;
1889
1890	if (sdev->channel < cs->ctlr_info->physchan_present) {
1891		/* Skip HBA device */
1892		if (sdev->type == TYPE_RAID)
1893			return -ENXIO;
1894		sdev->no_uld_attach = 1;
1895		return 0;
1896	}
1897	if (sdev->lun != 0)
1898		return -ENXIO;
1899
1900	ldev_info = sdev->hostdata;
1901	if (!ldev_info)
1902		return -ENXIO;
1903	if (ldev_info->ldev_control.wce == MYRS_WRITECACHE_ENABLED ||
1904	    ldev_info->ldev_control.wce == MYRS_INTELLIGENT_WRITECACHE_ENABLED)
1905		sdev->wce_default_on = 1;
1906	sdev->tagged_supported = 1;
1907	return 0;
1908}
1909
1910static void myrs_slave_destroy(struct scsi_device *sdev)
1911{
1912	kfree(sdev->hostdata);
1913}
1914
1915static struct scsi_host_template myrs_template = {
1916	.module			= THIS_MODULE,
1917	.name			= "DAC960",
1918	.proc_name		= "myrs",
1919	.queuecommand		= myrs_queuecommand,
1920	.eh_host_reset_handler	= myrs_host_reset,
1921	.slave_alloc		= myrs_slave_alloc,
1922	.slave_configure	= myrs_slave_configure,
1923	.slave_destroy		= myrs_slave_destroy,
1924	.cmd_size		= sizeof(struct myrs_cmdblk),
1925	.shost_attrs		= myrs_shost_attrs,
1926	.sdev_attrs		= myrs_sdev_attrs,
1927	.this_id		= -1,
1928};
1929
1930static struct myrs_hba *myrs_alloc_host(struct pci_dev *pdev,
1931		const struct pci_device_id *entry)
1932{
1933	struct Scsi_Host *shost;
1934	struct myrs_hba *cs;
1935
1936	shost = scsi_host_alloc(&myrs_template, sizeof(struct myrs_hba));
1937	if (!shost)
1938		return NULL;
1939
1940	shost->max_cmd_len = 16;
1941	shost->max_lun = 256;
1942	cs = shost_priv(shost);
1943	mutex_init(&cs->dcmd_mutex);
1944	mutex_init(&cs->cinfo_mutex);
1945	cs->host = shost;
1946
1947	return cs;
1948}
1949
1950/*
1951 * RAID template functions
1952 */
1953
1954/**
1955 * myrs_is_raid - return boolean indicating device is raid volume
1956 * @dev: the device struct object
 *
 * Return: 1 if the device sits on a logical (RAID) channel, 0 otherwise.
1957 */
1958static int
1959myrs_is_raid(struct device *dev)
1960{
1961	struct scsi_device *sdev = to_scsi_device(dev);
1962	struct myrs_hba *cs = shost_priv(sdev->host);
1963
1964	return (sdev->channel >= cs->ctlr_info->physchan_present) ? 1 : 0;
1965}
1966
1967/**
1968 * myrs_get_resync - get raid volume resync percent complete
1969 * @dev: the device struct object
1970 */
1971static void
1972myrs_get_resync(struct device *dev)
1973{
1974	struct scsi_device *sdev = to_scsi_device(dev);
1975	struct myrs_hba *cs = shost_priv(sdev->host);
1976	struct myrs_ldev_info *ldev_info = sdev->hostdata;
1977	u64 percent_complete = 0;
1978
1979	if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info)
1980		return;
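	/* Rebuild progress, scaled to a percentage of the configured
	 * device size.
	 */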
1981	if (ldev_info->rbld_active) {
1982		unsigned short ldev_num = ldev_info->ldev_num;
1983
1984		myrs_get_ldev_info(cs, ldev_num, ldev_info);
1985		percent_complete = ldev_info->rbld_lba * 100;
1986		do_div(percent_complete, ldev_info->cfg_devsize);
1987	}
1988	raid_set_resync(myrs_raid_template, dev, percent_complete);
1989}
1990
1991/**
1992 * myrs_get_state - get raid volume status
1993 * @dev: the device struct object
1994 */
1995static void
1996myrs_get_state(struct device *dev)
1997{
1998	struct scsi_device *sdev = to_scsi_device(dev);
1999	struct myrs_hba *cs = shost_priv(sdev->host);
2000	struct myrs_ldev_info *ldev_info = sdev->hostdata;
2001	enum raid_state state = RAID_STATE_UNKNOWN;
2002
2003	if (sdev->channel < cs->ctlr_info->physchan_present || !ldev_info) {
2004		state = RAID_STATE_UNKNOWN;
2005	} else {
2006		switch (ldev_info->dev_state) {
2007		case MYRS_DEVICE_ONLINE:
2008			state = RAID_STATE_ACTIVE;
2009			break;
2010		case MYRS_DEVICE_SUSPECTED_CRITICAL:
2011		case MYRS_DEVICE_CRITICAL:
2012			state = RAID_STATE_DEGRADED;
2013			break;
2014		case MYRS_DEVICE_REBUILD:
2015			state = RAID_STATE_RESYNCING;
2016			break;
2017		case MYRS_DEVICE_UNCONFIGURED:
2018		case MYRS_DEVICE_INVALID_STATE:
2019			state = RAID_STATE_UNKNOWN;
2020			break;
2021		default:
2022			state = RAID_STATE_OFFLINE;
2023		}
2024	}
2025	raid_set_state(myrs_raid_template, dev, state);
2026}
2027
2028static struct raid_function_template myrs_raid_functions = {
2029	.cookie		= &myrs_template,
2030	.is_raid	= myrs_is_raid,
2031	.get_resync	= myrs_get_resync,
2032	.get_state	= myrs_get_state,
2033};
2034
2035/*
2036 * PCI interface functions
2037 */
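/* Commit the controller's write cache to disk; called from
 * myrs_remove() during orderly shutdown.
 */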
2038static void myrs_flush_cache(struct myrs_hba *cs)
2039{
2040	myrs_dev_op(cs, MYRS_IOCTL_FLUSH_DEVICE_DATA, MYRS_RAID_CONTROLLER);
2041}
2042
2043static void myrs_handle_scsi(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk,
2044		struct scsi_cmnd *scmd)
2045{
2046	unsigned char status;
2047
2048	if (!cmd_blk)
2049		return;
2050
2051	scsi_dma_unmap(scmd);
2052	status = cmd_blk->status;
2053	if (cmd_blk->sense) {
2054		if (status == MYRS_STATUS_FAILED && cmd_blk->sense_len) {
2055			unsigned int sense_len = SCSI_SENSE_BUFFERSIZE;
2056
2057			if (sense_len > cmd_blk->sense_len)
2058				sense_len = cmd_blk->sense_len;
2059			memcpy(scmd->sense_buffer, cmd_blk->sense, sense_len);
2060		}
2061		dma_pool_free(cs->sense_pool, cmd_blk->sense,
2062			      cmd_blk->sense_addr);
2063		cmd_blk->sense = NULL;
2064		cmd_blk->sense_addr = 0;
2065	}
2066	if (cmd_blk->dcdb) {
2067		dma_pool_free(cs->dcdb_pool, cmd_blk->dcdb,
2068			      cmd_blk->dcdb_dma);
2069		cmd_blk->dcdb = NULL;
2070		cmd_blk->dcdb_dma = 0;
2071	}
2072	if (cmd_blk->sgl) {
2073		dma_pool_free(cs->sg_pool, cmd_blk->sgl,
2074			      cmd_blk->sgl_addr);
2075		cmd_blk->sgl = NULL;
2076		cmd_blk->sgl_addr = 0;
2077	}
2078	if (cmd_blk->residual)
2079		scsi_set_resid(scmd, cmd_blk->residual);
2080	if (status == MYRS_STATUS_DEVICE_NON_RESPONSIVE ||
2081	    status == MYRS_STATUS_DEVICE_NON_RESPONSIVE2)
2082		scmd->result = (DID_BAD_TARGET << 16);
2083	else
2084		scmd->result = (DID_OK << 16) | status;
2085	scmd->scsi_done(scmd);
2086}
2087
2088static void myrs_handle_cmdblk(struct myrs_hba *cs, struct myrs_cmdblk *cmd_blk)
2089{
2090	if (!cmd_blk)
2091		return;
2092
2093	if (cmd_blk->complete) {
2094		complete(cmd_blk->complete);
2095		cmd_blk->complete = NULL;
2096	}
2097}
2098
2099static void myrs_monitor(struct work_struct *work)
2100{
2101	struct myrs_hba *cs = container_of(work, struct myrs_hba,
2102					   monitor_work.work);
2103	struct Scsi_Host *shost = cs->host;
2104	struct myrs_ctlr_info *info = cs->ctlr_info;
2105	unsigned int epoch = cs->fwstat_buf->epoch;
2106	unsigned long interval = MYRS_PRIMARY_MONITOR_INTERVAL;
2107	unsigned char status;
2108
2109	dev_dbg(&shost->shost_gendev, "monitor tick\n");
2110
2111	status = myrs_get_fwstatus(cs);
2112
2113	if (cs->needs_update) {
2114		cs->needs_update = false;
2115		mutex_lock(&cs->cinfo_mutex);
2116		status = myrs_get_ctlr_info(cs);
2117		mutex_unlock(&cs->cinfo_mutex);
2118	}
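	/* Fetch and log any controller events we have not yet seen; drop
	 * to a one-jiffy poll interval while draining the backlog.
	 */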
2119	if (cs->fwstat_buf->next_evseq - cs->next_evseq > 0) {
2120		status = myrs_get_event(cs, cs->next_evseq,
2121					cs->event_buf);
2122		if (status == MYRS_STATUS_SUCCESS) {
2123			myrs_log_event(cs, cs->event_buf);
2124			cs->next_evseq++;
2125			interval = 1;
2126		}
2127	}
2128
2129	if (time_after(jiffies, cs->secondary_monitor_time
2130		       + MYRS_SECONDARY_MONITOR_INTERVAL))
2131		cs->secondary_monitor_time = jiffies;
2132
2133	if (info->bg_init_active +
2134	    info->ldev_init_active +
2135	    info->pdev_init_active +
2136	    info->cc_active +
2137	    info->rbld_active +
2138	    info->exp_active != 0) {
2139		struct scsi_device *sdev;
2140
2141		shost_for_each_device(sdev, shost) {
2142			struct myrs_ldev_info *ldev_info;
2143			int ldev_num;
2144
2145			if (sdev->channel < info->physchan_present)
2146				continue;
2147			ldev_info = sdev->hostdata;
2148			if (!ldev_info)
2149				continue;
2150			ldev_num = ldev_info->ldev_num;
2151			myrs_get_ldev_info(cs, ldev_num, ldev_info);
2152		}
2153		cs->needs_update = true;
2154	}
2155	if (epoch == cs->epoch &&
2156	    cs->fwstat_buf->next_evseq == cs->next_evseq &&
2157	    (!cs->needs_update ||
2158	     time_before(jiffies, cs->primary_monitor_time
2159			 + MYRS_PRIMARY_MONITOR_INTERVAL))) {
2160		interval = MYRS_SECONDARY_MONITOR_INTERVAL;
2161	}
2162
2163	if (interval > 1)
2164		cs->primary_monitor_time = jiffies;
2165	queue_delayed_work(cs->work_q, &cs->monitor_work, interval);
2166}
2167
2168static bool myrs_create_mempools(struct pci_dev *pdev, struct myrs_hba *cs)
2169{
2170	struct Scsi_Host *shost = cs->host;
2171	size_t elem_size, elem_align;
2172
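	/* Each sg_pool element holds a complete scatter/gather table for
	 * one command: sg_tablesize entries of struct myrs_sge.
	 */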
2173	elem_align = sizeof(struct myrs_sge);
2174	elem_size = shost->sg_tablesize * elem_align;
2175	cs->sg_pool = dma_pool_create("myrs_sg", &pdev->dev,
2176				      elem_size, elem_align, 0);
2177	if (cs->sg_pool == NULL) {
2178		shost_printk(KERN_ERR, shost,
2179			     "Failed to allocate SG pool\n");
2180		return false;
2181	}
2182
2183	cs->sense_pool = dma_pool_create("myrs_sense", &pdev->dev,
2184					 MYRS_SENSE_SIZE, sizeof(int), 0);
2185	if (cs->sense_pool == NULL) {
2186		dma_pool_destroy(cs->sg_pool);
2187		cs->sg_pool = NULL;
2188		shost_printk(KERN_ERR, shost,
2189			     "Failed to allocate sense data pool\n");
2190		return false;
2191	}
2192
2193	cs->dcdb_pool = dma_pool_create("myrs_dcdb", &pdev->dev,
2194					MYRS_DCDB_SIZE,
2195					sizeof(unsigned char), 0);
2196	if (!cs->dcdb_pool) {
2197		dma_pool_destroy(cs->sg_pool);
2198		cs->sg_pool = NULL;
2199		dma_pool_destroy(cs->sense_pool);
2200		cs->sense_pool = NULL;
2201		shost_printk(KERN_ERR, shost,
2202			     "Failed to allocate DCDB pool\n");
2203		return false;
2204	}
2205
2206	snprintf(cs->work_q_name, sizeof(cs->work_q_name),
2207		 "myrs_wq_%d", shost->host_no);
2208	cs->work_q = create_singlethread_workqueue(cs->work_q_name);
2209	if (!cs->work_q) {
2210		dma_pool_destroy(cs->dcdb_pool);
2211		cs->dcdb_pool = NULL;
2212		dma_pool_destroy(cs->sg_pool);
2213		cs->sg_pool = NULL;
2214		dma_pool_destroy(cs->sense_pool);
2215		cs->sense_pool = NULL;
2216		shost_printk(KERN_ERR, shost,
2217			     "Failed to create workqueue\n");
2218		return false;
2219	}
2220
2221	/* Kick off the periodic monitoring work. */
2222	INIT_DELAYED_WORK(&cs->monitor_work, myrs_monitor);
2223	queue_delayed_work(cs->work_q, &cs->monitor_work, 1);
2224
2225	return true;
2226}
2227
2228static void myrs_destroy_mempools(struct myrs_hba *cs)
2229{
2230	cancel_delayed_work_sync(&cs->monitor_work);
2231	destroy_workqueue(cs->work_q);
2232
2233	dma_pool_destroy(cs->sg_pool);
2234	dma_pool_destroy(cs->dcdb_pool);
2235	dma_pool_destroy(cs->sense_pool);
2236}
2237
2238static void myrs_unmap(struct myrs_hba *cs)
2239{
2240	kfree(cs->event_buf);
2241	kfree(cs->ctlr_info);
2242	if (cs->fwstat_buf) {
2243		dma_free_coherent(&cs->pdev->dev, sizeof(struct myrs_fwstat),
2244				  cs->fwstat_buf, cs->fwstat_addr);
2245		cs->fwstat_buf = NULL;
2246	}
2247	if (cs->first_stat_mbox) {
2248		dma_free_coherent(&cs->pdev->dev, cs->stat_mbox_size,
2249				  cs->first_stat_mbox, cs->stat_mbox_addr);
2250		cs->first_stat_mbox = NULL;
2251	}
2252	if (cs->first_cmd_mbox) {
2253		dma_free_coherent(&cs->pdev->dev, cs->cmd_mbox_size,
2254				  cs->first_cmd_mbox, cs->cmd_mbox_addr);
2255		cs->first_cmd_mbox = NULL;
2256	}
2257}
2258
2259static void myrs_cleanup(struct myrs_hba *cs)
2260{
2261	struct pci_dev *pdev = cs->pdev;
2262
2263	/* Free the memory mailbox, status, and related structures */
2264	myrs_unmap(cs);
2265
2266	if (cs->mmio_base) {
2267		cs->disable_intr(cs);
2268		iounmap(cs->mmio_base);
2269		cs->mmio_base = NULL;
2270	}
2271	if (cs->irq)
2272		free_irq(cs->irq, cs);
2273	if (cs->io_addr)
2274		release_region(cs->io_addr, 0x80);
2275	pci_set_drvdata(pdev, NULL);
2276	pci_disable_device(pdev);
2277	scsi_host_put(cs->host);
2278}
2279
2280static struct myrs_hba *myrs_detect(struct pci_dev *pdev,
2281		const struct pci_device_id *entry)
2282{
2283	struct myrs_privdata *privdata =
2284		(struct myrs_privdata *)entry->driver_data;
2285	irq_handler_t irq_handler = privdata->irq_handler;
2286	unsigned int mmio_size = privdata->mmio_size;
2287	struct myrs_hba *cs = NULL;
2288
2289	cs = myrs_alloc_host(pdev, entry);
2290	if (!cs) {
2291		dev_err(&pdev->dev, "Unable to allocate Controller\n");
2292		return NULL;
2293	}
2294	cs->pdev = pdev;
2295
2296	if (pci_enable_device(pdev))
2297		goto Failure;
2298
2299	cs->pci_addr = pci_resource_start(pdev, 0);
2300
2301	pci_set_drvdata(pdev, cs);
2302	spin_lock_init(&cs->queue_lock);
2303	/* Map the Controller Register Window. */
2304	if (mmio_size < PAGE_SIZE)
2305		mmio_size = PAGE_SIZE;
2306	cs->mmio_base = ioremap(cs->pci_addr & PAGE_MASK, mmio_size);
2307	if (cs->mmio_base == NULL) {
2308		dev_err(&pdev->dev,
2309			"Unable to map Controller Register Window\n");
2310		goto Failure;
2311	}
2312
2313	cs->io_base = cs->mmio_base + (cs->pci_addr & ~PAGE_MASK);
2314	if (privdata->hw_init(pdev, cs, cs->io_base))
2315		goto Failure;
2316
2317	/* Acquire shared access to the IRQ Channel. */
2318	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrs", cs) < 0) {
2319		dev_err(&pdev->dev,
2320			"Unable to acquire IRQ Channel %d\n", pdev->irq);
2321		goto Failure;
2322	}
2323	cs->irq = pdev->irq;
2324	return cs;
2325
2326Failure:
2327	dev_err(&pdev->dev,
2328		"Failed to initialize Controller\n");
2329	myrs_cleanup(cs);
2330	return NULL;
2331}
2332
2333/*
2334 * myrs_err_status reports Controller BIOS Messages passed through
2335 * the Error Status Register when the driver performs the BIOS handshaking.
2336 * It returns true for fatal errors and false otherwise.
2337 */
2338
2339static bool myrs_err_status(struct myrs_hba *cs, unsigned char status,
2340		unsigned char parm0, unsigned char parm1)
2341{
2342	struct pci_dev *pdev = cs->pdev;
2343
2344	switch (status) {
2345	case 0x00:
2346		dev_info(&pdev->dev,
2347			 "Physical Device %d:%d Not Responding\n",
2348			 parm1, parm0);
2349		break;
2350	case 0x08:
2351		dev_notice(&pdev->dev, "Spinning Up Drives\n");
2352		break;
2353	case 0x30:
2354		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2355		break;
2356	case 0x60:
2357		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2358		break;
2359	case 0x70:
2360		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2361		break;
2362	case 0x90:
2363		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2364			   parm1, parm0);
2365		break;
2366	case 0xA0:
2367		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2368		break;
2369	case 0xB0:
2370		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2371		break;
2372	case 0xD0:
2373		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2374		break;
2375	case 0xF0:
2376		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2377		return true;
2378	default:
2379		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2380			status);
2381		return true;
2382	}
2383	return false;
2384}
2385
2386/*
2387 * Hardware-specific functions
2388 */
2389
2390/*
2391 * DAC960 GEM Series Controllers.
2392 */
2393
2394static inline void DAC960_GEM_hw_mbox_new_cmd(void __iomem *base)
2395{
2396	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
2397
2398	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2399}
2400
2401static inline void DAC960_GEM_ack_hw_mbox_status(void __iomem *base)
2402{
2403	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_ACK_STS << 24);
2404
2405	writel(val, base + DAC960_GEM_IDB_CLEAR_OFFSET);
2406}
2407
2408static inline void DAC960_GEM_reset_ctrl(void __iomem *base)
2409{
2410	__le32 val = cpu_to_le32(DAC960_GEM_IDB_CTRL_RESET << 24);
2411
2412	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2413}
2414
2415static inline void DAC960_GEM_mem_mbox_new_cmd(void __iomem *base)
2416{
2417	__le32 val = cpu_to_le32(DAC960_GEM_IDB_HWMBOX_NEW_CMD << 24);
2418
2419	writel(val, base + DAC960_GEM_IDB_READ_OFFSET);
2420}
2421
2422static inline bool DAC960_GEM_hw_mbox_is_full(void __iomem *base)
2423{
2424	__le32 val;
2425
2426	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
2427	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_HWMBOX_FULL;
2428}
2429
2430static inline bool DAC960_GEM_init_in_progress(void __iomem *base)
2431{
2432	__le32 val;
2433
2434	val = readl(base + DAC960_GEM_IDB_READ_OFFSET);
2435	return (le32_to_cpu(val) >> 24) & DAC960_GEM_IDB_INIT_IN_PROGRESS;
2436}
2437
2438static inline void DAC960_GEM_ack_hw_mbox_intr(void __iomem *base)
2439{
2440	__le32 val = cpu_to_le32(DAC960_GEM_ODB_HWMBOX_ACK_IRQ << 24);
2441
2442	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
2443}
2444
2445static inline void DAC960_GEM_ack_intr(void __iomem *base)
2446{
2447	__le32 val = cpu_to_le32((DAC960_GEM_ODB_HWMBOX_ACK_IRQ |
2448				  DAC960_GEM_ODB_MMBOX_ACK_IRQ) << 24);
2449
2450	writel(val, base + DAC960_GEM_ODB_CLEAR_OFFSET);
2451}
2452
2453static inline bool DAC960_GEM_hw_mbox_status_available(void __iomem *base)
2454{
2455	__le32 val;
2456
2457	val = readl(base + DAC960_GEM_ODB_READ_OFFSET);
2458	return (le32_to_cpu(val) >> 24) & DAC960_GEM_ODB_HWMBOX_STS_AVAIL;
2459}
2460
2461static inline void DAC960_GEM_enable_intr(void __iomem *base)
2462{
2463	__le32 val = cpu_to_le32((DAC960_GEM_IRQMASK_HWMBOX_IRQ |
2464				  DAC960_GEM_IRQMASK_MMBOX_IRQ) << 24);
2465	writel(val, base + DAC960_GEM_IRQMASK_CLEAR_OFFSET);
2466}
2467
2468static inline void DAC960_GEM_disable_intr(void __iomem *base)
2469{
2470	__le32 val = 0;
2471
2472	writel(val, base + DAC960_GEM_IRQMASK_READ_OFFSET);
2473}
2474
2475static inline void DAC960_GEM_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
2476		union myrs_cmd_mbox *mbox)
2477{
2478	memcpy(&mem_mbox->words[1], &mbox->words[1],
2479	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
2480	/* Barrier to avoid reordering */
2481	wmb();
2482	mem_mbox->words[0] = mbox->words[0];
2483	/* Barrier to force PCI access */
2484	mb();
2485}
2486
2487static inline void DAC960_GEM_write_hw_mbox(void __iomem *base,
2488		dma_addr_t cmd_mbox_addr)
2489{
2490	dma_addr_writeql(cmd_mbox_addr, base + DAC960_GEM_CMDMBX_OFFSET);
2491}
2492
2493static inline unsigned char DAC960_GEM_read_cmd_status(void __iomem *base)
2494{
2495	return readw(base + DAC960_GEM_CMDSTS_OFFSET + 2);
2496}
2497
2498static inline bool
2499DAC960_GEM_read_error_status(void __iomem *base, unsigned char *error,
2500		unsigned char *param0, unsigned char *param1)
2501{
2502	__le32 val;
2503
2504	val = readl(base + DAC960_GEM_ERRSTS_READ_OFFSET);
2505	if (!((le32_to_cpu(val) >> 24) & DAC960_GEM_ERRSTS_PENDING))
2506		return false;
2507	*error = val & ~(DAC960_GEM_ERRSTS_PENDING << 24);
2508	*param0 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 0);
2509	*param1 = readb(base + DAC960_GEM_CMDMBX_OFFSET + 1);
2510	writel(0x03000000, base + DAC960_GEM_ERRSTS_CLEAR_OFFSET);
2511	return true;
2512}
2513
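/*
 * Hardware mailbox handshake: wait for the inbound mailbox to drain,
 * post the address of the initialization command mailbox, then poll
 * for completion status and acknowledge both the interrupt and the
 * status. The BA and LP variants below follow the same sequence
 * through byte-wide registers.
 */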
2514static inline unsigned char
2515DAC960_GEM_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
2516{
2517	unsigned char status;
2518
2519	while (DAC960_GEM_hw_mbox_is_full(base))
2520		udelay(1);
2521	DAC960_GEM_write_hw_mbox(base, mbox_addr);
2522	DAC960_GEM_hw_mbox_new_cmd(base);
2523	while (!DAC960_GEM_hw_mbox_status_available(base))
2524		udelay(1);
2525	status = DAC960_GEM_read_cmd_status(base);
2526	DAC960_GEM_ack_hw_mbox_intr(base);
2527	DAC960_GEM_ack_hw_mbox_status(base);
2528
2529	return status;
2530}
2531
2532static int DAC960_GEM_hw_init(struct pci_dev *pdev,
2533		struct myrs_hba *cs, void __iomem *base)
2534{
2535	int timeout = 0;
2536	unsigned char status, parm0, parm1;
2537
2538	DAC960_GEM_disable_intr(base);
2539	DAC960_GEM_ack_hw_mbox_status(base);
2540	udelay(1000);
2541	while (DAC960_GEM_init_in_progress(base) &&
2542	       timeout < MYRS_MAILBOX_TIMEOUT) {
2543		if (DAC960_GEM_read_error_status(base, &status,
2544						 &parm0, &parm1) &&
2545		    myrs_err_status(cs, status, parm0, parm1))
2546			return -EIO;
2547		udelay(10);
2548		timeout++;
2549	}
2550	if (timeout == MYRS_MAILBOX_TIMEOUT) {
2551		dev_err(&pdev->dev,
2552			"Timeout waiting for Controller Initialisation\n");
2553		return -ETIMEDOUT;
2554	}
2555	if (!myrs_enable_mmio_mbox(cs, DAC960_GEM_mbox_init)) {
2556		dev_err(&pdev->dev,
2557			"Unable to Enable Memory Mailbox Interface\n");
2558		DAC960_GEM_reset_ctrl(base);
2559		return -EAGAIN;
2560	}
2561	DAC960_GEM_enable_intr(base);
2562	cs->write_cmd_mbox = DAC960_GEM_write_cmd_mbox;
2563	cs->get_cmd_mbox = DAC960_GEM_mem_mbox_new_cmd;
2564	cs->disable_intr = DAC960_GEM_disable_intr;
2565	cs->reset = DAC960_GEM_reset_ctrl;
2566	return 0;
2567}
2568
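/*
 * Status mailboxes form a ring: an ID of zero marks an empty slot,
 * IDs 1 and 2 are driver-internal commands, and anything else maps
 * back to a SCSI command tag (id - 3). Consumed entries are zeroed
 * so the slot reads as empty on the next pass.
 */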
2569static irqreturn_t DAC960_GEM_intr_handler(int irq, void *arg)
2570{
2571	struct myrs_hba *cs = arg;
2572	void __iomem *base = cs->io_base;
2573	struct myrs_stat_mbox *next_stat_mbox;
2574	unsigned long flags;
2575
2576	spin_lock_irqsave(&cs->queue_lock, flags);
2577	DAC960_GEM_ack_intr(base);
2578	next_stat_mbox = cs->next_stat_mbox;
2579	while (next_stat_mbox->id > 0) {
2580		unsigned short id = next_stat_mbox->id;
2581		struct scsi_cmnd *scmd = NULL;
2582		struct myrs_cmdblk *cmd_blk = NULL;
2583
2584		if (id == MYRS_DCMD_TAG)
2585			cmd_blk = &cs->dcmd_blk;
2586		else if (id == MYRS_MCMD_TAG)
2587			cmd_blk = &cs->mcmd_blk;
2588		else {
2589			scmd = scsi_host_find_tag(cs->host, id - 3);
2590			if (scmd)
2591				cmd_blk = scsi_cmd_priv(scmd);
2592		}
2593		if (cmd_blk) {
2594			cmd_blk->status = next_stat_mbox->status;
2595			cmd_blk->sense_len = next_stat_mbox->sense_len;
2596			cmd_blk->residual = next_stat_mbox->residual;
2597		} else {
2598			dev_err(&cs->pdev->dev,
2599				"Unhandled command completion %d\n", id);
		}
2600
2601		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
2602		if (++next_stat_mbox > cs->last_stat_mbox)
2603			next_stat_mbox = cs->first_stat_mbox;
2604
2605		if (cmd_blk) {
2606			if (id < 3)
2607				myrs_handle_cmdblk(cs, cmd_blk);
2608			else
2609				myrs_handle_scsi(cs, cmd_blk, scmd);
2610		}
2611	}
2612	cs->next_stat_mbox = next_stat_mbox;
2613	spin_unlock_irqrestore(&cs->queue_lock, flags);
2614	return IRQ_HANDLED;
2615}
2616
2617static struct myrs_privdata DAC960_GEM_privdata = {
2618	.hw_init =		DAC960_GEM_hw_init,
2619	.irq_handler =		DAC960_GEM_intr_handler,
2620	.mmio_size =		DAC960_GEM_mmio_size,
2621};
2622
2623/*
2624 * DAC960 BA Series Controllers.
2625 */
2626
2627static inline void DAC960_BA_hw_mbox_new_cmd(void __iomem *base)
2628{
2629	writeb(DAC960_BA_IDB_HWMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
2630}
2631
2632static inline void DAC960_BA_ack_hw_mbox_status(void __iomem *base)
2633{
2634	writeb(DAC960_BA_IDB_HWMBOX_ACK_STS, base + DAC960_BA_IDB_OFFSET);
2635}
2636
2637static inline void DAC960_BA_reset_ctrl(void __iomem *base)
2638{
2639	writeb(DAC960_BA_IDB_CTRL_RESET, base + DAC960_BA_IDB_OFFSET);
2640}
2641
2642static inline void DAC960_BA_mem_mbox_new_cmd(void __iomem *base)
2643{
2644	writeb(DAC960_BA_IDB_MMBOX_NEW_CMD, base + DAC960_BA_IDB_OFFSET);
2645}
2646
2647static inline bool DAC960_BA_hw_mbox_is_full(void __iomem *base)
2648{
2649	u8 val;
2650
2651	val = readb(base + DAC960_BA_IDB_OFFSET);
2652	return !(val & DAC960_BA_IDB_HWMBOX_EMPTY);
2653}
2654
2655static inline bool DAC960_BA_init_in_progress(void __iomem *base)
2656{
2657	u8 val;
2658
2659	val = readb(base + DAC960_BA_IDB_OFFSET);
2660	return !(val & DAC960_BA_IDB_INIT_DONE);
2661}
2662
2663static inline void DAC960_BA_ack_hw_mbox_intr(void __iomem *base)
2664{
2665	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ, base + DAC960_BA_ODB_OFFSET);
2666}
2667
2668static inline void DAC960_BA_ack_intr(void __iomem *base)
2669{
2670	writeb(DAC960_BA_ODB_HWMBOX_ACK_IRQ | DAC960_BA_ODB_MMBOX_ACK_IRQ,
2671	       base + DAC960_BA_ODB_OFFSET);
2672}
2673
2674static inline bool DAC960_BA_hw_mbox_status_available(void __iomem *base)
2675{
2676	u8 val;
2677
2678	val = readb(base + DAC960_BA_ODB_OFFSET);
2679	return val & DAC960_BA_ODB_HWMBOX_STS_AVAIL;
2680}
2681
2682static inline void DAC960_BA_enable_intr(void __iomem *base)
2683{
2684	writeb(~DAC960_BA_IRQMASK_DISABLE_IRQ, base + DAC960_BA_IRQMASK_OFFSET);
2685}
2686
2687static inline void DAC960_BA_disable_intr(void __iomem *base)
2688{
2689	writeb(0xFF, base + DAC960_BA_IRQMASK_OFFSET);
2690}
2691
2692static inline void DAC960_BA_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
2693		union myrs_cmd_mbox *mbox)
2694{
2695	memcpy(&mem_mbox->words[1], &mbox->words[1],
2696	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
2697	/* Barrier to avoid reordering */
2698	wmb();
2699	mem_mbox->words[0] = mbox->words[0];
2700	/* Barrier to force PCI access */
2701	mb();
2702}
2703
2705static inline void DAC960_BA_write_hw_mbox(void __iomem *base,
2706		dma_addr_t cmd_mbox_addr)
2707{
2708	dma_addr_writeql(cmd_mbox_addr, base + DAC960_BA_CMDMBX_OFFSET);
2709}
2710
2711static inline unsigned char DAC960_BA_read_cmd_status(void __iomem *base)
2712{
2713	return readw(base + DAC960_BA_CMDSTS_OFFSET + 2);
2714}
2715
2716static inline bool
2717DAC960_BA_read_error_status(void __iomem *base, unsigned char *error,
2718		unsigned char *param0, unsigned char *param1)
2719{
2720	u8 val;
2721
2722	val = readb(base + DAC960_BA_ERRSTS_OFFSET);
2723	if (!(val & DAC960_BA_ERRSTS_PENDING))
2724		return false;
2725	val &= ~DAC960_BA_ERRSTS_PENDING;
2726	*error = val;
2727	*param0 = readb(base + DAC960_BA_CMDMBX_OFFSET + 0);
2728	*param1 = readb(base + DAC960_BA_CMDMBX_OFFSET + 1);
2729	writeb(0xFF, base + DAC960_BA_ERRSTS_OFFSET);
2730	return true;
2731}
2732
2733static inline unsigned char
2734DAC960_BA_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
2735{
2736	unsigned char status;
2737
2738	while (DAC960_BA_hw_mbox_is_full(base))
2739		udelay(1);
2740	DAC960_BA_write_hw_mbox(base, mbox_addr);
2741	DAC960_BA_hw_mbox_new_cmd(base);
2742	while (!DAC960_BA_hw_mbox_status_available(base))
2743		udelay(1);
2744	status = DAC960_BA_read_cmd_status(base);
2745	DAC960_BA_ack_hw_mbox_intr(base);
2746	DAC960_BA_ack_hw_mbox_status(base);
2747
2748	return status;
2749}
2750
2751static int DAC960_BA_hw_init(struct pci_dev *pdev,
2752		struct myrs_hba *cs, void __iomem *base)
2753{
2754	int timeout = 0;
2755	unsigned char status, parm0, parm1;
2756
2757	DAC960_BA_disable_intr(base);
2758	DAC960_BA_ack_hw_mbox_status(base);
2759	udelay(1000);
2760	while (DAC960_BA_init_in_progress(base) &&
2761	       timeout < MYRS_MAILBOX_TIMEOUT) {
2762		if (DAC960_BA_read_error_status(base, &status,
2763					      &parm0, &parm1) &&
2764		    myrs_err_status(cs, status, parm0, parm1))
2765			return -EIO;
2766		udelay(10);
2767		timeout++;
2768	}
2769	if (timeout == MYRS_MAILBOX_TIMEOUT) {
2770		dev_err(&pdev->dev,
2771			"Timeout waiting for Controller Initialisation\n");
2772		return -ETIMEDOUT;
2773	}
2774	if (!myrs_enable_mmio_mbox(cs, DAC960_BA_mbox_init)) {
2775		dev_err(&pdev->dev,
2776			"Unable to Enable Memory Mailbox Interface\n");
2777		DAC960_BA_reset_ctrl(base);
2778		return -EAGAIN;
2779	}
2780	DAC960_BA_enable_intr(base);
2781	cs->write_cmd_mbox = DAC960_BA_write_cmd_mbox;
2782	cs->get_cmd_mbox = DAC960_BA_mem_mbox_new_cmd;
2783	cs->disable_intr = DAC960_BA_disable_intr;
2784	cs->reset = DAC960_BA_reset_ctrl;
2785	return 0;
2786}
2787
2788static irqreturn_t DAC960_BA_intr_handler(int irq, void *arg)
2789{
2790	struct myrs_hba *cs = arg;
2791	void __iomem *base = cs->io_base;
2792	struct myrs_stat_mbox *next_stat_mbox;
2793	unsigned long flags;
2794
2795	spin_lock_irqsave(&cs->queue_lock, flags);
2796	DAC960_BA_ack_intr(base);
2797	next_stat_mbox = cs->next_stat_mbox;
2798	while (next_stat_mbox->id > 0) {
2799		unsigned short id = next_stat_mbox->id;
2800		struct scsi_cmnd *scmd = NULL;
2801		struct myrs_cmdblk *cmd_blk = NULL;
2802
2803		if (id == MYRS_DCMD_TAG)
2804			cmd_blk = &cs->dcmd_blk;
2805		else if (id == MYRS_MCMD_TAG)
2806			cmd_blk = &cs->mcmd_blk;
2807		else {
2808			scmd = scsi_host_find_tag(cs->host, id - 3);
2809			if (scmd)
2810				cmd_blk = scsi_cmd_priv(scmd);
2811		}
2812		if (cmd_blk) {
2813			cmd_blk->status = next_stat_mbox->status;
2814			cmd_blk->sense_len = next_stat_mbox->sense_len;
2815			cmd_blk->residual = next_stat_mbox->residual;
2816		} else {
2817			dev_err(&cs->pdev->dev,
2818				"Unhandled command completion %d\n", id);
		}
2819
2820		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
2821		if (++next_stat_mbox > cs->last_stat_mbox)
2822			next_stat_mbox = cs->first_stat_mbox;
2823
2824		if (cmd_blk) {
2825			if (id < 3)
2826				myrs_handle_cmdblk(cs, cmd_blk);
2827			else
2828				myrs_handle_scsi(cs, cmd_blk, scmd);
2829		}
2830	}
2831	cs->next_stat_mbox = next_stat_mbox;
2832	spin_unlock_irqrestore(&cs->queue_lock, flags);
2833	return IRQ_HANDLED;
2834}
2835
2836static struct myrs_privdata DAC960_BA_privdata = {
2837	.hw_init =		DAC960_BA_hw_init,
2838	.irq_handler =		DAC960_BA_intr_handler,
2839	.mmio_size =		DAC960_BA_mmio_size,
2840};
2841
2842/*
2843 * DAC960 LP Series Controllers.
2844 */
2845
2846static inline void DAC960_LP_hw_mbox_new_cmd(void __iomem *base)
2847{
2848	writeb(DAC960_LP_IDB_HWMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
2849}
2850
2851static inline void DAC960_LP_ack_hw_mbox_status(void __iomem *base)
2852{
2853	writeb(DAC960_LP_IDB_HWMBOX_ACK_STS, base + DAC960_LP_IDB_OFFSET);
2854}
2855
2856static inline void DAC960_LP_reset_ctrl(void __iomem *base)
2857{
2858	writeb(DAC960_LP_IDB_CTRL_RESET, base + DAC960_LP_IDB_OFFSET);
2859}
2860
2861static inline void DAC960_LP_mem_mbox_new_cmd(void __iomem *base)
2862{
2863	writeb(DAC960_LP_IDB_MMBOX_NEW_CMD, base + DAC960_LP_IDB_OFFSET);
2864}
2865
2866static inline bool DAC960_LP_hw_mbox_is_full(void __iomem *base)
2867{
2868	u8 val;
2869
2870	val = readb(base + DAC960_LP_IDB_OFFSET);
2871	return val & DAC960_LP_IDB_HWMBOX_FULL;
2872}
2873
2874static inline bool DAC960_LP_init_in_progress(void __iomem *base)
2875{
2876	u8 val;
2877
2878	val = readb(base + DAC960_LP_IDB_OFFSET);
2879	return val & DAC960_LP_IDB_INIT_IN_PROGRESS;
2880}
2881
2882static inline void DAC960_LP_ack_hw_mbox_intr(void __iomem *base)
2883{
2884	writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ, base + DAC960_LP_ODB_OFFSET);
2885}
2886
2887static inline void DAC960_LP_ack_intr(void __iomem *base)
2888{
2889	writeb(DAC960_LP_ODB_HWMBOX_ACK_IRQ | DAC960_LP_ODB_MMBOX_ACK_IRQ,
2890	       base + DAC960_LP_ODB_OFFSET);
2891}
2892
2893static inline bool DAC960_LP_hw_mbox_status_available(void __iomem *base)
2894{
2895	u8 val;
2896
2897	val = readb(base + DAC960_LP_ODB_OFFSET);
2898	return val & DAC960_LP_ODB_HWMBOX_STS_AVAIL;
2899}
2900
2901static inline void DAC960_LP_enable_intr(void __iomem *base)
2902{
2903	writeb(~DAC960_LP_IRQMASK_DISABLE_IRQ, base + DAC960_LP_IRQMASK_OFFSET);
2904}
2905
2906static inline void DAC960_LP_disable_intr(void __iomem *base)
2907{
2908	writeb(0xFF, base + DAC960_LP_IRQMASK_OFFSET);
2909}
2910
2911static inline void DAC960_LP_write_cmd_mbox(union myrs_cmd_mbox *mem_mbox,
2912		union myrs_cmd_mbox *mbox)
2913{
2914	memcpy(&mem_mbox->words[1], &mbox->words[1],
2915	       sizeof(union myrs_cmd_mbox) - sizeof(unsigned int));
2916	/* Barrier to avoid reordering */
2917	wmb();
2918	mem_mbox->words[0] = mbox->words[0];
2919	/* Barrier to force PCI access */
2920	mb();
2921}
2922
2923static inline void DAC960_LP_write_hw_mbox(void __iomem *base,
2924		dma_addr_t cmd_mbox_addr)
2925{
2926	dma_addr_writeql(cmd_mbox_addr, base + DAC960_LP_CMDMBX_OFFSET);
2927}
2928
2929static inline unsigned char DAC960_LP_read_cmd_status(void __iomem *base)
2930{
2931	return readw(base + DAC960_LP_CMDSTS_OFFSET + 2);
2932}
2933
2934static inline bool
2935DAC960_LP_read_error_status(void __iomem *base, unsigned char *error,
2936		unsigned char *param0, unsigned char *param1)
2937{
2938	u8 val;
2939
2940	val = readb(base + DAC960_LP_ERRSTS_OFFSET);
2941	if (!(val & DAC960_LP_ERRSTS_PENDING))
2942		return false;
2943	val &= ~DAC960_LP_ERRSTS_PENDING;
2944	*error = val;
2945	*param0 = readb(base + DAC960_LP_CMDMBX_OFFSET + 0);
2946	*param1 = readb(base + DAC960_LP_CMDMBX_OFFSET + 1);
2947	writeb(0xFF, base + DAC960_LP_ERRSTS_OFFSET);
2948	return true;
2949}
2950
2951static inline unsigned char
2952DAC960_LP_mbox_init(void __iomem *base, dma_addr_t mbox_addr)
2953{
2954	unsigned char status;
2955
2956	while (DAC960_LP_hw_mbox_is_full(base))
2957		udelay(1);
2958	DAC960_LP_write_hw_mbox(base, mbox_addr);
2959	DAC960_LP_hw_mbox_new_cmd(base);
2960	while (!DAC960_LP_hw_mbox_status_available(base))
2961		udelay(1);
2962	status = DAC960_LP_read_cmd_status(base);
2963	DAC960_LP_ack_hw_mbox_intr(base);
2964	DAC960_LP_ack_hw_mbox_status(base);
2965
2966	return status;
2967}
2968
2969static int DAC960_LP_hw_init(struct pci_dev *pdev,
2970		struct myrs_hba *cs, void __iomem *base)
2971{
2972	int timeout = 0;
2973	unsigned char status, parm0, parm1;
2974
2975	DAC960_LP_disable_intr(base);
2976	DAC960_LP_ack_hw_mbox_status(base);
2977	udelay(1000);
2978	while (DAC960_LP_init_in_progress(base) &&
2979	       timeout < MYRS_MAILBOX_TIMEOUT) {
2980		if (DAC960_LP_read_error_status(base, &status,
2981					      &parm0, &parm1) &&
2982		    myrs_err_status(cs, status, parm0, parm1))
2983			return -EIO;
2984		udelay(10);
2985		timeout++;
2986	}
2987	if (timeout == MYRS_MAILBOX_TIMEOUT) {
2988		dev_err(&pdev->dev,
2989			"Timeout waiting for Controller Initialisation\n");
2990		return -ETIMEDOUT;
2991	}
2992	if (!myrs_enable_mmio_mbox(cs, DAC960_LP_mbox_init)) {
2993		dev_err(&pdev->dev,
2994			"Unable to Enable Memory Mailbox Interface\n");
2995		DAC960_LP_reset_ctrl(base);
2996		return -ENODEV;
2997	}
2998	DAC960_LP_enable_intr(base);
2999	cs->write_cmd_mbox = DAC960_LP_write_cmd_mbox;
3000	cs->get_cmd_mbox = DAC960_LP_mem_mbox_new_cmd;
3001	cs->disable_intr = DAC960_LP_disable_intr;
3002	cs->reset = DAC960_LP_reset_ctrl;
3003
3004	return 0;
3005}
3006
3007static irqreturn_t DAC960_LP_intr_handler(int irq, void *arg)
3008{
3009	struct myrs_hba *cs = arg;
3010	void __iomem *base = cs->io_base;
3011	struct myrs_stat_mbox *next_stat_mbox;
3012	unsigned long flags;
3013
3014	spin_lock_irqsave(&cs->queue_lock, flags);
3015	DAC960_LP_ack_intr(base);
3016	next_stat_mbox = cs->next_stat_mbox;
3017	while (next_stat_mbox->id > 0) {
3018		unsigned short id = next_stat_mbox->id;
3019		struct scsi_cmnd *scmd = NULL;
3020		struct myrs_cmdblk *cmd_blk = NULL;
3021
3022		if (id == MYRS_DCMD_TAG)
3023			cmd_blk = &cs->dcmd_blk;
3024		else if (id == MYRS_MCMD_TAG)
3025			cmd_blk = &cs->mcmd_blk;
3026		else {
3027			scmd = scsi_host_find_tag(cs->host, id - 3);
3028			if (scmd)
3029				cmd_blk = scsi_cmd_priv(scmd);
3030		}
3031		if (cmd_blk) {
3032			cmd_blk->status = next_stat_mbox->status;
3033			cmd_blk->sense_len = next_stat_mbox->sense_len;
3034			cmd_blk->residual = next_stat_mbox->residual;
3035		} else {
3036			dev_err(&cs->pdev->dev,
3037				"Unhandled command completion %d\n", id);
		}
3038
3039		memset(next_stat_mbox, 0, sizeof(struct myrs_stat_mbox));
3040		if (++next_stat_mbox > cs->last_stat_mbox)
3041			next_stat_mbox = cs->first_stat_mbox;
3042
3043		if (cmd_blk) {
3044			if (id < 3)
3045				myrs_handle_cmdblk(cs, cmd_blk);
3046			else
3047				myrs_handle_scsi(cs, cmd_blk, scmd);
3048		}
3049	}
3050	cs->next_stat_mbox = next_stat_mbox;
3051	spin_unlock_irqrestore(&cs->queue_lock, flags);
3052	return IRQ_HANDLED;
3053}
3054
3055static struct myrs_privdata DAC960_LP_privdata = {
3056	.hw_init =		DAC960_LP_hw_init,
3057	.irq_handler =		DAC960_LP_intr_handler,
3058	.mmio_size =		DAC960_LP_mmio_size,
3059};
3060
3061/*
3062 * Module functions
3063 */
3064static int
3065myrs_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3066{
3067	struct myrs_hba *cs;
3068	int ret;
3069
3070	cs = myrs_detect(dev, entry);
3071	if (!cs)
3072		return -ENODEV;
3073
3074	ret = myrs_get_config(cs);
3075	if (ret < 0) {
3076		myrs_cleanup(cs);
3077		return ret;
3078	}
3079
3080	if (!myrs_create_mempools(dev, cs)) {
3081		ret = -ENOMEM;
3082		goto failed;
3083	}
3084
3085	ret = scsi_add_host(cs->host, &dev->dev);
3086	if (ret) {
3087		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3088		myrs_destroy_mempools(cs);
3089		goto failed;
3090	}
3091	scsi_scan_host(cs->host);
3092	return 0;
3093failed:
3094	myrs_cleanup(cs);
3095	return ret;
3096}
3097
3099static void myrs_remove(struct pci_dev *pdev)
3100{
3101	struct myrs_hba *cs = pci_get_drvdata(pdev);
3102
3103	if (cs == NULL)
3104		return;
3105
3106	shost_printk(KERN_NOTICE, cs->host, "Flushing Cache...\n");
3107	myrs_flush_cache(cs);
3108	myrs_destroy_mempools(cs);
3109	myrs_cleanup(cs);
3110}
3111
3113static const struct pci_device_id myrs_id_table[] = {
3114	{
3115		PCI_DEVICE_SUB(PCI_VENDOR_ID_MYLEX,
3116			       PCI_DEVICE_ID_MYLEX_DAC960_GEM,
3117			       PCI_VENDOR_ID_MYLEX, PCI_ANY_ID),
3118		.driver_data	= (unsigned long) &DAC960_GEM_privdata,
3119	},
3120	{
3121		PCI_DEVICE_DATA(MYLEX, DAC960_BA, &DAC960_BA_privdata),
3122	},
3123	{
3124		PCI_DEVICE_DATA(MYLEX, DAC960_LP, &DAC960_LP_privdata),
3125	},
3126	{0, },
3127};
3128
3129MODULE_DEVICE_TABLE(pci, myrs_id_table);
3130
3131static struct pci_driver myrs_pci_driver = {
3132	.name		= "myrs",
3133	.id_table	= myrs_id_table,
3134	.probe		= myrs_probe,
3135	.remove		= myrs_remove,
3136};
3137
3138static int __init myrs_init_module(void)
3139{
3140	int ret;
3141
3142	myrs_raid_template = raid_class_attach(&myrs_raid_functions);
3143	if (!myrs_raid_template)
3144		return -ENODEV;
3145
3146	ret = pci_register_driver(&myrs_pci_driver);
3147	if (ret)
3148		raid_class_release(myrs_raid_template);
3149
3150	return ret;
3151}
3152
3153static void __exit myrs_cleanup_module(void)
3154{
3155	pci_unregister_driver(&myrs_pci_driver);
3156	raid_class_release(myrs_raid_template);
3157}
3158
3159module_init(myrs_init_module);
3160module_exit(myrs_cleanup_module);
3161
3162MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (SCSI Interface)");
3163MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3164MODULE_LICENSE("GPL");