   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Linux Driver for Mylex DAC960/AcceleRAID/eXtremeRAID PCI RAID Controllers
   4 *
   5 * Copyright 2017 Hannes Reinecke, SUSE Linux GmbH <hare@suse.com>
   6 *
   7 * Based on the original DAC960 driver,
   8 * Copyright 1998-2001 by Leonard N. Zubkoff <lnz@dandelion.com>
   9 * Portions Copyright 2002 by Mylex (An IBM Business Unit)
  10 *
  11 */
  12
  13#include <linux/module.h>
  14#include <linux/types.h>
  15#include <linux/delay.h>
  16#include <linux/interrupt.h>
  17#include <linux/pci.h>
  18#include <linux/raid_class.h>
  19#include <asm/unaligned.h>
  20#include <scsi/scsi.h>
  21#include <scsi/scsi_host.h>
  22#include <scsi/scsi_device.h>
  23#include <scsi/scsi_cmnd.h>
  24#include <scsi/scsi_tcq.h>
  25#include "myrb.h"
  26
  27static struct raid_template *myrb_raid_template;
  28
  29static void myrb_monitor(struct work_struct *work);
  30static inline void myrb_translate_devstate(void *DeviceState);
  31
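     /*
      * Note: logical drives are presented on a separate, highest usable
      * channel of the SCSI host; all lower channels address physical
      * devices.
      */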
  32static inline int myrb_logical_channel(struct Scsi_Host *shost)
  33{
  34	return shost->max_channel - 1;
  35}
  36
  37static struct myrb_devstate_name_entry {
  38	enum myrb_devstate state;
  39	const char *name;
  40} myrb_devstate_name_list[] = {
  41	{ MYRB_DEVICE_DEAD, "Dead" },
  42	{ MYRB_DEVICE_WO, "WriteOnly" },
  43	{ MYRB_DEVICE_ONLINE, "Online" },
  44	{ MYRB_DEVICE_CRITICAL, "Critical" },
  45	{ MYRB_DEVICE_STANDBY, "Standby" },
  46	{ MYRB_DEVICE_OFFLINE, "Offline" },
  47};
  48
  49static const char *myrb_devstate_name(enum myrb_devstate state)
  50{
  51	struct myrb_devstate_name_entry *entry = myrb_devstate_name_list;
  52	int i;
  53
  54	for (i = 0; i < ARRAY_SIZE(myrb_devstate_name_list); i++) {
  55		if (entry[i].state == state)
  56			return entry[i].name;
  57	}
  58	return "Unknown";
  59}
  60
  61static struct myrb_raidlevel_name_entry {
  62	enum myrb_raidlevel level;
  63	const char *name;
  64} myrb_raidlevel_name_list[] = {
  65	{ MYRB_RAID_LEVEL0, "RAID0" },
  66	{ MYRB_RAID_LEVEL1, "RAID1" },
  67	{ MYRB_RAID_LEVEL3, "RAID3" },
  68	{ MYRB_RAID_LEVEL5, "RAID5" },
  69	{ MYRB_RAID_LEVEL6, "RAID6" },
  70	{ MYRB_RAID_JBOD, "JBOD" },
  71};
  72
  73static const char *myrb_raidlevel_name(enum myrb_raidlevel level)
  74{
  75	struct myrb_raidlevel_name_entry *entry = myrb_raidlevel_name_list;
  76	int i;
  77
  78	for (i = 0; i < ARRAY_SIZE(myrb_raidlevel_name_list); i++) {
  79		if (entry[i].level == level)
  80			return entry[i].name;
  81	}
  82	return NULL;
  83}
  84
  85/**
   86 * myrb_create_mempools - allocates auxiliary data structures
     * @pdev: PCI device
     * @cb: pointer to the hba structure
  87 *
  88 * Return: true on success, false otherwise.
  89 */
  90static bool myrb_create_mempools(struct pci_dev *pdev, struct myrb_hba *cb)
  91{
  92	size_t elem_size, elem_align;
  93
  94	elem_align = sizeof(struct myrb_sge);
  95	elem_size = cb->host->sg_tablesize * elem_align;
  96	cb->sg_pool = dma_pool_create("myrb_sg", &pdev->dev,
  97				      elem_size, elem_align, 0);
  98	if (cb->sg_pool == NULL) {
  99		shost_printk(KERN_ERR, cb->host,
 100			     "Failed to allocate SG pool\n");
 101		return false;
 102	}
 103
 104	cb->dcdb_pool = dma_pool_create("myrb_dcdb", &pdev->dev,
 105				       sizeof(struct myrb_dcdb),
 106				       sizeof(unsigned int), 0);
 107	if (!cb->dcdb_pool) {
 108		dma_pool_destroy(cb->sg_pool);
 109		cb->sg_pool = NULL;
 110		shost_printk(KERN_ERR, cb->host,
 111			     "Failed to allocate DCDB pool\n");
 112		return false;
 113	}
 114
 115	snprintf(cb->work_q_name, sizeof(cb->work_q_name),
 116		 "myrb_wq_%d", cb->host->host_no);
 117	cb->work_q = create_singlethread_workqueue(cb->work_q_name);
 118	if (!cb->work_q) {
 119		dma_pool_destroy(cb->dcdb_pool);
 120		cb->dcdb_pool = NULL;
 121		dma_pool_destroy(cb->sg_pool);
 122		cb->sg_pool = NULL;
 123		shost_printk(KERN_ERR, cb->host,
 124			     "Failed to create workqueue\n");
 125		return false;
 126	}
 127
 128	/*
  129	 * Initialize the monitoring work and schedule its first run.
 130	 */
 131	INIT_DELAYED_WORK(&cb->monitor_work, myrb_monitor);
 132	queue_delayed_work(cb->work_q, &cb->monitor_work, 1);
 133
 134	return true;
 135}
 136
 137/**
  138 * myrb_destroy_mempools - tears down the memory pools for the controller
     * @cb: pointer to the hba structure
 139 */
 140static void myrb_destroy_mempools(struct myrb_hba *cb)
 141{
 142	cancel_delayed_work_sync(&cb->monitor_work);
 143	destroy_workqueue(cb->work_q);
 144
 145	dma_pool_destroy(cb->sg_pool);
 146	dma_pool_destroy(cb->dcdb_pool);
 147}
 148
 149/**
  150 * myrb_reset_cmd - reset command block
     * @cmd_blk: pointer to the command block to reset
 151 */
 152static inline void myrb_reset_cmd(struct myrb_cmdblk *cmd_blk)
 153{
 154	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 155
 156	memset(mbox, 0, sizeof(union myrb_cmd_mbox));
 157	cmd_blk->status = 0;
 158}
 159
 160/**
  161 * myrb_qcmd - queues command block for execution
     * @cb: pointer to the hba structure
     * @cmd_blk: command block to queue
     *
     * Called with cb->queue_lock held.
 162 */
 163static void myrb_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
 164{
 165	void __iomem *base = cb->io_base;
 166	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 167	union myrb_cmd_mbox *next_mbox = cb->next_cmd_mbox;
 168
 169	cb->write_cmd_mbox(next_mbox, mbox);
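     	/*
     	 * Ring the hardware doorbell only when one of the two most
     	 * recently submitted mailboxes has already been consumed
     	 * (words[0] == 0); otherwise the controller is still scanning
     	 * the mailbox ring and will pick up this command on its own.
     	 */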
 170	if (cb->prev_cmd_mbox1->words[0] == 0 ||
 171	    cb->prev_cmd_mbox2->words[0] == 0)
 172		cb->get_cmd_mbox(base);
 173	cb->prev_cmd_mbox2 = cb->prev_cmd_mbox1;
 174	cb->prev_cmd_mbox1 = next_mbox;
 175	if (++next_mbox > cb->last_cmd_mbox)
 176		next_mbox = cb->first_cmd_mbox;
 177	cb->next_cmd_mbox = next_mbox;
 178}
 179
 180/**
  181 * myrb_exec_cmd - executes command block and waits for completion.
     * @cb: pointer to the hba structure
     * @cmd_blk: command block to execute
 182 *
 183 * Return: command status
 184 */
 185static unsigned short myrb_exec_cmd(struct myrb_hba *cb,
 186		struct myrb_cmdblk *cmd_blk)
 187{
 188	DECLARE_COMPLETION_ONSTACK(cmpl);
 189	unsigned long flags;
 190
 191	cmd_blk->completion = &cmpl;
 192
 193	spin_lock_irqsave(&cb->queue_lock, flags);
 194	cb->qcmd(cb, cmd_blk);
 195	spin_unlock_irqrestore(&cb->queue_lock, flags);
 196
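     	/* The sleeping wait below makes this unusable from IRQ context. */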
 197	WARN_ON(in_interrupt());
 198	wait_for_completion(&cmpl);
 199	return cmd_blk->status;
 200}
 201
 202/**
  203 * myrb_exec_type3 - executes a type 3 command and waits for completion.
     * @cb: pointer to the hba structure
     * @op: command opcode
     * @addr: DMA address of the command data buffer
 204 *
 205 * Return: command status
 206 */
 207static unsigned short myrb_exec_type3(struct myrb_hba *cb,
 208		enum myrb_cmd_opcode op, dma_addr_t addr)
 209{
 210	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
 211	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 212	unsigned short status;
 213
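     	/* dcmd_blk is shared, so dcmd_mutex serializes all direct commands. */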
 214	mutex_lock(&cb->dcmd_mutex);
 215	myrb_reset_cmd(cmd_blk);
 216	mbox->type3.id = MYRB_DCMD_TAG;
 217	mbox->type3.opcode = op;
 218	mbox->type3.addr = addr;
 219	status = myrb_exec_cmd(cb, cmd_blk);
 220	mutex_unlock(&cb->dcmd_mutex);
 221	return status;
 222}
 223
 224/**
  225 * myrb_exec_type3D - executes a type 3D command and waits for completion.
     * @cb: pointer to the hba structure
     * @op: command opcode
     * @sdev: SCSI device providing channel and target
     * @pdev_info: buffer for the physical device state
 226 *
 227 * Return: command status
 228 */
 229static unsigned short myrb_exec_type3D(struct myrb_hba *cb,
 230		enum myrb_cmd_opcode op, struct scsi_device *sdev,
 231		struct myrb_pdev_state *pdev_info)
 232{
 233	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
 234	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 235	unsigned short status;
 236	dma_addr_t pdev_info_addr;
 237
 238	pdev_info_addr = dma_map_single(&cb->pdev->dev, pdev_info,
 239					sizeof(struct myrb_pdev_state),
 240					DMA_FROM_DEVICE);
 241	if (dma_mapping_error(&cb->pdev->dev, pdev_info_addr))
 242		return MYRB_STATUS_SUBSYS_FAILED;
 243
 244	mutex_lock(&cb->dcmd_mutex);
 245	myrb_reset_cmd(cmd_blk);
 246	mbox->type3D.id = MYRB_DCMD_TAG;
 247	mbox->type3D.opcode = op;
 248	mbox->type3D.channel = sdev->channel;
 249	mbox->type3D.target = sdev->id;
 250	mbox->type3D.addr = pdev_info_addr;
 251	status = myrb_exec_cmd(cb, cmd_blk);
 252	mutex_unlock(&cb->dcmd_mutex);
 253	dma_unmap_single(&cb->pdev->dev, pdev_info_addr,
 254			 sizeof(struct myrb_pdev_state), DMA_FROM_DEVICE);
 255	if (status == MYRB_STATUS_SUCCESS &&
 256	    mbox->type3D.opcode == MYRB_CMD_GET_DEVICE_STATE_OLD)
 257		myrb_translate_devstate(pdev_info);
 258
 259	return status;
 260}
 261
 262static char *myrb_event_msg[] = {
 263	"killed because write recovery failed",
 264	"killed because of SCSI bus reset failure",
 265	"killed because of double check condition",
 266	"killed because it was removed",
 267	"killed because of gross error on SCSI chip",
 268	"killed because of bad tag returned from drive",
 269	"killed because of timeout on SCSI command",
 270	"killed because of reset SCSI command issued from system",
 271	"killed because busy or parity error count exceeded limit",
 272	"killed because of 'kill drive' command from system",
 273	"killed because of selection timeout",
 274	"killed due to SCSI phase sequence error",
 275	"killed due to unknown status",
 276};
 277
 278/**
 279 * myrb_get_event - get event log from HBA
 280 * @cb: pointer to the hba structure
 281 * @event: number of the event
 282 *
  283 * Executes a type 3E command and logs the event message.
 284 */
 285static void myrb_get_event(struct myrb_hba *cb, unsigned int event)
 286{
 287	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
 288	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 289	struct myrb_log_entry *ev_buf;
 290	dma_addr_t ev_addr;
 291	unsigned short status;
 292
 293	ev_buf = dma_alloc_coherent(&cb->pdev->dev,
 294				    sizeof(struct myrb_log_entry),
 295				    &ev_addr, GFP_KERNEL);
 296	if (!ev_buf)
 297		return;
 298
 299	myrb_reset_cmd(cmd_blk);
 300	mbox->type3E.id = MYRB_MCMD_TAG;
 301	mbox->type3E.opcode = MYRB_CMD_EVENT_LOG_OPERATION;
 302	mbox->type3E.optype = DAC960_V1_GetEventLogEntry;
 303	mbox->type3E.opqual = 1;
 304	mbox->type3E.ev_seq = event;
 305	mbox->type3E.addr = ev_addr;
 306	status = myrb_exec_cmd(cb, cmd_blk);
 307	if (status != MYRB_STATUS_SUCCESS)
 308		shost_printk(KERN_INFO, cb->host,
 309			     "Failed to get event log %d, status %04x\n",
 310			     event, status);
 311
 312	else if (ev_buf->seq_num == event) {
 313		struct scsi_sense_hdr sshdr;
 314
 315		memset(&sshdr, 0, sizeof(sshdr));
 316		scsi_normalize_sense(ev_buf->sense, 32, &sshdr);
 317
 318		if (sshdr.sense_key == VENDOR_SPECIFIC &&
 319		    sshdr.asc == 0x80 &&
 320		    sshdr.ascq < ARRAY_SIZE(myrb_event_msg))
 321			shost_printk(KERN_CRIT, cb->host,
 322				     "Physical drive %d:%d: %s\n",
 323				     ev_buf->channel, ev_buf->target,
 324				     myrb_event_msg[sshdr.ascq]);
 325		else
 326			shost_printk(KERN_CRIT, cb->host,
 327				     "Physical drive %d:%d: Sense: %X/%02X/%02X\n",
 328				     ev_buf->channel, ev_buf->target,
 329				     sshdr.sense_key, sshdr.asc, sshdr.ascq);
 330	}
 331
 332	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_log_entry),
 333			  ev_buf, ev_addr);
 334}
 335
 336/**
  337 * myrb_get_errtable - retrieves the error table from the controller
     * @cb: pointer to the hba structure
 338 *
 339 * Executes a type 3 command and logs the error table from the controller.
 340 */
 341static void myrb_get_errtable(struct myrb_hba *cb)
 342{
 343	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
 344	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 345	unsigned short status;
 346	struct myrb_error_entry old_table[MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS];
 347
 348	memcpy(&old_table, cb->err_table, sizeof(old_table));
 349
 350	myrb_reset_cmd(cmd_blk);
 351	mbox->type3.id = MYRB_MCMD_TAG;
 352	mbox->type3.opcode = MYRB_CMD_GET_ERROR_TABLE;
 353	mbox->type3.addr = cb->err_table_addr;
 354	status = myrb_exec_cmd(cb, cmd_blk);
 355	if (status == MYRB_STATUS_SUCCESS) {
 356		struct myrb_error_entry *table = cb->err_table;
 357		struct myrb_error_entry *new, *old;
 358		size_t err_table_offset;
 359		struct scsi_device *sdev;
 360
 361		shost_for_each_device(sdev, cb->host) {
 362			if (sdev->channel >= myrb_logical_channel(cb->host))
 363				continue;
 364			err_table_offset = sdev->channel * MYRB_MAX_TARGETS
 365				+ sdev->id;
 366			new = table + err_table_offset;
 367			old = &old_table[err_table_offset];
 368			if (new->parity_err == old->parity_err &&
 369			    new->soft_err == old->soft_err &&
 370			    new->hard_err == old->hard_err &&
 371			    new->misc_err == old->misc_err)
 372				continue;
 373			sdev_printk(KERN_CRIT, sdev,
 374				    "Errors: Parity = %d, Soft = %d, Hard = %d, Misc = %d\n",
 375				    new->parity_err, new->soft_err,
 376				    new->hard_err, new->misc_err);
 377		}
 378	}
 379}
 380
 381/**
  382 * myrb_get_ldev_info - retrieves the logical device table from the controller
     * @cb: pointer to the hba structure
 383 *
 384 * Executes a type 3 command and updates the logical device table.
 385 *
 386 * Return: command status
 387 */
 388static unsigned short myrb_get_ldev_info(struct myrb_hba *cb)
 389{
 390	unsigned short status;
 391	int ldev_num, ldev_cnt = cb->enquiry->ldev_count;
 392	struct Scsi_Host *shost = cb->host;
 393
 394	status = myrb_exec_type3(cb, MYRB_CMD_GET_LDEV_INFO,
 395				 cb->ldev_info_addr);
 396	if (status != MYRB_STATUS_SUCCESS)
 397		return status;
 398
 399	for (ldev_num = 0; ldev_num < ldev_cnt; ldev_num++) {
 400		struct myrb_ldev_info *old = NULL;
 401		struct myrb_ldev_info *new = cb->ldev_info_buf + ldev_num;
 402		struct scsi_device *sdev;
 403
 404		sdev = scsi_device_lookup(shost, myrb_logical_channel(shost),
 405					  ldev_num, 0);
 406		if (!sdev) {
 407			if (new->state == MYRB_DEVICE_OFFLINE)
 408				continue;
 409			shost_printk(KERN_INFO, shost,
 410				     "Adding Logical Drive %d in state %s\n",
 411				     ldev_num, myrb_devstate_name(new->state));
 412			scsi_add_device(shost, myrb_logical_channel(shost),
 413					ldev_num, 0);
 414			continue;
 415		}
 416		old = sdev->hostdata;
 417		if (new->state != old->state)
 418			shost_printk(KERN_INFO, shost,
 419				     "Logical Drive %d is now %s\n",
 420				     ldev_num, myrb_devstate_name(new->state));
 421		if (new->wb_enabled != old->wb_enabled)
 422			sdev_printk(KERN_INFO, sdev,
 423				    "Logical Drive is now WRITE %s\n",
 424				    (new->wb_enabled ? "BACK" : "THRU"));
 425		memcpy(old, new, sizeof(*new));
 426		scsi_device_put(sdev);
 427	}
 428	return status;
 429}
 430
 431/**
  432 * myrb_get_rbld_progress - get rebuild progress information
     * @cb: pointer to the hba structure
     * @rbld: buffer for the rebuild progress, may be NULL
 433 *
 434 * Executes a type 3 command and returns the rebuild progress
 435 * information.
 436 *
 437 * Return: command status
 438 */
 439static unsigned short myrb_get_rbld_progress(struct myrb_hba *cb,
 440		struct myrb_rbld_progress *rbld)
 441{
 442	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
 443	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 444	struct myrb_rbld_progress *rbld_buf;
 445	dma_addr_t rbld_addr;
 446	unsigned short status;
 447
 448	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
 449				      sizeof(struct myrb_rbld_progress),
 450				      &rbld_addr, GFP_KERNEL);
 451	if (!rbld_buf)
 452		return MYRB_STATUS_RBLD_NOT_CHECKED;
 453
 454	myrb_reset_cmd(cmd_blk);
 455	mbox->type3.id = MYRB_MCMD_TAG;
 456	mbox->type3.opcode = MYRB_CMD_GET_REBUILD_PROGRESS;
 457	mbox->type3.addr = rbld_addr;
 458	status = myrb_exec_cmd(cb, cmd_blk);
 459	if (rbld)
 460		memcpy(rbld, rbld_buf, sizeof(struct myrb_rbld_progress));
 461	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
 462			  rbld_buf, rbld_addr);
 463	return status;
 464}
 465
 466/**
  467 * myrb_update_rbld_progress - updates the rebuild status
     * @cb: pointer to the hba structure
 468 *
 469 * Updates the rebuild status for the attached logical devices.
 470 *
 471 */
 472static void myrb_update_rbld_progress(struct myrb_hba *cb)
 473{
 474	struct myrb_rbld_progress rbld_buf;
 475	unsigned short status;
 476
 477	status = myrb_get_rbld_progress(cb, &rbld_buf);
 478	if (status == MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS &&
 479	    cb->last_rbld_status == MYRB_STATUS_SUCCESS)
 480		status = MYRB_STATUS_RBLD_SUCCESS;
 481	if (status != MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS) {
 482		unsigned int blocks_done =
 483			rbld_buf.ldev_size - rbld_buf.blocks_left;
 484		struct scsi_device *sdev;
 485
 486		sdev = scsi_device_lookup(cb->host,
 487					  myrb_logical_channel(cb->host),
 488					  rbld_buf.ldev_num, 0);
 489		if (!sdev)
 490			return;
 491
 492		switch (status) {
 493		case MYRB_STATUS_SUCCESS:
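     			/*
     			 * Scale both block counts down by 128 so the
     			 * multiplication by 100 cannot overflow 32 bits.
     			 */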
 494			sdev_printk(KERN_INFO, sdev,
 495				    "Rebuild in Progress, %d%% completed\n",
 496				    (100 * (blocks_done >> 7))
 497				    / (rbld_buf.ldev_size >> 7));
 498			break;
 499		case MYRB_STATUS_RBLD_FAILED_LDEV_FAILURE:
 500			sdev_printk(KERN_INFO, sdev,
 501				    "Rebuild Failed due to Logical Drive Failure\n");
 502			break;
 503		case MYRB_STATUS_RBLD_FAILED_BADBLOCKS:
 504			sdev_printk(KERN_INFO, sdev,
 505				    "Rebuild Failed due to Bad Blocks on Other Drives\n");
 506			break;
 507		case MYRB_STATUS_RBLD_FAILED_NEW_DRIVE_FAILED:
 508			sdev_printk(KERN_INFO, sdev,
 509				    "Rebuild Failed due to Failure of Drive Being Rebuilt\n");
 510			break;
 511		case MYRB_STATUS_RBLD_SUCCESS:
 512			sdev_printk(KERN_INFO, sdev,
 513				    "Rebuild Completed Successfully\n");
 514			break;
 515		case MYRB_STATUS_RBLD_SUCCESS_TERMINATED:
 516			sdev_printk(KERN_INFO, sdev,
 517				     "Rebuild Successfully Terminated\n");
 518			break;
 519		default:
 520			break;
 521		}
 522		scsi_device_put(sdev);
 523	}
 524	cb->last_rbld_status = status;
 525}
 526
 527/**
  528 * myrb_get_cc_progress - retrieve the consistency check progress
     * @cb: pointer to the hba structure
  529 *
  530 * Executes a type 3 command and fetches the rebuild / consistency check
  531 * progress.
 532 */
 533static void myrb_get_cc_progress(struct myrb_hba *cb)
 534{
 535	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
 536	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 537	struct myrb_rbld_progress *rbld_buf;
 538	dma_addr_t rbld_addr;
 539	unsigned short status;
 540
 541	rbld_buf = dma_alloc_coherent(&cb->pdev->dev,
 542				      sizeof(struct myrb_rbld_progress),
 543				      &rbld_addr, GFP_KERNEL);
 544	if (!rbld_buf) {
 545		cb->need_cc_status = true;
 546		return;
 547	}
 548	myrb_reset_cmd(cmd_blk);
 549	mbox->type3.id = MYRB_MCMD_TAG;
 550	mbox->type3.opcode = MYRB_CMD_REBUILD_STAT;
 551	mbox->type3.addr = rbld_addr;
 552	status = myrb_exec_cmd(cb, cmd_blk);
 553	if (status == MYRB_STATUS_SUCCESS) {
 554		unsigned int ldev_num = rbld_buf->ldev_num;
 555		unsigned int ldev_size = rbld_buf->ldev_size;
 556		unsigned int blocks_done =
 557			ldev_size - rbld_buf->blocks_left;
 558		struct scsi_device *sdev;
 559
 560		sdev = scsi_device_lookup(cb->host,
 561					  myrb_logical_channel(cb->host),
 562					  ldev_num, 0);
 563		if (sdev) {
 564			sdev_printk(KERN_INFO, sdev,
 565				    "Consistency Check in Progress: %d%% completed\n",
 566				    (100 * (blocks_done >> 7))
 567				    / (ldev_size >> 7));
 568			scsi_device_put(sdev);
 569		}
 570	}
 571	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_rbld_progress),
 572			  rbld_buf, rbld_addr);
 573}
 574
 575/**
  576 * myrb_bgi_control - updates background initialisation status
     * @cb: pointer to the hba structure
 577 *
 578 * Executes a type 3B command and updates the background initialisation status
 579 */
 580static void myrb_bgi_control(struct myrb_hba *cb)
 581{
 582	struct myrb_cmdblk *cmd_blk = &cb->mcmd_blk;
 583	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 584	struct myrb_bgi_status *bgi, *last_bgi;
 585	dma_addr_t bgi_addr;
 586	struct scsi_device *sdev = NULL;
 587	unsigned short status;
 588
 589	bgi = dma_alloc_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
 590				 &bgi_addr, GFP_KERNEL);
 591	if (!bgi) {
 592		shost_printk(KERN_ERR, cb->host,
 593			     "Failed to allocate bgi memory\n");
 594		return;
 595	}
 596	myrb_reset_cmd(cmd_blk);
 597	mbox->type3B.id = MYRB_DCMD_TAG;
 598	mbox->type3B.opcode = MYRB_CMD_BGI_CONTROL;
 599	mbox->type3B.optype = 0x20;
 600	mbox->type3B.addr = bgi_addr;
 601	status = myrb_exec_cmd(cb, cmd_blk);
 602	last_bgi = &cb->bgi_status;
 603	sdev = scsi_device_lookup(cb->host,
 604				  myrb_logical_channel(cb->host),
 605				  bgi->ldev_num, 0);
 606	switch (status) {
 607	case MYRB_STATUS_SUCCESS:
 608		switch (bgi->status) {
 609		case MYRB_BGI_INVALID:
 610			break;
 611		case MYRB_BGI_STARTED:
 612			if (!sdev)
 613				break;
 614			sdev_printk(KERN_INFO, sdev,
 615				    "Background Initialization Started\n");
 616			break;
 617		case MYRB_BGI_INPROGRESS:
 618			if (!sdev)
 619				break;
 620			if (bgi->blocks_done == last_bgi->blocks_done &&
 621			    bgi->ldev_num == last_bgi->ldev_num)
 622				break;
 623			sdev_printk(KERN_INFO, sdev,
 624				 "Background Initialization in Progress: %d%% completed\n",
 625				 (100 * (bgi->blocks_done >> 7))
 626				 / (bgi->ldev_size >> 7));
 627			break;
 628		case MYRB_BGI_SUSPENDED:
 629			if (!sdev)
 630				break;
 631			sdev_printk(KERN_INFO, sdev,
 632				    "Background Initialization Suspended\n");
 633			break;
 634		case MYRB_BGI_CANCELLED:
 635			if (!sdev)
 636				break;
 637			sdev_printk(KERN_INFO, sdev,
 638				    "Background Initialization Cancelled\n");
 639			break;
 640		}
 641		memcpy(&cb->bgi_status, bgi, sizeof(struct myrb_bgi_status));
 642		break;
 643	case MYRB_STATUS_BGI_SUCCESS:
 644		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
 645			sdev_printk(KERN_INFO, sdev,
 646				    "Background Initialization Completed Successfully\n");
 647		cb->bgi_status.status = MYRB_BGI_INVALID;
 648		break;
 649	case MYRB_STATUS_BGI_ABORTED:
 650		if (sdev && cb->bgi_status.status == MYRB_BGI_INPROGRESS)
 651			sdev_printk(KERN_INFO, sdev,
 652				    "Background Initialization Aborted\n");
 653		/* Fallthrough */
 654	case MYRB_STATUS_NO_BGI_INPROGRESS:
 655		cb->bgi_status.status = MYRB_BGI_INVALID;
 656		break;
 657	}
 658	if (sdev)
 659		scsi_device_put(sdev);
 660	dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_bgi_status),
 661			  bgi, bgi_addr);
 662}
 663
 664/**
  665 * myrb_hba_enquiry - updates the controller status
     * @cb: pointer to the hba structure
 666 *
 667 * Executes a DAC_V1_Enquiry command and updates the controller status.
 668 *
 669 * Return: command status
 670 */
 671static unsigned short myrb_hba_enquiry(struct myrb_hba *cb)
 672{
 673	struct myrb_enquiry old, *new;
 674	unsigned short status;
 675
 676	memcpy(&old, cb->enquiry, sizeof(struct myrb_enquiry));
 677
 678	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY, cb->enquiry_addr);
 679	if (status != MYRB_STATUS_SUCCESS)
 680		return status;
 681
 682	new = cb->enquiry;
 683	if (new->ldev_count > old.ldev_count) {
 684		int ldev_num = old.ldev_count - 1;
 685
 686		while (++ldev_num < new->ldev_count)
 687			shost_printk(KERN_CRIT, cb->host,
 688				     "Logical Drive %d Now Exists\n",
 689				     ldev_num);
 690	}
 691	if (new->ldev_count < old.ldev_count) {
 692		int ldev_num = new->ldev_count - 1;
 693
 694		while (++ldev_num < old.ldev_count)
 695			shost_printk(KERN_CRIT, cb->host,
 696				     "Logical Drive %d No Longer Exists\n",
 697				     ldev_num);
 698	}
 699	if (new->status.deferred != old.status.deferred)
 700		shost_printk(KERN_CRIT, cb->host,
 701			     "Deferred Write Error Flag is now %s\n",
 702			     (new->status.deferred ? "TRUE" : "FALSE"));
 703	if (new->ev_seq != old.ev_seq) {
 704		cb->new_ev_seq = new->ev_seq;
 705		cb->need_err_info = true;
 706		shost_printk(KERN_INFO, cb->host,
 707			     "Event log %d/%d (%d/%d) available\n",
 708			     cb->old_ev_seq, cb->new_ev_seq,
 709			     old.ev_seq, new->ev_seq);
 710	}
 711	if ((new->ldev_critical > 0 &&
 712	     new->ldev_critical != old.ldev_critical) ||
 713	    (new->ldev_offline > 0 &&
 714	     new->ldev_offline != old.ldev_offline) ||
 715	    (new->ldev_count != old.ldev_count)) {
 716		shost_printk(KERN_INFO, cb->host,
 717			     "Logical drive count changed (%d/%d/%d)\n",
 718			     new->ldev_critical,
 719			     new->ldev_offline,
 720			     new->ldev_count);
 721		cb->need_ldev_info = true;
 722	}
 723	if (new->pdev_dead > 0 ||
 724	    new->pdev_dead != old.pdev_dead ||
 725	    time_after_eq(jiffies, cb->secondary_monitor_time
 726			  + MYRB_SECONDARY_MONITOR_INTERVAL)) {
 727		cb->need_bgi_status = cb->bgi_status_supported;
 728		cb->secondary_monitor_time = jiffies;
 729	}
 730	if (new->rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
 731	    new->rbld == MYRB_BG_RBLD_IN_PROGRESS ||
 732	    old.rbld == MYRB_STDBY_RBLD_IN_PROGRESS ||
 733	    old.rbld == MYRB_BG_RBLD_IN_PROGRESS) {
 734		cb->need_rbld = true;
 735		cb->rbld_first = (new->ldev_critical < old.ldev_critical);
 736	}
 737	if (old.rbld == MYRB_BG_CHECK_IN_PROGRESS)
 738		switch (new->rbld) {
 739		case MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS:
 740			shost_printk(KERN_INFO, cb->host,
 741				     "Consistency Check Completed Successfully\n");
 742			break;
 743		case MYRB_STDBY_RBLD_IN_PROGRESS:
 744		case MYRB_BG_RBLD_IN_PROGRESS:
 745			break;
 746		case MYRB_BG_CHECK_IN_PROGRESS:
 747			cb->need_cc_status = true;
 748			break;
 749		case MYRB_STDBY_RBLD_COMPLETED_WITH_ERROR:
 750			shost_printk(KERN_INFO, cb->host,
 751				     "Consistency Check Completed with Error\n");
 752			break;
 753		case MYRB_BG_RBLD_OR_CHECK_FAILED_DRIVE_FAILED:
 754			shost_printk(KERN_INFO, cb->host,
 755				     "Consistency Check Failed - Physical Device Failed\n");
 756			break;
 757		case MYRB_BG_RBLD_OR_CHECK_FAILED_LDEV_FAILED:
 758			shost_printk(KERN_INFO, cb->host,
 759				     "Consistency Check Failed - Logical Drive Failed\n");
 760			break;
 761		case MYRB_BG_RBLD_OR_CHECK_FAILED_OTHER:
 762			shost_printk(KERN_INFO, cb->host,
 763				     "Consistency Check Failed - Other Causes\n");
 764			break;
 765		case MYRB_BG_RBLD_OR_CHECK_SUCCESS_TERMINATED:
 766			shost_printk(KERN_INFO, cb->host,
 767				     "Consistency Check Successfully Terminated\n");
 768			break;
 769		}
 770	else if (new->rbld == MYRB_BG_CHECK_IN_PROGRESS)
 771		cb->need_cc_status = true;
 772
 773	return MYRB_STATUS_SUCCESS;
 774}
 775
 776/**
  777 * myrb_set_pdev_state - sets the device state for a physical device
     * @cb: pointer to the hba structure
     * @sdev: device whose state is to be set
     * @state: new device state
 778 *
 779 * Return: command status
 780 */
 781static unsigned short myrb_set_pdev_state(struct myrb_hba *cb,
 782		struct scsi_device *sdev, enum myrb_devstate state)
 783{
 784	struct myrb_cmdblk *cmd_blk = &cb->dcmd_blk;
 785	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
 786	unsigned short status;
 787
 788	mutex_lock(&cb->dcmd_mutex);
 789	mbox->type3D.opcode = MYRB_CMD_START_DEVICE;
 790	mbox->type3D.id = MYRB_DCMD_TAG;
 791	mbox->type3D.channel = sdev->channel;
 792	mbox->type3D.target = sdev->id;
 793	mbox->type3D.state = state & 0x1F;
 794	status = myrb_exec_cmd(cb, cmd_blk);
 795	mutex_unlock(&cb->dcmd_mutex);
 796
 797	return status;
 798}
 799
 800/**
  801 * myrb_enable_mmio - enables the Memory Mailbox Interface
     * @cb: pointer to the hba structure
     * @mmio_init_fn: controller-specific mailbox initialization routine,
     *	NULL for PD and P controllers
 802 *
 803 * PD and P controller types have no memory mailbox, but still need the
 804 * other dma mapped memory.
 805 *
 806 * Return: true on success, false otherwise.
 807 */
 808static bool myrb_enable_mmio(struct myrb_hba *cb, mbox_mmio_init_t mmio_init_fn)
 809{
 810	void __iomem *base = cb->io_base;
 811	struct pci_dev *pdev = cb->pdev;
 812	size_t err_table_size;
 813	size_t ldev_info_size;
 814	union myrb_cmd_mbox *cmd_mbox_mem;
 815	struct myrb_stat_mbox *stat_mbox_mem;
 816	union myrb_cmd_mbox mbox;
 817	unsigned short status;
 818
 819	memset(&mbox, 0, sizeof(union myrb_cmd_mbox));
 820
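     	/* Only 32-bit DMA addressing is used by this controller family. */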
 821	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
 822		dev_err(&pdev->dev, "DMA mask out of range\n");
 823		return false;
 824	}
 825
 826	cb->enquiry = dma_alloc_coherent(&pdev->dev,
 827					 sizeof(struct myrb_enquiry),
 828					 &cb->enquiry_addr, GFP_KERNEL);
 829	if (!cb->enquiry)
 830		return false;
 831
 832	err_table_size = sizeof(struct myrb_error_entry) *
 833		MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
 834	cb->err_table = dma_alloc_coherent(&pdev->dev, err_table_size,
 835					   &cb->err_table_addr, GFP_KERNEL);
 836	if (!cb->err_table)
 837		return false;
 838
 839	ldev_info_size = sizeof(struct myrb_ldev_info) * MYRB_MAX_LDEVS;
 840	cb->ldev_info_buf = dma_alloc_coherent(&pdev->dev, ldev_info_size,
 841					       &cb->ldev_info_addr, GFP_KERNEL);
 842	if (!cb->ldev_info_buf)
 843		return false;
 844
 845	/*
 846	 * Skip mailbox initialisation for PD and P Controllers
 847	 */
 848	if (!mmio_init_fn)
 849		return true;
 850
 851	/* These are the base addresses for the command memory mailbox array */
 852	cb->cmd_mbox_size =  MYRB_CMD_MBOX_COUNT * sizeof(union myrb_cmd_mbox);
 853	cb->first_cmd_mbox = dma_alloc_coherent(&pdev->dev,
 854						cb->cmd_mbox_size,
 855						&cb->cmd_mbox_addr,
 856						GFP_KERNEL);
 857	if (!cb->first_cmd_mbox)
 858		return false;
 859
 860	cmd_mbox_mem = cb->first_cmd_mbox;
 861	cmd_mbox_mem += MYRB_CMD_MBOX_COUNT - 1;
 862	cb->last_cmd_mbox = cmd_mbox_mem;
 863	cb->next_cmd_mbox = cb->first_cmd_mbox;
 864	cb->prev_cmd_mbox1 = cb->last_cmd_mbox;
 865	cb->prev_cmd_mbox2 = cb->last_cmd_mbox - 1;
 866
 867	/* These are the base addresses for the status memory mailbox array */
 868	cb->stat_mbox_size = MYRB_STAT_MBOX_COUNT *
 869	    sizeof(struct myrb_stat_mbox);
 870	cb->first_stat_mbox = dma_alloc_coherent(&pdev->dev,
 871						 cb->stat_mbox_size,
 872						 &cb->stat_mbox_addr,
 873						 GFP_KERNEL);
 874	if (!cb->first_stat_mbox)
 875		return false;
 876
 877	stat_mbox_mem = cb->first_stat_mbox;
 878	stat_mbox_mem += MYRB_STAT_MBOX_COUNT - 1;
 879	cb->last_stat_mbox = stat_mbox_mem;
 880	cb->next_stat_mbox = cb->first_stat_mbox;
 881
 882	/* Enable the Memory Mailbox Interface. */
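     	/*
     	 * Try the dual-mode interface first (opcode2 0x14); if the
     	 * firmware rejects it, fall back below to the single-mode
     	 * interface (opcode2 0x10).
     	 */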
 883	cb->dual_mode_interface = true;
 884	mbox.typeX.opcode = 0x2B;
 885	mbox.typeX.id = 0;
 886	mbox.typeX.opcode2 = 0x14;
 887	mbox.typeX.cmd_mbox_addr = cb->cmd_mbox_addr;
 888	mbox.typeX.stat_mbox_addr = cb->stat_mbox_addr;
 889
 890	status = mmio_init_fn(pdev, base, &mbox);
 891	if (status != MYRB_STATUS_SUCCESS) {
 892		cb->dual_mode_interface = false;
 893		mbox.typeX.opcode2 = 0x10;
 894		status = mmio_init_fn(pdev, base, &mbox);
 895		if (status != MYRB_STATUS_SUCCESS) {
 896			dev_err(&pdev->dev,
  897				"Failed to enable mailbox, status %02X\n",
 898				status);
 899			return false;
 900		}
 901	}
 902	return true;
 903}
 904
 905/**
  906 * myrb_get_hba_config - reads the configuration information
     * @cb: pointer to the hba structure
 907 *
 908 * Reads the configuration information from the controller and
 909 * initializes the controller structure.
 910 *
 911 * Return: 0 on success, errno otherwise
 912 */
 913static int myrb_get_hba_config(struct myrb_hba *cb)
 914{
 915	struct myrb_enquiry2 *enquiry2;
 916	dma_addr_t enquiry2_addr;
 917	struct myrb_config2 *config2;
 918	dma_addr_t config2_addr;
 919	struct Scsi_Host *shost = cb->host;
 920	struct pci_dev *pdev = cb->pdev;
 921	int pchan_max = 0, pchan_cur = 0;
 922	unsigned short status;
 923	int ret = -ENODEV, memsize = 0;
 924
 925	enquiry2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
 926				      &enquiry2_addr, GFP_KERNEL);
 927	if (!enquiry2) {
 928		shost_printk(KERN_ERR, cb->host,
 929			     "Failed to allocate V1 enquiry2 memory\n");
 930		return -ENOMEM;
 931	}
 932	config2 = dma_alloc_coherent(&pdev->dev, sizeof(struct myrb_config2),
 933				     &config2_addr, GFP_KERNEL);
 934	if (!config2) {
 935		shost_printk(KERN_ERR, cb->host,
 936			     "Failed to allocate V1 config2 memory\n");
 937		dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
 938				  enquiry2, enquiry2_addr);
 939		return -ENOMEM;
 940	}
 941	mutex_lock(&cb->dma_mutex);
 942	status = myrb_hba_enquiry(cb);
 943	mutex_unlock(&cb->dma_mutex);
 944	if (status != MYRB_STATUS_SUCCESS) {
 945		shost_printk(KERN_WARNING, cb->host,
  946			     "Failed to issue V1 Enquiry\n");
 947		goto out_free;
 948	}
 949
 950	status = myrb_exec_type3(cb, MYRB_CMD_ENQUIRY2, enquiry2_addr);
 951	if (status != MYRB_STATUS_SUCCESS) {
 952		shost_printk(KERN_WARNING, cb->host,
 953			     "Failed to issue V1 Enquiry2\n");
 954		goto out_free;
 955	}
 956
 957	status = myrb_exec_type3(cb, MYRB_CMD_READ_CONFIG2, config2_addr);
 958	if (status != MYRB_STATUS_SUCCESS) {
 959		shost_printk(KERN_WARNING, cb->host,
 960			     "Failed to issue ReadConfig2\n");
 961		goto out_free;
 962	}
 963
 964	status = myrb_get_ldev_info(cb);
 965	if (status != MYRB_STATUS_SUCCESS) {
 966		shost_printk(KERN_WARNING, cb->host,
 967			     "Failed to get logical drive information\n");
 968		goto out_free;
 969	}
 970
 971	/*
 972	 * Initialize the Controller Model Name and Full Model Name fields.
 973	 */
 974	switch (enquiry2->hw.sub_model) {
 975	case DAC960_V1_P_PD_PU:
 976		if (enquiry2->scsi_cap.bus_speed == MYRB_SCSI_SPEED_ULTRA)
 977			strcpy(cb->model_name, "DAC960PU");
 978		else
 979			strcpy(cb->model_name, "DAC960PD");
 980		break;
 981	case DAC960_V1_PL:
 982		strcpy(cb->model_name, "DAC960PL");
 983		break;
 984	case DAC960_V1_PG:
 985		strcpy(cb->model_name, "DAC960PG");
 986		break;
 987	case DAC960_V1_PJ:
 988		strcpy(cb->model_name, "DAC960PJ");
 989		break;
 990	case DAC960_V1_PR:
 991		strcpy(cb->model_name, "DAC960PR");
 992		break;
 993	case DAC960_V1_PT:
 994		strcpy(cb->model_name, "DAC960PT");
 995		break;
 996	case DAC960_V1_PTL0:
 997		strcpy(cb->model_name, "DAC960PTL0");
 998		break;
 999	case DAC960_V1_PRL:
1000		strcpy(cb->model_name, "DAC960PRL");
1001		break;
1002	case DAC960_V1_PTL1:
1003		strcpy(cb->model_name, "DAC960PTL1");
1004		break;
1005	case DAC960_V1_1164P:
1006		strcpy(cb->model_name, "eXtremeRAID 1100");
1007		break;
1008	default:
1009		shost_printk(KERN_WARNING, cb->host,
1010			     "Unknown Model %X\n",
1011			     enquiry2->hw.sub_model);
1012		goto out;
1013	}
1014	/*
1015	 * Initialize the Controller Firmware Version field and verify that it
1016	 * is a supported firmware version.
1017	 * The supported firmware versions are:
1018	 *
1019	 * DAC1164P		    5.06 and above
1020	 * DAC960PTL/PRL/PJ/PG	    4.06 and above
1021	 * DAC960PU/PD/PL	    3.51 and above
1022	 * DAC960PU/PD/PL/P	    2.73 and above
1023	 */
1024#if defined(CONFIG_ALPHA)
1025	/*
1026	 * DEC Alpha machines were often equipped with DAC960 cards that were
1027	 * OEMed from Mylex, and had their own custom firmware. Version 2.70,
1028	 * the last custom FW revision to be released by DEC for these older
1029	 * controllers, appears to work quite well with this driver.
1030	 *
1031	 * Cards tested successfully were several versions each of the PD and
1032	 * PU, called by DEC the KZPSC and KZPAC, respectively, and having
1033	 * the Manufacturer Numbers (from Mylex), usually on a sticker on the
1034	 * back of the board, of:
1035	 *
1036	 * KZPSC:  D040347 (1-channel) or D040348 (2-channel)
1037	 *         or D040349 (3-channel)
1038	 * KZPAC:  D040395 (1-channel) or D040396 (2-channel)
1039	 *         or D040397 (3-channel)
1040	 */
1041# define FIRMWARE_27X	"2.70"
1042#else
1043# define FIRMWARE_27X	"2.73"
1044#endif
1045
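     	/*
     	 * A zeroed ENQUIRY2 version field means the firmware did not
     	 * fill it in; fall back to the plain ENQUIRY data instead.
     	 */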
1046	if (enquiry2->fw.major_version == 0) {
1047		enquiry2->fw.major_version = cb->enquiry->fw_major_version;
1048		enquiry2->fw.minor_version = cb->enquiry->fw_minor_version;
1049		enquiry2->fw.firmware_type = '0';
1050		enquiry2->fw.turn_id = 0;
1051	}
1052	snprintf(cb->fw_version, sizeof(cb->fw_version),
1053		"%d.%02d-%c-%02d",
1054		enquiry2->fw.major_version,
1055		enquiry2->fw.minor_version,
1056		enquiry2->fw.firmware_type,
1057		enquiry2->fw.turn_id);
1058	if (!((enquiry2->fw.major_version == 5 &&
1059	       enquiry2->fw.minor_version >= 6) ||
1060	      (enquiry2->fw.major_version == 4 &&
1061	       enquiry2->fw.minor_version >= 6) ||
1062	      (enquiry2->fw.major_version == 3 &&
1063	       enquiry2->fw.minor_version >= 51) ||
1064	      (enquiry2->fw.major_version == 2 &&
1065	       strcmp(cb->fw_version, FIRMWARE_27X) >= 0))) {
1066		shost_printk(KERN_WARNING, cb->host,
1067			"Firmware Version '%s' unsupported\n",
1068			cb->fw_version);
1069		goto out;
1070	}
1071	/*
1072	 * Initialize the Channels, Targets, Memory Size, and SAF-TE
1073	 * Enclosure Management Enabled fields.
1074	 */
1075	switch (enquiry2->hw.model) {
1076	case MYRB_5_CHANNEL_BOARD:
1077		pchan_max = 5;
1078		break;
1079	case MYRB_3_CHANNEL_BOARD:
1080	case MYRB_3_CHANNEL_ASIC_DAC:
1081		pchan_max = 3;
1082		break;
1083	case MYRB_2_CHANNEL_BOARD:
1084		pchan_max = 2;
1085		break;
1086	default:
1087		pchan_max = enquiry2->cfg_chan;
1088		break;
1089	}
1090	pchan_cur = enquiry2->cur_chan;
1091	if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_32BIT)
1092		cb->bus_width = 32;
1093	else if (enquiry2->scsi_cap.bus_width == MYRB_WIDTH_WIDE_16BIT)
1094		cb->bus_width = 16;
1095	else
1096		cb->bus_width = 8;
1097	cb->ldev_block_size = enquiry2->ldev_block_size;
1098	shost->max_channel = pchan_cur;
1099	shost->max_id = enquiry2->max_targets;
1100	memsize = enquiry2->mem_size >> 20;
1101	cb->safte_enabled = (enquiry2->fault_mgmt == MYRB_FAULT_SAFTE);
1102	/*
1103	 * Initialize the Controller Queue Depth, Driver Queue Depth,
1104	 * Logical Drive Count, Maximum Blocks per Command, Controller
1105	 * Scatter/Gather Limit, and Driver Scatter/Gather Limit.
1106	 * The Driver Queue Depth must be at most one less than the
1107	 * Controller Queue Depth to allow for an automatic drive
1108	 * rebuild operation.
1109	 */
1110	shost->can_queue = cb->enquiry->max_tcq;
1111	if (shost->can_queue < 3)
1112		shost->can_queue = enquiry2->max_cmds;
1113	if (shost->can_queue < 3)
1114		/* Play safe and disable TCQ */
1115		shost->can_queue = 1;
1116
1117	if (shost->can_queue > MYRB_CMD_MBOX_COUNT - 2)
1118		shost->can_queue = MYRB_CMD_MBOX_COUNT - 2;
1119	shost->max_sectors = enquiry2->max_sectors;
1120	shost->sg_tablesize = enquiry2->max_sge;
1121	if (shost->sg_tablesize > MYRB_SCATTER_GATHER_LIMIT)
1122		shost->sg_tablesize = MYRB_SCATTER_GATHER_LIMIT;
1123	/*
1124	 * Initialize the Stripe Size, Segment Size, and Geometry Translation.
1125	 */
1126	cb->stripe_size = config2->blocks_per_stripe * config2->block_factor
1127		>> (10 - MYRB_BLKSIZE_BITS);
1128	cb->segment_size = config2->blocks_per_cacheline * config2->block_factor
1129		>> (10 - MYRB_BLKSIZE_BITS);
1130	/* Assume 255/63 translation */
1131	cb->ldev_geom_heads = 255;
1132	cb->ldev_geom_sectors = 63;
1133	if (config2->drive_geometry) {
1134		cb->ldev_geom_heads = 128;
1135		cb->ldev_geom_sectors = 32;
1136	}
1137
1138	/*
1139	 * Initialize the Background Initialization Status.
1140	 */
1141	if ((cb->fw_version[0] == '4' &&
1142	     strcmp(cb->fw_version, "4.08") >= 0) ||
1143	    (cb->fw_version[0] == '5' &&
1144	     strcmp(cb->fw_version, "5.08") >= 0)) {
1145		cb->bgi_status_supported = true;
1146		myrb_bgi_control(cb);
1147	}
1148	cb->last_rbld_status = MYRB_NO_STDBY_RBLD_OR_CHECK_IN_PROGRESS;
1149	ret = 0;
1150
1151out:
1152	shost_printk(KERN_INFO, cb->host,
1153		"Configuring %s PCI RAID Controller\n", cb->model_name);
1154	shost_printk(KERN_INFO, cb->host,
1155		"  Firmware Version: %s, Memory Size: %dMB\n",
1156		cb->fw_version, memsize);
1157	if (cb->io_addr == 0)
1158		shost_printk(KERN_INFO, cb->host,
1159			"  I/O Address: n/a, PCI Address: 0x%lX, IRQ Channel: %d\n",
1160			(unsigned long)cb->pci_addr, cb->irq);
1161	else
1162		shost_printk(KERN_INFO, cb->host,
1163			"  I/O Address: 0x%lX, PCI Address: 0x%lX, IRQ Channel: %d\n",
1164			(unsigned long)cb->io_addr, (unsigned long)cb->pci_addr,
1165			cb->irq);
1166	shost_printk(KERN_INFO, cb->host,
1167		"  Controller Queue Depth: %d, Maximum Blocks per Command: %d\n",
1168		cb->host->can_queue, cb->host->max_sectors);
1169	shost_printk(KERN_INFO, cb->host,
1170		     "  Driver Queue Depth: %d, Scatter/Gather Limit: %d of %d Segments\n",
1171		     cb->host->can_queue, cb->host->sg_tablesize,
1172		     MYRB_SCATTER_GATHER_LIMIT);
1173	shost_printk(KERN_INFO, cb->host,
1174		     "  Stripe Size: %dKB, Segment Size: %dKB, BIOS Geometry: %d/%d%s\n",
1175		     cb->stripe_size, cb->segment_size,
1176		     cb->ldev_geom_heads, cb->ldev_geom_sectors,
1177		     cb->safte_enabled ?
1178		     "  SAF-TE Enclosure Management Enabled" : "");
1179	shost_printk(KERN_INFO, cb->host,
1180		     "  Physical: %d/%d channels %d/%d/%d devices\n",
1181		     pchan_cur, pchan_max, 0, cb->enquiry->pdev_dead,
1182		     cb->host->max_id);
1183
1184	shost_printk(KERN_INFO, cb->host,
1185		     "  Logical: 1/1 channels, %d/%d disks\n",
1186		     cb->enquiry->ldev_count, MYRB_MAX_LDEVS);
1187
1188out_free:
1189	dma_free_coherent(&pdev->dev, sizeof(struct myrb_enquiry2),
1190			  enquiry2, enquiry2_addr);
1191	dma_free_coherent(&pdev->dev, sizeof(struct myrb_config2),
1192			  config2, config2_addr);
1193
1194	return ret;
1195}
1196
1197/**
 1198 * myrb_unmap - unmaps controller structures
     * @cb: pointer to the hba structure
1199 */
1200static void myrb_unmap(struct myrb_hba *cb)
1201{
1202	if (cb->ldev_info_buf) {
1203		size_t ldev_info_size = sizeof(struct myrb_ldev_info) *
1204			MYRB_MAX_LDEVS;
1205		dma_free_coherent(&cb->pdev->dev, ldev_info_size,
1206				  cb->ldev_info_buf, cb->ldev_info_addr);
1207		cb->ldev_info_buf = NULL;
1208	}
1209	if (cb->err_table) {
1210		size_t err_table_size = sizeof(struct myrb_error_entry) *
1211			MYRB_MAX_CHANNELS * MYRB_MAX_TARGETS;
1212		dma_free_coherent(&cb->pdev->dev, err_table_size,
1213				  cb->err_table, cb->err_table_addr);
1214		cb->err_table = NULL;
1215	}
1216	if (cb->enquiry) {
1217		dma_free_coherent(&cb->pdev->dev, sizeof(struct myrb_enquiry),
1218				  cb->enquiry, cb->enquiry_addr);
1219		cb->enquiry = NULL;
1220	}
1221	if (cb->first_stat_mbox) {
1222		dma_free_coherent(&cb->pdev->dev, cb->stat_mbox_size,
1223				  cb->first_stat_mbox, cb->stat_mbox_addr);
1224		cb->first_stat_mbox = NULL;
1225	}
1226	if (cb->first_cmd_mbox) {
1227		dma_free_coherent(&cb->pdev->dev, cb->cmd_mbox_size,
1228				  cb->first_cmd_mbox, cb->cmd_mbox_addr);
1229		cb->first_cmd_mbox = NULL;
1230	}
1231}
1232
1233/**
 1234 * myrb_cleanup - cleanup controller structures
     * @cb: pointer to the hba structure
1235 */
1236static void myrb_cleanup(struct myrb_hba *cb)
1237{
1238	struct pci_dev *pdev = cb->pdev;
1239
1240	/* Free the memory mailbox, status, and related structures */
1241	myrb_unmap(cb);
1242
1243	if (cb->mmio_base) {
1244		cb->disable_intr(cb->io_base);
1245		iounmap(cb->mmio_base);
1246	}
1247	if (cb->irq)
1248		free_irq(cb->irq, cb);
1249	if (cb->io_addr)
1250		release_region(cb->io_addr, 0x80);
1251	pci_set_drvdata(pdev, NULL);
1252	pci_disable_device(pdev);
1253	scsi_host_put(cb->host);
1254}
1255
1256static int myrb_host_reset(struct scsi_cmnd *scmd)
1257{
1258	struct Scsi_Host *shost = scmd->device->host;
1259	struct myrb_hba *cb = shost_priv(shost);
1260
1261	cb->reset(cb->io_base);
1262	return SUCCESS;
1263}
1264
1265static int myrb_pthru_queuecommand(struct Scsi_Host *shost,
1266		struct scsi_cmnd *scmd)
1267{
1268	struct myrb_hba *cb = shost_priv(shost);
1269	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1270	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1271	struct myrb_dcdb *dcdb;
1272	dma_addr_t dcdb_addr;
1273	struct scsi_device *sdev = scmd->device;
1274	struct scatterlist *sgl;
1275	unsigned long flags;
1276	int nsge;
1277
1278	myrb_reset_cmd(cmd_blk);
1279	dcdb = dma_pool_alloc(cb->dcdb_pool, GFP_ATOMIC, &dcdb_addr);
1280	if (!dcdb)
1281		return SCSI_MLQUEUE_HOST_BUSY;
1282	nsge = scsi_dma_map(scmd);
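     	/* A DCDB addresses a single data segment, so reject multi-SG I/O. */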
1283	if (nsge > 1) {
1284		dma_pool_free(cb->dcdb_pool, dcdb, dcdb_addr);
1285		scmd->result = (DID_ERROR << 16);
1286		scmd->scsi_done(scmd);
1287		return 0;
1288	}
1289
1290	mbox->type3.opcode = MYRB_CMD_DCDB;
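     	/*
     	 * Offset the block-layer tag so that the lowest command ids
     	 * stay reserved for the driver's internal DCMD/MCMD blocks.
     	 */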
1291	mbox->type3.id = scmd->request->tag + 3;
1292	mbox->type3.addr = dcdb_addr;
1293	dcdb->channel = sdev->channel;
1294	dcdb->target = sdev->id;
1295	switch (scmd->sc_data_direction) {
1296	case DMA_NONE:
1297		dcdb->data_xfer = MYRB_DCDB_XFER_NONE;
1298		break;
1299	case DMA_TO_DEVICE:
1300		dcdb->data_xfer = MYRB_DCDB_XFER_SYSTEM_TO_DEVICE;
1301		break;
1302	case DMA_FROM_DEVICE:
1303		dcdb->data_xfer = MYRB_DCDB_XFER_DEVICE_TO_SYSTEM;
1304		break;
1305	default:
1306		dcdb->data_xfer = MYRB_DCDB_XFER_ILLEGAL;
1307		break;
1308	}
1309	dcdb->early_status = false;
1310	if (scmd->request->timeout <= 10)
1311		dcdb->timeout = MYRB_DCDB_TMO_10_SECS;
1312	else if (scmd->request->timeout <= 60)
1313		dcdb->timeout = MYRB_DCDB_TMO_60_SECS;
1314	else if (scmd->request->timeout <= 600)
1315		dcdb->timeout = MYRB_DCDB_TMO_10_MINS;
1316	else
1317		dcdb->timeout = MYRB_DCDB_TMO_24_HRS;
1318	dcdb->no_autosense = false;
1319	dcdb->allow_disconnect = true;
1320	sgl = scsi_sglist(scmd);
1321	dcdb->dma_addr = sg_dma_address(sgl);
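     	/* The DCDB transfer length is 20 bits: 16 low bits plus 4 high bits. */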
1322	if (sg_dma_len(sgl) > USHRT_MAX) {
1323		dcdb->xfer_len_lo = sg_dma_len(sgl) & 0xffff;
1324		dcdb->xfer_len_hi4 = sg_dma_len(sgl) >> 16;
1325	} else {
1326		dcdb->xfer_len_lo = sg_dma_len(sgl);
1327		dcdb->xfer_len_hi4 = 0;
1328	}
1329	dcdb->cdb_len = scmd->cmd_len;
1330	dcdb->sense_len = sizeof(dcdb->sense);
1331	memcpy(&dcdb->cdb, scmd->cmnd, scmd->cmd_len);
1332
1333	spin_lock_irqsave(&cb->queue_lock, flags);
1334	cb->qcmd(cb, cmd_blk);
1335	spin_unlock_irqrestore(&cb->queue_lock, flags);
1336	return 0;
1337}
1338
1339static void myrb_inquiry(struct myrb_hba *cb,
1340		struct scsi_cmnd *scmd)
1341{
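     	/* Canned INQUIRY response; bytes 8-15 carry the vendor string "MYLEX". */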
1342	unsigned char inq[36] = {
1343		0x00, 0x00, 0x03, 0x02, 0x20, 0x00, 0x01, 0x00,
1344		0x4d, 0x59, 0x4c, 0x45, 0x58, 0x20, 0x20, 0x20,
1345		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1346		0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
1347		0x20, 0x20, 0x20, 0x20,
1348	};
1349
1350	if (cb->bus_width > 16)
1351		inq[7] |= 1 << 6;
1352	if (cb->bus_width > 8)
1353		inq[7] |= 1 << 5;
1354	memcpy(&inq[16], cb->model_name, 16);
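     	/*
     	 * Build the 4-byte revision field from the "M.mm-t-nn" version
     	 * string: major digit, two minor digits, first turn digit.
     	 */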
1355	memcpy(&inq[32], cb->fw_version, 1);
1356	memcpy(&inq[33], &cb->fw_version[2], 2);
1357	memcpy(&inq[35], &cb->fw_version[7], 1);
1358
1359	scsi_sg_copy_from_buffer(scmd, (void *)inq, 36);
1360}
1361
1362static void
1363myrb_mode_sense(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1364		struct myrb_ldev_info *ldev_info)
1365{
1366	unsigned char modes[32], *mode_pg;
1367	bool dbd;
1368	size_t mode_len;
1369
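     	/* DBD (disable block descriptors) bit of the MODE SENSE CDB. */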
1370	dbd = (scmd->cmnd[1] & 0x08) == 0x08;
1371	if (dbd) {
1372		mode_len = 24;
1373		mode_pg = &modes[4];
1374	} else {
1375		mode_len = 32;
1376		mode_pg = &modes[12];
1377	}
1378	memset(modes, 0, sizeof(modes));
1379	modes[0] = mode_len - 1;
1380	if (!dbd) {
1381		unsigned char *block_desc = &modes[4];
1382
1383		modes[3] = 8;
1384		put_unaligned_be32(ldev_info->size, &block_desc[0]);
1385		put_unaligned_be32(cb->ldev_block_size, &block_desc[5]);
1386	}
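     	/* Caching mode page: page code 0x08, page length 0x12 bytes. */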
1387	mode_pg[0] = 0x08;
1388	mode_pg[1] = 0x12;
1389	if (ldev_info->wb_enabled)
1390		mode_pg[2] |= 0x04;
1391	if (cb->segment_size) {
1392		mode_pg[2] |= 0x08;
1393		put_unaligned_be16(cb->segment_size, &mode_pg[14]);
1394	}
1395
1396	scsi_sg_copy_from_buffer(scmd, modes, mode_len);
1397}
1398
1399static void myrb_request_sense(struct myrb_hba *cb,
1400		struct scsi_cmnd *scmd)
1401{
1402	scsi_build_sense_buffer(0, scmd->sense_buffer,
1403				NO_SENSE, 0, 0);
1404	scsi_sg_copy_from_buffer(scmd, scmd->sense_buffer,
1405				 SCSI_SENSE_BUFFERSIZE);
1406}
1407
1408static void myrb_read_capacity(struct myrb_hba *cb, struct scsi_cmnd *scmd,
1409		struct myrb_ldev_info *ldev_info)
1410{
1411	unsigned char data[8];
1412
1413	dev_dbg(&scmd->device->sdev_gendev,
1414		"Capacity %u, blocksize %u\n",
1415		ldev_info->size, cb->ldev_block_size);
1416	put_unaligned_be32(ldev_info->size - 1, &data[0]);
1417	put_unaligned_be32(cb->ldev_block_size, &data[4]);
1418	scsi_sg_copy_from_buffer(scmd, data, 8);
1419}
1420
1421static int myrb_ldev_queuecommand(struct Scsi_Host *shost,
1422		struct scsi_cmnd *scmd)
1423{
1424	struct myrb_hba *cb = shost_priv(shost);
1425	struct myrb_cmdblk *cmd_blk = scsi_cmd_priv(scmd);
1426	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
1427	struct myrb_ldev_info *ldev_info;
1428	struct scsi_device *sdev = scmd->device;
1429	struct scatterlist *sgl;
1430	unsigned long flags;
1431	u64 lba;
1432	u32 block_cnt;
1433	int nsge;
1434
1435	ldev_info = sdev->hostdata;
 1436	if (!ldev_info || (ldev_info->state != MYRB_DEVICE_ONLINE &&
 1437	    ldev_info->state != MYRB_DEVICE_WO)) {
1438		dev_dbg(&shost->shost_gendev, "ldev %u in state %x, skip\n",
1439			sdev->id, ldev_info ? ldev_info->state : 0xff);
1440		scmd->result = (DID_BAD_TARGET << 16);
1441		scmd->scsi_done(scmd);
1442		return 0;
1443	}
1444	switch (scmd->cmnd[0]) {
1445	case TEST_UNIT_READY:
1446		scmd->result = (DID_OK << 16);
1447		scmd->scsi_done(scmd);
1448		return 0;
1449	case INQUIRY:
1450		if (scmd->cmnd[1] & 1) {
1451			/* Illegal request, invalid field in CDB */
1452			scsi_build_sense_buffer(0, scmd->sense_buffer,
1453						ILLEGAL_REQUEST, 0x24, 0);
1454			scmd->result = (DRIVER_SENSE << 24) |
1455				SAM_STAT_CHECK_CONDITION;
1456		} else {
1457			myrb_inquiry(cb, scmd);
1458			scmd->result = (DID_OK << 16);
1459		}
1460		scmd->scsi_done(scmd);
1461		return 0;
1462	case SYNCHRONIZE_CACHE:
1463		scmd->result = (DID_OK << 16);
1464		scmd->scsi_done(scmd);
1465		return 0;
1466	case MODE_SENSE:
1467		if ((scmd->cmnd[2] & 0x3F) != 0x3F &&
1468		    (scmd->cmnd[2] & 0x3F) != 0x08) {
1469			/* Illegal request, invalid field in CDB */
1470			scsi_build_sense_buffer(0, scmd->sense_buffer,
1471						ILLEGAL_REQUEST, 0x24, 0);
1472			scmd->result = (DRIVER_SENSE << 24) |
1473				SAM_STAT_CHECK_CONDITION;
1474		} else {
1475			myrb_mode_sense(cb, scmd, ldev_info);
1476			scmd->result = (DID_OK << 16);
1477		}
1478		scmd->scsi_done(scmd);
1479		return 0;
1480	case READ_CAPACITY:
1481		if ((scmd->cmnd[1] & 1) ||
1482		    (scmd->cmnd[8] & 1)) {
1483			/* Illegal request, invalid field in CDB */
1484			scsi_build_sense_buffer(0, scmd->sense_buffer,
1485						ILLEGAL_REQUEST, 0x24, 0);
1486			scmd->result = (DRIVER_SENSE << 24) |
1487				SAM_STAT_CHECK_CONDITION;
1488			scmd->scsi_done(scmd);
1489			return 0;
1490		}
1491		lba = get_unaligned_be32(&scmd->cmnd[2]);
1492		if (lba) {
1493			/* Illegal request, invalid field in CDB */
1494			scsi_build_sense_buffer(0, scmd->sense_buffer,
1495						ILLEGAL_REQUEST, 0x24, 0);
1496			scmd->result = (DRIVER_SENSE << 24) |
1497				SAM_STAT_CHECK_CONDITION;
1498			scmd->scsi_done(scmd);
1499			return 0;
1500		}
1501		myrb_read_capacity(cb, scmd, ldev_info);
1502		scmd->scsi_done(scmd);
1503		return 0;
1504	case REQUEST_SENSE:
1505		myrb_request_sense(cb, scmd);
 1506		scmd->result = (DID_OK << 16);
     		scmd->scsi_done(scmd);
 1507		return 0;
1508	case SEND_DIAGNOSTIC:
1509		if (scmd->cmnd[1] != 0x04) {
1510			/* Illegal request, invalid field in CDB */
1511			scsi_build_sense_buffer(0, scmd->sense_buffer,
1512						ILLEGAL_REQUEST, 0x24, 0);
1513			scmd->result = (DRIVER_SENSE << 24) |
1514				SAM_STAT_CHECK_CONDITION;
1515		} else {
1516			/* Assume good status */
1517			scmd->result = (DID_OK << 16);
1518		}
1519		scmd->scsi_done(scmd);
1520		return 0;
1521	case READ_6:
1522		if (ldev_info->state == MYRB_DEVICE_WO) {
1523			/* Data protect, attempt to read invalid data */
1524			scsi_build_sense_buffer(0, scmd->sense_buffer,
1525						DATA_PROTECT, 0x21, 0x06);
1526			scmd->result = (DRIVER_SENSE << 24) |
1527				SAM_STAT_CHECK_CONDITION;
1528			scmd->scsi_done(scmd);
1529			return 0;
1530		}
1531		/* fall through */
1532	case WRITE_6:
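     		/* 6-byte CDBs carry a 21-bit LBA and an 8-bit block count. */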
1533		lba = (((scmd->cmnd[1] & 0x1F) << 16) |
1534		       (scmd->cmnd[2] << 8) |
1535		       scmd->cmnd[3]);
1536		block_cnt = scmd->cmnd[4];
1537		break;
1538	case READ_10:
1539		if (ldev_info->state == MYRB_DEVICE_WO) {
1540			/* Data protect, attempt to read invalid data */
1541			scsi_build_sense_buffer(0, scmd->sense_buffer,
1542						DATA_PROTECT, 0x21, 0x06);
1543			scmd->result = (DRIVER_SENSE << 24) |
1544				SAM_STAT_CHECK_CONDITION;
1545			scmd->scsi_done(scmd);
1546			return 0;
1547		}
1548		/* fall through */
1549	case WRITE_10:
1550	case VERIFY:		/* 0x2F */
1551	case WRITE_VERIFY:	/* 0x2E */
1552		lba = get_unaligned_be32(&scmd->cmnd[2]);
1553		block_cnt = get_unaligned_be16(&scmd->cmnd[7]);
1554		break;
1555	case READ_12:
1556		if (ldev_info->state == MYRB_DEVICE_WO) {
1557			/* Data protect, attempt to read invalid data */
1558			scsi_build_sense_buffer(0, scmd->sense_buffer,
1559						DATA_PROTECT, 0x21, 0x06);
1560			scmd->result = (DRIVER_SENSE << 24) |
1561				SAM_STAT_CHECK_CONDITION;
1562			scmd->scsi_done(scmd);
1563			return 0;
1564		}
1565		/* fall through */
1566	case WRITE_12:
1567	case VERIFY_12: /* 0xAF */
1568	case WRITE_VERIFY_12:	/* 0xAE */
1569		lba = get_unaligned_be32(&scmd->cmnd[2]);
1570		block_cnt = get_unaligned_be32(&scmd->cmnd[6]);
1571		break;
1572	default:
1573		/* Illegal request, invalid opcode */
1574		scsi_build_sense_buffer(0, scmd->sense_buffer,
1575					ILLEGAL_REQUEST, 0x20, 0);
1576		scmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
1577		scmd->scsi_done(scmd);
1578		return 0;
1579	}
1580
1581	myrb_reset_cmd(cmd_blk);
1582	mbox->type5.id = scmd->request->tag + 3;
1583	if (scmd->sc_data_direction == DMA_NONE)
1584		goto submit;
1585	nsge = scsi_dma_map(scmd);
1586	if (nsge == 1) {
1587		sgl = scsi_sglist(scmd);
1588		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1589			mbox->type5.opcode = MYRB_CMD_READ;
1590		else
1591			mbox->type5.opcode = MYRB_CMD_WRITE;
1592
1593		mbox->type5.ld.xfer_len = block_cnt;
1594		mbox->type5.ld.ldev_num = sdev->id;
1595		mbox->type5.lba = lba;
1596		mbox->type5.addr = (u32)sg_dma_address(sgl);
1597	} else {
1598		struct myrb_sge *hw_sgl;
1599		dma_addr_t hw_sgl_addr;
1600		int i;
1601
1602		hw_sgl = dma_pool_alloc(cb->sg_pool, GFP_ATOMIC, &hw_sgl_addr);
1603		if (!hw_sgl)
1604			return SCSI_MLQUEUE_HOST_BUSY;
1605
1606		cmd_blk->sgl = hw_sgl;
1607		cmd_blk->sgl_addr = hw_sgl_addr;
1608
1609		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
1610			mbox->type5.opcode = MYRB_CMD_READ_SG;
1611		else
1612			mbox->type5.opcode = MYRB_CMD_WRITE_SG;
1613
1614		mbox->type5.ld.xfer_len = block_cnt;
1615		mbox->type5.ld.ldev_num = sdev->id;
1616		mbox->type5.lba = lba;
1617		mbox->type5.addr = hw_sgl_addr;
1618		mbox->type5.sg_count = nsge;
1619
1620		scsi_for_each_sg(scmd, sgl, nsge, i) {
1621			hw_sgl->sge_addr = (u32)sg_dma_address(sgl);
1622			hw_sgl->sge_count = (u32)sg_dma_len(sgl);
1623			hw_sgl++;
1624		}
1625	}
1626submit:
1627	spin_lock_irqsave(&cb->queue_lock, flags);
1628	cb->qcmd(cb, cmd_blk);
1629	spin_unlock_irqrestore(&cb->queue_lock, flags);
1630
1631	return 0;
1632}
1633
1634static int myrb_queuecommand(struct Scsi_Host *shost,
1635		struct scsi_cmnd *scmd)
1636{
1637	struct scsi_device *sdev = scmd->device;
1638
1639	if (sdev->channel > myrb_logical_channel(shost)) {
1640		scmd->result = (DID_BAD_TARGET << 16);
1641		scmd->scsi_done(scmd);
1642		return 0;
1643	}
1644	if (sdev->channel == myrb_logical_channel(shost))
1645		return myrb_ldev_queuecommand(shost, scmd);
1646
1647	return myrb_pthru_queuecommand(shost, scmd);
1648}
1649
1650static int myrb_ldev_slave_alloc(struct scsi_device *sdev)
1651{
1652	struct myrb_hba *cb = shost_priv(sdev->host);
1653	struct myrb_ldev_info *ldev_info;
1654	unsigned short ldev_num = sdev->id;
1655	enum raid_level level;
1656
1657	ldev_info = cb->ldev_info_buf + ldev_num;
1658	if (!ldev_info)
1659		return -ENXIO;
1660
1661	sdev->hostdata = kzalloc(sizeof(*ldev_info), GFP_KERNEL);
1662	if (!sdev->hostdata)
1663		return -ENOMEM;
1664	dev_dbg(&sdev->sdev_gendev,
1665		"slave alloc ldev %d state %x\n",
1666		ldev_num, ldev_info->state);
1667	memcpy(sdev->hostdata, ldev_info,
1668	       sizeof(*ldev_info));
1669	switch (ldev_info->raid_level) {
1670	case MYRB_RAID_LEVEL0:
1671		level = RAID_LEVEL_LINEAR;
1672		break;
1673	case MYRB_RAID_LEVEL1:
1674		level = RAID_LEVEL_1;
1675		break;
1676	case MYRB_RAID_LEVEL3:
1677		level = RAID_LEVEL_3;
1678		break;
1679	case MYRB_RAID_LEVEL5:
1680		level = RAID_LEVEL_5;
1681		break;
1682	case MYRB_RAID_LEVEL6:
1683		level = RAID_LEVEL_6;
1684		break;
1685	case MYRB_RAID_JBOD:
1686		level = RAID_LEVEL_JBOD;
1687		break;
1688	default:
1689		level = RAID_LEVEL_UNKNOWN;
1690		break;
1691	}
1692	raid_set_level(myrb_raid_template, &sdev->sdev_gendev, level);
1693	return 0;
1694}
1695
1696static int myrb_pdev_slave_alloc(struct scsi_device *sdev)
1697{
1698	struct myrb_hba *cb = shost_priv(sdev->host);
1699	struct myrb_pdev_state *pdev_info;
1700	unsigned short status;
1701
1702	if (sdev->id > MYRB_MAX_TARGETS)
1703		return -ENXIO;
1704
1705	pdev_info = kzalloc(sizeof(*pdev_info), GFP_KERNEL|GFP_DMA);
1706	if (!pdev_info)
1707		return -ENOMEM;
1708
1709	status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1710				  sdev, pdev_info);
1711	if (status != MYRB_STATUS_SUCCESS) {
1712		dev_dbg(&sdev->sdev_gendev,
1713			"Failed to get device state, status %x\n",
1714			status);
1715		kfree(pdev_info);
1716		return -ENXIO;
1717	}
1718	if (!pdev_info->present) {
1719		dev_dbg(&sdev->sdev_gendev,
1720			"device not present, skip\n");
1721		kfree(pdev_info);
1722		return -ENXIO;
1723	}
1724	dev_dbg(&sdev->sdev_gendev,
1725		"slave alloc pdev %d:%d state %x\n",
1726		sdev->channel, sdev->id, pdev_info->state);
1727	sdev->hostdata = pdev_info;
1728
1729	return 0;
1730}
1731
1732static int myrb_slave_alloc(struct scsi_device *sdev)
1733{
1734	if (sdev->channel > myrb_logical_channel(sdev->host))
1735		return -ENXIO;
1736
1737	if (sdev->lun > 0)
1738		return -ENXIO;
1739
1740	if (sdev->channel == myrb_logical_channel(sdev->host))
1741		return myrb_ldev_slave_alloc(sdev);
1742
1743	return myrb_pdev_slave_alloc(sdev);
1744}
1745
1746static int myrb_slave_configure(struct scsi_device *sdev)
1747{
1748	struct myrb_ldev_info *ldev_info;
1749
1750	if (sdev->channel > myrb_logical_channel(sdev->host))
1751		return -ENXIO;
1752
1753	if (sdev->channel < myrb_logical_channel(sdev->host)) {
1754		sdev->no_uld_attach = 1;
1755		return 0;
1756	}
1757	if (sdev->lun != 0)
1758		return -ENXIO;
1759
1760	ldev_info = sdev->hostdata;
1761	if (!ldev_info)
1762		return -ENXIO;
1763	if (ldev_info->state != MYRB_DEVICE_ONLINE)
1764		sdev_printk(KERN_INFO, sdev,
1765			    "Logical drive is %s\n",
1766			    myrb_devstate_name(ldev_info->state));
1767
1768	sdev->tagged_supported = 1;
1769	return 0;
1770}
1771
1772static void myrb_slave_destroy(struct scsi_device *sdev)
1773{
1774	kfree(sdev->hostdata);
1775}
1776
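/*
 * BIOS geometry: heads and sectors come from the controller
 * configuration, and the cylinder count is whatever remains of the
 * capacity after dividing by heads * sectors.
 */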
1777static int myrb_biosparam(struct scsi_device *sdev, struct block_device *bdev,
1778		sector_t capacity, int geom[])
1779{
1780	struct myrb_hba *cb = shost_priv(sdev->host);
1781
1782	geom[0] = cb->ldev_geom_heads;
1783	geom[1] = cb->ldev_geom_sectors;
1784	sector_div(capacity, geom[0] * geom[1]);
	geom[2] = (int)capacity;
1785
1786	return 0;
1787}
1788
1789static ssize_t raid_state_show(struct device *dev,
1790		struct device_attribute *attr, char *buf)
1791{
1792	struct scsi_device *sdev = to_scsi_device(dev);
1793	struct myrb_hba *cb = shost_priv(sdev->host);
1794	int ret;
1795
1796	if (!sdev->hostdata)
1797		return snprintf(buf, 16, "Unknown\n");
1798
1799	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1800		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1801		const char *name;
1802
1803		name = myrb_devstate_name(ldev_info->state);
1804		if (name)
1805			ret = snprintf(buf, 32, "%s\n", name);
1806		else
1807			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1808				       ldev_info->state);
1809	} else {
1810		struct myrb_pdev_state *pdev_info = sdev->hostdata;
1811		unsigned short status;
1812		const char *name;
1813
1814		status = myrb_exec_type3D(cb, MYRB_CMD_GET_DEVICE_STATE,
1815					  sdev, pdev_info);
1816		if (status != MYRB_STATUS_SUCCESS)
1817			sdev_printk(KERN_INFO, sdev,
1818				    "Failed to get device state, status %x\n",
1819				    status);
1820
1821		if (!pdev_info->present)
1822			name = "Removed";
1823		else
1824			name = myrb_devstate_name(pdev_info->state);
1825		if (name)
1826			ret = snprintf(buf, 32, "%s\n", name);
1827		else
1828			ret = snprintf(buf, 32, "Invalid (%02X)\n",
1829				       pdev_info->state);
1830	}
1831	return ret;
1832}
1833
1834static ssize_t raid_state_store(struct device *dev,
1835		struct device_attribute *attr, const char *buf, size_t count)
1836{
1837	struct scsi_device *sdev = to_scsi_device(dev);
1838	struct myrb_hba *cb = shost_priv(sdev->host);
1839	struct myrb_pdev_state *pdev_info;
1840	enum myrb_devstate new_state;
1841	unsigned short status;
1842
1843	if (!strncmp(buf, "kill", 4) ||
1844	    !strncmp(buf, "offline", 7))
1845		new_state = MYRB_DEVICE_DEAD;
1846	else if (!strncmp(buf, "online", 6))
1847		new_state = MYRB_DEVICE_ONLINE;
1848	else if (!strncmp(buf, "standby", 7))
1849		new_state = MYRB_DEVICE_STANDBY;
1850	else
1851		return -EINVAL;
1852
	/* Only physical devices can be transitioned via this attribute */
	if (sdev->channel == myrb_logical_channel(sdev->host))
		return -EINVAL;

1853	pdev_info = sdev->hostdata;
1854	if (!pdev_info) {
1855		sdev_printk(KERN_INFO, sdev,
1856			    "Failed - no physical device information\n");
1857		return -ENXIO;
1858	}
1859	if (!pdev_info->present) {
1860		sdev_printk(KERN_INFO, sdev,
1861			    "Failed - device not present\n");
1862		return -ENXIO;
1863	}
1864
1865	if (pdev_info->state == new_state)
1866		return count;
1867
1868	status = myrb_set_pdev_state(cb, sdev, new_state);
1869	switch (status) {
1870	case MYRB_STATUS_SUCCESS:
1871		break;
1872	case MYRB_STATUS_START_DEVICE_FAILED:
1873		sdev_printk(KERN_INFO, sdev,
1874			     "Failed - Unable to Start Device\n");
1875		count = -EAGAIN;
1876		break;
1877	case MYRB_STATUS_NO_DEVICE:
1878		sdev_printk(KERN_INFO, sdev,
1879			    "Failed - No Device at Address\n");
1880		count = -ENODEV;
1881		break;
1882	case MYRB_STATUS_INVALID_CHANNEL_OR_TARGET:
1883		sdev_printk(KERN_INFO, sdev,
1884			 "Failed - Invalid Channel or Target or Modifier\n");
1885		count = -EINVAL;
1886		break;
1887	case MYRB_STATUS_CHANNEL_BUSY:
1888		sdev_printk(KERN_INFO, sdev,
1889			 "Failed - Channel Busy\n");
1890		count = -EBUSY;
1891		break;
1892	default:
1893		sdev_printk(KERN_INFO, sdev,
1894			 "Failed - Unexpected Status %04X\n", status);
1895		count = -EIO;
1896		break;
1897	}
1898	return count;
1899}
1900static DEVICE_ATTR_RW(raid_state);
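
/*
 * raid_state reports the controller's view of the device and, for
 * physical devices, accepts "kill"/"offline", "online" and "standby"
 * writes. The sysfs path below is illustrative:
 *
 *   echo standby > /sys/class/scsi_device/0:0:1:0/device/raid_state
 */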
1901
1902static ssize_t raid_level_show(struct device *dev,
1903		struct device_attribute *attr, char *buf)
1904{
1905	struct scsi_device *sdev = to_scsi_device(dev);
1906
1907	if (sdev->channel == myrb_logical_channel(sdev->host)) {
1908		struct myrb_ldev_info *ldev_info = sdev->hostdata;
1909		const char *name;
1910
1911		if (!ldev_info)
1912			return -ENXIO;
1913
1914		name = myrb_raidlevel_name(ldev_info->raid_level);
1915		if (!name)
1916			return snprintf(buf, 32, "Invalid (%02X)\n",
1917					ldev_info->raid_level);
1918		return snprintf(buf, 32, "%s\n", name);
1919	}
1920	return snprintf(buf, 32, "Physical Drive\n");
1921}
1922static DEVICE_ATTR_RO(raid_level);
1923
1924static ssize_t rebuild_show(struct device *dev,
1925		struct device_attribute *attr, char *buf)
1926{
1927	struct scsi_device *sdev = to_scsi_device(dev);
1928	struct myrb_hba *cb = shost_priv(sdev->host);
1929	struct myrb_rbld_progress rbld_buf;
1930	unsigned short status;
1931
1932	if (sdev->channel < myrb_logical_channel(sdev->host))
1933		return snprintf(buf, 32, "physical device - not rebuilding\n");
1934
1935	status = myrb_get_rbld_progress(cb, &rbld_buf);
1936
1937	if (status != MYRB_STATUS_SUCCESS ||
1938	    rbld_buf.ldev_num != sdev->id)
1939		return snprintf(buf, 32, "not rebuilding\n");
1940
1941	return snprintf(buf, 32, "rebuilding block %u of %u\n",
1942			rbld_buf.ldev_size - rbld_buf.blocks_left,
1943			rbld_buf.ldev_size);
1944}
1945
1946static ssize_t rebuild_store(struct device *dev,
1947		struct device_attribute *attr, const char *buf, size_t count)
1948{
1949	struct scsi_device *sdev = to_scsi_device(dev);
1950	struct myrb_hba *cb = shost_priv(sdev->host);
1951	struct myrb_cmdblk *cmd_blk;
1952	union myrb_cmd_mbox *mbox;
1953	unsigned short status;
1954	int rc, start;
1955	const char *msg;
1956
1957	rc = kstrtoint(buf, 0, &start);
1958	if (rc)
1959		return rc;
1960
1961	if (sdev->channel >= myrb_logical_channel(sdev->host))
1962		return -ENXIO;
1963
1964	status = myrb_get_rbld_progress(cb, NULL);
1965	if (start) {
1966		if (status == MYRB_STATUS_SUCCESS) {
1967			sdev_printk(KERN_INFO, sdev,
1968				    "Rebuild Not Initiated; already in progress\n");
1969			return -EALREADY;
1970		}
1971		mutex_lock(&cb->dcmd_mutex);
1972		cmd_blk = &cb->dcmd_blk;
1973		myrb_reset_cmd(cmd_blk);
1974		mbox = &cmd_blk->mbox;
1975		mbox->type3D.opcode = MYRB_CMD_REBUILD_ASYNC;
1976		mbox->type3D.id = MYRB_DCMD_TAG;
1977		mbox->type3D.channel = sdev->channel;
1978		mbox->type3D.target = sdev->id;
1979		status = myrb_exec_cmd(cb, cmd_blk);
1980		mutex_unlock(&cb->dcmd_mutex);
1981	} else {
1982		struct pci_dev *pdev = cb->pdev;
1983		unsigned char *rate;
1984		dma_addr_t rate_addr;
1985
1986		if (status != MYRB_STATUS_SUCCESS) {
1987			sdev_printk(KERN_INFO, sdev,
1988				    "Rebuild Not Cancelled; not in progress\n");
1989			return count;
1990		}
1991
1992		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
1993					  &rate_addr, GFP_KERNEL);
1994		if (rate == NULL) {
1995			sdev_printk(KERN_INFO, sdev,
1996				    "Cancellation of Rebuild Failed - Out of Memory\n");
1997			return -ENOMEM;
1998		}
1999		mutex_lock(&cb->dcmd_mutex);
2000		cmd_blk = &cb->dcmd_blk;
2001		myrb_reset_cmd(cmd_blk);
2002		mbox = &cmd_blk->mbox;
2003		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2004		mbox->type3R.id = MYRB_DCMD_TAG;
2005		mbox->type3R.rbld_rate = 0xFF;
2006		mbox->type3R.addr = rate_addr;
2007		status = myrb_exec_cmd(cb, cmd_blk);
2008		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2009		mutex_unlock(&cb->dcmd_mutex);
2010	}
2011	if (status == MYRB_STATUS_SUCCESS) {
2012		sdev_printk(KERN_INFO, sdev, "Rebuild %s\n",
2013			    start ? "Initiated" : "Cancelled");
2014		return count;
2015	}
2016	if (!start) {
2017		sdev_printk(KERN_INFO, sdev,
2018			    "Rebuild Not Cancelled, status 0x%x\n",
2019			    status);
2020		return -EIO;
2021	}
2022
2023	switch (status) {
2024	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2025		msg = "Attempt to Rebuild Online or Unresponsive Drive";
2026		break;
2027	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2028		msg = "New Disk Failed During Rebuild";
2029		break;
2030	case MYRB_STATUS_INVALID_ADDRESS:
2031		msg = "Invalid Device Address";
2032		break;
2033	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2034		msg = "Already in Progress";
2035		break;
2036	default:
2037		msg = NULL;
2038		break;
2039	}
2040	if (msg)
2041		sdev_printk(KERN_INFO, sdev,
2042			    "Rebuild Failed - %s\n", msg);
2043	else
2044		sdev_printk(KERN_INFO, sdev,
2045			    "Rebuild Failed, status 0x%x\n", status);
2046
2047	return -EIO;
2048}
2049static DEVICE_ATTR_RW(rebuild);
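
/*
 * Writing a non-zero value starts a rebuild of the physical device,
 * writing 0 cancels a rebuild in progress; progress is reported through
 * reads on the affected logical drive. Illustrative example:
 *
 *   echo 1 > /sys/class/scsi_device/0:0:1:0/device/rebuild
 */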
2050
2051static ssize_t consistency_check_store(struct device *dev,
2052		struct device_attribute *attr, const char *buf, size_t count)
2053{
2054	struct scsi_device *sdev = to_scsi_device(dev);
2055	struct myrb_hba *cb = shost_priv(sdev->host);
2056	struct myrb_rbld_progress rbld_buf;
2057	struct myrb_cmdblk *cmd_blk;
2058	union myrb_cmd_mbox *mbox;
2060	unsigned short status;
2061	int rc, start;
2062	const char *msg;
2063
2064	rc = kstrtoint(buf, 0, &start);
2065	if (rc)
2066		return rc;
2067
2068	if (sdev->channel < myrb_logical_channel(sdev->host))
2069		return -ENXIO;
2070
2071	status = myrb_get_rbld_progress(cb, &rbld_buf);
2072	if (start) {
2073		if (status == MYRB_STATUS_SUCCESS) {
2074			sdev_printk(KERN_INFO, sdev,
2075				    "Check Consistency Not Initiated; already in progress\n");
2076			return -EALREADY;
2077		}
2078		mutex_lock(&cb->dcmd_mutex);
2079		cmd_blk = &cb->dcmd_blk;
2080		myrb_reset_cmd(cmd_blk);
2081		mbox = &cmd_blk->mbox;
2082		mbox->type3C.opcode = MYRB_CMD_CHECK_CONSISTENCY_ASYNC;
2083		mbox->type3C.id = MYRB_DCMD_TAG;
2084		mbox->type3C.ldev_num = sdev->id;
2085		mbox->type3C.auto_restore = true;
2086
2087		status = myrb_exec_cmd(cb, cmd_blk);
2088		mutex_unlock(&cb->dcmd_mutex);
2089	} else {
2090		struct pci_dev *pdev = cb->pdev;
2091		unsigned char *rate;
2092		dma_addr_t rate_addr;
2093
2094		if (status != MYRB_STATUS_SUCCESS ||
		    rbld_buf.ldev_num != sdev->id) {
2095			sdev_printk(KERN_INFO, sdev,
2096				    "Check Consistency Not Cancelled; not in progress\n");
2097			return count;
2098		}
2099		rate = dma_alloc_coherent(&pdev->dev, sizeof(char),
2100					  &rate_addr, GFP_KERNEL);
2101		if (rate == NULL) {
2102			sdev_printk(KERN_INFO, sdev,
2103				    "Cancellation of Check Consistency Failed - Out of Memory\n");
2104			return -ENOMEM;
2105		}
2106		mutex_lock(&cb->dcmd_mutex);
2107		cmd_blk = &cb->dcmd_blk;
2108		myrb_reset_cmd(cmd_blk);
2109		mbox = &cmd_blk->mbox;
2110		mbox->type3R.opcode = MYRB_CMD_REBUILD_CONTROL;
2111		mbox->type3R.id = MYRB_DCMD_TAG;
2112		mbox->type3R.rbld_rate = 0xFF;
2113		mbox->type3R.addr = rate_addr;
2114		status = myrb_exec_cmd(cb, cmd_blk);
2115		dma_free_coherent(&pdev->dev, sizeof(char), rate, rate_addr);
2116		mutex_unlock(&cb->dcmd_mutex);
2117	}
2118	if (status == MYRB_STATUS_SUCCESS) {
2119		sdev_printk(KERN_INFO, sdev, "Check Consistency %s\n",
2120			    start ? "Initiated" : "Cancelled");
2121		return count;
2122	}
2123	if (!start) {
2124		sdev_printk(KERN_INFO, sdev,
2125			    "Check Consistency Not Cancelled, status 0x%x\n",
2126			    status);
2127		return -EIO;
2128	}
2129
2130	switch (status) {
2131	case MYRB_STATUS_ATTEMPT_TO_RBLD_ONLINE_DRIVE:
2132		msg = "Dependent Physical Device is DEAD";
2133		break;
2134	case MYRB_STATUS_RBLD_NEW_DISK_FAILED:
2135		msg = "New Disk Failed During Rebuild";
2136		break;
2137	case MYRB_STATUS_INVALID_ADDRESS:
2138		msg = "Invalid or Nonredundant Logical Drive";
2139		break;
2140	case MYRB_STATUS_RBLD_OR_CHECK_INPROGRESS:
2141		msg = "Already in Progress";
2142		break;
2143	default:
2144		msg = NULL;
2145		break;
2146	}
2147	if (msg)
2148		sdev_printk(KERN_INFO, sdev,
2149			    "Check Consistency Failed - %s\n", msg);
2150	else
2151		sdev_printk(KERN_INFO, sdev,
2152			    "Check Consistency Failed, status 0x%x\n", status);
2153
2154	return -EIO;
2155}
2156
2157static ssize_t consistency_check_show(struct device *dev,
2158		struct device_attribute *attr, char *buf)
2159{
2160	return rebuild_show(dev, attr, buf);
2161}
2162static DEVICE_ATTR_RW(consistency_check);
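
/*
 * consistency_check mirrors the rebuild convention but operates on a
 * logical drive: write 1 to start a check, 0 to cancel it, and read
 * the attribute for progress.
 */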
2163
2164static ssize_t ctlr_num_show(struct device *dev,
2165		struct device_attribute *attr, char *buf)
2166{
2167	struct Scsi_Host *shost = class_to_shost(dev);
2168	struct myrb_hba *cb = shost_priv(shost);
2169
2170	return snprintf(buf, 20, "%d\n", cb->ctlr_num);
2171}
2172static DEVICE_ATTR_RO(ctlr_num);
2173
2174static ssize_t firmware_show(struct device *dev,
2175		struct device_attribute *attr, char *buf)
2176{
2177	struct Scsi_Host *shost = class_to_shost(dev);
2178	struct myrb_hba *cb = shost_priv(shost);
2179
2180	return snprintf(buf, 16, "%s\n", cb->fw_version);
2181}
2182static DEVICE_ATTR_RO(firmware);
2183
2184static ssize_t model_show(struct device *dev,
2185		struct device_attribute *attr, char *buf)
2186{
2187	struct Scsi_Host *shost = class_to_shost(dev);
2188	struct myrb_hba *cb = shost_priv(shost);
2189
2190	return snprintf(buf, 16, "%s\n", cb->model_name);
2191}
2192static DEVICE_ATTR_RO(model);
2193
2194static ssize_t flush_cache_store(struct device *dev,
2195		struct device_attribute *attr, const char *buf, size_t count)
2196{
2197	struct Scsi_Host *shost = class_to_shost(dev);
2198	struct myrb_hba *cb = shost_priv(shost);
2199	unsigned short status;
2200
2201	status = myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
2202	if (status == MYRB_STATUS_SUCCESS) {
2203		shost_printk(KERN_INFO, shost,
2204			     "Cache Flush Completed\n");
2205		return count;
2206	}
2207	shost_printk(KERN_INFO, shost,
2208		     "Cache Flush Failed, status %x\n", status);
2209	return -EIO;
2210}
2211static DEVICE_ATTR_WO(flush_cache);
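
/*
 * Any write to flush_cache triggers a controller-wide cache flush
 * (the sysfs path is illustrative):
 *
 *   echo 1 > /sys/class/scsi_host/host0/flush_cache
 */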
2212
2213static struct device_attribute *myrb_sdev_attrs[] = {
2214	&dev_attr_rebuild,
2215	&dev_attr_consistency_check,
2216	&dev_attr_raid_state,
2217	&dev_attr_raid_level,
2218	NULL,
2219};
2220
2221static struct device_attribute *myrb_shost_attrs[] = {
2222	&dev_attr_ctlr_num,
2223	&dev_attr_model,
2224	&dev_attr_firmware,
2225	&dev_attr_flush_cache,
2226	NULL,
2227};
2228
2229struct scsi_host_template myrb_template = {
2230	.module			= THIS_MODULE,
2231	.name			= "DAC960",
2232	.proc_name		= "myrb",
2233	.queuecommand		= myrb_queuecommand,
2234	.eh_host_reset_handler	= myrb_host_reset,
2235	.slave_alloc		= myrb_slave_alloc,
2236	.slave_configure	= myrb_slave_configure,
2237	.slave_destroy		= myrb_slave_destroy,
2238	.bios_param		= myrb_biosparam,
2239	.cmd_size		= sizeof(struct myrb_cmdblk),
2240	.shost_attrs		= myrb_shost_attrs,
2241	.sdev_attrs		= myrb_sdev_attrs,
2242	.this_id		= -1,
2243};
2244
2245/**
2246 * myrb_is_raid - return boolean indicating device is raid volume
2247 * @dev: the device struct object
2248 */
2249static int myrb_is_raid(struct device *dev)
2250{
2251	struct scsi_device *sdev = to_scsi_device(dev);
2252
2253	return sdev->channel == myrb_logical_channel(sdev->host);
2254}
2255
2256/**
2257 * myrb_get_resync - get raid volume resync percent complete
2258 * @dev: the device struct object
2259 */
2260static void myrb_get_resync(struct device *dev)
2261{
2262	struct scsi_device *sdev = to_scsi_device(dev);
2263	struct myrb_hba *cb = shost_priv(sdev->host);
2264	struct myrb_rbld_progress rbld_buf;
2265	unsigned int percent_complete = 0;
2266	unsigned short status;
2267	unsigned int ldev_size = 0, remaining = 0;
2268
2269	if (sdev->channel < myrb_logical_channel(sdev->host))
2270		return;
2271	status = myrb_get_rbld_progress(cb, &rbld_buf);
2272	if (status == MYRB_STATUS_SUCCESS) {
2273		if (rbld_buf.ldev_num == sdev->id) {
2274			ldev_size = rbld_buf.ldev_size;
2275			remaining = rbld_buf.blocks_left;
2276		}
2277	}
2278	if (remaining && ldev_size)
2279		percent_complete = div_u64((u64)(ldev_size - remaining) * 100,
					   ldev_size);
2280	raid_set_resync(myrb_raid_template, dev, percent_complete);
2281}
2282
2283/**
2284 * myrb_get_state - get raid volume status
2285 * @dev: the device struct object
2286 */
2287static void myrb_get_state(struct device *dev)
2288{
2289	struct scsi_device *sdev = to_scsi_device(dev);
2290	struct myrb_hba *cb = shost_priv(sdev->host);
2291	struct myrb_ldev_info *ldev_info = sdev->hostdata;
2292	enum raid_state state = RAID_STATE_UNKNOWN;
2293	unsigned short status;
2294
2295	if (sdev->channel < myrb_logical_channel(sdev->host) || !ldev_info)
2296		state = RAID_STATE_UNKNOWN;
2297	else {
2298		status = myrb_get_rbld_progress(cb, NULL);
2299		if (status == MYRB_STATUS_SUCCESS)
2300			state = RAID_STATE_RESYNCING;
2301		else {
2302			switch (ldev_info->state) {
2303			case MYRB_DEVICE_ONLINE:
2304				state = RAID_STATE_ACTIVE;
2305				break;
2306			case MYRB_DEVICE_WO:
2307			case MYRB_DEVICE_CRITICAL:
2308				state = RAID_STATE_DEGRADED;
2309				break;
2310			default:
2311				state = RAID_STATE_OFFLINE;
2312			}
2313		}
2314	}
2315	raid_set_state(myrb_raid_template, dev, state);
2316}
2317
2318struct raid_function_template myrb_raid_functions = {
2319	.cookie		= &myrb_template,
2320	.is_raid	= myrb_is_raid,
2321	.get_resync	= myrb_get_resync,
2322	.get_state	= myrb_get_state,
2323};
2324
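/*
 * Completion path for SCSI commands: release the per-command DCDB and
 * scatter/gather allocations, then translate the controller status into
 * a SCSI result, synthesising sense data where needed.
 */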
2325static void myrb_handle_scsi(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk,
2326		struct scsi_cmnd *scmd)
2327{
2328	unsigned short status;
2329
2330	if (!cmd_blk)
2331		return;
2332
2333	scsi_dma_unmap(scmd);
2334
2335	if (cmd_blk->dcdb) {
2336		memcpy(scmd->sense_buffer, &cmd_blk->dcdb->sense, 64);
2337		dma_pool_free(cb->dcdb_pool, cmd_blk->dcdb,
2338			      cmd_blk->dcdb_addr);
2339		cmd_blk->dcdb = NULL;
2340	}
2341	if (cmd_blk->sgl) {
2342		dma_pool_free(cb->sg_pool, cmd_blk->sgl, cmd_blk->sgl_addr);
2343		cmd_blk->sgl = NULL;
2344		cmd_blk->sgl_addr = 0;
2345	}
2346	status = cmd_blk->status;
2347	switch (status) {
2348	case MYRB_STATUS_SUCCESS:
2349	case MYRB_STATUS_DEVICE_BUSY:
2350		scmd->result = (DID_OK << 16) | status;
2351		break;
2352	case MYRB_STATUS_BAD_DATA:
2353		dev_dbg(&scmd->device->sdev_gendev,
2354			"Bad Data Encountered\n");
2355		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2356			/* Unrecovered read error */
2357			scsi_build_sense_buffer(0, scmd->sense_buffer,
2358						MEDIUM_ERROR, 0x11, 0);
2359		else
2360			/* Write error */
2361			scsi_build_sense_buffer(0, scmd->sense_buffer,
2362						MEDIUM_ERROR, 0x0C, 0);
2363		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2364		break;
2365	case MYRB_STATUS_IRRECOVERABLE_DATA_ERROR:
2366		scmd_printk(KERN_ERR, scmd, "Irrecoverable Data Error\n");
2367		if (scmd->sc_data_direction == DMA_FROM_DEVICE)
2368			/* Unrecovered read error, auto-reallocation failed */
2369			scsi_build_sense_buffer(0, scmd->sense_buffer,
2370						MEDIUM_ERROR, 0x11, 0x04);
2371		else
2372			/* Write error, auto-reallocation failed */
2373			scsi_build_sense_buffer(0, scmd->sense_buffer,
2374						MEDIUM_ERROR, 0x0C, 0x02);
2375		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2376		break;
2377	case MYRB_STATUS_LDRV_NONEXISTENT_OR_OFFLINE:
2378		dev_dbg(&scmd->device->sdev_gendev,
2379			    "Logical Drive Nonexistent or Offline\n");
2380		scmd->result = (DID_BAD_TARGET << 16);
2381		break;
2382	case MYRB_STATUS_ACCESS_BEYOND_END_OF_LDRV:
2383		dev_dbg(&scmd->device->sdev_gendev,
2384			    "Attempt to Access Beyond End of Logical Drive\n");
2385		/* Logical block address out of range */
2386		scsi_build_sense_buffer(0, scmd->sense_buffer,
2387					ILLEGAL_REQUEST, 0x21, 0);
		scmd->result = (DID_OK << 16) | SAM_STAT_CHECK_CONDITION;
2388		break;
2389	case MYRB_STATUS_DEVICE_NONRESPONSIVE:
2390		dev_dbg(&scmd->device->sdev_gendev, "Device nonresponsive\n");
2391		scmd->result = (DID_BAD_TARGET << 16);
2392		break;
2393	default:
2394		scmd_printk(KERN_ERR, scmd,
2395			    "Unexpected Error Status %04X\n", status);
2396		scmd->result = (DID_ERROR << 16);
2397		break;
2398	}
2399	scmd->scsi_done(scmd);
2400}
2401
2402static void myrb_handle_cmdblk(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
2403{
2404	if (!cmd_blk)
2405		return;
2406
2407	if (cmd_blk->completion) {
2408		complete(cmd_blk->completion);
2409		cmd_blk->completion = NULL;
2410	}
2411}
2412
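/*
 * Periodic housekeeping: runs every MYRB_PRIMARY_MONITOR_INTERVAL and
 * drops to a short 10-jiffies interval while events, rebuilds,
 * consistency checks or background initialisation still need polling.
 */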
2413static void myrb_monitor(struct work_struct *work)
2414{
2415	struct myrb_hba *cb = container_of(work,
2416			struct myrb_hba, monitor_work.work);
2417	struct Scsi_Host *shost = cb->host;
2418	unsigned long interval = MYRB_PRIMARY_MONITOR_INTERVAL;
2419
2420	dev_dbg(&shost->shost_gendev, "monitor tick\n");
2421
2422	if (cb->new_ev_seq > cb->old_ev_seq) {
2423		int event = cb->old_ev_seq;
2424
2425		dev_dbg(&shost->shost_gendev,
2426			"get event log no %d/%d\n",
2427			cb->new_ev_seq, event);
2428		myrb_get_event(cb, event);
2429		cb->old_ev_seq = event + 1;
2430		interval = 10;
2431	} else if (cb->need_err_info) {
2432		cb->need_err_info = false;
2433		dev_dbg(&shost->shost_gendev, "get error table\n");
2434		myrb_get_errtable(cb);
2435		interval = 10;
2436	} else if (cb->need_rbld && cb->rbld_first) {
2437		cb->need_rbld = false;
2438		dev_dbg(&shost->shost_gendev,
2439			"get rebuild progress\n");
2440		myrb_update_rbld_progress(cb);
2441		interval = 10;
2442	} else if (cb->need_ldev_info) {
2443		cb->need_ldev_info = false;
2444		dev_dbg(&shost->shost_gendev,
2445			"get logical drive info\n");
2446		myrb_get_ldev_info(cb);
2447		interval = 10;
2448	} else if (cb->need_rbld) {
2449		cb->need_rbld = false;
2450		dev_dbg(&shost->shost_gendev,
2451			"get rebuild progress\n");
2452		myrb_update_rbld_progress(cb);
2453		interval = 10;
2454	} else if (cb->need_cc_status) {
2455		cb->need_cc_status = false;
2456		dev_dbg(&shost->shost_gendev,
2457			"get consistency check progress\n");
2458		myrb_get_cc_progress(cb);
2459		interval = 10;
2460	} else if (cb->need_bgi_status) {
2461		cb->need_bgi_status = false;
2462		dev_dbg(&shost->shost_gendev, "get background init status\n");
2463		myrb_bgi_control(cb);
2464		interval = 10;
2465	} else {
2466		dev_dbg(&shost->shost_gendev, "new enquiry\n");
2467		mutex_lock(&cb->dma_mutex);
2468		myrb_hba_enquiry(cb);
2469		mutex_unlock(&cb->dma_mutex);
2470		if ((cb->new_ev_seq - cb->old_ev_seq > 0) ||
2471		    cb->need_err_info || cb->need_rbld ||
2472		    cb->need_ldev_info || cb->need_cc_status ||
2473		    cb->need_bgi_status) {
2474			dev_dbg(&shost->shost_gendev,
2475				"reschedule monitor\n");
2476			interval = 0;
2477		}
2478	}
2479	if (interval > 1)
2480		cb->primary_monitor_time = jiffies;
2481	queue_delayed_work(cb->work_q, &cb->monitor_work, interval);
2482}
2483
2484/**
2485 * myrb_err_status - reports controller BIOS messages
 * @cb: myrb_hba instance
 * @error: error/message code read from the Error Status Register
 * @parm0: first message parameter byte
 * @parm1: second message parameter byte
2486 *
2487 * Controller BIOS messages are passed through the Error Status Register
2488 * when the driver performs the BIOS handshaking.
2489 *
2490 * Return: true for fatal errors and false otherwise.
2491 */
2492bool myrb_err_status(struct myrb_hba *cb, unsigned char error,
2493		unsigned char parm0, unsigned char parm1)
2494{
2495	struct pci_dev *pdev = cb->pdev;
2496
2497	switch (error) {
2498	case 0x00:
2499		dev_info(&pdev->dev,
2500			 "Physical Device %d:%d Not Responding\n",
2501			 parm1, parm0);
2502		break;
2503	case 0x08:
2504		dev_notice(&pdev->dev, "Spinning Up Drives\n");
2505		break;
2506	case 0x30:
2507		dev_notice(&pdev->dev, "Configuration Checksum Error\n");
2508		break;
2509	case 0x60:
2510		dev_notice(&pdev->dev, "Mirror Race Recovery Failed\n");
2511		break;
2512	case 0x70:
2513		dev_notice(&pdev->dev, "Mirror Race Recovery In Progress\n");
2514		break;
2515	case 0x90:
2516		dev_notice(&pdev->dev, "Physical Device %d:%d COD Mismatch\n",
2517			   parm1, parm0);
2518		break;
2519	case 0xA0:
2520		dev_notice(&pdev->dev, "Logical Drive Installation Aborted\n");
2521		break;
2522	case 0xB0:
2523		dev_notice(&pdev->dev, "Mirror Race On A Critical Logical Drive\n");
2524		break;
2525	case 0xD0:
2526		dev_notice(&pdev->dev, "New Controller Configuration Found\n");
2527		break;
2528	case 0xF0:
2529		dev_err(&pdev->dev, "Fatal Memory Parity Error\n");
2530		return true;
2531	default:
2532		dev_err(&pdev->dev, "Unknown Initialization Error %02X\n",
2533			error);
2534		return true;
2535	}
2536	return false;
2537}
2538
2539/*
2540 * Hardware-specific functions
2541 */
2542
2543/*
2544 * DAC960 LA Series Controllers
2545 */
2546
2547static inline void DAC960_LA_hw_mbox_new_cmd(void __iomem *base)
2548{
2549	writeb(DAC960_LA_IDB_HWMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2550}
2551
2552static inline void DAC960_LA_ack_hw_mbox_status(void __iomem *base)
2553{
2554	writeb(DAC960_LA_IDB_HWMBOX_ACK_STS, base + DAC960_LA_IDB_OFFSET);
2555}
2556
2557static inline void DAC960_LA_gen_intr(void __iomem *base)
2558{
2559	writeb(DAC960_LA_IDB_GEN_IRQ, base + DAC960_LA_IDB_OFFSET);
2560}
2561
2562static inline void DAC960_LA_reset_ctrl(void __iomem *base)
2563{
2564	writeb(DAC960_LA_IDB_CTRL_RESET, base + DAC960_LA_IDB_OFFSET);
2565}
2566
2567static inline void DAC960_LA_mem_mbox_new_cmd(void __iomem *base)
2568{
2569	writeb(DAC960_LA_IDB_MMBOX_NEW_CMD, base + DAC960_LA_IDB_OFFSET);
2570}
2571
2572static inline bool DAC960_LA_hw_mbox_is_full(void __iomem *base)
2573{
2574	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2575
2576	return !(idb & DAC960_LA_IDB_HWMBOX_EMPTY);
2577}
2578
2579static inline bool DAC960_LA_init_in_progress(void __iomem *base)
2580{
2581	unsigned char idb = readb(base + DAC960_LA_IDB_OFFSET);
2582
2583	return !(idb & DAC960_LA_IDB_INIT_DONE);
2584}
2585
2586static inline void DAC960_LA_ack_hw_mbox_intr(void __iomem *base)
2587{
2588	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2589}
2590
2591static inline void DAC960_LA_ack_mem_mbox_intr(void __iomem *base)
2592{
2593	writeb(DAC960_LA_ODB_MMBOX_ACK_IRQ, base + DAC960_LA_ODB_OFFSET);
2594}
2595
2596static inline void DAC960_LA_ack_intr(void __iomem *base)
2597{
2598	writeb(DAC960_LA_ODB_HWMBOX_ACK_IRQ | DAC960_LA_ODB_MMBOX_ACK_IRQ,
2599	       base + DAC960_LA_ODB_OFFSET);
2600}
2601
2602static inline bool DAC960_LA_hw_mbox_status_available(void __iomem *base)
2603{
2604	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2605
2606	return odb & DAC960_LA_ODB_HWMBOX_STS_AVAIL;
2607}
2608
2609static inline bool DAC960_LA_mem_mbox_status_available(void __iomem *base)
2610{
2611	unsigned char odb = readb(base + DAC960_LA_ODB_OFFSET);
2612
2613	return odb & DAC960_LA_ODB_MMBOX_STS_AVAIL;
2614}
2615
2616static inline void DAC960_LA_enable_intr(void __iomem *base)
2617{
2618	unsigned char imask = 0xFF;
2619
2620	imask &= ~DAC960_LA_IRQMASK_DISABLE_IRQ;
2621	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2622}
2623
2624static inline void DAC960_LA_disable_intr(void __iomem *base)
2625{
2626	unsigned char imask = 0xFF;
2627
2628	imask |= DAC960_LA_IRQMASK_DISABLE_IRQ;
2629	writeb(imask, base + DAC960_LA_IRQMASK_OFFSET);
2630}
2631
2632static inline bool DAC960_LA_intr_enabled(void __iomem *base)
2633{
2634	unsigned char imask = readb(base + DAC960_LA_IRQMASK_OFFSET);
2635
2636	return !(imask & DAC960_LA_IRQMASK_DISABLE_IRQ);
2637}
2638
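/*
 * Mailbox words 1-3 must be visible before word 0 is written: word 0
 * carries the opcode and command id the firmware polls for, so it
 * effectively acts as the mailbox-valid flag.
 */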
2639static inline void DAC960_LA_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2640		union myrb_cmd_mbox *mbox)
2641{
2642	mem_mbox->words[1] = mbox->words[1];
2643	mem_mbox->words[2] = mbox->words[2];
2644	mem_mbox->words[3] = mbox->words[3];
2645	/* Memory barrier to prevent reordering */
2646	wmb();
2647	mem_mbox->words[0] = mbox->words[0];
2648	/* Memory barrier to force PCI access */
2649	mb();
2650}
2651
2652static inline void DAC960_LA_write_hw_mbox(void __iomem *base,
2653		union myrb_cmd_mbox *mbox)
2654{
2655	writel(mbox->words[0], base + DAC960_LA_CMDOP_OFFSET);
2656	writel(mbox->words[1], base + DAC960_LA_MBOX4_OFFSET);
2657	writel(mbox->words[2], base + DAC960_LA_MBOX8_OFFSET);
2658	writeb(mbox->bytes[12], base + DAC960_LA_MBOX12_OFFSET);
2659}
2660
2661static inline unsigned char DAC960_LA_read_status_cmd_ident(void __iomem *base)
2662{
2663	return readb(base + DAC960_LA_STSID_OFFSET);
2664}
2665
2666static inline unsigned short DAC960_LA_read_status(void __iomem *base)
2667{
2668	return readw(base + DAC960_LA_STS_OFFSET);
2669}
2670
2671static inline bool
2672DAC960_LA_read_error_status(void __iomem *base, unsigned char *error,
2673		unsigned char *param0, unsigned char *param1)
2674{
2675	unsigned char errsts = readb(base + DAC960_LA_ERRSTS_OFFSET);
2676
2677	if (!(errsts & DAC960_LA_ERRSTS_PENDING))
2678		return false;
2679	errsts &= ~DAC960_LA_ERRSTS_PENDING;
2680
2681	*error = errsts;
2682	*param0 = readb(base + DAC960_LA_CMDOP_OFFSET);
2683	*param1 = readb(base + DAC960_LA_CMDID_OFFSET);
2684	writeb(0xFF, base + DAC960_LA_ERRSTS_OFFSET);
2685	return true;
2686}
2687
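/*
 * Hardware mailbox handshake used during initialisation: wait for the
 * mailbox to drain, post the command, poll for a status, then ack both
 * the interrupt and the status so the next command can be posted.
 */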
2688static inline unsigned short
2689DAC960_LA_mbox_init(struct pci_dev *pdev, void __iomem *base,
2690		union myrb_cmd_mbox *mbox)
2691{
2692	unsigned short status;
2693	int timeout = 0;
2694
2695	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2696		if (!DAC960_LA_hw_mbox_is_full(base))
2697			break;
2698		udelay(10);
2699		timeout++;
2700	}
2701	if (DAC960_LA_hw_mbox_is_full(base)) {
2702		dev_err(&pdev->dev,
2703			"Timeout waiting for empty mailbox\n");
2704		return MYRB_STATUS_SUBSYS_TIMEOUT;
2705	}
2706	DAC960_LA_write_hw_mbox(base, mbox);
2707	DAC960_LA_hw_mbox_new_cmd(base);
2708	timeout = 0;
2709	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2710		if (DAC960_LA_hw_mbox_status_available(base))
2711			break;
2712		udelay(10);
2713		timeout++;
2714	}
2715	if (!DAC960_LA_hw_mbox_status_available(base)) {
2716		dev_err(&pdev->dev, "Timeout waiting for mailbox status\n");
2717		return MYRB_STATUS_SUBSYS_TIMEOUT;
2718	}
2719	status = DAC960_LA_read_status(base);
2720	DAC960_LA_ack_hw_mbox_intr(base);
2721	DAC960_LA_ack_hw_mbox_status(base);
2722
2723	return status;
2724}
2725
2726static int DAC960_LA_hw_init(struct pci_dev *pdev,
2727		struct myrb_hba *cb, void __iomem *base)
2728{
2729	int timeout = 0;
2730	unsigned char error, parm0, parm1;
2731
2732	DAC960_LA_disable_intr(base);
2733	DAC960_LA_ack_hw_mbox_status(base);
2734	udelay(1000);
2735	timeout = 0;
2736	while (DAC960_LA_init_in_progress(base) &&
2737	       timeout < MYRB_MAILBOX_TIMEOUT) {
2738		if (DAC960_LA_read_error_status(base, &error,
2739					      &parm0, &parm1) &&
2740		    myrb_err_status(cb, error, parm0, parm1))
2741			return -ENODEV;
2742		udelay(10);
2743		timeout++;
2744	}
2745	if (timeout == MYRB_MAILBOX_TIMEOUT) {
2746		dev_err(&pdev->dev,
2747			"Timeout waiting for Controller Initialisation\n");
2748		return -ETIMEDOUT;
2749	}
2750	if (!myrb_enable_mmio(cb, DAC960_LA_mbox_init)) {
2751		dev_err(&pdev->dev,
2752			"Unable to Enable Memory Mailbox Interface\n");
2753		DAC960_LA_reset_ctrl(base);
2754		return -ENODEV;
2755	}
2756	DAC960_LA_enable_intr(base);
2757	cb->qcmd = myrb_qcmd;
2758	cb->write_cmd_mbox = DAC960_LA_write_cmd_mbox;
2759	if (cb->dual_mode_interface)
2760		cb->get_cmd_mbox = DAC960_LA_mem_mbox_new_cmd;
2761	else
2762		cb->get_cmd_mbox = DAC960_LA_hw_mbox_new_cmd;
2763	cb->disable_intr = DAC960_LA_disable_intr;
2764	cb->reset = DAC960_LA_reset_ctrl;
2765
2766	return 0;
2767}
2768
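/*
 * Command id convention used by all interrupt handlers below:
 * MYRB_DCMD_TAG and MYRB_MCMD_TAG identify driver-internal commands,
 * SCSI command tags are offset by 3 (hence the id - 3 lookup), and any
 * id below 3 completes through myrb_handle_cmdblk() instead of
 * myrb_handle_scsi().
 */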
2769static irqreturn_t DAC960_LA_intr_handler(int irq, void *arg)
2770{
2771	struct myrb_hba *cb = arg;
2772	void __iomem *base = cb->io_base;
2773	struct myrb_stat_mbox *next_stat_mbox;
2774	unsigned long flags;
2775
2776	spin_lock_irqsave(&cb->queue_lock, flags);
2777	DAC960_LA_ack_intr(base);
2778	next_stat_mbox = cb->next_stat_mbox;
2779	while (next_stat_mbox->valid) {
2780		unsigned char id = next_stat_mbox->id;
2781		struct scsi_cmnd *scmd = NULL;
2782		struct myrb_cmdblk *cmd_blk = NULL;
2783
2784		if (id == MYRB_DCMD_TAG)
2785			cmd_blk = &cb->dcmd_blk;
2786		else if (id == MYRB_MCMD_TAG)
2787			cmd_blk = &cb->mcmd_blk;
2788		else {
2789			scmd = scsi_host_find_tag(cb->host, id - 3);
2790			if (scmd)
2791				cmd_blk = scsi_cmd_priv(scmd);
2792		}
2793		if (cmd_blk)
2794			cmd_blk->status = next_stat_mbox->status;
2795		else
2796			dev_err(&cb->pdev->dev,
2797				"Unhandled command completion %d\n", id);
2798
2799		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
2800		if (++next_stat_mbox > cb->last_stat_mbox)
2801			next_stat_mbox = cb->first_stat_mbox;
2802
2803		if (cmd_blk) {
2804			if (id < 3)
2805				myrb_handle_cmdblk(cb, cmd_blk);
2806			else
2807				myrb_handle_scsi(cb, cmd_blk, scmd);
2808		}
2809	}
2810	cb->next_stat_mbox = next_stat_mbox;
2811	spin_unlock_irqrestore(&cb->queue_lock, flags);
2812	return IRQ_HANDLED;
2813}
2814
2815struct myrb_privdata DAC960_LA_privdata = {
2816	.hw_init =	DAC960_LA_hw_init,
2817	.irq_handler =	DAC960_LA_intr_handler,
2818	.mmio_size =	DAC960_LA_mmio_size,
2819};
2820
2821/*
2822 * DAC960 PG Series Controllers
2823 */
2824static inline void DAC960_PG_hw_mbox_new_cmd(void __iomem *base)
2825{
2826	writel(DAC960_PG_IDB_HWMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2827}
2828
2829static inline void DAC960_PG_ack_hw_mbox_status(void __iomem *base)
2830{
2831	writel(DAC960_PG_IDB_HWMBOX_ACK_STS, base + DAC960_PG_IDB_OFFSET);
2832}
2833
2834static inline void DAC960_PG_gen_intr(void __iomem *base)
2835{
2836	writel(DAC960_PG_IDB_GEN_IRQ, base + DAC960_PG_IDB_OFFSET);
2837}
2838
2839static inline void DAC960_PG_reset_ctrl(void __iomem *base)
2840{
2841	writel(DAC960_PG_IDB_CTRL_RESET, base + DAC960_PG_IDB_OFFSET);
2842}
2843
2844static inline void DAC960_PG_mem_mbox_new_cmd(void __iomem *base)
2845{
2846	writel(DAC960_PG_IDB_MMBOX_NEW_CMD, base + DAC960_PG_IDB_OFFSET);
2847}
2848
2849static inline bool DAC960_PG_hw_mbox_is_full(void __iomem *base)
2850{
2851	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2852
2853	return idb & DAC960_PG_IDB_HWMBOX_FULL;
2854}
2855
2856static inline bool DAC960_PG_init_in_progress(void __iomem *base)
2857{
2858	unsigned char idb = readl(base + DAC960_PG_IDB_OFFSET);
2859
2860	return idb & DAC960_PG_IDB_INIT_IN_PROGRESS;
2861}
2862
2863static inline void DAC960_PG_ack_hw_mbox_intr(void __iomem *base)
2864{
2865	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2866}
2867
2868static inline void DAC960_PG_ack_mem_mbox_intr(void __iomem *base)
2869{
2870	writel(DAC960_PG_ODB_MMBOX_ACK_IRQ, base + DAC960_PG_ODB_OFFSET);
2871}
2872
2873static inline void DAC960_PG_ack_intr(void __iomem *base)
2874{
2875	writel(DAC960_PG_ODB_HWMBOX_ACK_IRQ | DAC960_PG_ODB_MMBOX_ACK_IRQ,
2876	       base + DAC960_PG_ODB_OFFSET);
2877}
2878
2879static inline bool DAC960_PG_hw_mbox_status_available(void __iomem *base)
2880{
2881	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2882
2883	return odb & DAC960_PG_ODB_HWMBOX_STS_AVAIL;
2884}
2885
2886static inline bool DAC960_PG_mem_mbox_status_available(void __iomem *base)
2887{
2888	unsigned char odb = readl(base + DAC960_PG_ODB_OFFSET);
2889
2890	return odb & DAC960_PG_ODB_MMBOX_STS_AVAIL;
2891}
2892
2893static inline void DAC960_PG_enable_intr(void __iomem *base)
2894{
2895	unsigned int imask = (unsigned int)-1;
2896
2897	imask &= ~DAC960_PG_IRQMASK_DISABLE_IRQ;
2898	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2899}
2900
2901static inline void DAC960_PG_disable_intr(void __iomem *base)
2902{
2903	unsigned int imask = (unsigned int)-1;
2904
2905	writel(imask, base + DAC960_PG_IRQMASK_OFFSET);
2906}
2907
2908static inline bool DAC960_PG_intr_enabled(void __iomem *base)
2909{
2910	unsigned int imask = readl(base + DAC960_PG_IRQMASK_OFFSET);
2911
2912	return !(imask & DAC960_PG_IRQMASK_DISABLE_IRQ);
2913}
2914
2915static inline void DAC960_PG_write_cmd_mbox(union myrb_cmd_mbox *mem_mbox,
2916		union myrb_cmd_mbox *mbox)
2917{
2918	mem_mbox->words[1] = mbox->words[1];
2919	mem_mbox->words[2] = mbox->words[2];
2920	mem_mbox->words[3] = mbox->words[3];
2921	/* Memory barrier to prevent reordering */
2922	wmb();
2923	mem_mbox->words[0] = mbox->words[0];
2924	/* Memory barrier to force PCI access */
2925	mb();
2926}
2927
2928static inline void DAC960_PG_write_hw_mbox(void __iomem *base,
2929		union myrb_cmd_mbox *mbox)
2930{
2931	writel(mbox->words[0], base + DAC960_PG_CMDOP_OFFSET);
2932	writel(mbox->words[1], base + DAC960_PG_MBOX4_OFFSET);
2933	writel(mbox->words[2], base + DAC960_PG_MBOX8_OFFSET);
2934	writeb(mbox->bytes[12], base + DAC960_PG_MBOX12_OFFSET);
2935}
2936
2937static inline unsigned char
2938DAC960_PG_read_status_cmd_ident(void __iomem *base)
2939{
2940	return readb(base + DAC960_PG_STSID_OFFSET);
2941}
2942
2943static inline unsigned short
2944DAC960_PG_read_status(void __iomem *base)
2945{
2946	return readw(base + DAC960_PG_STS_OFFSET);
2947}
2948
2949static inline bool
2950DAC960_PG_read_error_status(void __iomem *base, unsigned char *error,
2951		unsigned char *param0, unsigned char *param1)
2952{
2953	unsigned char errsts = readb(base + DAC960_PG_ERRSTS_OFFSET);
2954
2955	if (!(errsts & DAC960_PG_ERRSTS_PENDING))
2956		return false;
2957	errsts &= ~DAC960_PG_ERRSTS_PENDING;
2958	*error = errsts;
2959	*param0 = readb(base + DAC960_PG_CMDOP_OFFSET);
2960	*param1 = readb(base + DAC960_PG_CMDID_OFFSET);
2961	writeb(0, base + DAC960_PG_ERRSTS_OFFSET);
2962	return true;
2963}
2964
2965static inline unsigned short
2966DAC960_PG_mbox_init(struct pci_dev *pdev, void __iomem *base,
2967		union myrb_cmd_mbox *mbox)
2968{
2969	unsigned short status;
2970	int timeout = 0;
2971
2972	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2973		if (!DAC960_PG_hw_mbox_is_full(base))
2974			break;
2975		udelay(10);
2976		timeout++;
2977	}
2978	if (DAC960_PG_hw_mbox_is_full(base)) {
2979		dev_err(&pdev->dev,
2980			"Timeout waiting for empty mailbox\n");
2981		return MYRB_STATUS_SUBSYS_TIMEOUT;
2982	}
2983	DAC960_PG_write_hw_mbox(base, mbox);
2984	DAC960_PG_hw_mbox_new_cmd(base);
2985
2986	timeout = 0;
2987	while (timeout < MYRB_MAILBOX_TIMEOUT) {
2988		if (DAC960_PG_hw_mbox_status_available(base))
2989			break;
2990		udelay(10);
2991		timeout++;
2992	}
2993	if (!DAC960_PG_hw_mbox_status_available(base)) {
2994		dev_err(&pdev->dev,
2995			"Timeout waiting for mailbox status\n");
2996		return MYRB_STATUS_SUBSYS_TIMEOUT;
2997	}
2998	status = DAC960_PG_read_status(base);
2999	DAC960_PG_ack_hw_mbox_intr(base);
3000	DAC960_PG_ack_hw_mbox_status(base);
3001
3002	return status;
3003}
3004
3005static int DAC960_PG_hw_init(struct pci_dev *pdev,
3006		struct myrb_hba *cb, void __iomem *base)
3007{
3008	int timeout = 0;
3009	unsigned char error, parm0, parm1;
3010
3011	DAC960_PG_disable_intr(base);
3012	DAC960_PG_ack_hw_mbox_status(base);
3013	udelay(1000);
3014	while (DAC960_PG_init_in_progress(base) &&
3015	       timeout < MYRB_MAILBOX_TIMEOUT) {
3016		if (DAC960_PG_read_error_status(base, &error,
3017						&parm0, &parm1) &&
3018		    myrb_err_status(cb, error, parm0, parm1))
3019			return -EIO;
3020		udelay(10);
3021		timeout++;
3022	}
3023	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3024		dev_err(&pdev->dev,
3025			"Timeout waiting for Controller Initialisation\n");
3026		return -ETIMEDOUT;
3027	}
3028	if (!myrb_enable_mmio(cb, DAC960_PG_mbox_init)) {
3029		dev_err(&pdev->dev,
3030			"Unable to Enable Memory Mailbox Interface\n");
3031		DAC960_PG_reset_ctrl(base);
3032		return -ENODEV;
3033	}
3034	DAC960_PG_enable_intr(base);
3035	cb->qcmd = myrb_qcmd;
3036	cb->write_cmd_mbox = DAC960_PG_write_cmd_mbox;
3037	if (cb->dual_mode_interface)
3038		cb->get_cmd_mbox = DAC960_PG_mem_mbox_new_cmd;
3039	else
3040		cb->get_cmd_mbox = DAC960_PG_hw_mbox_new_cmd;
3041	cb->disable_intr = DAC960_PG_disable_intr;
3042	cb->reset = DAC960_PG_reset_ctrl;
3043
3044	return 0;
3045}
3046
3047static irqreturn_t DAC960_PG_intr_handler(int irq, void *arg)
3048{
3049	struct myrb_hba *cb = arg;
3050	void __iomem *base = cb->io_base;
3051	struct myrb_stat_mbox *next_stat_mbox;
3052	unsigned long flags;
3053
3054	spin_lock_irqsave(&cb->queue_lock, flags);
3055	DAC960_PG_ack_intr(base);
3056	next_stat_mbox = cb->next_stat_mbox;
3057	while (next_stat_mbox->valid) {
3058		unsigned char id = next_stat_mbox->id;
3059		struct scsi_cmnd *scmd = NULL;
3060		struct myrb_cmdblk *cmd_blk = NULL;
3061
3062		if (id == MYRB_DCMD_TAG)
3063			cmd_blk = &cb->dcmd_blk;
3064		else if (id == MYRB_MCMD_TAG)
3065			cmd_blk = &cb->mcmd_blk;
3066		else {
3067			scmd = scsi_host_find_tag(cb->host, id - 3);
3068			if (scmd)
3069				cmd_blk = scsi_cmd_priv(scmd);
3070		}
3071		if (cmd_blk)
3072			cmd_blk->status = next_stat_mbox->status;
3073		else
3074			dev_err(&cb->pdev->dev,
3075				"Unhandled command completion %d\n", id);
3076
3077		memset(next_stat_mbox, 0, sizeof(struct myrb_stat_mbox));
3078		if (++next_stat_mbox > cb->last_stat_mbox)
3079			next_stat_mbox = cb->first_stat_mbox;
3080
3081		if (id < 3)
3082			myrb_handle_cmdblk(cb, cmd_blk);
3083		else
3084			myrb_handle_scsi(cb, cmd_blk, scmd);
3085	}
3086	cb->next_stat_mbox = next_stat_mbox;
3087	spin_unlock_irqrestore(&cb->queue_lock, flags);
3088	return IRQ_HANDLED;
3089}
3090
3091struct myrb_privdata DAC960_PG_privdata = {
3092	.hw_init =	DAC960_PG_hw_init,
3093	.irq_handler =	DAC960_PG_intr_handler,
3094	.mmio_size =	DAC960_PG_mmio_size,
3095};
3096
3097
3098/*
3099 * DAC960 PD Series Controllers
3100 */
3101
3102static inline void DAC960_PD_hw_mbox_new_cmd(void __iomem *base)
3103{
3104	writeb(DAC960_PD_IDB_HWMBOX_NEW_CMD, base + DAC960_PD_IDB_OFFSET);
3105}
3106
3107static inline void DAC960_PD_ack_hw_mbox_status(void __iomem *base)
3108{
3109	writeb(DAC960_PD_IDB_HWMBOX_ACK_STS, base + DAC960_PD_IDB_OFFSET);
3110}
3111
3112static inline void DAC960_PD_gen_intr(void __iomem *base)
3113{
3114	writeb(DAC960_PD_IDB_GEN_IRQ, base + DAC960_PD_IDB_OFFSET);
3115}
3116
3117static inline void DAC960_PD_reset_ctrl(void __iomem *base)
3118{
3119	writeb(DAC960_PD_IDB_CTRL_RESET, base + DAC960_PD_IDB_OFFSET);
3120}
3121
3122static inline bool DAC960_PD_hw_mbox_is_full(void __iomem *base)
3123{
3124	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3125
3126	return idb & DAC960_PD_IDB_HWMBOX_FULL;
3127}
3128
3129static inline bool DAC960_PD_init_in_progress(void __iomem *base)
3130{
3131	unsigned char idb = readb(base + DAC960_PD_IDB_OFFSET);
3132
3133	return idb & DAC960_PD_IDB_INIT_IN_PROGRESS;
3134}
3135
3136static inline void DAC960_PD_ack_intr(void __iomem *base)
3137{
3138	writeb(DAC960_PD_ODB_HWMBOX_ACK_IRQ, base + DAC960_PD_ODB_OFFSET);
3139}
3140
3141static inline bool DAC960_PD_hw_mbox_status_available(void __iomem *base)
3142{
3143	unsigned char odb = readb(base + DAC960_PD_ODB_OFFSET);
3144
3145	return odb & DAC960_PD_ODB_HWMBOX_STS_AVAIL;
3146}
3147
3148static inline void DAC960_PD_enable_intr(void __iomem *base)
3149{
3150	writeb(DAC960_PD_IRQMASK_ENABLE_IRQ, base + DAC960_PD_IRQEN_OFFSET);
3151}
3152
3153static inline void DAC960_PD_disable_intr(void __iomem *base)
3154{
3155	writeb(0, base + DAC960_PD_IRQEN_OFFSET);
3156}
3157
3158static inline bool DAC960_PD_intr_enabled(void __iomem *base)
3159{
3160	unsigned char imask = readb(base + DAC960_PD_IRQEN_OFFSET);
3161
3162	return imask & DAC960_PD_IRQMASK_ENABLE_IRQ;
3163}
3164
3165static inline void DAC960_PD_write_cmd_mbox(void __iomem *base,
3166		union myrb_cmd_mbox *mbox)
3167{
3168	writel(mbox->words[0], base + DAC960_PD_CMDOP_OFFSET);
3169	writel(mbox->words[1], base + DAC960_PD_MBOX4_OFFSET);
3170	writel(mbox->words[2], base + DAC960_PD_MBOX8_OFFSET);
3171	writeb(mbox->bytes[12], base + DAC960_PD_MBOX12_OFFSET);
3172}
3173
3174static inline unsigned char
3175DAC960_PD_read_status_cmd_ident(void __iomem *base)
3176{
3177	return readb(base + DAC960_PD_STSID_OFFSET);
3178}
3179
3180static inline unsigned short
3181DAC960_PD_read_status(void __iomem *base)
3182{
3183	return readw(base + DAC960_PD_STS_OFFSET);
3184}
3185
3186static inline bool
3187DAC960_PD_read_error_status(void __iomem *base, unsigned char *error,
3188		unsigned char *param0, unsigned char *param1)
3189{
3190	unsigned char errsts = readb(base + DAC960_PD_ERRSTS_OFFSET);
3191
3192	if (!(errsts & DAC960_PD_ERRSTS_PENDING))
3193		return false;
3194	errsts &= ~DAC960_PD_ERRSTS_PENDING;
3195	*error = errsts;
3196	*param0 = readb(base + DAC960_PD_CMDOP_OFFSET);
3197	*param1 = readb(base + DAC960_PD_CMDID_OFFSET);
3198	writeb(0, base + DAC960_PD_ERRSTS_OFFSET);
3199	return true;
3200}
3201
3202static void DAC960_PD_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3203{
3204	void __iomem *base = cb->io_base;
3205	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3206
3207	while (DAC960_PD_hw_mbox_is_full(base))
3208		udelay(1);
3209	DAC960_PD_write_cmd_mbox(base, mbox);
3210	DAC960_PD_hw_mbox_new_cmd(base);
3211}
3212
3213static int DAC960_PD_hw_init(struct pci_dev *pdev,
3214		struct myrb_hba *cb, void __iomem *base)
3215{
3216	int timeout = 0;
3217	unsigned char error, parm0, parm1;
3218
3219	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3220		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3221			(unsigned long)cb->io_addr);
3222		return -EBUSY;
3223	}
3224	DAC960_PD_disable_intr(base);
3225	DAC960_PD_ack_hw_mbox_status(base);
3226	udelay(1000);
3227	while (DAC960_PD_init_in_progress(base) &&
3228	       timeout < MYRB_MAILBOX_TIMEOUT) {
3229		if (DAC960_PD_read_error_status(base, &error,
3230					      &parm0, &parm1) &&
3231		    myrb_err_status(cb, error, parm0, parm1))
3232			return -EIO;
3233		udelay(10);
3234		timeout++;
3235	}
3236	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3237		dev_err(&pdev->dev,
3238			"Timeout waiting for Controller Initialisation\n");
3239		return -ETIMEDOUT;
3240	}
3241	if (!myrb_enable_mmio(cb, NULL)) {
3242		dev_err(&pdev->dev,
3243			"Unable to Enable Memory Mailbox Interface\n");
3244		DAC960_PD_reset_ctrl(base);
3245		return -ENODEV;
3246	}
3247	DAC960_PD_enable_intr(base);
3248	cb->qcmd = DAC960_PD_qcmd;
3249	cb->disable_intr = DAC960_PD_disable_intr;
3250	cb->reset = DAC960_PD_reset_ctrl;
3251
3252	return 0;
3253}
3254
3255static irqreturn_t DAC960_PD_intr_handler(int irq, void *arg)
3256{
3257	struct myrb_hba *cb = arg;
3258	void __iomem *base = cb->io_base;
3259	unsigned long flags;
3260
3261	spin_lock_irqsave(&cb->queue_lock, flags);
3262	while (DAC960_PD_hw_mbox_status_available(base)) {
3263		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3264		struct scsi_cmnd *scmd = NULL;
3265		struct myrb_cmdblk *cmd_blk = NULL;
3266
3267		if (id == MYRB_DCMD_TAG)
3268			cmd_blk = &cb->dcmd_blk;
3269		else if (id == MYRB_MCMD_TAG)
3270			cmd_blk = &cb->mcmd_blk;
3271		else {
3272			scmd = scsi_host_find_tag(cb->host, id - 3);
3273			if (scmd)
3274				cmd_blk = scsi_cmd_priv(scmd);
3275		}
3276		if (cmd_blk)
3277			cmd_blk->status = DAC960_PD_read_status(base);
3278		else
3279			dev_err(&cb->pdev->dev,
3280				"Unhandled command completion %d\n", id);
3281
3282		DAC960_PD_ack_intr(base);
3283		DAC960_PD_ack_hw_mbox_status(base);
3284
3285		if (id < 3)
3286			myrb_handle_cmdblk(cb, cmd_blk);
3287		else
3288			myrb_handle_scsi(cb, cmd_blk, scmd);
3289	}
3290	spin_unlock_irqrestore(&cb->queue_lock, flags);
3291	return IRQ_HANDLED;
3292}
3293
3294struct myrb_privdata DAC960_PD_privdata = {
3295	.hw_init =	DAC960_PD_hw_init,
3296	.irq_handler =	DAC960_PD_intr_handler,
3297	.mmio_size =	DAC960_PD_mmio_size,
3298};
3299
3300
3301/*
3302 * DAC960 P Series Controllers
3303 *
3304 * Similar to the DAC960 PD Series Controllers, but some commands have
3305 * to be translated.
3306 */
3307
3308static inline void myrb_translate_enquiry(void *enq)
3309{
3310	memcpy(enq + 132, enq + 36, 64);
3311	memset(enq + 36, 0, 96);
3312}
3313
3314static inline void myrb_translate_devstate(void *state)
3315{
3316	memcpy(state + 2, state + 3, 1);
3317	memmove(state + 4, state + 5, 2);
3318	memmove(state + 6, state + 8, 4);
3319}
3320
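/*
 * Old-style read/write commands keep the logical drive number in byte 7
 * of the mailbox rather than in the type5 layout, so the affected bits
 * of bytes 3 and 7 are shuffled in both directions around command
 * execution.
 */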
3321static inline void myrb_translate_to_rw_command(struct myrb_cmdblk *cmd_blk)
3322{
3323	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3324	int ldev_num = mbox->type5.ld.ldev_num;
3325
3326	mbox->bytes[3] &= 0x7;
3327	mbox->bytes[3] |= mbox->bytes[7] << 6;
3328	mbox->bytes[7] = ldev_num;
3329}
3330
3331static inline void myrb_translate_from_rw_command(struct myrb_cmdblk *cmd_blk)
3332{
3333	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3334	int ldev_num = mbox->bytes[7];
3335
3336	mbox->bytes[7] = mbox->bytes[3] >> 6;
3337	mbox->bytes[3] &= 0x7;
3338	mbox->bytes[3] |= ldev_num << 3;
3339}
3340
3341static void DAC960_P_qcmd(struct myrb_hba *cb, struct myrb_cmdblk *cmd_blk)
3342{
3343	void __iomem *base = cb->io_base;
3344	union myrb_cmd_mbox *mbox = &cmd_blk->mbox;
3345
3346	switch (mbox->common.opcode) {
3347	case MYRB_CMD_ENQUIRY:
3348		mbox->common.opcode = MYRB_CMD_ENQUIRY_OLD;
3349		break;
3350	case MYRB_CMD_GET_DEVICE_STATE:
3351		mbox->common.opcode = MYRB_CMD_GET_DEVICE_STATE_OLD;
3352		break;
3353	case MYRB_CMD_READ:
3354		mbox->common.opcode = MYRB_CMD_READ_OLD;
3355		myrb_translate_to_rw_command(cmd_blk);
3356		break;
3357	case MYRB_CMD_WRITE:
3358		mbox->common.opcode = MYRB_CMD_WRITE_OLD;
3359		myrb_translate_to_rw_command(cmd_blk);
3360		break;
3361	case MYRB_CMD_READ_SG:
3362		mbox->common.opcode = MYRB_CMD_READ_SG_OLD;
3363		myrb_translate_to_rw_command(cmd_blk);
3364		break;
3365	case MYRB_CMD_WRITE_SG:
3366		mbox->common.opcode = MYRB_CMD_WRITE_SG_OLD;
3367		myrb_translate_to_rw_command(cmd_blk);
3368		break;
3369	default:
3370		break;
3371	}
3372	while (DAC960_PD_hw_mbox_is_full(base))
3373		udelay(1);
3374	DAC960_PD_write_cmd_mbox(base, mbox);
3375	DAC960_PD_hw_mbox_new_cmd(base);
3376}
3377
3378
3379static int DAC960_P_hw_init(struct pci_dev *pdev,
3380		struct myrb_hba *cb, void __iomem *base)
3381{
3382	int timeout = 0;
3383	unsigned char error, parm0, parm1;
3384
3385	if (!request_region(cb->io_addr, 0x80, "myrb")) {
3386		dev_err(&pdev->dev, "IO port 0x%lx busy\n",
3387			(unsigned long)cb->io_addr);
3388		return -EBUSY;
3389	}
3390	DAC960_PD_disable_intr(base);
3391	DAC960_PD_ack_hw_mbox_status(base);
3392	udelay(1000);
3393	while (DAC960_PD_init_in_progress(base) &&
3394	       timeout < MYRB_MAILBOX_TIMEOUT) {
3395		if (DAC960_PD_read_error_status(base, &error,
3396						&parm0, &parm1) &&
3397		    myrb_err_status(cb, error, parm0, parm1))
3398			return -EIO;
3399		udelay(10);
3400		timeout++;
3401	}
3402	if (timeout == MYRB_MAILBOX_TIMEOUT) {
3403		dev_err(&pdev->dev,
3404			"Timeout waiting for Controller Initialisation\n");
3405		return -ETIMEDOUT;
3406	}
3407	if (!myrb_enable_mmio(cb, NULL)) {
3408		dev_err(&pdev->dev,
3409			"Unable to allocate DMA mapped memory\n");
3410		DAC960_PD_reset_ctrl(base);
3411		return -ENODEV;
3412	}
3413	DAC960_PD_enable_intr(base);
3414	cb->qcmd = DAC960_P_qcmd;
3415	cb->disable_intr = DAC960_PD_disable_intr;
3416	cb->reset = DAC960_PD_reset_ctrl;
3417
3418	return 0;
3419}
3420
3421static irqreturn_t DAC960_P_intr_handler(int irq, void *arg)
3422{
3423	struct myrb_hba *cb = arg;
3424	void __iomem *base = cb->io_base;
3425	unsigned long flags;
3426
3427	spin_lock_irqsave(&cb->queue_lock, flags);
3428	while (DAC960_PD_hw_mbox_status_available(base)) {
3429		unsigned char id = DAC960_PD_read_status_cmd_ident(base);
3430		struct scsi_cmnd *scmd = NULL;
3431		struct myrb_cmdblk *cmd_blk = NULL;
3432		union myrb_cmd_mbox *mbox;
3433		enum myrb_cmd_opcode op;
3434
3435
3436		if (id == MYRB_DCMD_TAG)
3437			cmd_blk = &cb->dcmd_blk;
3438		else if (id == MYRB_MCMD_TAG)
3439			cmd_blk = &cb->mcmd_blk;
3440		else {
3441			scmd = scsi_host_find_tag(cb->host, id - 3);
3442			if (scmd)
3443				cmd_blk = scsi_cmd_priv(scmd);
3444		}
3445		if (cmd_blk)
3446			cmd_blk->status = DAC960_PD_read_status(base);
3447		else
3448			dev_err(&cb->pdev->dev,
3449				"Unhandled command completion %d\n", id);
3450
3451		DAC960_PD_ack_intr(base);
3452		DAC960_PD_ack_hw_mbox_status(base);
3453
3454		if (!cmd_blk)
3455			continue;
3456
3457		mbox = &cmd_blk->mbox;
3458		op = mbox->common.opcode;
3459		switch (op) {
3460		case MYRB_CMD_ENQUIRY_OLD:
3461			mbox->common.opcode = MYRB_CMD_ENQUIRY;
3462			myrb_translate_enquiry(cb->enquiry);
3463			break;
3464		case MYRB_CMD_READ_OLD:
3465			mbox->common.opcode = MYRB_CMD_READ;
3466			myrb_translate_from_rw_command(cmd_blk);
3467			break;
3468		case MYRB_CMD_WRITE_OLD:
3469			mbox->common.opcode = MYRB_CMD_WRITE;
3470			myrb_translate_from_rw_command(cmd_blk);
3471			break;
3472		case MYRB_CMD_READ_SG_OLD:
3473			mbox->common.opcode = MYRB_CMD_READ_SG;
3474			myrb_translate_from_rw_command(cmd_blk);
3475			break;
3476		case MYRB_CMD_WRITE_SG_OLD:
3477			mbox->common.opcode = MYRB_CMD_WRITE_SG;
3478			myrb_translate_from_rw_command(cmd_blk);
3479			break;
3480		default:
3481			break;
3482		}
3483		if (id < 3)
3484			myrb_handle_cmdblk(cb, cmd_blk);
3485		else
3486			myrb_handle_scsi(cb, cmd_blk, scmd);
3487	}
3488	spin_unlock_irqrestore(&cb->queue_lock, flags);
3489	return IRQ_HANDLED;
3490}
3491
3492struct myrb_privdata DAC960_P_privdata = {
3493	.hw_init =	DAC960_P_hw_init,
3494	.irq_handler =	DAC960_P_intr_handler,
3495	.mmio_size =	DAC960_PD_mmio_size,
3496};
3497
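/*
 * Common probe path: allocate the Scsi_Host, map the register window,
 * run the board-specific hw_init() from myrb_privdata and attach the
 * interrupt handler.
 */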
3498static struct myrb_hba *myrb_detect(struct pci_dev *pdev,
3499		const struct pci_device_id *entry)
3500{
3501	struct myrb_privdata *privdata =
3502		(struct myrb_privdata *)entry->driver_data;
3503	irq_handler_t irq_handler = privdata->irq_handler;
3504	unsigned int mmio_size = privdata->mmio_size;
3505	struct Scsi_Host *shost;
3506	struct myrb_hba *cb = NULL;
3507
3508	shost = scsi_host_alloc(&myrb_template, sizeof(struct myrb_hba));
3509	if (!shost) {
3510		dev_err(&pdev->dev, "Unable to allocate Controller\n");
3511		return NULL;
3512	}
3513	shost->max_cmd_len = 12;
3514	shost->max_lun = 256;
3515	cb = shost_priv(shost);
3516	mutex_init(&cb->dcmd_mutex);
3517	mutex_init(&cb->dma_mutex);
3518	cb->pdev = pdev;
3519
3520	if (pci_enable_device(pdev))
3521		goto failure;
3522
3523	if (privdata->hw_init == DAC960_PD_hw_init ||
3524	    privdata->hw_init == DAC960_P_hw_init) {
3525		cb->io_addr = pci_resource_start(pdev, 0);
3526		cb->pci_addr = pci_resource_start(pdev, 1);
3527	} else
3528		cb->pci_addr = pci_resource_start(pdev, 0);
3529
3530	pci_set_drvdata(pdev, cb);
3531	spin_lock_init(&cb->queue_lock);
3532	if (mmio_size < PAGE_SIZE)
3533		mmio_size = PAGE_SIZE;
3534	cb->mmio_base = ioremap_nocache(cb->pci_addr & PAGE_MASK, mmio_size);
3535	if (cb->mmio_base == NULL) {
3536		dev_err(&pdev->dev,
3537			"Unable to map Controller Register Window\n");
3538		goto failure;
3539	}
3540
3541	cb->io_base = cb->mmio_base + (cb->pci_addr & ~PAGE_MASK);
3542	if (privdata->hw_init(pdev, cb, cb->io_base))
3543		goto failure;
3544
3545	if (request_irq(pdev->irq, irq_handler, IRQF_SHARED, "myrb", cb) < 0) {
3546		dev_err(&pdev->dev,
3547			"Unable to acquire IRQ Channel %d\n", pdev->irq);
3548		goto failure;
3549	}
3550	cb->irq = pdev->irq;
3551	return cb;
3552
3553failure:
3554	dev_err(&pdev->dev,
3555		"Failed to initialize Controller\n");
3556	myrb_cleanup(cb);
3557	return NULL;
3558}
3559
3560static int myrb_probe(struct pci_dev *dev, const struct pci_device_id *entry)
3561{
3562	struct myrb_hba *cb;
3563	int ret;
3564
3565	cb = myrb_detect(dev, entry);
3566	if (!cb)
3567		return -ENODEV;
3568
3569	ret = myrb_get_hba_config(cb);
3570	if (ret < 0) {
3571		myrb_cleanup(cb);
3572		return ret;
3573	}
3574
3575	if (!myrb_create_mempools(dev, cb)) {
3576		ret = -ENOMEM;
3577		goto failed;
3578	}
3579
3580	ret = scsi_add_host(cb->host, &dev->dev);
3581	if (ret) {
3582		dev_err(&dev->dev, "scsi_add_host failed with %d\n", ret);
3583		myrb_destroy_mempools(cb);
3584		goto failed;
3585	}
3586	scsi_scan_host(cb->host);
3587	return 0;
3588failed:
3589	myrb_cleanup(cb);
3590	return ret;
3591}
3592
3593
3594static void myrb_remove(struct pci_dev *pdev)
3595{
3596	struct myrb_hba *cb = pci_get_drvdata(pdev);
3597
3598	shost_printk(KERN_NOTICE, cb->host, "Flushing Cache...\n");
3599	myrb_exec_type3(cb, MYRB_CMD_FLUSH, 0);
3600	myrb_cleanup(cb);
3601	myrb_destroy_mempools(cb);
3602}
3603
3604
3605static const struct pci_device_id myrb_id_table[] = {
3606	{
3607		PCI_DEVICE_SUB(PCI_VENDOR_ID_DEC,
3608			       PCI_DEVICE_ID_DEC_21285,
3609			       PCI_VENDOR_ID_MYLEX,
3610			       PCI_DEVICE_ID_MYLEX_DAC960_LA),
3611		.driver_data	= (unsigned long) &DAC960_LA_privdata,
3612	},
3613	{
3614		PCI_DEVICE_DATA(MYLEX, DAC960_PG, &DAC960_PG_privdata),
3615	},
3616	{
3617		PCI_DEVICE_DATA(MYLEX, DAC960_PD, &DAC960_PD_privdata),
3618	},
3619	{
3620		PCI_DEVICE_DATA(MYLEX, DAC960_P, &DAC960_P_privdata),
3621	},
3622	{0, },
3623};
3624
3625MODULE_DEVICE_TABLE(pci, myrb_id_table);
3626
3627static struct pci_driver myrb_pci_driver = {
3628	.name		= "myrb",
3629	.id_table	= myrb_id_table,
3630	.probe		= myrb_probe,
3631	.remove		= myrb_remove,
3632};
3633
3634static int __init myrb_init_module(void)
3635{
3636	int ret;
3637
3638	myrb_raid_template = raid_class_attach(&myrb_raid_functions);
3639	if (!myrb_raid_template)
3640		return -ENODEV;
3641
3642	ret = pci_register_driver(&myrb_pci_driver);
3643	if (ret)
3644		raid_class_release(myrb_raid_template);
3645
3646	return ret;
3647}
3648
3649static void __exit myrb_cleanup_module(void)
3650{
3651	pci_unregister_driver(&myrb_pci_driver);
3652	raid_class_release(myrb_raid_template);
3653}
3654
3655module_init(myrb_init_module);
3656module_exit(myrb_cleanup_module);
3657
3658MODULE_DESCRIPTION("Mylex DAC960/AcceleRAID/eXtremeRAID driver (Block interface)");
3659MODULE_AUTHOR("Hannes Reinecke <hare@suse.com>");
3660MODULE_LICENSE("GPL");