/*
 * SuperTrak EX Series Storage Controller driver for Linux
 *
 *	Copyright (C) 2005-2015 Promise Technology Inc.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Written By:
 *		Ed Lin <promise_linux@promise.com>
 *
 */

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/pci.h>
#include <linux/blkdev.h>
#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/ktime.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#define DRV_NAME "stex"
#define ST_DRIVER_VERSION	"5.00.0000.01"
#define ST_VER_MAJOR		5
#define ST_VER_MINOR		00
#define ST_OEM				0000
#define ST_BUILD_VER		01

enum {
	/* MU register offset */
	IMR0	= 0x10,	/* MU_INBOUND_MESSAGE_REG0 */
	IMR1	= 0x14,	/* MU_INBOUND_MESSAGE_REG1 */
	OMR0	= 0x18,	/* MU_OUTBOUND_MESSAGE_REG0 */
	OMR1	= 0x1c,	/* MU_OUTBOUND_MESSAGE_REG1 */
	IDBL	= 0x20,	/* MU_INBOUND_DOORBELL */
	IIS	= 0x24,	/* MU_INBOUND_INTERRUPT_STATUS */
	IIM	= 0x28,	/* MU_INBOUND_INTERRUPT_MASK */
	ODBL	= 0x2c,	/* MU_OUTBOUND_DOORBELL */
	OIS	= 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
	OIM	= 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */

	YIOA_STATUS				= 0x00,
	YH2I_INT				= 0x20,
	YINT_EN					= 0x34,
	YI2H_INT				= 0x9c,
	YI2H_INT_C				= 0xa0,
	YH2I_REQ				= 0xc0,
	YH2I_REQ_HI				= 0xc4,

	/* MU register value */
	MU_INBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
	MU_INBOUND_DOORBELL_REQHEADCHANGED	= (1 << 1),
	MU_INBOUND_DOORBELL_STATUSTAILCHANGED	= (1 << 2),
	MU_INBOUND_DOORBELL_HMUSTOPPED		= (1 << 3),
	MU_INBOUND_DOORBELL_RESET		= (1 << 4),

	MU_OUTBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED	= (1 << 1),
	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED	= (1 << 2),
	MU_OUTBOUND_DOORBELL_BUSCHANGE		= (1 << 3),
	MU_OUTBOUND_DOORBELL_HASEVENT		= (1 << 4),
	MU_OUTBOUND_DOORBELL_REQUEST_RESET	= (1 << 27),

	/* MU status code */
	MU_STATE_STARTING			= 1,
	MU_STATE_STARTED			= 2,
	MU_STATE_RESETTING			= 3,
	MU_STATE_FAILED				= 4,
	MU_STATE_STOP				= 5,
	MU_STATE_NOCONNECT			= 6,

	MU_MAX_DELAY				= 120,
	MU_HANDSHAKE_SIGNATURE			= 0x55aaaa55,
	MU_HANDSHAKE_SIGNATURE_HALF		= 0x5a5a0000,
	MU_HARD_RESET_WAIT			= 30000,
	HMU_PARTNER_TYPE			= 2,

	/* firmware returned values */
	SRB_STATUS_SUCCESS			= 0x01,
	SRB_STATUS_ERROR			= 0x04,
	SRB_STATUS_BUSY				= 0x05,
	SRB_STATUS_INVALID_REQUEST		= 0x06,
	SRB_STATUS_SELECTION_TIMEOUT		= 0x0A,
	SRB_SEE_SENSE				= 0x80,

	/* task attribute */
	TASK_ATTRIBUTE_SIMPLE			= 0x0,
	TASK_ATTRIBUTE_HEADOFQUEUE		= 0x1,
	TASK_ATTRIBUTE_ORDERED			= 0x2,
	TASK_ATTRIBUTE_ACA			= 0x4,

	SS_STS_NORMAL				= 0x80000000,
	SS_STS_DONE				= 0x40000000,
	SS_STS_HANDSHAKE			= 0x20000000,

	SS_HEAD_HANDSHAKE			= 0x80,

	SS_H2I_INT_RESET			= 0x100,

	SS_I2H_REQUEST_RESET			= 0x2000,

	SS_MU_OPERATIONAL			= 0x80000000,

	STEX_CDB_LENGTH				= 16,
	STATUS_VAR_LEN				= 128,

	/* sg flags */
	SG_CF_EOT				= 0x80,	/* end of table */
	SG_CF_64B				= 0x40,	/* 64 bit item */
	SG_CF_HOST				= 0x20,	/* sg in host memory */
	MSG_DATA_DIR_ND				= 0,
	MSG_DATA_DIR_IN				= 1,
	MSG_DATA_DIR_OUT			= 2,

	st_shasta				= 0,
	st_vsc					= 1,
	st_yosemite				= 2,
	st_seq					= 3,
	st_yel					= 4,

	PASSTHRU_REQ_TYPE			= 0x00000001,
	PASSTHRU_REQ_NO_WAKEUP			= 0x00000100,
	ST_INTERNAL_TIMEOUT			= 180,

	ST_TO_CMD				= 0,
	ST_FROM_CMD				= 1,

	/* vendor specific commands of Promise */
	MGT_CMD					= 0xd8,
	SINBAND_MGT_CMD				= 0xd9,
	ARRAY_CMD				= 0xe0,
	CONTROLLER_CMD				= 0xe1,
	DEBUGGING_CMD				= 0xe2,
	PASSTHRU_CMD				= 0xe3,

	PASSTHRU_GET_ADAPTER			= 0x05,
	PASSTHRU_GET_DRVVER			= 0x10,

	CTLR_CONFIG_CMD				= 0x03,
	CTLR_SHUTDOWN				= 0x0d,

	CTLR_POWER_STATE_CHANGE			= 0x0e,
	CTLR_POWER_SAVING			= 0x01,

	PASSTHRU_SIGNATURE			= 0x4e415041,
	MGT_CMD_SIGNATURE			= 0xba,

	INQUIRY_EVPD				= 0x01,

	ST_ADDITIONAL_MEM			= 0x200000,
	ST_ADDITIONAL_MEM_MIN			= 0x80000,
	PMIC_SHUTDOWN				= 0x0D,
	PMIC_REUMSE					= 0x10,
	ST_IGNORED					= -1,
	ST_NOTHANDLED				= 7,
	ST_S3						= 3,
	ST_S4						= 4,
	ST_S5						= 5,
	ST_S6						= 6,
};
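
/*
 * Two host/firmware interfaces coexist in this driver: the classic
 * boards (st_shasta, st_vsc, st_yosemite, st_seq) use the message
 * unit registers above (IMR0/IMR1, OMR0/OMR1 and the IDBL/ODBL
 * doorbells), while st_yel boards use the YIOA/YH2I/YI2H registers
 * together with a host-memory scratch array that is polled for the
 * SS_STS_xxx completion codes.
 */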

struct st_sgitem {
	u8 ctrl;	/* SG_CF_xxx */
	u8 reserved[3];
	__le32 count;
	__le64 addr;
};

struct st_ss_sgitem {
	__le32 addr;
	__le32 addr_hi;
	__le32 count;
};

struct st_sgtable {
	__le16 sg_count;
	__le16 max_sg_count;
	__le32 sz_in_byte;
};

struct st_msg_header {
	__le64 handle;
	u8 flag;
	u8 channel;
	__le16 timeout;
	u32 reserved;
};

struct handshake_frame {
	__le64 rb_phy;		/* request payload queue physical address */
	__le16 req_sz;		/* size of each request payload */
	__le16 req_cnt;		/* count of reqs the buffer can hold */
	__le16 status_sz;	/* size of each status payload */
	__le16 status_cnt;	/* count of status the buffer can hold */
	__le64 hosttime;	/* seconds from Jan 1, 1970 (GMT) */
	u8 partner_type;	/* who sends this frame */
	u8 reserved0[7];
	__le32 partner_ver_major;
	__le32 partner_ver_minor;
	__le32 partner_ver_oem;
	__le32 partner_ver_build;
	__le32 extra_offset;	/* NEW */
	__le32 extra_size;	/* NEW */
	__le32 scratch_size;
	u32 reserved1;
};

struct req_msg {
	__le16 tag;
	u8 lun;
	u8 target;
	u8 task_attr;
	u8 task_manage;
	u8 data_dir;
	u8 payload_sz;		/* payload size in 4-byte, not used */
	u8 cdb[STEX_CDB_LENGTH];
	u32 variable[0];
};

struct status_msg {
	__le16 tag;
	u8 lun;
	u8 target;
	u8 srb_status;
	u8 scsi_status;
	u8 reserved;
	u8 payload_sz;		/* payload size in 4-byte */
	u8 variable[STATUS_VAR_LEN];
};

struct ver_info {
	u32 major;
	u32 minor;
	u32 oem;
	u32 build;
	u32 reserved[2];
};

struct st_frame {
	u32 base[6];
	u32 rom_addr;

	struct ver_info drv_ver;
	struct ver_info bios_ver;

	u32 bus;
	u32 slot;
	u32 irq_level;
	u32 irq_vec;
	u32 id;
	u32 subid;

	u32 dimm_size;
	u8 dimm_type;
	u8 reserved[3];

	u32 channel;
	u32 reserved1;
};

struct st_drvver {
	u32 major;
	u32 minor;
	u32 oem;
	u32 build;
	u32 signature[2];
	u8 console_id;
	u8 host_no;
	u8 reserved0[2];
	u32 reserved[3];
};

struct st_ccb {
	struct req_msg *req;
	struct scsi_cmnd *cmd;

	void *sense_buffer;
	unsigned int sense_bufflen;
	int sg_count;

	u32 req_type;
	u8 srb_status;
	u8 scsi_status;
	u8 reserved[2];
};

struct st_hba {
	void __iomem *mmio_base;	/* iomapped PCI memory space */
	void *dma_mem;
	dma_addr_t dma_handle;
	size_t dma_size;

	struct Scsi_Host *host;
	struct pci_dev *pdev;

	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);

	u32 req_head;
	u32 req_tail;
	u32 status_head;
	u32 status_tail;

	struct status_msg *status_buffer;
	void *copy_buffer; /* temp buffer for driver-handled commands */
	struct st_ccb *ccb;
	struct st_ccb *wait_ccb;
	__le32 *scratch;

	char work_q_name[20];
	struct workqueue_struct *work_q;
	struct work_struct reset_work;
	wait_queue_head_t reset_waitq;
	unsigned int mu_status;
	unsigned int cardtype;
	int msi_enabled;
	int out_req_cnt;
	u32 extra_offset;
	u16 rq_count;
	u16 rq_size;
	u16 sts_count;
	u8  supports_pm;
};

struct st_card_info {
	struct req_msg * (*alloc_rq) (struct st_hba *);
	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
	void (*send) (struct st_hba *, struct req_msg *, u16);
	unsigned int max_id;
	unsigned int max_lun;
	unsigned int max_channel;
	u16 rq_count;
	u16 rq_size;
	u16 sts_count;
};
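
/*
 * Card-specific behaviour is table driven: stex_probe() copies the
 * alloc_rq/map_sg/send hooks and the queue geometry from
 * stex_card_info[] below into the HBA, so the I/O path simply calls
 * hba->alloc_rq()/hba->map_sg()/hba->send() without checking the
 * card type again.
 */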

static int msi;
module_param(msi, int, 0);
MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts(0=off, 1=on)");

static const char console_inq_page[] =
{
	0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
	0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20,	/* "Promise " */
	0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,	/* "RAID Con" */
	0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,	/* "sole    " */
	0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,	/* "1.00    " */
	0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,	/* "SX/RSAF-" */
	0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,	/* "TE1.00  " */
	0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
};

MODULE_AUTHOR("Ed Lin");
MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
MODULE_LICENSE("GPL");
MODULE_VERSION(ST_DRIVER_VERSION);

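/*
 * The request and status queues are rings of rq_count+1 and
 * sts_count+1 slots, so the head/tail indices below wrap modulo
 * count+1; the extra slot presumably keeps a full ring
 * distinguishable from an empty one.
 */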
static struct status_msg *stex_get_status(struct st_hba *hba)
{
	struct status_msg *status = hba->status_buffer + hba->status_tail;

	++hba->status_tail;
	hba->status_tail %= hba->sts_count+1;

	return status;
}

static void stex_invalid_field(struct scsi_cmnd *cmd,
			       void (*done)(struct scsi_cmnd *))
{
	cmd->result = (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;

	/* "Invalid field in cdb" */
	scsi_build_sense_buffer(0, cmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
				0x0);
	done(cmd);
}

static struct req_msg *stex_alloc_req(struct st_hba *hba)
{
	struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;

	++hba->req_head;
	hba->req_head %= hba->rq_count+1;

	return req;
}

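/*
 * On st_yel every rq_size slot begins with a struct st_msg_header
 * followed by the request payload, so the allocator returns a pointer
 * just past the header.  Note that it only peeks at req_head;
 * stex_ss_send_cmd() advances the index once the request is posted.
 */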
static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
{
	return (struct req_msg *)(hba->dma_mem +
		hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
}

static int stex_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_sgitem *table;
	int i, nseg;

	cmd = ccb->cmd;
	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		dst = (struct st_sgtable *)req->variable;

		ccb->sg_count = nseg;
		dst->sg_count = cpu_to_le16((u16)nseg);
		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

		table = (struct st_sgitem *)(dst + 1);
		scsi_for_each_sg(cmd, sg, nseg, i) {
			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
			table[i].addr = cpu_to_le64(sg_dma_address(sg));
			table[i].ctrl = SG_CF_64B | SG_CF_HOST;
		}
		table[--i].ctrl |= SG_CF_EOT;
	}

	return nseg;
}

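/*
 * dma_addr_t may be only 32 bits wide, in which case a plain
 * "addr >> 32" would be undefined; the "(addr >> 16) >> 16" idiom
 * used below (and for the YH2I_REQ_HI/IMR1 writes elsewhere) yields
 * the high half safely on both 32-bit and 64-bit builds.
 */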
static int stex_ss_map_sg(struct st_hba *hba,
	struct req_msg *req, struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	struct st_sgtable *dst;
	struct st_ss_sgitem *table;
	int i, nseg;

	cmd = ccb->cmd;
	nseg = scsi_dma_map(cmd);
	BUG_ON(nseg < 0);
	if (nseg) {
		dst = (struct st_sgtable *)req->variable;

		ccb->sg_count = nseg;
		dst->sg_count = cpu_to_le16((u16)nseg);
		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));

		table = (struct st_ss_sgitem *)(dst + 1);
		scsi_for_each_sg(cmd, sg, nseg, i) {
			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
			table[i].addr =
				cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
			table[i].addr_hi =
				cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
		}
	}

	return nseg;
}

static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
{
	struct st_frame *p;
	size_t count = sizeof(struct st_frame);

	p = hba->copy_buffer;
	scsi_sg_copy_to_buffer(ccb->cmd, p, count);
	memset(p->base, 0, sizeof(u32)*6);
	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
	p->rom_addr = 0;

	p->drv_ver.major = ST_VER_MAJOR;
	p->drv_ver.minor = ST_VER_MINOR;
	p->drv_ver.oem = ST_OEM;
	p->drv_ver.build = ST_BUILD_VER;

	p->bus = hba->pdev->bus->number;
	p->slot = hba->pdev->devfn;
	p->irq_level = 0;
	p->irq_vec = hba->pdev->irq;
	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
	p->subid =
		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;

	scsi_sg_copy_from_buffer(ccb->cmd, p, count);
}

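/*
 * Posting a request on the classic interface: publish the new ring
 * head in IMR0, ring the inbound doorbell, then read the register
 * back so the posted MMIO writes reach the device before we return.
 */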
static void
stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	hba->out_req_cnt++;

	writel(hba->req_head, hba->mmio_base + IMR0);
	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
	readl(hba->mmio_base + IDBL); /* flush */
}

static void
stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
{
	struct scsi_cmnd *cmd;
	struct st_msg_header *msg_h;
	dma_addr_t addr;

	req->tag = cpu_to_le16(tag);

	hba->ccb[tag].req = req;
	hba->out_req_cnt++;

	cmd = hba->ccb[tag].cmd;
	msg_h = (struct st_msg_header *)req - 1;
	if (likely(cmd)) {
		msg_h->channel = (u8)cmd->device->channel;
		msg_h->timeout = cpu_to_le16(cmd->request->timeout/HZ);
	}
	addr = hba->dma_handle + hba->req_head * hba->rq_size;
	addr += (hba->ccb[tag].sg_count+4)/11;
	msg_h->handle = cpu_to_le64(addr);

	++hba->req_head;
	hba->req_head %= hba->rq_count+1;

	writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
	readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
	writel(addr, hba->mmio_base + YH2I_REQ);
	readl(hba->mmio_base + YH2I_REQ); /* flush */
}

static void return_abnormal_state(struct st_hba *hba, int status)
{
	struct st_ccb *ccb;
	unsigned long flags;
	u16 tag;

	spin_lock_irqsave(hba->host->host_lock, flags);
	for (tag = 0; tag < hba->host->can_queue; tag++) {
		ccb = &hba->ccb[tag];
		if (ccb->req == NULL)
			continue;
		ccb->req = NULL;
		if (ccb->cmd) {
			scsi_dma_unmap(ccb->cmd);
			ccb->cmd->result = status << 16;
			ccb->cmd->scsi_done(ccb->cmd);
			ccb->cmd = NULL;
		}
	}
	spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static int
stex_slave_config(struct scsi_device *sdev)
{
	sdev->use_10_for_rw = 1;
	sdev->use_10_for_ms = 1;
	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);

	return 0;
}

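/*
 * Queue a SCSI command.  A handful of commands are completed by the
 * driver itself (a fixed MODE SENSE caching page, INQUIRY of the
 * virtual console device, the PASSTHRU driver-version query, and a
 * few special cases for REPORT LUNS and TEST UNIT READY); everything
 * else is translated into a req_msg and handed to the firmware.
 */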
static int
stex_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct st_hba *hba;
	struct Scsi_Host *host;
	unsigned int id, lun;
	struct req_msg *req;
	u16 tag;

	host = cmd->device->host;
	id = cmd->device->id;
	lun = cmd->device->lun;
	hba = (struct st_hba *) &host->hostdata[0];
	if (hba->mu_status == MU_STATE_NOCONNECT) {
		cmd->result = DID_NO_CONNECT;
		done(cmd);
		return 0;
	}
	if (unlikely(hba->mu_status != MU_STATE_STARTED))
		return SCSI_MLQUEUE_HOST_BUSY;

	switch (cmd->cmnd[0]) {
	case MODE_SENSE_10:
	{
		static char ms10_caching_page[12] =
			{ 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
		unsigned char page;

		page = cmd->cmnd[2] & 0x3f;
		if (page == 0x8 || page == 0x3f) {
			scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
						 sizeof(ms10_caching_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
		} else
			stex_invalid_field(cmd, done);
		return 0;
	}
	case REPORT_LUNS:
		/*
		 * The shasta firmware does not report actual luns in the
		 * target, so fail the command to force sequential lun scan.
		 * Also, the console device does not support this command.
		 */
		if (hba->cardtype == st_shasta || id == host->max_id - 1) {
			stex_invalid_field(cmd, done);
			return 0;
		}
		break;
	case TEST_UNIT_READY:
		if (id == host->max_id - 1) {
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
			return 0;
		}
		break;
	case INQUIRY:
		if (lun >= host->max_lun) {
			cmd->result = DID_NO_CONNECT << 16;
			done(cmd);
			return 0;
		}
		if (id != host->max_id - 1)
			break;
		if (!lun && !cmd->device->channel &&
			(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
			scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
						 sizeof(console_inq_page));
			cmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
		} else
			stex_invalid_field(cmd, done);
		return 0;
	case PASSTHRU_CMD:
		if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
			struct st_drvver ver;
			size_t cp_len = sizeof(ver);

			ver.major = ST_VER_MAJOR;
			ver.minor = ST_VER_MINOR;
			ver.oem = ST_OEM;
			ver.build = ST_BUILD_VER;
			ver.signature[0] = PASSTHRU_SIGNATURE;
			ver.console_id = host->max_id - 1;
			ver.host_no = hba->host->host_no;
			cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
			cmd->result = sizeof(ver) == cp_len ?
				DID_OK << 16 | COMMAND_COMPLETE << 8 :
				DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			done(cmd);
			return 0;
		}
	default:
		break;
	}

	cmd->scsi_done = done;

	tag = cmd->request->tag;

	if (unlikely(tag >= host->can_queue))
		return SCSI_MLQUEUE_HOST_BUSY;

	req = hba->alloc_rq(hba);

	req->lun = lun;
	req->target = id;

	/* cdb */
	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		req->data_dir = MSG_DATA_DIR_IN;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		req->data_dir = MSG_DATA_DIR_OUT;
	else
		req->data_dir = MSG_DATA_DIR_ND;

	hba->ccb[tag].cmd = cmd;
	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
	hba->ccb[tag].sense_buffer = cmd->sense_buffer;

	if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
		hba->ccb[tag].sg_count = 0;
		memset(&req->variable[0], 0, 8);
	}

	hba->send(hba, req, tag);
	return 0;
}

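/* DEF_SCSI_QCMD generates stex_queuecommand(), a wrapper that runs
 * stex_queuecommand_lck() with the host lock held and IRQs disabled. */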
static DEF_SCSI_QCMD(stex_queuecommand)

static void stex_scsi_done(struct st_ccb *ccb)
{
	struct scsi_cmnd *cmd = ccb->cmd;
	int result;

	if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
		result = ccb->scsi_status;
		switch (ccb->scsi_status) {
		case SAM_STAT_GOOD:
			result |= DID_OK << 16 | COMMAND_COMPLETE << 8;
			break;
		case SAM_STAT_CHECK_CONDITION:
			result |= DRIVER_SENSE << 24;
			break;
		case SAM_STAT_BUSY:
			result |= DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
			break;
		default:
			result |= DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
		}
	}
	else if (ccb->srb_status & SRB_SEE_SENSE)
		result = DRIVER_SENSE << 24 | SAM_STAT_CHECK_CONDITION;
	else switch (ccb->srb_status) {
		case SRB_STATUS_SELECTION_TIMEOUT:
			result = DID_NO_CONNECT << 16 | COMMAND_COMPLETE << 8;
			break;
		case SRB_STATUS_BUSY:
			result = DID_BUS_BUSY << 16 | COMMAND_COMPLETE << 8;
			break;
		case SRB_STATUS_INVALID_REQUEST:
		case SRB_STATUS_ERROR:
		default:
			result = DID_ERROR << 16 | COMMAND_COMPLETE << 8;
			break;
	}

	cmd->result = result;
	cmd->scsi_done(cmd);
}

static void stex_copy_data(struct st_ccb *ccb,
	struct status_msg *resp, unsigned int variable)
{
	if (resp->scsi_status != SAM_STAT_GOOD) {
		if (ccb->sense_buffer != NULL)
			memcpy(ccb->sense_buffer, resp->variable,
				min(variable, ccb->sense_bufflen));
		return;
	}

	if (ccb->cmd == NULL)
		return;
	scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable);
}

static void stex_check_cmd(struct st_hba *hba,
	struct st_ccb *ccb, struct status_msg *resp)
{
	if (ccb->cmd->cmnd[0] == MGT_CMD &&
		resp->scsi_status != SAM_STAT_CHECK_CONDITION)
		scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
			le32_to_cpu(*(__le32 *)&resp->variable[0]));
}

static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
{
	void __iomem *base = hba->mmio_base;
	struct status_msg *resp;
	struct st_ccb *ccb;
	unsigned int size;
	u16 tag;

	if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)))
		return;

	/* status payloads */
	hba->status_head = readl(base + OMR1);
	if (unlikely(hba->status_head > hba->sts_count)) {
		printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
			pci_name(hba->pdev));
		return;
	}

	/*
	 * it's not a valid status payload if:
	 * 1. there are no pending requests(e.g. during init stage)
	 * 2. there are some pending requests, but the controller is in
	 *     reset status, and its type is not st_yosemite
	 * firmware of st_yosemite in reset status will return pending requests
	 * to driver, so we allow it to pass
	 */
	if (unlikely(hba->out_req_cnt <= 0 ||
			(hba->mu_status == MU_STATE_RESETTING &&
			 hba->cardtype != st_yosemite))) {
		hba->status_tail = hba->status_head;
		goto update_status;
	}

	while (hba->status_tail != hba->status_head) {
		resp = stex_get_status(hba);
		tag = le16_to_cpu(resp->tag);
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));
			continue;
		}

		hba->out_req_cnt--;
		ccb = &hba->ccb[tag];
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));
			continue;
		}

		size = resp->payload_sz * sizeof(u32); /* payload size */
		if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
			size > sizeof(*resp))) {
			printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
				pci_name(hba->pdev));
		} else {
			size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
			if (size)
				stex_copy_data(ccb, resp, size);
		}

		ccb->req = NULL;
		ccb->srb_status = resp->srb_status;
		ccb->scsi_status = resp->scsi_status;

		if (likely(ccb->cmd != NULL)) {
			if (hba->cardtype == st_yosemite)
				stex_check_cmd(hba, ccb, resp);

			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
				stex_controller_info(hba, ccb);

			scsi_dma_unmap(ccb->cmd);
			stex_scsi_done(ccb);
		} else
			ccb->req_type = 0;
	}

update_status:
	writel(hba->status_head, base + IMR1);
	readl(base + IMR1); /* flush */
}

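/*
 * Interrupt handlers.  The doorbell/interrupt status is read under
 * the host lock; zero means the (shared) interrupt line was raised
 * by another device, and all-ones typically means the adapter has
 * dropped off the bus, so both cases return IRQ_NONE.
 */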
static irqreturn_t stex_intr(int irq, void *__hba)
{
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;
	u32 data;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + ODBL);

	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */
		stex_mu_intr(hba, data);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
			hba->cardtype == st_shasta))
			queue_work(hba->work_q, &hba->reset_work);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return IRQ_NONE;
}

static void stex_ss_mu_intr(struct st_hba *hba)
{
	struct status_msg *resp;
	struct st_ccb *ccb;
	__le32 *scratch;
	unsigned int size;
	int count = 0;
	u32 value;
	u16 tag;

	if (unlikely(hba->out_req_cnt <= 0 ||
			hba->mu_status == MU_STATE_RESETTING))
		return;

	while (count < hba->sts_count) {
		scratch = hba->scratch + hba->status_tail;
		value = le32_to_cpu(*scratch);
		if (unlikely(!(value & SS_STS_NORMAL)))
			return;

		resp = hba->status_buffer + hba->status_tail;
		*scratch = 0;
		++count;
		++hba->status_tail;
		hba->status_tail %= hba->sts_count+1;

		tag = (u16)value;
		if (unlikely(tag >= hba->host->can_queue)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): invalid tag\n", pci_name(hba->pdev));
			continue;
		}

		hba->out_req_cnt--;
		ccb = &hba->ccb[tag];
		if (unlikely(hba->wait_ccb == ccb))
			hba->wait_ccb = NULL;
		if (unlikely(ccb->req == NULL)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): lagging req\n", pci_name(hba->pdev));
			continue;
		}

		ccb->req = NULL;
		if (likely(value & SS_STS_DONE)) { /* normal case */
			ccb->srb_status = SRB_STATUS_SUCCESS;
			ccb->scsi_status = SAM_STAT_GOOD;
		} else {
			ccb->srb_status = resp->srb_status;
			ccb->scsi_status = resp->scsi_status;
			size = resp->payload_sz * sizeof(u32);
			if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
				size > sizeof(*resp))) {
				printk(KERN_WARNING DRV_NAME
					"(%s): bad status size\n",
					pci_name(hba->pdev));
			} else {
				size -= sizeof(*resp) - STATUS_VAR_LEN;
				if (size)
					stex_copy_data(ccb, resp, size);
			}
			if (likely(ccb->cmd != NULL))
				stex_check_cmd(hba, ccb, resp);
		}

		if (likely(ccb->cmd != NULL)) {
			scsi_dma_unmap(ccb->cmd);
			stex_scsi_done(ccb);
		} else
			ccb->req_type = 0;
	}
}

static irqreturn_t stex_ss_intr(int irq, void *__hba)
{
	struct st_hba *hba = __hba;
	void __iomem *base = hba->mmio_base;
	u32 data;
	unsigned long flags;

	spin_lock_irqsave(hba->host->host_lock, flags);

	data = readl(base + YI2H_INT);
	if (data && data != 0xffffffff) {
		/* clear the interrupt */
		writel(data, base + YI2H_INT_C);
		stex_ss_mu_intr(hba);
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		if (unlikely(data & SS_I2H_REQUEST_RESET))
			queue_work(hba->work_q, &hba->reset_work);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return IRQ_NONE;
}

static int stex_common_handshake(struct st_hba *hba)
{
	void __iomem *base = hba->mmio_base;
	struct handshake_frame *h;
	dma_addr_t status_phys;
	u32 data;
	unsigned long before;

	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
		readl(base + IDBL);
		before = jiffies;
		while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
				printk(KERN_ERR DRV_NAME
					"(%s): no handshake signature\n",
					pci_name(hba->pdev));
				return -1;
			}
			rmb();
			msleep(1);
		}
	}

	udelay(10);

	data = readl(base + OMR1);
	if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
		data &= 0x0000ffff;
		if (hba->host->can_queue > data) {
			hba->host->can_queue = data;
			hba->host->cmd_per_lun = data;
		}
	}

	h = (struct handshake_frame *)hba->status_buffer;
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
	h->partner_type = HMU_PARTNER_TYPE;
	if (hba->extra_offset) {
		h->extra_offset = cpu_to_le32(hba->extra_offset);
		h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
	} else
		h->extra_offset = h->extra_size = 0;

	status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
	writel(status_phys, base + IMR0);
	readl(base + IMR0);
	writel((status_phys >> 16) >> 16, base + IMR1);
	readl(base + IMR1);

	writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
	readl(base + OMR0);
	writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
	readl(base + IDBL); /* flush */

	udelay(10);
	before = jiffies;
	while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));
			return -1;
		}
		rmb();
		msleep(1);
	}

	writel(0, base + IMR0);
	readl(base + IMR0);
	writel(0, base + OMR0);
	readl(base + OMR0);
	writel(0, base + IMR1);
	readl(base + IMR1);
	writel(0, base + OMR1);
	readl(base + OMR1); /* flush */
	return 0;
}

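/*
 * st_yel handshake: wait until the firmware reports SS_MU_OPERATIONAL,
 * build a st_msg_header plus handshake_frame at the start of the DMA
 * area, post its bus address through YH2I_REQ_HI/YH2I_REQ, then poll
 * scratch[0] until the firmware acknowledges with SS_STS_HANDSHAKE.
 */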
static int stex_ss_handshake(struct st_hba *hba)
{
	void __iomem *base = hba->mmio_base;
	struct st_msg_header *msg_h;
	struct handshake_frame *h;
	__le32 *scratch;
	u32 data, scratch_size;
	unsigned long before;
	int ret = 0;

	before = jiffies;
	while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): firmware not operational\n",
				pci_name(hba->pdev));
			return -1;
		}
		msleep(1);
	}

	msg_h = (struct st_msg_header *)hba->dma_mem;
	msg_h->handle = cpu_to_le64(hba->dma_handle);
	msg_h->flag = SS_HEAD_HANDSHAKE;

	h = (struct handshake_frame *)(msg_h + 1);
	h->rb_phy = cpu_to_le64(hba->dma_handle);
	h->req_sz = cpu_to_le16(hba->rq_size);
	h->req_cnt = cpu_to_le16(hba->rq_count+1);
	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
	h->status_cnt = cpu_to_le16(hba->sts_count+1);
	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
	h->partner_type = HMU_PARTNER_TYPE;
	h->extra_offset = h->extra_size = 0;
	scratch_size = (hba->sts_count+1)*sizeof(u32);
	h->scratch_size = cpu_to_le32(scratch_size);

	data = readl(base + YINT_EN);
	data &= ~4;
	writel(data, base + YINT_EN);
	writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
	readl(base + YH2I_REQ_HI);
	writel(hba->dma_handle, base + YH2I_REQ);
	readl(base + YH2I_REQ); /* flush */

	scratch = hba->scratch;
	before = jiffies;
	while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
			printk(KERN_ERR DRV_NAME
				"(%s): no signature after handshake frame\n",
				pci_name(hba->pdev));
			ret = -1;
			break;
		}
		rmb();
		msleep(1);
	}

	memset(scratch, 0, scratch_size);
	msg_h->flag = 0;
	return ret;
}

static int stex_handshake(struct st_hba *hba)
{
	int err;
	unsigned long flags;
	unsigned int mu_status;

	err = (hba->cardtype == st_yel) ?
		stex_ss_handshake(hba) : stex_common_handshake(hba);
	spin_lock_irqsave(hba->host->host_lock, flags);
	mu_status = hba->mu_status;
	if (err == 0) {
		hba->req_head = 0;
		hba->req_tail = 0;
		hba->status_head = 0;
		hba->status_tail = 0;
		hba->out_req_cnt = 0;
		hba->mu_status = MU_STATE_STARTED;
	} else
		hba->mu_status = MU_STATE_FAILED;
	if (mu_status == MU_STATE_RESETTING)
		wake_up_all(&hba->reset_waitq);
	spin_unlock_irqrestore(hba->host->host_lock, flags);
	return err;
}

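/*
 * Error handling.  stex_abort() cannot revoke a request the firmware
 * already owns; it only polls the completion path once in case the
 * command finished but its interrupt was lost.  If the command is
 * still outstanding, its ccb is detached (req = NULL so a late status
 * is ignored) and FAILED is returned, letting the midlayer escalate
 * to a host reset.
 */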
static int stex_abort(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct st_hba *hba = (struct st_hba *)host->hostdata;
	u16 tag = cmd->request->tag;
	void __iomem *base;
	u32 data;
	int result = SUCCESS;
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "aborting command\n");

	base = hba->mmio_base;
	spin_lock_irqsave(host->host_lock, flags);
	if (tag < host->can_queue &&
		hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
		hba->wait_ccb = &hba->ccb[tag];
	else
		goto out;

	if (hba->cardtype == st_yel) {
		data = readl(base + YI2H_INT);
		if (data == 0 || data == 0xffffffff)
			goto fail_out;

		writel(data, base + YI2H_INT_C);
		stex_ss_mu_intr(hba);
	} else {
		data = readl(base + ODBL);
		if (data == 0 || data == 0xffffffff)
			goto fail_out;

		writel(data, base + ODBL);
		readl(base + ODBL); /* flush */

		stex_mu_intr(hba, data);
	}
	if (hba->wait_ccb == NULL) {
		printk(KERN_WARNING DRV_NAME
			"(%s): lost interrupt\n", pci_name(hba->pdev));
		goto out;
	}

fail_out:
	scsi_dma_unmap(cmd);
	hba->wait_ccb->req = NULL; /* nullify the req's future return */
	hba->wait_ccb = NULL;
	result = FAILED;
out:
	spin_unlock_irqrestore(host->host_lock, flags);
	return result;
}

static void stex_hard_reset(struct st_hba *hba)
{
	struct pci_bus *bus;
	int i;
	u16 pci_cmd;
	u8 pci_bctl;

	for (i = 0; i < 16; i++)
		pci_read_config_dword(hba->pdev, i * 4,
			&hba->pdev->saved_config_space[i]);

	/* Reset secondary bus. Our controller(MU/ATU) is the only device on
	   secondary bus. Consult Intel 80331/3 developer's manual for detail */
	bus = hba->pdev->bus;
	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	/*
	 * 1 ms may be enough for 8-port controllers. But 16-port controllers
	 * require more time to finish bus reset. Use 100 ms here for safety
	 */
	msleep(100);
	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);

	for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
			break;
		msleep(1);
	}

	ssleep(5);
	for (i = 0; i < 16; i++)
		pci_write_config_dword(hba->pdev, i * 4,
			hba->pdev->saved_config_space[i]);
}

static int stex_yos_reset(struct st_hba *hba)
{
	void __iomem *base;
	unsigned long flags, before;
	int ret = 0;

	base = hba->mmio_base;
	writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
	readl(base + IDBL); /* flush */
	before = jiffies;
	while (hba->out_req_cnt > 0) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			printk(KERN_WARNING DRV_NAME
				"(%s): reset timeout\n", pci_name(hba->pdev));
			ret = -1;
			break;
		}
		msleep(1);
	}

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (ret == -1)
		hba->mu_status = MU_STATE_FAILED;
	else
		hba->mu_status = MU_STATE_STARTED;
	wake_up_all(&hba->reset_waitq);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	return ret;
}

static void stex_ss_reset(struct st_hba *hba)
{
	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
	readl(hba->mmio_base + YH2I_INT);
	ssleep(5);
}

static int stex_do_reset(struct st_hba *hba)
{
	unsigned long flags;
	unsigned int mu_status = MU_STATE_RESETTING;

	spin_lock_irqsave(hba->host->host_lock, flags);
	if (hba->mu_status == MU_STATE_STARTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
			pci_name(hba->pdev));
		return 0;
	}
	while (hba->mu_status == MU_STATE_RESETTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		wait_event_timeout(hba->reset_waitq,
				   hba->mu_status != MU_STATE_RESETTING,
				   MU_MAX_DELAY * HZ);
		spin_lock_irqsave(hba->host->host_lock, flags);
		mu_status = hba->mu_status;
	}

	if (mu_status != MU_STATE_RESETTING) {
		spin_unlock_irqrestore(hba->host->host_lock, flags);
		return (mu_status == MU_STATE_STARTED) ? 0 : -1;
	}

	hba->mu_status = MU_STATE_RESETTING;
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	if (hba->cardtype == st_yosemite)
		return stex_yos_reset(hba);

	if (hba->cardtype == st_shasta)
		stex_hard_reset(hba);
	else if (hba->cardtype == st_yel)
		stex_ss_reset(hba);


	return_abnormal_state(hba, DID_RESET);

	if (stex_handshake(hba) == 0)
		return 0;

	printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
		pci_name(hba->pdev));
	return -1;
}

static int stex_reset(struct scsi_cmnd *cmd)
{
	struct st_hba *hba;

	hba = (struct st_hba *) &cmd->device->host->hostdata[0];

	shost_printk(KERN_INFO, cmd->device->host,
		     "resetting host\n");

	return stex_do_reset(hba) ? FAILED : SUCCESS;
}

static void stex_reset_work(struct work_struct *work)
{
	struct st_hba *hba = container_of(work, struct st_hba, reset_work);

	stex_do_reset(hba);
}

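/*
 * Report a conventional heads/sectors translation: 64/32 for disks
 * smaller than 0x200000 sectors (1 GiB at 512 bytes per sector),
 * 255/63 otherwise; the cylinder count is whatever remains after
 * dividing the capacity by heads * sectors.
 */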
static int stex_biosparam(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int geom[])
{
	int heads = 255, sectors = 63;

	if (capacity < 0x200000) {
		heads = 64;
		sectors = 32;
	}

	sector_div(capacity, heads * sectors);

	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = capacity;

	return 0;
}

static struct scsi_host_template driver_template = {
	.module				= THIS_MODULE,
	.name				= DRV_NAME,
	.proc_name			= DRV_NAME,
	.bios_param			= stex_biosparam,
	.queuecommand			= stex_queuecommand,
	.slave_configure		= stex_slave_config,
	.eh_abort_handler		= stex_abort,
	.eh_host_reset_handler		= stex_reset,
	.this_id			= -1,
};

static struct pci_device_id stex_pci_tbl[] = {
	/* st_shasta */
	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX12350 */
	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX4350 */
	{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
		st_shasta }, /* SuperTrak EX24350 */

	/* st_vsc */
	{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },

	/* st_yosemite */
	{ 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },

	/* st_seq */
	{ 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },

	/* st_yel */
	{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
	{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
	{ }	/* terminate list */
};

static struct st_card_info stex_card_info[] = {
	/* st_shasta */
	{
		.max_id		= 17,
		.max_lun	= 8,
		.max_channel	= 0,
		.rq_count	= 32,
		.rq_size	= 1048,
		.sts_count	= 32,
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,
	},

	/* st_vsc */
	{
		.max_id		= 129,
		.max_lun	= 1,
		.max_channel	= 0,
		.rq_count	= 32,
		.rq_size	= 1048,
		.sts_count	= 32,
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,
	},

	/* st_yosemite */
	{
		.max_id		= 2,
		.max_lun	= 256,
		.max_channel	= 0,
		.rq_count	= 256,
		.rq_size	= 1048,
		.sts_count	= 256,
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,
	},

	/* st_seq */
	{
		.max_id		= 129,
		.max_lun	= 1,
		.max_channel	= 0,
		.rq_count	= 32,
		.rq_size	= 1048,
		.sts_count	= 32,
		.alloc_rq	= stex_alloc_req,
		.map_sg		= stex_map_sg,
		.send		= stex_send_cmd,
	},

	/* st_yel */
	{
		.max_id		= 129,
		.max_lun	= 256,
		.max_channel	= 3,
		.rq_count	= 801,
		.rq_size	= 512,
		.sts_count	= 801,
		.alloc_rq	= stex_ss_alloc_req,
		.map_sg		= stex_ss_map_sg,
		.send		= stex_ss_send_cmd,
	},
};

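/*
 * Prefer 64-bit DMA; if either the streaming or the coherent mask
 * cannot be set to 64 bits, fall back to 32-bit addressing for both.
 */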
static int stex_set_dma_mask(struct pci_dev * pdev)
{
	int ret;

	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))
		&& !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)))
		return 0;
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!ret)
		ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	return ret;
}

static int stex_request_irq(struct st_hba *hba)
{
	struct pci_dev *pdev = hba->pdev;
	int status;

	if (msi) {
		status = pci_enable_msi(pdev);
		if (status != 0)
			printk(KERN_ERR DRV_NAME
				"(%s): error %d setting up MSI\n",
				pci_name(pdev), status);
		else
			hba->msi_enabled = 1;
	} else
		hba->msi_enabled = 0;

	status = request_irq(pdev->irq, hba->cardtype == st_yel ?
		stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);

	if (status != 0) {
		if (hba->msi_enabled)
			pci_disable_msi(pdev);
	}
	return status;
}

static void stex_free_irq(struct st_hba *hba)
{
	struct pci_dev *pdev = hba->pdev;

	free_irq(pdev->irq, hba);
	if (hba->msi_enabled)
		pci_disable_msi(pdev);
}

static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct st_hba *hba;
	struct Scsi_Host *host;
	const struct st_card_info *ci = NULL;
	u32 sts_offset, cp_offset, scratch_offset;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	pci_set_master(pdev);

	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));

	if (!host) {
		printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_disable;
	}

	hba = (struct st_hba *)host->hostdata;
	memset(hba, 0, sizeof(struct st_hba));

	err = pci_request_regions(pdev, DRV_NAME);
	if (err < 0) {
		printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
			pci_name(pdev));
		goto out_scsi_host_put;
	}

	hba->mmio_base = pci_ioremap_bar(pdev, 0);
	if ( !hba->mmio_base) {
		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_release_regions;
	}

	err = stex_set_dma_mask(pdev);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
			pci_name(pdev));
		goto out_iounmap;
	}

	hba->cardtype = (unsigned int) id->driver_data;
	ci = &stex_card_info[hba->cardtype];
	switch (id->subdevice) {
	case 0x4221:
	case 0x4222:
	case 0x4223:
	case 0x4224:
	case 0x4225:
	case 0x4226:
	case 0x4227:
	case 0x4261:
	case 0x4262:
	case 0x4263:
	case 0x4264:
	case 0x4265:
		break;
	default:
		if (hba->cardtype == st_yel)
			hba->supports_pm = 1;
	}

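	/*
	 * Coherent DMA area layout, in order: rq_count+1 request slots,
	 * the scratch array (st_yel only), sts_count+1 status slots,
	 * one st_frame copy buffer, and an optional "extra" region for
	 * st_seq and odd-numbered st_vsc subsystem devices.
	 */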
	sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
	if (hba->cardtype == st_yel)
		sts_offset += (ci->sts_count+1) * sizeof(u32);
	cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
	hba->dma_size = cp_offset + sizeof(struct st_frame);
	if (hba->cardtype == st_seq ||
		(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
		hba->extra_offset = hba->dma_size;
		hba->dma_size += ST_ADDITIONAL_MEM;
	}
	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
	if (!hba->dma_mem) {
		/* Retry minimum coherent mapping for st_seq and st_vsc */
		if (hba->cardtype == st_seq ||
		    (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
			printk(KERN_WARNING DRV_NAME
				"(%s): allocating min buffer for controller\n",
				pci_name(pdev));
			hba->dma_size = hba->extra_offset
				+ ST_ADDITIONAL_MEM_MIN;
			hba->dma_mem = dma_alloc_coherent(&pdev->dev,
				hba->dma_size, &hba->dma_handle, GFP_KERNEL);
		}

		if (!hba->dma_mem) {
			err = -ENOMEM;
			printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
				pci_name(pdev));
			goto out_iounmap;
		}
	}

	hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
	if (!hba->ccb) {
		err = -ENOMEM;
		printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n",
			pci_name(pdev));
		goto out_pci_free;
	}

	if (hba->cardtype == st_yel)
		hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
	hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
	hba->copy_buffer = hba->dma_mem + cp_offset;
	hba->rq_count = ci->rq_count;
	hba->rq_size = ci->rq_size;
	hba->sts_count = ci->sts_count;
	hba->alloc_rq = ci->alloc_rq;
	hba->map_sg = ci->map_sg;
	hba->send = ci->send;
	hba->mu_status = MU_STATE_STARTING;

	if (hba->cardtype == st_yel)
		host->sg_tablesize = 38;
	else
		host->sg_tablesize = 32;
	host->can_queue = ci->rq_count;
	host->cmd_per_lun = ci->rq_count;
	host->max_id = ci->max_id;
	host->max_lun = ci->max_lun;
	host->max_channel = ci->max_channel;
	host->unique_id = host->host_no;
	host->max_cmd_len = STEX_CDB_LENGTH;

	hba->host = host;
	hba->pdev = pdev;
	init_waitqueue_head(&hba->reset_waitq);

	snprintf(hba->work_q_name, sizeof(hba->work_q_name),
		 "stex_wq_%d", host->host_no);
	hba->work_q = create_singlethread_workqueue(hba->work_q_name);
	if (!hba->work_q) {
		printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
			pci_name(pdev));
		err = -ENOMEM;
		goto out_ccb_free;
	}
	INIT_WORK(&hba->reset_work, stex_reset_work);

	err = stex_request_irq(hba);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
			pci_name(pdev));
		goto out_free_wq;
	}

	err = stex_handshake(hba);
	if (err)
		goto out_free_irq;

	pci_set_drvdata(pdev, hba);

	err = scsi_add_host(host, &pdev->dev);
	if (err) {
		printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
			pci_name(pdev));
		goto out_free_irq;
	}

	scsi_scan_host(host);

	return 0;

out_free_irq:
	stex_free_irq(hba);
out_free_wq:
	destroy_workqueue(hba->work_q);
out_ccb_free:
	kfree(hba->ccb);
out_pci_free:
	dma_free_coherent(&pdev->dev, hba->dma_size,
			  hba->dma_mem, hba->dma_handle);
out_iounmap:
	iounmap(hba->mmio_base);
out_release_regions:
	pci_release_regions(pdev);
out_scsi_host_put:
	scsi_host_put(host);
out_disable:
	pci_disable_device(pdev);

	return err;
}

static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
{
	struct req_msg *req;
	struct st_msg_header *msg_h;
	unsigned long flags;
	unsigned long before;
	u16 tag = 0;

	spin_lock_irqsave(hba->host->host_lock, flags);

	if (hba->cardtype == st_yel && hba->supports_pm == 1)
	{
		if(st_sleep_mic == ST_NOTHANDLED)
		{
			spin_unlock_irqrestore(hba->host->host_lock, flags);
			return;
		}
	}
	req = hba->alloc_rq(hba);
	if (hba->cardtype == st_yel) {
		msg_h = (struct st_msg_header *)req - 1;
		memset(msg_h, 0, hba->rq_size);
	} else
		memset(req, 0, hba->rq_size);

	if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel)
		&& st_sleep_mic == ST_IGNORED) {
		req->cdb[0] = MGT_CMD;
		req->cdb[1] = MGT_CMD_SIGNATURE;
		req->cdb[2] = CTLR_CONFIG_CMD;
		req->cdb[3] = CTLR_SHUTDOWN;
	} else if (hba->cardtype == st_yel && st_sleep_mic != ST_IGNORED) {
		req->cdb[0] = MGT_CMD;
		req->cdb[1] = MGT_CMD_SIGNATURE;
		req->cdb[2] = CTLR_CONFIG_CMD;
		req->cdb[3] = PMIC_SHUTDOWN;
		req->cdb[4] = st_sleep_mic;
	} else {
		req->cdb[0] = CONTROLLER_CMD;
		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
		req->cdb[2] = CTLR_POWER_SAVING;
	}

	hba->ccb[tag].cmd = NULL;
	hba->ccb[tag].sg_count = 0;
	hba->ccb[tag].sense_bufflen = 0;
	hba->ccb[tag].sense_buffer = NULL;
	hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;

	hba->send(hba, req, tag);
	spin_unlock_irqrestore(hba->host->host_lock, flags);

	before = jiffies;
	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
			hba->ccb[tag].req_type = 0;
			hba->mu_status = MU_STATE_STOP;
			return;
		}
		msleep(1);
	}
	hba->mu_status = MU_STATE_STOP;
}

static void stex_hba_free(struct st_hba *hba)
{
	stex_free_irq(hba);

	destroy_workqueue(hba->work_q);

	iounmap(hba->mmio_base);

	pci_release_regions(hba->pdev);

	kfree(hba->ccb);

	dma_free_coherent(&hba->pdev->dev, hba->dma_size,
			  hba->dma_mem, hba->dma_handle);
}

static void stex_remove(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	hba->mu_status = MU_STATE_NOCONNECT;
	return_abnormal_state(hba, DID_NO_CONNECT);
	scsi_remove_host(hba->host);

	scsi_block_requests(hba->host);

	stex_hba_free(hba);

	scsi_host_put(hba->host);

	pci_disable_device(pdev);
}

static void stex_shutdown(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	if (hba->supports_pm == 0)
		stex_hba_stop(hba, ST_IGNORED);
	else
		stex_hba_stop(hba, ST_S5);
}

static int stex_choice_sleep_mic(pm_message_t state)
{
	switch (state.event) {
	case PM_EVENT_SUSPEND:
		return ST_S3;
	case PM_EVENT_HIBERNATE:
		return ST_S4;
	default:
		return ST_NOTHANDLED;
	}
}

static int stex_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	if (hba->cardtype == st_yel && hba->supports_pm == 1)
		stex_hba_stop(hba, stex_choice_sleep_mic(state));
	else
		stex_hba_stop(hba, ST_IGNORED);
	return 0;
}

static int stex_resume(struct pci_dev *pdev)
{
	struct st_hba *hba = pci_get_drvdata(pdev);

	hba->mu_status = MU_STATE_STARTING;
	stex_handshake(hba);
	return 0;
}
MODULE_DEVICE_TABLE(pci, stex_pci_tbl);

static struct pci_driver stex_pci_driver = {
	.name		= DRV_NAME,
	.id_table	= stex_pci_tbl,
	.probe		= stex_probe,
	.remove		= stex_remove,
	.shutdown	= stex_shutdown,
	.suspend	= stex_suspend,
	.resume		= stex_resume,
};

static int __init stex_init(void)
{
	printk(KERN_INFO DRV_NAME
		": Promise SuperTrak EX Driver version: %s\n",
		 ST_DRIVER_VERSION);

	return pci_register_driver(&stex_pci_driver);
}

static void __exit stex_exit(void)
{
	pci_unregister_driver(&stex_pci_driver);
}

module_init(stex_init);
module_exit(stex_exit);
v6.2
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * SuperTrak EX Series Storage Controller driver for Linux
   4 *
   5 *	Copyright (C) 2005-2015 Promise Technology Inc.
   6 *
 
 
 
 
 
   7 *	Written By:
   8 *		Ed Lin <promise_linux@promise.com>
 
   9 */
  10
  11#include <linux/init.h>
  12#include <linux/errno.h>
  13#include <linux/kernel.h>
  14#include <linux/delay.h>
  15#include <linux/slab.h>
  16#include <linux/time.h>
  17#include <linux/pci.h>
  18#include <linux/blkdev.h>
  19#include <linux/interrupt.h>
  20#include <linux/types.h>
  21#include <linux/module.h>
  22#include <linux/spinlock.h>
  23#include <linux/ktime.h>
  24#include <linux/reboot.h>
  25#include <asm/io.h>
  26#include <asm/irq.h>
  27#include <asm/byteorder.h>
  28#include <scsi/scsi.h>
  29#include <scsi/scsi_device.h>
  30#include <scsi/scsi_cmnd.h>
  31#include <scsi/scsi_host.h>
  32#include <scsi/scsi_tcq.h>
  33#include <scsi/scsi_dbg.h>
  34#include <scsi/scsi_eh.h>
  35
  36#define DRV_NAME "stex"
  37#define ST_DRIVER_VERSION	"6.02.0000.01"
  38#define ST_VER_MAJOR		6
  39#define ST_VER_MINOR		02
  40#define ST_OEM				0000
  41#define ST_BUILD_VER		01
  42
  43enum {
  44	/* MU register offset */
  45	IMR0	= 0x10,	/* MU_INBOUND_MESSAGE_REG0 */
  46	IMR1	= 0x14,	/* MU_INBOUND_MESSAGE_REG1 */
  47	OMR0	= 0x18,	/* MU_OUTBOUND_MESSAGE_REG0 */
  48	OMR1	= 0x1c,	/* MU_OUTBOUND_MESSAGE_REG1 */
  49	IDBL	= 0x20,	/* MU_INBOUND_DOORBELL */
  50	IIS	= 0x24,	/* MU_INBOUND_INTERRUPT_STATUS */
  51	IIM	= 0x28,	/* MU_INBOUND_INTERRUPT_MASK */
  52	ODBL	= 0x2c,	/* MU_OUTBOUND_DOORBELL */
  53	OIS	= 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
  54	OIM	= 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */
  55
  56	YIOA_STATUS				= 0x00,
  57	YH2I_INT				= 0x20,
  58	YINT_EN					= 0x34,
  59	YI2H_INT				= 0x9c,
  60	YI2H_INT_C				= 0xa0,
  61	YH2I_REQ				= 0xc0,
  62	YH2I_REQ_HI				= 0xc4,
  63	PSCRATCH0				= 0xb0,
  64	PSCRATCH1				= 0xb4,
  65	PSCRATCH2				= 0xb8,
  66	PSCRATCH3				= 0xbc,
  67	PSCRATCH4				= 0xc8,
  68	MAILBOX_BASE			= 0x1000,
  69	MAILBOX_HNDSHK_STS		= 0x0,
  70
  71	/* MU register value */
  72	MU_INBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
  73	MU_INBOUND_DOORBELL_REQHEADCHANGED	= (1 << 1),
  74	MU_INBOUND_DOORBELL_STATUSTAILCHANGED	= (1 << 2),
  75	MU_INBOUND_DOORBELL_HMUSTOPPED		= (1 << 3),
  76	MU_INBOUND_DOORBELL_RESET		= (1 << 4),
  77
  78	MU_OUTBOUND_DOORBELL_HANDSHAKE		= (1 << 0),
  79	MU_OUTBOUND_DOORBELL_REQUESTTAILCHANGED	= (1 << 1),
  80	MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED	= (1 << 2),
  81	MU_OUTBOUND_DOORBELL_BUSCHANGE		= (1 << 3),
  82	MU_OUTBOUND_DOORBELL_HASEVENT		= (1 << 4),
  83	MU_OUTBOUND_DOORBELL_REQUEST_RESET	= (1 << 27),
  84
  85	/* MU status code */
  86	MU_STATE_STARTING			= 1,
  87	MU_STATE_STARTED			= 2,
  88	MU_STATE_RESETTING			= 3,
  89	MU_STATE_FAILED				= 4,
  90	MU_STATE_STOP				= 5,
  91	MU_STATE_NOCONNECT			= 6,
  92
  93	MU_MAX_DELAY				= 50,
  94	MU_HANDSHAKE_SIGNATURE			= 0x55aaaa55,
  95	MU_HANDSHAKE_SIGNATURE_HALF		= 0x5a5a0000,
  96	MU_HARD_RESET_WAIT			= 30000,
  97	HMU_PARTNER_TYPE			= 2,
  98
  99	/* firmware returned values */
 100	SRB_STATUS_SUCCESS			= 0x01,
 101	SRB_STATUS_ERROR			= 0x04,
 102	SRB_STATUS_BUSY				= 0x05,
 103	SRB_STATUS_INVALID_REQUEST		= 0x06,
 104	SRB_STATUS_SELECTION_TIMEOUT		= 0x0A,
  105	SRB_SEE_SENSE				= 0x80,
 106
 107	/* task attribute */
 108	TASK_ATTRIBUTE_SIMPLE			= 0x0,
 109	TASK_ATTRIBUTE_HEADOFQUEUE		= 0x1,
 110	TASK_ATTRIBUTE_ORDERED			= 0x2,
 111	TASK_ATTRIBUTE_ACA			= 0x4,
 112
 113	SS_STS_NORMAL				= 0x80000000,
 114	SS_STS_DONE				= 0x40000000,
 115	SS_STS_HANDSHAKE			= 0x20000000,
 116
 117	SS_HEAD_HANDSHAKE			= 0x80,
 118
 119	SS_H2I_INT_RESET			= 0x100,
 120
 121	SS_I2H_REQUEST_RESET			= 0x2000,
 122
 123	SS_MU_OPERATIONAL			= 0x80000000,
 124
 125	STEX_CDB_LENGTH				= 16,
 126	STATUS_VAR_LEN				= 128,
 127
 128	/* sg flags */
 129	SG_CF_EOT				= 0x80,	/* end of table */
 130	SG_CF_64B				= 0x40,	/* 64 bit item */
 131	SG_CF_HOST				= 0x20,	/* sg in host memory */
 132	MSG_DATA_DIR_ND				= 0,
 133	MSG_DATA_DIR_IN				= 1,
 134	MSG_DATA_DIR_OUT			= 2,
 135
 136	st_shasta				= 0,
 137	st_vsc					= 1,
 138	st_yosemite				= 2,
 139	st_seq					= 3,
 140	st_yel					= 4,
 141	st_P3					= 5,
 142
 143	PASSTHRU_REQ_TYPE			= 0x00000001,
 144	PASSTHRU_REQ_NO_WAKEUP			= 0x00000100,
 145	ST_INTERNAL_TIMEOUT			= 180,
 146
 147	ST_TO_CMD				= 0,
 148	ST_FROM_CMD				= 1,
 149
 150	/* vendor specific commands of Promise */
 151	MGT_CMD					= 0xd8,
 152	SINBAND_MGT_CMD				= 0xd9,
 153	ARRAY_CMD				= 0xe0,
 154	CONTROLLER_CMD				= 0xe1,
 155	DEBUGGING_CMD				= 0xe2,
 156	PASSTHRU_CMD				= 0xe3,
 157
 158	PASSTHRU_GET_ADAPTER			= 0x05,
 159	PASSTHRU_GET_DRVVER			= 0x10,
 160
 161	CTLR_CONFIG_CMD				= 0x03,
 162	CTLR_SHUTDOWN				= 0x0d,
 163
 164	CTLR_POWER_STATE_CHANGE			= 0x0e,
 165	CTLR_POWER_SAVING			= 0x01,
 166
 167	PASSTHRU_SIGNATURE			= 0x4e415041,
 168	MGT_CMD_SIGNATURE			= 0xba,
 169
 170	INQUIRY_EVPD				= 0x01,
 171
 172	ST_ADDITIONAL_MEM			= 0x200000,
 173	ST_ADDITIONAL_MEM_MIN			= 0x80000,
 174	PMIC_SHUTDOWN				= 0x0D,
  175	PMIC_RESUME					= 0x10,
 176	ST_IGNORED					= -1,
 177	ST_NOTHANDLED				= 7,
 178	ST_S3						= 3,
 179	ST_S4						= 4,
 180	ST_S5						= 5,
 181	ST_S6						= 6,
 182};
 183
 184struct st_sgitem {
 185	u8 ctrl;	/* SG_CF_xxx */
 186	u8 reserved[3];
 187	__le32 count;
 188	__le64 addr;
 189};
 190
 191struct st_ss_sgitem {
 192	__le32 addr;
 193	__le32 addr_hi;
 194	__le32 count;
 195};
 196
 197struct st_sgtable {
 198	__le16 sg_count;
 199	__le16 max_sg_count;
 200	__le32 sz_in_byte;
 201};
 202
 203struct st_msg_header {
 204	__le64 handle;
 205	u8 flag;
 206	u8 channel;
 207	__le16 timeout;
 208	u32 reserved;
 209};
 210
 211struct handshake_frame {
 212	__le64 rb_phy;		/* request payload queue physical address */
 213	__le16 req_sz;		/* size of each request payload */
 214	__le16 req_cnt;		/* count of reqs the buffer can hold */
 215	__le16 status_sz;	/* size of each status payload */
 216	__le16 status_cnt;	/* count of status the buffer can hold */
 217	__le64 hosttime;	/* seconds from Jan 1, 1970 (GMT) */
 218	u8 partner_type;	/* who sends this frame */
 219	u8 reserved0[7];
 220	__le32 partner_ver_major;
 221	__le32 partner_ver_minor;
 222	__le32 partner_ver_oem;
 223	__le32 partner_ver_build;
 224	__le32 extra_offset;	/* NEW */
 225	__le32 extra_size;	/* NEW */
 226	__le32 scratch_size;
 227	u32 reserved1;
 228};
 229
 230struct req_msg {
 231	__le16 tag;
 232	u8 lun;
 233	u8 target;
 234	u8 task_attr;
 235	u8 task_manage;
 236	u8 data_dir;
  237	u8 payload_sz;		/* payload size in 4-byte units, not used */
 238	u8 cdb[STEX_CDB_LENGTH];
 239	u32 variable[];
 240};
 241
 242struct status_msg {
 243	__le16 tag;
 244	u8 lun;
 245	u8 target;
 246	u8 srb_status;
 247	u8 scsi_status;
 248	u8 reserved;
  249	u8 payload_sz;		/* payload size in 4-byte units */
 250	u8 variable[STATUS_VAR_LEN];
 251};
 252
 253struct ver_info {
 254	u32 major;
 255	u32 minor;
 256	u32 oem;
 257	u32 build;
 258	u32 reserved[2];
 259};
 260
 261struct st_frame {
 262	u32 base[6];
 263	u32 rom_addr;
 264
 265	struct ver_info drv_ver;
 266	struct ver_info bios_ver;
 267
 268	u32 bus;
 269	u32 slot;
 270	u32 irq_level;
 271	u32 irq_vec;
 272	u32 id;
 273	u32 subid;
 274
 275	u32 dimm_size;
 276	u8 dimm_type;
 277	u8 reserved[3];
 278
 279	u32 channel;
 280	u32 reserved1;
 281};
 282
 283struct st_drvver {
 284	u32 major;
 285	u32 minor;
 286	u32 oem;
 287	u32 build;
 288	u32 signature[2];
 289	u8 console_id;
 290	u8 host_no;
 291	u8 reserved0[2];
 292	u32 reserved[3];
 293};
 294
 295struct st_ccb {
 296	struct req_msg *req;
 297	struct scsi_cmnd *cmd;
 298
 299	void *sense_buffer;
 300	unsigned int sense_bufflen;
 301	int sg_count;
 302
 303	u32 req_type;
 304	u8 srb_status;
 305	u8 scsi_status;
 306	u8 reserved[2];
 307};
 308
 309struct st_hba {
 310	void __iomem *mmio_base;	/* iomapped PCI memory space */
 311	void *dma_mem;
 312	dma_addr_t dma_handle;
 313	size_t dma_size;
 314
 315	struct Scsi_Host *host;
 316	struct pci_dev *pdev;
 317
 318	struct req_msg * (*alloc_rq) (struct st_hba *);
 319	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
 320	void (*send) (struct st_hba *, struct req_msg *, u16);
 321
 322	u32 req_head;
 323	u32 req_tail;
 324	u32 status_head;
 325	u32 status_tail;
 326
 327	struct status_msg *status_buffer;
 328	void *copy_buffer; /* temp buffer for driver-handled commands */
 329	struct st_ccb *ccb;
 330	struct st_ccb *wait_ccb;
 331	__le32 *scratch;
 332
 333	char work_q_name[20];
 334	struct workqueue_struct *work_q;
 335	struct work_struct reset_work;
 336	wait_queue_head_t reset_waitq;
 337	unsigned int mu_status;
 338	unsigned int cardtype;
 339	int msi_enabled;
 340	int out_req_cnt;
 341	u32 extra_offset;
 342	u16 rq_count;
 343	u16 rq_size;
 344	u16 sts_count;
 345	u8  supports_pm;
 346	int msi_lock;
 347};
 348
 349struct st_card_info {
 350	struct req_msg * (*alloc_rq) (struct st_hba *);
 351	int (*map_sg)(struct st_hba *, struct req_msg *, struct st_ccb *);
 352	void (*send) (struct st_hba *, struct req_msg *, u16);
 353	unsigned int max_id;
 354	unsigned int max_lun;
 355	unsigned int max_channel;
 356	u16 rq_count;
 357	u16 rq_size;
 358	u16 sts_count;
 359};
 360
 361static int S6flag;
 362static int stex_halt(struct notifier_block *nb, ulong event, void *buf);
 363static struct notifier_block stex_notifier = {
 364	stex_halt, NULL, 0
 365};
 366
 367static int msi;
 368module_param(msi, int, 0);
  369MODULE_PARM_DESC(msi, "Enable Message Signaled Interrupts (0=off, 1=on)");
 370
 371static const char console_inq_page[] =
 372{
 373	0x03,0x00,0x03,0x03,0xFA,0x00,0x00,0x30,
 374	0x50,0x72,0x6F,0x6D,0x69,0x73,0x65,0x20,	/* "Promise " */
 375	0x52,0x41,0x49,0x44,0x20,0x43,0x6F,0x6E,	/* "RAID Con" */
 376	0x73,0x6F,0x6C,0x65,0x20,0x20,0x20,0x20,	/* "sole    " */
 377	0x31,0x2E,0x30,0x30,0x20,0x20,0x20,0x20,	/* "1.00    " */
 378	0x53,0x58,0x2F,0x52,0x53,0x41,0x46,0x2D,	/* "SX/RSAF-" */
 379	0x54,0x45,0x31,0x2E,0x30,0x30,0x20,0x20,	/* "TE1.00  " */
 380	0x0C,0x20,0x20,0x20,0x20,0x20,0x20,0x20
 381};
 382
 383MODULE_AUTHOR("Ed Lin");
 384MODULE_DESCRIPTION("Promise Technology SuperTrak EX Controllers");
 385MODULE_LICENSE("GPL");
 386MODULE_VERSION(ST_DRIVER_VERSION);
 387
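     /*
      * Consumer side of the status ring (non-SS controllers): firmware
      * publishes the head via OMR1 and the driver advances the tail here.
      * The ring holds sts_count + 1 slots.
      */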
 388static struct status_msg *stex_get_status(struct st_hba *hba)
 389{
 390	struct status_msg *status = hba->status_buffer + hba->status_tail;
 391
 392	++hba->status_tail;
 393	hba->status_tail %= hba->sts_count+1;
 394
 395	return status;
 396}
 397
 398static void stex_invalid_field(struct scsi_cmnd *cmd,
 399			       void (*done)(struct scsi_cmnd *))
 400{
 401	/* "Invalid field in cdb" */
 402	scsi_build_sense(cmd, 0, ILLEGAL_REQUEST, 0x24, 0x0);
 403	done(cmd);
 404}
 405
 406static struct req_msg *stex_alloc_req(struct st_hba *hba)
 407{
 408	struct req_msg *req = hba->dma_mem + hba->req_head * hba->rq_size;
 409
 410	++hba->req_head;
 411	hba->req_head %= hba->rq_count+1;
 412
 413	return req;
 414}
 415
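     /*
      * SS-family (st_yel/st_P3) request slots are prefixed by a struct
      * st_msg_header, so the usable req_msg starts just past the header;
      * req_head itself is advanced later, in stex_ss_send_cmd().
      */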
 416static struct req_msg *stex_ss_alloc_req(struct st_hba *hba)
 417{
 418	return (struct req_msg *)(hba->dma_mem +
 419		hba->req_head * hba->rq_size + sizeof(struct st_msg_header));
 420}
 421
 422static int stex_map_sg(struct st_hba *hba,
 423	struct req_msg *req, struct st_ccb *ccb)
 424{
 425	struct scsi_cmnd *cmd;
 426	struct scatterlist *sg;
 427	struct st_sgtable *dst;
 428	struct st_sgitem *table;
 429	int i, nseg;
 430
 431	cmd = ccb->cmd;
 432	nseg = scsi_dma_map(cmd);
 433	BUG_ON(nseg < 0);
 434	if (nseg) {
 435		dst = (struct st_sgtable *)req->variable;
 436
 437		ccb->sg_count = nseg;
 438		dst->sg_count = cpu_to_le16((u16)nseg);
 439		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
 440		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
 441
 442		table = (struct st_sgitem *)(dst + 1);
 443		scsi_for_each_sg(cmd, sg, nseg, i) {
 444			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
 445			table[i].addr = cpu_to_le64(sg_dma_address(sg));
 446			table[i].ctrl = SG_CF_64B | SG_CF_HOST;
 447		}
 448		table[--i].ctrl |= SG_CF_EOT;
 449	}
 450
 451	return nseg;
 452}
 453
 454static int stex_ss_map_sg(struct st_hba *hba,
 455	struct req_msg *req, struct st_ccb *ccb)
 456{
 457	struct scsi_cmnd *cmd;
 458	struct scatterlist *sg;
 459	struct st_sgtable *dst;
 460	struct st_ss_sgitem *table;
 461	int i, nseg;
 462
 463	cmd = ccb->cmd;
 464	nseg = scsi_dma_map(cmd);
 465	BUG_ON(nseg < 0);
 466	if (nseg) {
 467		dst = (struct st_sgtable *)req->variable;
 468
 469		ccb->sg_count = nseg;
 470		dst->sg_count = cpu_to_le16((u16)nseg);
 471		dst->max_sg_count = cpu_to_le16(hba->host->sg_tablesize);
 472		dst->sz_in_byte = cpu_to_le32(scsi_bufflen(cmd));
 473
 474		table = (struct st_ss_sgitem *)(dst + 1);
 475		scsi_for_each_sg(cmd, sg, nseg, i) {
 476			table[i].count = cpu_to_le32((u32)sg_dma_len(sg));
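     			/*
     			 * Split the 64-bit DMA address into 32-bit halves;
     			 * the double 16-bit shift avoids an undefined
     			 * ">> 32" when dma_addr_t is only 32 bits wide.
     			 */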
 477			table[i].addr =
 478				cpu_to_le32(sg_dma_address(sg) & 0xffffffff);
 479			table[i].addr_hi =
 480				cpu_to_le32((sg_dma_address(sg) >> 16) >> 16);
 481		}
 482	}
 483
 484	return nseg;
 485}
 486
 487static void stex_controller_info(struct st_hba *hba, struct st_ccb *ccb)
 488{
 489	struct st_frame *p;
 490	size_t count = sizeof(struct st_frame);
 491
 492	p = hba->copy_buffer;
 493	scsi_sg_copy_to_buffer(ccb->cmd, p, count);
 494	memset(p->base, 0, sizeof(u32)*6);
 495	*(unsigned long *)(p->base) = pci_resource_start(hba->pdev, 0);
 496	p->rom_addr = 0;
 497
 498	p->drv_ver.major = ST_VER_MAJOR;
 499	p->drv_ver.minor = ST_VER_MINOR;
 500	p->drv_ver.oem = ST_OEM;
 501	p->drv_ver.build = ST_BUILD_VER;
 502
 503	p->bus = hba->pdev->bus->number;
 504	p->slot = hba->pdev->devfn;
 505	p->irq_level = 0;
 506	p->irq_vec = hba->pdev->irq;
 507	p->id = hba->pdev->vendor << 16 | hba->pdev->device;
 508	p->subid =
 509		hba->pdev->subsystem_vendor << 16 | hba->pdev->subsystem_device;
 510
 511	scsi_sg_copy_from_buffer(ccb->cmd, p, count);
 512}
 513
 514static void
 515stex_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
 516{
 517	req->tag = cpu_to_le16(tag);
 518
 519	hba->ccb[tag].req = req;
 520	hba->out_req_cnt++;
 521
 522	writel(hba->req_head, hba->mmio_base + IMR0);
 523	writel(MU_INBOUND_DOORBELL_REQHEADCHANGED, hba->mmio_base + IDBL);
 524	readl(hba->mmio_base + IDBL); /* flush */
 525}
 526
 527static void
 528stex_ss_send_cmd(struct st_hba *hba, struct req_msg *req, u16 tag)
 529{
 530	struct scsi_cmnd *cmd;
 531	struct st_msg_header *msg_h;
 532	dma_addr_t addr;
 533
 534	req->tag = cpu_to_le16(tag);
 535
 536	hba->ccb[tag].req = req;
 537	hba->out_req_cnt++;
 538
 539	cmd = hba->ccb[tag].cmd;
 540	msg_h = (struct st_msg_header *)req - 1;
 541	if (likely(cmd)) {
 542		msg_h->channel = (u8)cmd->device->channel;
 543		msg_h->timeout = cpu_to_le16(scsi_cmd_to_rq(cmd)->timeout / HZ);
 544	}
 545	addr = hba->dma_handle + hba->req_head * hba->rq_size;
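     	/*
     	 * The low bits of the handle appear to tell the firmware how much
     	 * of the request frame to fetch: each step of (sg_count + 4) / 11
     	 * covers roughly one more run of eleven S/G entries. The divisor
     	 * comes from the firmware interface and is kept as-is.
     	 */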
 546	addr += (hba->ccb[tag].sg_count+4)/11;
 547	msg_h->handle = cpu_to_le64(addr);
 548
 549	++hba->req_head;
 550	hba->req_head %= hba->rq_count+1;
 551	if (hba->cardtype == st_P3) {
 552		writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
 553		writel(addr, hba->mmio_base + YH2I_REQ);
 554	} else {
 555		writel((addr >> 16) >> 16, hba->mmio_base + YH2I_REQ_HI);
 556		readl(hba->mmio_base + YH2I_REQ_HI); /* flush */
 557		writel(addr, hba->mmio_base + YH2I_REQ);
 558		readl(hba->mmio_base + YH2I_REQ); /* flush */
 559	}
 560}
 561
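     /*
      * Fail every outstanding command with the given host byte (e.g.
      * DID_RESET), unmapping its DMA buffers so the midlayer can retry
      * or complete it cleanly; used on reset and hot removal.
      */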
 562static void return_abnormal_state(struct st_hba *hba, int status)
 563{
 564	struct st_ccb *ccb;
 565	unsigned long flags;
 566	u16 tag;
 567
 568	spin_lock_irqsave(hba->host->host_lock, flags);
 569	for (tag = 0; tag < hba->host->can_queue; tag++) {
 570		ccb = &hba->ccb[tag];
 571		if (ccb->req == NULL)
 572			continue;
 573		ccb->req = NULL;
 574		if (ccb->cmd) {
 575			scsi_dma_unmap(ccb->cmd);
 576			ccb->cmd->result = status << 16;
 577			scsi_done(ccb->cmd);
 578			ccb->cmd = NULL;
 579		}
 580	}
 581	spin_unlock_irqrestore(hba->host->host_lock, flags);
 582}

  583static int
 584stex_slave_config(struct scsi_device *sdev)
 585{
 586	sdev->use_10_for_rw = 1;
 587	sdev->use_10_for_ms = 1;
 588	blk_queue_rq_timeout(sdev->request_queue, 60 * HZ);
 589
 590	return 0;
 591}
 592
 593static int stex_queuecommand_lck(struct scsi_cmnd *cmd)
 594{
 595	void (*done)(struct scsi_cmnd *) = scsi_done;
 596	struct st_hba *hba;
 597	struct Scsi_Host *host;
 598	unsigned int id, lun;
 599	struct req_msg *req;
 600	u16 tag;
 601
 602	host = cmd->device->host;
 603	id = cmd->device->id;
 604	lun = cmd->device->lun;
 605	hba = (struct st_hba *) &host->hostdata[0];
 606	if (hba->mu_status == MU_STATE_NOCONNECT) {
  607		cmd->result = DID_NO_CONNECT << 16;
 608		done(cmd);
 609		return 0;
 610	}
 611	if (unlikely(hba->mu_status != MU_STATE_STARTED))
 612		return SCSI_MLQUEUE_HOST_BUSY;
 613
 614	switch (cmd->cmnd[0]) {
 615	case MODE_SENSE_10:
 616	{
 617		static char ms10_caching_page[12] =
 618			{ 0, 0x12, 0, 0, 0, 0, 0, 0, 0x8, 0xa, 0x4, 0 };
 619		unsigned char page;
 620
 621		page = cmd->cmnd[2] & 0x3f;
 622		if (page == 0x8 || page == 0x3f) {
 623			scsi_sg_copy_from_buffer(cmd, ms10_caching_page,
 624						 sizeof(ms10_caching_page));
 625			cmd->result = DID_OK << 16;
 626			done(cmd);
 627		} else
 628			stex_invalid_field(cmd, done);
 629		return 0;
 630	}
 631	case REPORT_LUNS:
 632		/*
 633		 * The shasta firmware does not report actual luns in the
 634		 * target, so fail the command to force sequential lun scan.
 635		 * Also, the console device does not support this command.
 636		 */
 637		if (hba->cardtype == st_shasta || id == host->max_id - 1) {
 638			stex_invalid_field(cmd, done);
 639			return 0;
 640		}
 641		break;
 642	case TEST_UNIT_READY:
 643		if (id == host->max_id - 1) {
 644			cmd->result = DID_OK << 16;
 645			done(cmd);
 646			return 0;
 647		}
 648		break;
 649	case INQUIRY:
 650		if (lun >= host->max_lun) {
 651			cmd->result = DID_NO_CONNECT << 16;
 652			done(cmd);
 653			return 0;
 654		}
 655		if (id != host->max_id - 1)
 656			break;
 657		if (!lun && !cmd->device->channel &&
 658			(cmd->cmnd[1] & INQUIRY_EVPD) == 0) {
 659			scsi_sg_copy_from_buffer(cmd, (void *)console_inq_page,
 660						 sizeof(console_inq_page));
 661			cmd->result = DID_OK << 16;
 662			done(cmd);
 663		} else
 664			stex_invalid_field(cmd, done);
 665		return 0;
 666	case PASSTHRU_CMD:
 667		if (cmd->cmnd[1] == PASSTHRU_GET_DRVVER) {
 668			const struct st_drvver ver = {
 669				.major = ST_VER_MAJOR,
 670				.minor = ST_VER_MINOR,
 671				.oem = ST_OEM,
 672				.build = ST_BUILD_VER,
 673				.signature[0] = PASSTHRU_SIGNATURE,
 674				.console_id = host->max_id - 1,
 675				.host_no = hba->host->host_no,
 676			};
 677			size_t cp_len = sizeof(ver);
 678
 679			cp_len = scsi_sg_copy_from_buffer(cmd, &ver, cp_len);
 680			if (sizeof(ver) == cp_len)
 681				cmd->result = DID_OK << 16;
 682			else
 683				cmd->result = DID_ERROR << 16;
 684			done(cmd);
 685			return 0;
 686		}
 687		break;
 688	default:
 689		break;
 690	}
 691
 692	tag = scsi_cmd_to_rq(cmd)->tag;
 693
 694	if (unlikely(tag >= host->can_queue))
 695		return SCSI_MLQUEUE_HOST_BUSY;
 696
 697	req = hba->alloc_rq(hba);
 698
 699	req->lun = lun;
 700	req->target = id;
 701
 702	/* cdb */
 703	memcpy(req->cdb, cmd->cmnd, STEX_CDB_LENGTH);
 704
 705	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
 706		req->data_dir = MSG_DATA_DIR_IN;
 707	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
 708		req->data_dir = MSG_DATA_DIR_OUT;
 709	else
 710		req->data_dir = MSG_DATA_DIR_ND;
 711
 712	hba->ccb[tag].cmd = cmd;
 713	hba->ccb[tag].sense_bufflen = SCSI_SENSE_BUFFERSIZE;
 714	hba->ccb[tag].sense_buffer = cmd->sense_buffer;
 715
 716	if (!hba->map_sg(hba, req, &hba->ccb[tag])) {
 717		hba->ccb[tag].sg_count = 0;
 718		memset(&req->variable[0], 0, 8);
 719	}
 720
 721	hba->send(hba, req, tag);
 722	return 0;
 723}
 724
 725static DEF_SCSI_QCMD(stex_queuecommand)
 726
 727static void stex_scsi_done(struct st_ccb *ccb)
 728{
 729	struct scsi_cmnd *cmd = ccb->cmd;
 730	int result;
 731
 732	if (ccb->srb_status == SRB_STATUS_SUCCESS || ccb->srb_status == 0) {
 733		result = ccb->scsi_status;
 734		switch (ccb->scsi_status) {
 735		case SAM_STAT_GOOD:
 736			result |= DID_OK << 16;
 737			break;
 738		case SAM_STAT_CHECK_CONDITION:
 739			result |= DID_OK << 16;
 740			break;
 741		case SAM_STAT_BUSY:
 742			result |= DID_BUS_BUSY << 16;
 743			break;
 744		default:
 745			result |= DID_ERROR << 16;
 746			break;
 747		}
 748	}
 749	else if (ccb->srb_status & SRB_SEE_SENSE)
 750		result = SAM_STAT_CHECK_CONDITION;
 751	else switch (ccb->srb_status) {
 752		case SRB_STATUS_SELECTION_TIMEOUT:
 753			result = DID_NO_CONNECT << 16;
 754			break;
 755		case SRB_STATUS_BUSY:
 756			result = DID_BUS_BUSY << 16;
 757			break;
 758		case SRB_STATUS_INVALID_REQUEST:
 759		case SRB_STATUS_ERROR:
 760		default:
 761			result = DID_ERROR << 16;
 762			break;
 763	}
 764
 765	cmd->result = result;
 766	scsi_done(cmd);
 767}
 768
 769static void stex_copy_data(struct st_ccb *ccb,
 770	struct status_msg *resp, unsigned int variable)
 771{
 772	if (resp->scsi_status != SAM_STAT_GOOD) {
 773		if (ccb->sense_buffer != NULL)
 774			memcpy(ccb->sense_buffer, resp->variable,
 775				min(variable, ccb->sense_bufflen));
 776		return;
 777	}
 778
 779	if (ccb->cmd == NULL)
 780		return;
 781	scsi_sg_copy_from_buffer(ccb->cmd, resp->variable, variable);
 782}
 783
 784static void stex_check_cmd(struct st_hba *hba,
 785	struct st_ccb *ccb, struct status_msg *resp)
 786{
 787	if (ccb->cmd->cmnd[0] == MGT_CMD &&
 788		resp->scsi_status != SAM_STAT_CHECK_CONDITION)
 789		scsi_set_resid(ccb->cmd, scsi_bufflen(ccb->cmd) -
 790			le32_to_cpu(*(__le32 *)&resp->variable[0]));
 791}
 792
 793static void stex_mu_intr(struct st_hba *hba, u32 doorbell)
 794{
 795	void __iomem *base = hba->mmio_base;
 796	struct status_msg *resp;
 797	struct st_ccb *ccb;
 798	unsigned int size;
 799	u16 tag;
 800
 801	if (unlikely(!(doorbell & MU_OUTBOUND_DOORBELL_STATUSHEADCHANGED)))
 802		return;
 803
 804	/* status payloads */
 805	hba->status_head = readl(base + OMR1);
 806	if (unlikely(hba->status_head > hba->sts_count)) {
 807		printk(KERN_WARNING DRV_NAME "(%s): invalid status head\n",
 808			pci_name(hba->pdev));
 809		return;
 810	}
 811
  812	/*
  813	 * It's not a valid status payload if:
  814	 * 1. there are no pending requests (e.g. during the init stage)
  815	 * 2. there are pending requests, but the controller is in reset
  816	 *     state and its type is not st_yosemite;
  817	 * st_yosemite firmware in reset state returns pending requests to
  818	 * the driver, so those are allowed through.
  819	 */
 820	if (unlikely(hba->out_req_cnt <= 0 ||
 821			(hba->mu_status == MU_STATE_RESETTING &&
 822			 hba->cardtype != st_yosemite))) {
 823		hba->status_tail = hba->status_head;
 824		goto update_status;
 825	}
 826
 827	while (hba->status_tail != hba->status_head) {
 828		resp = stex_get_status(hba);
 829		tag = le16_to_cpu(resp->tag);
 830		if (unlikely(tag >= hba->host->can_queue)) {
 831			printk(KERN_WARNING DRV_NAME
 832				"(%s): invalid tag\n", pci_name(hba->pdev));
 833			continue;
 834		}
 835
 836		hba->out_req_cnt--;
 837		ccb = &hba->ccb[tag];
 838		if (unlikely(hba->wait_ccb == ccb))
 839			hba->wait_ccb = NULL;
 840		if (unlikely(ccb->req == NULL)) {
 841			printk(KERN_WARNING DRV_NAME
 842				"(%s): lagging req\n", pci_name(hba->pdev));
 843			continue;
 844		}
 845
 846		size = resp->payload_sz * sizeof(u32); /* payload size */
 847		if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
 848			size > sizeof(*resp))) {
 849			printk(KERN_WARNING DRV_NAME "(%s): bad status size\n",
 850				pci_name(hba->pdev));
 851		} else {
 852			size -= sizeof(*resp) - STATUS_VAR_LEN; /* copy size */
 853			if (size)
 854				stex_copy_data(ccb, resp, size);
 855		}
 856
 857		ccb->req = NULL;
 858		ccb->srb_status = resp->srb_status;
 859		ccb->scsi_status = resp->scsi_status;
 860
 861		if (likely(ccb->cmd != NULL)) {
 862			if (hba->cardtype == st_yosemite)
 863				stex_check_cmd(hba, ccb, resp);
 864
 865			if (unlikely(ccb->cmd->cmnd[0] == PASSTHRU_CMD &&
 866				ccb->cmd->cmnd[1] == PASSTHRU_GET_ADAPTER))
 867				stex_controller_info(hba, ccb);
 868
 869			scsi_dma_unmap(ccb->cmd);
 870			stex_scsi_done(ccb);
 871		} else
 872			ccb->req_type = 0;
 873	}
 874
 875update_status:
 876	writel(hba->status_head, base + IMR1);
 877	readl(base + IMR1); /* flush */
 878}
 879
 880static irqreturn_t stex_intr(int irq, void *__hba)
 881{
 882	struct st_hba *hba = __hba;
 883	void __iomem *base = hba->mmio_base;
 884	u32 data;
 885	unsigned long flags;
 886
 887	spin_lock_irqsave(hba->host->host_lock, flags);
 888
 889	data = readl(base + ODBL);
 890
 891	if (data && data != 0xffffffff) {
 892		/* clear the interrupt */
 893		writel(data, base + ODBL);
 894		readl(base + ODBL); /* flush */
 895		stex_mu_intr(hba, data);
 896		spin_unlock_irqrestore(hba->host->host_lock, flags);
 897		if (unlikely(data & MU_OUTBOUND_DOORBELL_REQUEST_RESET &&
 898			hba->cardtype == st_shasta))
 899			queue_work(hba->work_q, &hba->reset_work);
 900		return IRQ_HANDLED;
 901	}
 902
 903	spin_unlock_irqrestore(hba->host->host_lock, flags);
 904
 905	return IRQ_NONE;
 906}
 907
 908static void stex_ss_mu_intr(struct st_hba *hba)
 909{
 910	struct status_msg *resp;
 911	struct st_ccb *ccb;
 912	__le32 *scratch;
 913	unsigned int size;
 914	int count = 0;
 915	u32 value;
 916	u16 tag;
 917
 918	if (unlikely(hba->out_req_cnt <= 0 ||
 919			hba->mu_status == MU_STATE_RESETTING))
 920		return;
 921
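     	/*
     	 * SS completion flow: firmware posts one 32-bit word per finished
     	 * request into the scratch array. SS_STS_NORMAL marks a valid
     	 * entry, the low 16 bits carry the tag, and SS_STS_DONE means
     	 * success with no status_msg payload to parse.
     	 */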
 922	while (count < hba->sts_count) {
 923		scratch = hba->scratch + hba->status_tail;
 924		value = le32_to_cpu(*scratch);
 925		if (unlikely(!(value & SS_STS_NORMAL)))
 926			return;
 927
 928		resp = hba->status_buffer + hba->status_tail;
 929		*scratch = 0;
 930		++count;
 931		++hba->status_tail;
 932		hba->status_tail %= hba->sts_count+1;
 933
 934		tag = (u16)value;
 935		if (unlikely(tag >= hba->host->can_queue)) {
 936			printk(KERN_WARNING DRV_NAME
 937				"(%s): invalid tag\n", pci_name(hba->pdev));
 938			continue;
 939		}
 940
 941		hba->out_req_cnt--;
 942		ccb = &hba->ccb[tag];
 943		if (unlikely(hba->wait_ccb == ccb))
 944			hba->wait_ccb = NULL;
 945		if (unlikely(ccb->req == NULL)) {
 946			printk(KERN_WARNING DRV_NAME
 947				"(%s): lagging req\n", pci_name(hba->pdev));
 948			continue;
 949		}
 950
 951		ccb->req = NULL;
 952		if (likely(value & SS_STS_DONE)) { /* normal case */
 953			ccb->srb_status = SRB_STATUS_SUCCESS;
 954			ccb->scsi_status = SAM_STAT_GOOD;
 955		} else {
 956			ccb->srb_status = resp->srb_status;
 957			ccb->scsi_status = resp->scsi_status;
 958			size = resp->payload_sz * sizeof(u32);
 959			if (unlikely(size < sizeof(*resp) - STATUS_VAR_LEN ||
 960				size > sizeof(*resp))) {
 961				printk(KERN_WARNING DRV_NAME
 962					"(%s): bad status size\n",
 963					pci_name(hba->pdev));
 964			} else {
 965				size -= sizeof(*resp) - STATUS_VAR_LEN;
 966				if (size)
 967					stex_copy_data(ccb, resp, size);
 968			}
 969			if (likely(ccb->cmd != NULL))
 970				stex_check_cmd(hba, ccb, resp);
 971		}
 972
 973		if (likely(ccb->cmd != NULL)) {
 974			scsi_dma_unmap(ccb->cmd);
 975			stex_scsi_done(ccb);
 976		} else
 977			ccb->req_type = 0;
 978	}
 979}
 980
 981static irqreturn_t stex_ss_intr(int irq, void *__hba)
 982{
 983	struct st_hba *hba = __hba;
 984	void __iomem *base = hba->mmio_base;
 985	u32 data;
 986	unsigned long flags;
 987
 988	spin_lock_irqsave(hba->host->host_lock, flags);
 989
 990	if (hba->cardtype == st_yel) {
 991		data = readl(base + YI2H_INT);
 992		if (data && data != 0xffffffff) {
 993			/* clear the interrupt */
 994			writel(data, base + YI2H_INT_C);
 995			stex_ss_mu_intr(hba);
 996			spin_unlock_irqrestore(hba->host->host_lock, flags);
 997			if (unlikely(data & SS_I2H_REQUEST_RESET))
 998				queue_work(hba->work_q, &hba->reset_work);
 999			return IRQ_HANDLED;
1000		}
1001	} else {
1002		data = readl(base + PSCRATCH4);
1003		if (data != 0xffffffff) {
1004			if (data != 0) {
1005				/* clear the interrupt */
1006				writel(data, base + PSCRATCH1);
1007				writel((1 << 22), base + YH2I_INT);
1008			}
1009			stex_ss_mu_intr(hba);
1010			spin_unlock_irqrestore(hba->host->host_lock, flags);
1011			if (unlikely(data & SS_I2H_REQUEST_RESET))
1012				queue_work(hba->work_q, &hba->reset_work);
1013			return IRQ_HANDLED;
1014		}
1015	}
1016
1017	spin_unlock_irqrestore(hba->host->host_lock, flags);
1018
1019	return IRQ_NONE;
1020}
1021
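     /*
      * Two-phase handshake for pre-SS controllers: wait for the firmware
      * signature in OMR0, publish a handshake_frame (ring sizes, DMA
      * addresses, host time) via the status buffer and IMR0/IMR1, then
      * wait for the signature again before clearing the message registers.
      */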
1022static int stex_common_handshake(struct st_hba *hba)
1023{
1024	void __iomem *base = hba->mmio_base;
1025	struct handshake_frame *h;
1026	dma_addr_t status_phys;
1027	u32 data;
1028	unsigned long before;
1029
1030	if (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
1031		writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
1032		readl(base + IDBL);
1033		before = jiffies;
1034		while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
1035			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1036				printk(KERN_ERR DRV_NAME
1037					"(%s): no handshake signature\n",
1038					pci_name(hba->pdev));
1039				return -1;
1040			}
1041			rmb();
1042			msleep(1);
1043		}
1044	}
1045
1046	udelay(10);
1047
1048	data = readl(base + OMR1);
1049	if ((data & 0xffff0000) == MU_HANDSHAKE_SIGNATURE_HALF) {
1050		data &= 0x0000ffff;
1051		if (hba->host->can_queue > data) {
1052			hba->host->can_queue = data;
1053			hba->host->cmd_per_lun = data;
1054		}
1055	}
1056
1057	h = (struct handshake_frame *)hba->status_buffer;
1058	h->rb_phy = cpu_to_le64(hba->dma_handle);
1059	h->req_sz = cpu_to_le16(hba->rq_size);
1060	h->req_cnt = cpu_to_le16(hba->rq_count+1);
1061	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
1062	h->status_cnt = cpu_to_le16(hba->sts_count+1);
1063	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
1064	h->partner_type = HMU_PARTNER_TYPE;
1065	if (hba->extra_offset) {
1066		h->extra_offset = cpu_to_le32(hba->extra_offset);
1067		h->extra_size = cpu_to_le32(hba->dma_size - hba->extra_offset);
1068	} else
1069		h->extra_offset = h->extra_size = 0;
1070
1071	status_phys = hba->dma_handle + (hba->rq_count+1) * hba->rq_size;
1072	writel(status_phys, base + IMR0);
1073	readl(base + IMR0);
1074	writel((status_phys >> 16) >> 16, base + IMR1);
1075	readl(base + IMR1);
1076
1077	writel((status_phys >> 16) >> 16, base + OMR0); /* old fw compatible */
1078	readl(base + OMR0);
1079	writel(MU_INBOUND_DOORBELL_HANDSHAKE, base + IDBL);
1080	readl(base + IDBL); /* flush */
1081
1082	udelay(10);
1083	before = jiffies;
1084	while (readl(base + OMR0) != MU_HANDSHAKE_SIGNATURE) {
1085		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1086			printk(KERN_ERR DRV_NAME
1087				"(%s): no signature after handshake frame\n",
1088				pci_name(hba->pdev));
1089			return -1;
1090		}
1091		rmb();
1092		msleep(1);
1093	}
1094
1095	writel(0, base + IMR0);
1096	readl(base + IMR0);
1097	writel(0, base + OMR0);
1098	readl(base + OMR0);
1099	writel(0, base + IMR1);
1100	readl(base + IMR1);
1101	writel(0, base + OMR1);
1102	readl(base + OMR1); /* flush */
1103	return 0;
1104}
1105
1106static int stex_ss_handshake(struct st_hba *hba)
1107{
1108	void __iomem *base = hba->mmio_base;
1109	struct st_msg_header *msg_h;
1110	struct handshake_frame *h;
1111	__le32 *scratch;
1112	u32 data, scratch_size, mailboxdata, operationaldata;
1113	unsigned long before;
1114	int ret = 0;
1115
1116	before = jiffies;
1117
1118	if (hba->cardtype == st_yel) {
1119		operationaldata = readl(base + YIOA_STATUS);
1120		while (operationaldata != SS_MU_OPERATIONAL) {
1121			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1122				printk(KERN_ERR DRV_NAME
1123					"(%s): firmware not operational\n",
1124					pci_name(hba->pdev));
1125				return -1;
1126			}
1127			msleep(1);
1128			operationaldata = readl(base + YIOA_STATUS);
1129		}
1130	} else {
1131		operationaldata = readl(base + PSCRATCH3);
1132		while (operationaldata != SS_MU_OPERATIONAL) {
1133			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1134				printk(KERN_ERR DRV_NAME
1135					"(%s): firmware not operational\n",
1136					pci_name(hba->pdev));
1137				return -1;
1138			}
1139			msleep(1);
1140			operationaldata = readl(base + PSCRATCH3);
1141		}
1142	}
1143
1144	msg_h = (struct st_msg_header *)hba->dma_mem;
1145	msg_h->handle = cpu_to_le64(hba->dma_handle);
1146	msg_h->flag = SS_HEAD_HANDSHAKE;
1147
1148	h = (struct handshake_frame *)(msg_h + 1);
1149	h->rb_phy = cpu_to_le64(hba->dma_handle);
1150	h->req_sz = cpu_to_le16(hba->rq_size);
1151	h->req_cnt = cpu_to_le16(hba->rq_count+1);
1152	h->status_sz = cpu_to_le16(sizeof(struct status_msg));
1153	h->status_cnt = cpu_to_le16(hba->sts_count+1);
1154	h->hosttime = cpu_to_le64(ktime_get_real_seconds());
1155	h->partner_type = HMU_PARTNER_TYPE;
1156	h->extra_offset = h->extra_size = 0;
1157	scratch_size = (hba->sts_count+1)*sizeof(u32);
1158	h->scratch_size = cpu_to_le32(scratch_size);
1159
1160	if (hba->cardtype == st_yel) {
1161		data = readl(base + YINT_EN);
1162		data &= ~4;
1163		writel(data, base + YINT_EN);
1164		writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
1165		readl(base + YH2I_REQ_HI);
1166		writel(hba->dma_handle, base + YH2I_REQ);
1167		readl(base + YH2I_REQ); /* flush */
1168	} else {
1169		data = readl(base + YINT_EN);
1170		data &= ~(1 << 0);
1171		data &= ~(1 << 2);
1172		writel(data, base + YINT_EN);
1173		if (hba->msi_lock == 0) {
 1174			/* the P3 MSI register cannot be accessed twice */
 1175			writel((1 << 6), base + YH2I_INT);
 1176			hba->msi_lock = 1;
1177		}
1178		writel((hba->dma_handle >> 16) >> 16, base + YH2I_REQ_HI);
1179		writel(hba->dma_handle, base + YH2I_REQ);
1180	}
1181
1182	before = jiffies;
1183	scratch = hba->scratch;
1184	if (hba->cardtype == st_yel) {
1185		while (!(le32_to_cpu(*scratch) & SS_STS_HANDSHAKE)) {
1186			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1187				printk(KERN_ERR DRV_NAME
1188					"(%s): no signature after handshake frame\n",
1189					pci_name(hba->pdev));
1190				ret = -1;
1191				break;
1192			}
1193			rmb();
1194			msleep(1);
1195		}
1196	} else {
1197		mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS);
1198		while (mailboxdata != SS_STS_HANDSHAKE) {
1199			if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
1200				printk(KERN_ERR DRV_NAME
1201					"(%s): no signature after handshake frame\n",
1202					pci_name(hba->pdev));
1203				ret = -1;
1204				break;
1205			}
1206			rmb();
1207			msleep(1);
1208			mailboxdata = readl(base + MAILBOX_BASE + MAILBOX_HNDSHK_STS);
1209		}
1210	}
1211	memset(scratch, 0, scratch_size);
1212	msg_h->flag = 0;
1213
1214	return ret;
1215}
1216
1217static int stex_handshake(struct st_hba *hba)
1218{
1219	int err;
1220	unsigned long flags;
1221	unsigned int mu_status;
1222
1223	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1224		err = stex_ss_handshake(hba);
1225	else
1226		err = stex_common_handshake(hba);
1227	spin_lock_irqsave(hba->host->host_lock, flags);
1228	mu_status = hba->mu_status;
1229	if (err == 0) {
1230		hba->req_head = 0;
1231		hba->req_tail = 0;
1232		hba->status_head = 0;
1233		hba->status_tail = 0;
1234		hba->out_req_cnt = 0;
1235		hba->mu_status = MU_STATE_STARTED;
1236	} else
1237		hba->mu_status = MU_STATE_FAILED;
1238	if (mu_status == MU_STATE_RESETTING)
1239		wake_up_all(&hba->reset_waitq);
1240	spin_unlock_irqrestore(hba->host->host_lock, flags);
1241	return err;
1242}
1243
1244static int stex_abort(struct scsi_cmnd *cmd)
1245{
1246	struct Scsi_Host *host = cmd->device->host;
1247	struct st_hba *hba = (struct st_hba *)host->hostdata;
1248	u16 tag = scsi_cmd_to_rq(cmd)->tag;
1249	void __iomem *base;
1250	u32 data;
1251	int result = SUCCESS;
1252	unsigned long flags;
1253
1254	scmd_printk(KERN_INFO, cmd, "aborting command\n");
1255
1256	base = hba->mmio_base;
1257	spin_lock_irqsave(host->host_lock, flags);
1258	if (tag < host->can_queue &&
1259		hba->ccb[tag].req && hba->ccb[tag].cmd == cmd)
1260		hba->wait_ccb = &hba->ccb[tag];
1261	else
1262		goto out;
1263
1264	if (hba->cardtype == st_yel) {
1265		data = readl(base + YI2H_INT);
1266		if (data == 0 || data == 0xffffffff)
1267			goto fail_out;
1268
1269		writel(data, base + YI2H_INT_C);
1270		stex_ss_mu_intr(hba);
1271	} else if (hba->cardtype == st_P3) {
1272		data = readl(base + PSCRATCH4);
1273		if (data == 0xffffffff)
1274			goto fail_out;
1275		if (data != 0) {
1276			writel(data, base + PSCRATCH1);
1277			writel((1 << 22), base + YH2I_INT);
1278		}
1279		stex_ss_mu_intr(hba);
1280	} else {
1281		data = readl(base + ODBL);
1282		if (data == 0 || data == 0xffffffff)
1283			goto fail_out;
1284
1285		writel(data, base + ODBL);
1286		readl(base + ODBL); /* flush */
1287		stex_mu_intr(hba, data);
1288	}
1289	if (hba->wait_ccb == NULL) {
1290		printk(KERN_WARNING DRV_NAME
1291			"(%s): lost interrupt\n", pci_name(hba->pdev));
1292		goto out;
1293	}
1294
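     	/* still outstanding: detach the request and report failure */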
1295fail_out:
1296	scsi_dma_unmap(cmd);
1297	hba->wait_ccb->req = NULL; /* nullify the req's future return */
1298	hba->wait_ccb = NULL;
1299	result = FAILED;
1300out:
1301	spin_unlock_irqrestore(host->host_lock, flags);
1302	return result;
1303}
1304
1305static void stex_hard_reset(struct st_hba *hba)
1306{
1307	struct pci_bus *bus;
1308	int i;
1309	u16 pci_cmd;
1310	u8 pci_bctl;
1311
1312	for (i = 0; i < 16; i++)
1313		pci_read_config_dword(hba->pdev, i * 4,
1314			&hba->pdev->saved_config_space[i]);
1315
 1316	/* Reset the secondary bus. Our controller (MU/ATU) is the only device on
 1317	   the secondary bus; consult the Intel 80331/3 developer's manual for details */
1318	bus = hba->pdev->bus;
1319	pci_read_config_byte(bus->self, PCI_BRIDGE_CONTROL, &pci_bctl);
1320	pci_bctl |= PCI_BRIDGE_CTL_BUS_RESET;
1321	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
1322
1323	/*
1324	 * 1 ms may be enough for 8-port controllers. But 16-port controllers
1325	 * require more time to finish bus reset. Use 100 ms here for safety
1326	 */
1327	msleep(100);
1328	pci_bctl &= ~PCI_BRIDGE_CTL_BUS_RESET;
1329	pci_write_config_byte(bus->self, PCI_BRIDGE_CONTROL, pci_bctl);
1330
1331	for (i = 0; i < MU_HARD_RESET_WAIT; i++) {
1332		pci_read_config_word(hba->pdev, PCI_COMMAND, &pci_cmd);
1333		if (pci_cmd != 0xffff && (pci_cmd & PCI_COMMAND_MASTER))
1334			break;
1335		msleep(1);
1336	}
1337
1338	ssleep(5);
1339	for (i = 0; i < 16; i++)
1340		pci_write_config_dword(hba->pdev, i * 4,
1341			hba->pdev->saved_config_space[i]);
1342}
1343
1344static int stex_yos_reset(struct st_hba *hba)
1345{
1346	void __iomem *base;
1347	unsigned long flags, before;
1348	int ret = 0;
1349
1350	base = hba->mmio_base;
1351	writel(MU_INBOUND_DOORBELL_RESET, base + IDBL);
1352	readl(base + IDBL); /* flush */
1353	before = jiffies;
1354	while (hba->out_req_cnt > 0) {
1355		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
1356			printk(KERN_WARNING DRV_NAME
1357				"(%s): reset timeout\n", pci_name(hba->pdev));
1358			ret = -1;
1359			break;
1360		}
1361		msleep(1);
1362	}
1363
1364	spin_lock_irqsave(hba->host->host_lock, flags);
1365	if (ret == -1)
1366		hba->mu_status = MU_STATE_FAILED;
1367	else
1368		hba->mu_status = MU_STATE_STARTED;
1369	wake_up_all(&hba->reset_waitq);
1370	spin_unlock_irqrestore(hba->host->host_lock, flags);
1371
1372	return ret;
1373}
1374
1375static void stex_ss_reset(struct st_hba *hba)
1376{
1377	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
1378	readl(hba->mmio_base + YH2I_INT);
1379	ssleep(5);
1380}
1381
1382static void stex_p3_reset(struct st_hba *hba)
1383{
1384	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
1385	ssleep(5);
1386}
1387
1388static int stex_do_reset(struct st_hba *hba)
1389{
1390	unsigned long flags;
1391	unsigned int mu_status = MU_STATE_RESETTING;
1392
1393	spin_lock_irqsave(hba->host->host_lock, flags);
1394	if (hba->mu_status == MU_STATE_STARTING) {
1395		spin_unlock_irqrestore(hba->host->host_lock, flags);
1396		printk(KERN_INFO DRV_NAME "(%s): request reset during init\n",
1397			pci_name(hba->pdev));
1398		return 0;
1399	}
1400	while (hba->mu_status == MU_STATE_RESETTING) {
1401		spin_unlock_irqrestore(hba->host->host_lock, flags);
1402		wait_event_timeout(hba->reset_waitq,
1403				   hba->mu_status != MU_STATE_RESETTING,
1404				   MU_MAX_DELAY * HZ);
1405		spin_lock_irqsave(hba->host->host_lock, flags);
1406		mu_status = hba->mu_status;
1407	}
1408
1409	if (mu_status != MU_STATE_RESETTING) {
1410		spin_unlock_irqrestore(hba->host->host_lock, flags);
1411		return (mu_status == MU_STATE_STARTED) ? 0 : -1;
1412	}
1413
1414	hba->mu_status = MU_STATE_RESETTING;
1415	spin_unlock_irqrestore(hba->host->host_lock, flags);
1416
1417	if (hba->cardtype == st_yosemite)
1418		return stex_yos_reset(hba);
1419
1420	if (hba->cardtype == st_shasta)
1421		stex_hard_reset(hba);
1422	else if (hba->cardtype == st_yel)
1423		stex_ss_reset(hba);
1424	else if (hba->cardtype == st_P3)
1425		stex_p3_reset(hba);
1426
1427	return_abnormal_state(hba, DID_RESET);
1428
1429	if (stex_handshake(hba) == 0)
1430		return 0;
1431
1432	printk(KERN_WARNING DRV_NAME "(%s): resetting: handshake failed\n",
1433		pci_name(hba->pdev));
1434	return -1;
1435}
1436
1437static int stex_reset(struct scsi_cmnd *cmd)
1438{
1439	struct st_hba *hba;
1440
1441	hba = (struct st_hba *) &cmd->device->host->hostdata[0];
1442
1443	shost_printk(KERN_INFO, cmd->device->host,
1444		     "resetting host\n");
1445
1446	return stex_do_reset(hba) ? FAILED : SUCCESS;
1447}
1448
1449static void stex_reset_work(struct work_struct *work)
1450{
1451	struct st_hba *hba = container_of(work, struct st_hba, reset_work);
1452
1453	stex_do_reset(hba);
1454}
1455
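     /*
      * Report a legacy CHS geometry: 255 heads x 63 sectors, or 64 x 32
      * for arrays smaller than 1 GB (0x200000 512-byte sectors), with the
      * cylinder count derived from the capacity.
      */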
1456static int stex_biosparam(struct scsi_device *sdev,
1457	struct block_device *bdev, sector_t capacity, int geom[])
1458{
1459	int heads = 255, sectors = 63;
1460
1461	if (capacity < 0x200000) {
1462		heads = 64;
1463		sectors = 32;
1464	}
1465
1466	sector_div(capacity, heads * sectors);
1467
1468	geom[0] = heads;
1469	geom[1] = sectors;
1470	geom[2] = capacity;
1471
1472	return 0;
1473}
1474
1475static struct scsi_host_template driver_template = {
1476	.module				= THIS_MODULE,
1477	.name				= DRV_NAME,
1478	.proc_name			= DRV_NAME,
1479	.bios_param			= stex_biosparam,
1480	.queuecommand			= stex_queuecommand,
1481	.slave_configure		= stex_slave_config,
1482	.eh_abort_handler		= stex_abort,
1483	.eh_host_reset_handler		= stex_reset,
1484	.this_id			= -1,
1485	.dma_boundary			= PAGE_SIZE - 1,
1486};
1487
1488static struct pci_device_id stex_pci_tbl[] = {
1489	/* st_shasta */
1490	{ 0x105a, 0x8350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1491		st_shasta }, /* SuperTrak EX8350/8300/16350/16300 */
1492	{ 0x105a, 0xc350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1493		st_shasta }, /* SuperTrak EX12350 */
1494	{ 0x105a, 0x4302, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1495		st_shasta }, /* SuperTrak EX4350 */
1496	{ 0x105a, 0xe350, PCI_ANY_ID, PCI_ANY_ID, 0, 0,
1497		st_shasta }, /* SuperTrak EX24350 */
1498
1499	/* st_vsc */
1500	{ 0x105a, 0x7250, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_vsc },
1501
1502	/* st_yosemite */
1503	{ 0x105a, 0x8650, 0x105a, PCI_ANY_ID, 0, 0, st_yosemite },
1504
1505	/* st_seq */
1506	{ 0x105a, 0x3360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_seq },
1507
1508	/* st_yel */
1509	{ 0x105a, 0x8650, 0x1033, PCI_ANY_ID, 0, 0, st_yel },
1510	{ 0x105a, 0x8760, PCI_ANY_ID, PCI_ANY_ID, 0, 0, st_yel },
1511
1512	/* st_P3, pluto */
1513	{ PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE,
1514		0x8870, 0, 0, st_P3 },
1515	/* st_P3, p3 */
1516	{ PCI_VENDOR_ID_PROMISE, 0x8870, PCI_VENDOR_ID_PROMISE,
1517		0x4300, 0, 0, st_P3 },
1518
1519	/* st_P3, SymplyStor4E */
1520	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
1521		0x4311, 0, 0, st_P3 },
1522	/* st_P3, SymplyStor8E */
1523	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
1524		0x4312, 0, 0, st_P3 },
1525	/* st_P3, SymplyStor4 */
1526	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
1527		0x4321, 0, 0, st_P3 },
1528	/* st_P3, SymplyStor8 */
1529	{ PCI_VENDOR_ID_PROMISE, 0x8871, PCI_VENDOR_ID_PROMISE,
1530		0x4322, 0, 0, st_P3 },
1531	{ }	/* terminate list */
1532};
1533
1534static struct st_card_info stex_card_info[] = {
1535	/* st_shasta */
1536	{
1537		.max_id		= 17,
1538		.max_lun	= 8,
1539		.max_channel	= 0,
1540		.rq_count	= 32,
1541		.rq_size	= 1048,
1542		.sts_count	= 32,
1543		.alloc_rq	= stex_alloc_req,
1544		.map_sg		= stex_map_sg,
1545		.send		= stex_send_cmd,
1546	},
1547
1548	/* st_vsc */
1549	{
1550		.max_id		= 129,
1551		.max_lun	= 1,
1552		.max_channel	= 0,
1553		.rq_count	= 32,
1554		.rq_size	= 1048,
1555		.sts_count	= 32,
1556		.alloc_rq	= stex_alloc_req,
1557		.map_sg		= stex_map_sg,
1558		.send		= stex_send_cmd,
1559	},
1560
1561	/* st_yosemite */
1562	{
1563		.max_id		= 2,
1564		.max_lun	= 256,
1565		.max_channel	= 0,
1566		.rq_count	= 256,
1567		.rq_size	= 1048,
1568		.sts_count	= 256,
1569		.alloc_rq	= stex_alloc_req,
1570		.map_sg		= stex_map_sg,
1571		.send		= stex_send_cmd,
1572	},
1573
1574	/* st_seq */
1575	{
1576		.max_id		= 129,
1577		.max_lun	= 1,
1578		.max_channel	= 0,
1579		.rq_count	= 32,
1580		.rq_size	= 1048,
1581		.sts_count	= 32,
1582		.alloc_rq	= stex_alloc_req,
1583		.map_sg		= stex_map_sg,
1584		.send		= stex_send_cmd,
1585	},
1586
1587	/* st_yel */
1588	{
1589		.max_id		= 129,
1590		.max_lun	= 256,
1591		.max_channel	= 3,
1592		.rq_count	= 801,
1593		.rq_size	= 512,
1594		.sts_count	= 801,
1595		.alloc_rq	= stex_ss_alloc_req,
1596		.map_sg		= stex_ss_map_sg,
1597		.send		= stex_ss_send_cmd,
1598	},
1599
1600	/* st_P3 */
1601	{
1602		.max_id		= 129,
1603		.max_lun	= 256,
1604		.max_channel	= 0,
1605		.rq_count	= 801,
1606		.rq_size	= 512,
1607		.sts_count	= 801,
1608		.alloc_rq	= stex_ss_alloc_req,
1609		.map_sg		= stex_ss_map_sg,
1610		.send		= stex_ss_send_cmd,
1611	},
1612};
1613
1614static int stex_request_irq(struct st_hba *hba)
1615{
1616	struct pci_dev *pdev = hba->pdev;
1617	int status;
1618
1619	if (msi || hba->cardtype == st_P3) {
1620		status = pci_enable_msi(pdev);
1621		if (status != 0)
1622			printk(KERN_ERR DRV_NAME
1623				"(%s): error %d setting up MSI\n",
1624				pci_name(pdev), status);
1625		else
1626			hba->msi_enabled = 1;
1627	} else
1628		hba->msi_enabled = 0;
1629
1630	status = request_irq(pdev->irq,
1631		(hba->cardtype == st_yel || hba->cardtype == st_P3) ?
1632		stex_ss_intr : stex_intr, IRQF_SHARED, DRV_NAME, hba);
1633
1634	if (status != 0) {
1635		if (hba->msi_enabled)
1636			pci_disable_msi(pdev);
1637	}
1638	return status;
1639}
1640
1641static void stex_free_irq(struct st_hba *hba)
1642{
1643	struct pci_dev *pdev = hba->pdev;
1644
1645	free_irq(pdev->irq, hba);
1646	if (hba->msi_enabled)
1647		pci_disable_msi(pdev);
1648}
1649
1650static int stex_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1651{
1652	struct st_hba *hba;
1653	struct Scsi_Host *host;
1654	const struct st_card_info *ci = NULL;
1655	u32 sts_offset, cp_offset, scratch_offset;
1656	int err;
1657
1658	err = pci_enable_device(pdev);
1659	if (err)
1660		return err;
1661
1662	pci_set_master(pdev);
1663
1664	S6flag = 0;
1665	register_reboot_notifier(&stex_notifier);
1666
1667	host = scsi_host_alloc(&driver_template, sizeof(struct st_hba));
1668
1669	if (!host) {
1670		printk(KERN_ERR DRV_NAME "(%s): scsi_host_alloc failed\n",
1671			pci_name(pdev));
1672		err = -ENOMEM;
1673		goto out_disable;
1674	}
1675
1676	hba = (struct st_hba *)host->hostdata;
1677	memset(hba, 0, sizeof(struct st_hba));
1678
1679	err = pci_request_regions(pdev, DRV_NAME);
1680	if (err < 0) {
1681		printk(KERN_ERR DRV_NAME "(%s): request regions failed\n",
1682			pci_name(pdev));
1683		goto out_scsi_host_put;
1684	}
1685
1686	hba->mmio_base = pci_ioremap_bar(pdev, 0);
 1687	if (!hba->mmio_base) {
1688		printk(KERN_ERR DRV_NAME "(%s): memory map failed\n",
1689			pci_name(pdev));
1690		err = -ENOMEM;
1691		goto out_release_regions;
1692	}
1693
1694	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
1695	if (err)
1696		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
1697	if (err) {
1698		printk(KERN_ERR DRV_NAME "(%s): set dma mask failed\n",
1699			pci_name(pdev));
1700		goto out_iounmap;
1701	}
1702
1703	hba->cardtype = (unsigned int) id->driver_data;
1704	ci = &stex_card_info[hba->cardtype];
1705	switch (id->subdevice) {
1706	case 0x4221:
1707	case 0x4222:
1708	case 0x4223:
1709	case 0x4224:
1710	case 0x4225:
1711	case 0x4226:
1712	case 0x4227:
1713	case 0x4261:
1714	case 0x4262:
1715	case 0x4263:
1716	case 0x4264:
1717	case 0x4265:
1718		break;
1719	default:
1720		if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1721			hba->supports_pm = 1;
1722	}
1723
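     	/*
     	 * Layout of the single coherent DMA area: request ring, then
     	 * (SS controllers only) the scratch array, then the status ring,
     	 * then the copy buffer, then optional extra firmware memory for
     	 * st_seq and odd-numbered st_vsc subdevices.
     	 */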
1724	sts_offset = scratch_offset = (ci->rq_count+1) * ci->rq_size;
1725	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1726		sts_offset += (ci->sts_count+1) * sizeof(u32);
1727	cp_offset = sts_offset + (ci->sts_count+1) * sizeof(struct status_msg);
1728	hba->dma_size = cp_offset + sizeof(struct st_frame);
1729	if (hba->cardtype == st_seq ||
1730		(hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
1731		hba->extra_offset = hba->dma_size;
1732		hba->dma_size += ST_ADDITIONAL_MEM;
1733	}
1734	hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1735		hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1736	if (!hba->dma_mem) {
1737		/* Retry minimum coherent mapping for st_seq and st_vsc */
1738		if (hba->cardtype == st_seq ||
1739		    (hba->cardtype == st_vsc && (pdev->subsystem_device & 1))) {
1740			printk(KERN_WARNING DRV_NAME
1741				"(%s): allocating min buffer for controller\n",
1742				pci_name(pdev));
1743			hba->dma_size = hba->extra_offset
1744				+ ST_ADDITIONAL_MEM_MIN;
1745			hba->dma_mem = dma_alloc_coherent(&pdev->dev,
1746				hba->dma_size, &hba->dma_handle, GFP_KERNEL);
1747		}
1748
1749		if (!hba->dma_mem) {
1750			err = -ENOMEM;
1751			printk(KERN_ERR DRV_NAME "(%s): dma mem alloc failed\n",
1752				pci_name(pdev));
1753			goto out_iounmap;
1754		}
1755	}
1756
1757	hba->ccb = kcalloc(ci->rq_count, sizeof(struct st_ccb), GFP_KERNEL);
1758	if (!hba->ccb) {
1759		err = -ENOMEM;
1760		printk(KERN_ERR DRV_NAME "(%s): ccb alloc failed\n",
1761			pci_name(pdev));
1762		goto out_pci_free;
1763	}
1764
1765	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1766		hba->scratch = (__le32 *)(hba->dma_mem + scratch_offset);
1767	hba->status_buffer = (struct status_msg *)(hba->dma_mem + sts_offset);
1768	hba->copy_buffer = hba->dma_mem + cp_offset;
1769	hba->rq_count = ci->rq_count;
1770	hba->rq_size = ci->rq_size;
1771	hba->sts_count = ci->sts_count;
1772	hba->alloc_rq = ci->alloc_rq;
1773	hba->map_sg = ci->map_sg;
1774	hba->send = ci->send;
1775	hba->mu_status = MU_STATE_STARTING;
1776	hba->msi_lock = 0;
1777
1778	if (hba->cardtype == st_yel || hba->cardtype == st_P3)
1779		host->sg_tablesize = 38;
1780	else
1781		host->sg_tablesize = 32;
1782	host->can_queue = ci->rq_count;
1783	host->cmd_per_lun = ci->rq_count;
1784	host->max_id = ci->max_id;
1785	host->max_lun = ci->max_lun;
1786	host->max_channel = ci->max_channel;
1787	host->unique_id = host->host_no;
1788	host->max_cmd_len = STEX_CDB_LENGTH;
1789
1790	hba->host = host;
1791	hba->pdev = pdev;
1792	init_waitqueue_head(&hba->reset_waitq);
1793
1794	snprintf(hba->work_q_name, sizeof(hba->work_q_name),
1795		 "stex_wq_%d", host->host_no);
1796	hba->work_q = create_singlethread_workqueue(hba->work_q_name);
1797	if (!hba->work_q) {
1798		printk(KERN_ERR DRV_NAME "(%s): create workqueue failed\n",
1799			pci_name(pdev));
1800		err = -ENOMEM;
1801		goto out_ccb_free;
1802	}
1803	INIT_WORK(&hba->reset_work, stex_reset_work);
1804
1805	err = stex_request_irq(hba);
1806	if (err) {
1807		printk(KERN_ERR DRV_NAME "(%s): request irq failed\n",
1808			pci_name(pdev));
1809		goto out_free_wq;
1810	}
1811
1812	err = stex_handshake(hba);
1813	if (err)
1814		goto out_free_irq;
1815
1816	pci_set_drvdata(pdev, hba);
1817
1818	err = scsi_add_host(host, &pdev->dev);
1819	if (err) {
1820		printk(KERN_ERR DRV_NAME "(%s): scsi_add_host failed\n",
1821			pci_name(pdev));
1822		goto out_free_irq;
1823	}
1824
1825	scsi_scan_host(host);
1826
1827	return 0;
1828
1829out_free_irq:
1830	stex_free_irq(hba);
1831out_free_wq:
1832	destroy_workqueue(hba->work_q);
1833out_ccb_free:
1834	kfree(hba->ccb);
1835out_pci_free:
1836	dma_free_coherent(&pdev->dev, hba->dma_size,
1837			  hba->dma_mem, hba->dma_handle);
1838out_iounmap:
1839	iounmap(hba->mmio_base);
1840out_release_regions:
1841	pci_release_regions(pdev);
1842out_scsi_host_put:
1843	scsi_host_put(host);
1844out_disable:
1845	pci_disable_device(pdev);
1846
1847	return err;
1848}
1849
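     /*
      * Issue a shutdown / power-state passthrough request on tag 0 and
      * poll for completion; no scsi_cmnd is attached, so the interrupt
      * handler signals completion by clearing PASSTHRU_REQ_TYPE.
      */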
1850static void stex_hba_stop(struct st_hba *hba, int st_sleep_mic)
1851{
1852	struct req_msg *req;
1853	struct st_msg_header *msg_h;
1854	unsigned long flags;
1855	unsigned long before;
1856	u16 tag = 0;
1857
1858	spin_lock_irqsave(hba->host->host_lock, flags);
1859
1860	if ((hba->cardtype == st_yel || hba->cardtype == st_P3) &&
1861		hba->supports_pm == 1) {
1862		if (st_sleep_mic == ST_NOTHANDLED) {
1863			spin_unlock_irqrestore(hba->host->host_lock, flags);
1864			return;
1865		}
1866	}
1867	req = hba->alloc_rq(hba);
1868	if (hba->cardtype == st_yel || hba->cardtype == st_P3) {
1869		msg_h = (struct st_msg_header *)req - 1;
1870		memset(msg_h, 0, hba->rq_size);
1871	} else
1872		memset(req, 0, hba->rq_size);
1873
1874	if ((hba->cardtype == st_yosemite || hba->cardtype == st_yel
1875		|| hba->cardtype == st_P3)
1876		&& st_sleep_mic == ST_IGNORED) {
1877		req->cdb[0] = MGT_CMD;
1878		req->cdb[1] = MGT_CMD_SIGNATURE;
1879		req->cdb[2] = CTLR_CONFIG_CMD;
1880		req->cdb[3] = CTLR_SHUTDOWN;
1881	} else if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
1882		&& st_sleep_mic != ST_IGNORED) {
1883		req->cdb[0] = MGT_CMD;
1884		req->cdb[1] = MGT_CMD_SIGNATURE;
1885		req->cdb[2] = CTLR_CONFIG_CMD;
1886		req->cdb[3] = PMIC_SHUTDOWN;
1887		req->cdb[4] = st_sleep_mic;
1888	} else {
1889		req->cdb[0] = CONTROLLER_CMD;
1890		req->cdb[1] = CTLR_POWER_STATE_CHANGE;
1891		req->cdb[2] = CTLR_POWER_SAVING;
1892	}
1893	hba->ccb[tag].cmd = NULL;
1894	hba->ccb[tag].sg_count = 0;
1895	hba->ccb[tag].sense_bufflen = 0;
1896	hba->ccb[tag].sense_buffer = NULL;
1897	hba->ccb[tag].req_type = PASSTHRU_REQ_TYPE;
1898	hba->send(hba, req, tag);
1899	spin_unlock_irqrestore(hba->host->host_lock, flags);
1900	before = jiffies;
1901	while (hba->ccb[tag].req_type & PASSTHRU_REQ_TYPE) {
1902		if (time_after(jiffies, before + ST_INTERNAL_TIMEOUT * HZ)) {
1903			hba->ccb[tag].req_type = 0;
1904			hba->mu_status = MU_STATE_STOP;
1905			return;
1906		}
1907		msleep(1);
1908	}
1909	hba->mu_status = MU_STATE_STOP;
1910}
1911
1912static void stex_hba_free(struct st_hba *hba)
1913{
1914	stex_free_irq(hba);
1915
1916	destroy_workqueue(hba->work_q);
1917
1918	iounmap(hba->mmio_base);
1919
1920	pci_release_regions(hba->pdev);
1921
1922	kfree(hba->ccb);
1923
1924	dma_free_coherent(&hba->pdev->dev, hba->dma_size,
1925			  hba->dma_mem, hba->dma_handle);
1926}
1927
1928static void stex_remove(struct pci_dev *pdev)
1929{
1930	struct st_hba *hba = pci_get_drvdata(pdev);
1931
1932	hba->mu_status = MU_STATE_NOCONNECT;
1933	return_abnormal_state(hba, DID_NO_CONNECT);
1934	scsi_remove_host(hba->host);
1935
1936	scsi_block_requests(hba->host);
1937
1938	stex_hba_free(hba);
1939
1940	scsi_host_put(hba->host);
1941
1942	pci_disable_device(pdev);
1943
1944	unregister_reboot_notifier(&stex_notifier);
1945}
1946
1947static void stex_shutdown(struct pci_dev *pdev)
1948{
1949	struct st_hba *hba = pci_get_drvdata(pdev);
1950
1951	if (hba->supports_pm == 0) {
1952		stex_hba_stop(hba, ST_IGNORED);
1953	} else if (hba->supports_pm == 1 && S6flag) {
1954		unregister_reboot_notifier(&stex_notifier);
1955		stex_hba_stop(hba, ST_S6);
1956	} else
1957		stex_hba_stop(hba, ST_S5);
1958}
1959
1960static int stex_choice_sleep_mic(struct st_hba *hba, pm_message_t state)
1961{
1962	switch (state.event) {
1963	case PM_EVENT_SUSPEND:
1964		return ST_S3;
1965	case PM_EVENT_HIBERNATE:
1966		hba->msi_lock = 0;
1967		return ST_S4;
1968	default:
1969		return ST_NOTHANDLED;
1970	}
1971}
1972
1973static int stex_suspend(struct pci_dev *pdev, pm_message_t state)
1974{
1975	struct st_hba *hba = pci_get_drvdata(pdev);
1976
1977	if ((hba->cardtype == st_yel || hba->cardtype == st_P3)
1978		&& hba->supports_pm == 1)
1979		stex_hba_stop(hba, stex_choice_sleep_mic(hba, state));
1980	else
1981		stex_hba_stop(hba, ST_IGNORED);
1982	return 0;
1983}
1984
1985static int stex_resume(struct pci_dev *pdev)
1986{
1987	struct st_hba *hba = pci_get_drvdata(pdev);
1988
1989	hba->mu_status = MU_STATE_STARTING;
1990	stex_handshake(hba);
1991	return 0;
1992}
1993
1994static int stex_halt(struct notifier_block *nb, unsigned long event, void *buf)
1995{
1996	S6flag = 1;
1997	return NOTIFY_OK;
1998}
1999MODULE_DEVICE_TABLE(pci, stex_pci_tbl);
2000
2001static struct pci_driver stex_pci_driver = {
2002	.name		= DRV_NAME,
2003	.id_table	= stex_pci_tbl,
2004	.probe		= stex_probe,
2005	.remove		= stex_remove,
2006	.shutdown	= stex_shutdown,
2007	.suspend	= stex_suspend,
2008	.resume		= stex_resume,
2009};
2010
2011static int __init stex_init(void)
2012{
2013	printk(KERN_INFO DRV_NAME
2014		": Promise SuperTrak EX Driver version: %s\n",
2015		 ST_DRIVER_VERSION);
2016
2017	return pci_register_driver(&stex_pci_driver);
2018}
2019
2020static void __exit stex_exit(void)
2021{
2022	pci_unregister_driver(&stex_pci_driver);
2023}
2024
2025module_init(stex_init);
2026module_exit(stex_exit);