   1/*
   2 * Marvell UMI driver
   3 *
   4 * Copyright 2011 Marvell. <jyli@marvell.com>
   5 *
   6 * This file is licensed under GPLv2.
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public License as
  10 * published by the Free Software Foundation; version 2 of the
  11 * License.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  21 * USA
  22*/
  23
  24#include <linux/kernel.h>
  25#include <linux/module.h>
  26#include <linux/moduleparam.h>
  27#include <linux/init.h>
  28#include <linux/device.h>
  29#include <linux/pci.h>
  30#include <linux/list.h>
  31#include <linux/spinlock.h>
  32#include <linux/interrupt.h>
   33#include <linux/delay.h>
   34#include <linux/blkdev.h>
  35#include <linux/io.h>
  36#include <scsi/scsi.h>
  37#include <scsi/scsi_cmnd.h>
  38#include <scsi/scsi_device.h>
  39#include <scsi/scsi_host.h>
  40#include <scsi/scsi_transport.h>
  41#include <scsi/scsi_eh.h>
  42#include <linux/uaccess.h>
  43#include <linux/kthread.h>
  44
  45#include "mvumi.h"
  46
  47MODULE_LICENSE("GPL");
  48MODULE_AUTHOR("jyli@marvell.com");
  49MODULE_DESCRIPTION("Marvell UMI Driver");
  50
  51static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
  52	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
  53	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
  54	{ 0 }
  55};
  56
  57MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
  58
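/*
 * Tag pool helpers: free tags live on a simple LIFO stack.  tag_init()
 * pushes every tag (0 .. size-1), tag_get_one() pops the top entry and
 * tag_release_one() pushes a tag back when its command completes.
 */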
  59static void tag_init(struct mvumi_tag *st, unsigned short size)
  60{
  61	unsigned short i;
  62	BUG_ON(size != st->size);
  63	st->top = size;
  64	for (i = 0; i < size; i++)
  65		st->stack[i] = size - 1 - i;
  66}
  67
  68static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
  69{
  70	BUG_ON(st->top <= 0);
  71	return st->stack[--st->top];
  72}
  73
  74static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
  75							unsigned short tag)
  76{
  77	BUG_ON(st->top >= st->size);
  78	st->stack[st->top++] = tag;
  79}
  80
  81static bool tag_is_empty(struct mvumi_tag *st)
  82{
  83	if (st->top == 0)
   84		return true;
   85	else
   86		return false;
  87}
  88
  89static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
  90{
  91	int i;
  92
  93	for (i = 0; i < MAX_BASE_ADDRESS; i++)
  94		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
  95								addr_array[i])
  96			pci_iounmap(dev, addr_array[i]);
  97}
  98
  99static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
 100{
 101	int i;
 102
 103	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 104		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
 105			addr_array[i] = pci_iomap(dev, i, 0);
 106			if (!addr_array[i]) {
 107				dev_err(&dev->dev, "failed to map Bar[%d]\n",
 108									i);
 109				mvumi_unmap_pci_addr(dev, addr_array);
 110				return -ENOMEM;
 111			}
 112		} else
 113			addr_array[i] = NULL;
 114
 115		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
 116	}
 117
 118	return 0;
 119}
 120
 121static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 122				enum resource_type type, unsigned int size)
 123{
 124	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
 125
 126	if (!res) {
 127		dev_err(&mhba->pdev->dev,
 128			"Failed to allocate memory for resource manager.\n");
 129		return NULL;
 130	}
 131
 132	switch (type) {
 133	case RESOURCE_CACHED_MEMORY:
 134		res->virt_addr = kzalloc(size, GFP_ATOMIC);
 135		if (!res->virt_addr) {
 136			dev_err(&mhba->pdev->dev,
 137				"unable to allocate memory,size = %d.\n", size);
 138			kfree(res);
 139			return NULL;
 140		}
 141		break;
 142
 143	case RESOURCE_UNCACHED_MEMORY:
 144		size = round_up(size, 8);
 145		res->virt_addr = pci_alloc_consistent(mhba->pdev, size,
 146							&res->bus_addr);
 147		if (!res->virt_addr) {
 148			dev_err(&mhba->pdev->dev,
 149					"unable to allocate consistent mem,"
 150							"size = %d.\n", size);
 151			kfree(res);
 152			return NULL;
 153		}
 154		memset(res->virt_addr, 0, size);
 155		break;
 156
 157	default:
 158		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
 159		kfree(res);
 160		return NULL;
 161	}
 162
 163	res->type = type;
 164	res->size = size;
 165	INIT_LIST_HEAD(&res->entry);
 166	list_add_tail(&res->entry, &mhba->res_list);
 167
 168	return res;
 169}
 170
 171static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
 172{
 173	struct mvumi_res *res, *tmp;
 174
 175	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
 176		switch (res->type) {
 177		case RESOURCE_UNCACHED_MEMORY:
 178			pci_free_consistent(mhba->pdev, res->size,
 179						res->virt_addr, res->bus_addr);
 180			break;
 181		case RESOURCE_CACHED_MEMORY:
 182			kfree(res->virt_addr);
 183			break;
 184		default:
 185			dev_err(&mhba->pdev->dev,
 186				"unknown resource type %d\n", res->type);
 187			break;
 188		}
 189		list_del(&res->entry);
 190		kfree(res);
 191	}
 192	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
 193}
 194
 195/**
  196 * mvumi_make_sgl -	Prepares SGL
  197 * @mhba:		Adapter soft state
  198 * @scmd:		SCSI command from the mid-layer
  199 * @sgl_p:		SGL to be filled in
  200 * @sg_count:		returns the number of SG elements
  201 *
  202 * If successful, this function returns 0; otherwise, it returns -1.
 203 */
 204static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 205					void *sgl_p, unsigned char *sg_count)
 206{
 207	struct scatterlist *sg;
 208	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
 209	unsigned int i;
 210	unsigned int sgnum = scsi_sg_count(scmd);
 211	dma_addr_t busaddr;
 212
 213	if (sgnum) {
 214		sg = scsi_sglist(scmd);
 215		*sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
 216				(int) scmd->sc_data_direction);
 217		if (*sg_count > mhba->max_sge) {
 218			dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
 219						"than max sg[0x%x].\n",
 220						*sg_count, mhba->max_sge);
 221			return -1;
 222		}
 223		for (i = 0; i < *sg_count; i++) {
 224			busaddr = sg_dma_address(&sg[i]);
 225			m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 226			m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 227			m_sg->flags = 0;
 228			sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
 229			if ((i + 1) == *sg_count)
 230				m_sg->flags |= 1U << mhba->eot_flag;
 231
 232			sgd_inc(mhba, m_sg);
 233		}
 234	} else {
 235		scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
 236			pci_map_single(mhba->pdev, scsi_sglist(scmd),
 237				scsi_bufflen(scmd),
 238				(int) scmd->sc_data_direction)
 239			: 0;
 240		busaddr = scmd->SCp.dma_handle;
 241		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 242		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 243		m_sg->flags = 1U << mhba->eot_flag;
 244		sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
  245		*sg_count = 1;
  246	}
 247
 248	return 0;
 249}
 250
 251static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
 252							unsigned int size)
 253{
 254	struct mvumi_sgl *m_sg;
 255	void *virt_addr;
 256	dma_addr_t phy_addr;
 257
 258	if (size == 0)
 259		return 0;
 260
 261	virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr);
 262	if (!virt_addr)
 263		return -1;
 264
 265	memset(virt_addr, 0, size);
 266
 267	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 268	cmd->frame->sg_counts = 1;
 269	cmd->data_buf = virt_addr;
 270
 271	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
 272	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
 273	m_sg->flags = 1U << mhba->eot_flag;
 274	sgd_setsz(mhba, m_sg, cpu_to_le32(size));
 275
 276	return 0;
 277}
 278
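/*
 * Internal (driver-generated) commands use a DMA-coherent message frame;
 * when buf_size is non-zero a single coherent data buffer is attached via
 * mvumi_internal_cmd_sgl() and described by one SG entry with the
 * end-of-table flag set.
 */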
 279static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
 280				unsigned int buf_size)
 281{
 282	struct mvumi_cmd *cmd;
 283
 284	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 285	if (!cmd) {
  286		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
 287		return NULL;
 288	}
 289	INIT_LIST_HEAD(&cmd->queue_pointer);
 290
 291	cmd->frame = pci_alloc_consistent(mhba->pdev,
 292				mhba->ib_max_size, &cmd->frame_phys);
 293	if (!cmd->frame) {
 294		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
 295			" frame,size = %d.\n", mhba->ib_max_size);
 296		kfree(cmd);
 297		return NULL;
 298	}
 299
 300	if (buf_size) {
 301		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
 302			dev_err(&mhba->pdev->dev, "failed to allocate memory"
 303						" for internal frame\n");
 304			pci_free_consistent(mhba->pdev, mhba->ib_max_size,
 305					cmd->frame, cmd->frame_phys);
 306			kfree(cmd);
 307			return NULL;
 308		}
 309	} else
 310		cmd->frame->sg_counts = 0;
 311
 312	return cmd;
 313}
 314
 315static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
 316						struct mvumi_cmd *cmd)
 317{
 318	struct mvumi_sgl *m_sg;
 319	unsigned int size;
 320	dma_addr_t phy_addr;
 321
 322	if (cmd && cmd->frame) {
 323		if (cmd->frame->sg_counts) {
 324			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 325			sgd_getsz(mhba, m_sg, size);
 326
 327			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
 328				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 329
 330			pci_free_consistent(mhba->pdev, size, cmd->data_buf,
 331								phy_addr);
 332		}
 333		pci_free_consistent(mhba->pdev, mhba->ib_max_size,
 334				cmd->frame, cmd->frame_phys);
 335		kfree(cmd);
 336	}
 337}
 338
 339/**
 340 * mvumi_get_cmd -	Get a command from the free pool
 341 * @mhba:		Adapter soft state
 342 *
 343 * Returns a free command from the pool
 344 */
 345static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
 346{
 347	struct mvumi_cmd *cmd = NULL;
 348
 349	if (likely(!list_empty(&mhba->cmd_pool))) {
 350		cmd = list_entry((&mhba->cmd_pool)->next,
 351				struct mvumi_cmd, queue_pointer);
 352		list_del_init(&cmd->queue_pointer);
 353	} else
 354		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
 355
 356	return cmd;
 357}
 358
 359/**
 360 * mvumi_return_cmd -	Return a cmd to free command pool
 361 * @mhba:		Adapter soft state
 362 * @cmd:		Command packet to be returned to free command pool
 363 */
 364static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
 365						struct mvumi_cmd *cmd)
 366{
 367	cmd->scmd = NULL;
 368	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 369}
 370
 371/**
 372 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 373 * @mhba:		Adapter soft state
 374 */
 375static void mvumi_free_cmds(struct mvumi_hba *mhba)
 376{
 377	struct mvumi_cmd *cmd;
 378
 379	while (!list_empty(&mhba->cmd_pool)) {
 380		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 381							queue_pointer);
 382		list_del(&cmd->queue_pointer);
 383		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 384			kfree(cmd->frame);
 385		kfree(cmd);
 386	}
 387}
 388
 389/**
 390 * mvumi_alloc_cmds -	Allocates the command packets
 391 * @mhba:		Adapter soft state
 392 *
 393 */
 394static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
 395{
 396	int i;
 397	struct mvumi_cmd *cmd;
 398
 399	for (i = 0; i < mhba->max_io; i++) {
 400		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 401		if (!cmd)
 402			goto err_exit;
 403
 404		INIT_LIST_HEAD(&cmd->queue_pointer);
 405		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 406		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 407			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
 408			cmd->frame_phys = mhba->ib_frame_phys
 409						+ i * mhba->ib_max_size;
 410		} else
 411			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
 412		if (!cmd->frame)
 413			goto err_exit;
 414	}
 415	return 0;
 416
 417err_exit:
 418	dev_err(&mhba->pdev->dev,
 419			"failed to allocate memory for cmd[0x%x].\n", i);
 420	while (!list_empty(&mhba->cmd_pool)) {
 421		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 422						queue_pointer);
 423		list_del(&cmd->queue_pointer);
 424		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 425			kfree(cmd->frame);
 426		kfree(cmd);
 427	}
 428	return -ENOMEM;
 429}
 430
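/*
 * Count free inbound list slots on 9143: the hardware read pointer is
 * compared with the driver's cached slot, the toggle bit telling a full
 * list (same slot, different toggle) apart from an empty one.  The count
 * is further capped by how many commands the firmware can still accept.
 */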
 431static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
 432{
 433	unsigned int ib_rp_reg;
 434	struct mvumi_hw_regs *regs = mhba->regs;
 435
 436	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
 437
 438	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
 439			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
 440			((ib_rp_reg & regs->cl_pointer_toggle)
 441			 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
 442		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
 443		return 0;
 444	}
 445	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
 446		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
 447		return 0;
 448	} else {
 449		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
 450	}
 451}
 452
 453static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
 454{
 455	unsigned int count;
 456	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
 457		return 0;
 458	count = ioread32(mhba->ib_shadow);
 459	if (count == 0xffff)
 460		return 0;
 461	return count;
 462}
 463
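/*
 * Advance the cached inbound slot to the next entry, flipping the toggle
 * bit on wrap-around, and return a pointer to that entry.  In dynamic
 * source mode the list holds small descriptors rather than full frames.
 */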
 464static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
 465{
 466	unsigned int cur_ib_entry;
 467
 468	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
 469	cur_ib_entry++;
 470	if (cur_ib_entry >= mhba->list_num_io) {
 471		cur_ib_entry -= mhba->list_num_io;
 472		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
 473	}
 474	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
 475	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
 476	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 477		*ib_entry = mhba->ib_list + cur_ib_entry *
 478				sizeof(struct mvumi_dyn_list_entry);
 479	} else {
 480		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
 481	}
 482	atomic_inc(&mhba->fw_outstanding);
 483}
 484
 485static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
 486{
 487	iowrite32(0xffff, mhba->ib_shadow);
 488	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
 489}
 490
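/*
 * Re-validate an outbound frame that looked inconsistent on first read:
 * the copy pointer can run ahead of the data, so the entry is checked
 * again after a short delay before the response is accepted.
 */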
 491static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
 492		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
 493{
 494	unsigned short tag, request_id;
 495
 496	udelay(1);
 497	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 498	request_id = p_outb_frame->request_id;
 499	tag = p_outb_frame->tag;
 500	if (tag > mhba->tag_pool.size) {
 501		dev_err(&mhba->pdev->dev, "ob frame data error\n");
 502		return -1;
 503	}
 504	if (mhba->tag_cmd[tag] == NULL) {
 505		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
 506		return -1;
 507	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
 508						mhba->request_id_enabled) {
 509			dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
 510					"cmd request ID:0x%x\n", request_id,
 511					mhba->tag_cmd[tag]->request_id);
 512			return -1;
 513	}
 514
 515	return 0;
 516}
 517
 518static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
 519			unsigned int *cur_obf, unsigned int *assign_obf_end)
 520{
 521	unsigned int ob_write, ob_write_shadow;
 522	struct mvumi_hw_regs *regs = mhba->regs;
 523
 524	do {
 525		ob_write = ioread32(regs->outb_copy_pointer);
 526		ob_write_shadow = ioread32(mhba->ob_shadow);
 527	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
 528
 529	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 530	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 531
 532	if ((ob_write & regs->cl_pointer_toggle) !=
 533			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
 534		*assign_obf_end += mhba->list_num_io;
 535	}
 536	return 0;
 537}
 538
 539static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
 540			unsigned int *cur_obf, unsigned int *assign_obf_end)
 541{
 542	unsigned int ob_write;
 543	struct mvumi_hw_regs *regs = mhba->regs;
 544
 545	ob_write = ioread32(regs->outb_read_pointer);
 546	ob_write = ioread32(regs->outb_copy_pointer);
 547	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 548	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 549	if (*assign_obf_end < *cur_obf)
 550		*assign_obf_end += mhba->list_num_io;
 551	else if (*assign_obf_end == *cur_obf)
 552		return -1;
 553	return 0;
 554}
 555
 556static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
 557{
 558	unsigned int cur_obf, assign_obf_end, i;
 559	struct mvumi_ob_data *ob_data;
 560	struct mvumi_rsp_frame *p_outb_frame;
 561	struct mvumi_hw_regs *regs = mhba->regs;
 562
 563	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
 564		return;
 565
 566	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
 567		cur_obf++;
 568		if (cur_obf >= mhba->list_num_io) {
 569			cur_obf -= mhba->list_num_io;
 570			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 571		}
 572
 573		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 574
 575		/* Copy pointer may point to entry in outbound list
 576		*  before entry has valid data
 577		*/
 578		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
 579			mhba->tag_cmd[p_outb_frame->tag] == NULL ||
 580			p_outb_frame->request_id !=
 581				mhba->tag_cmd[p_outb_frame->tag]->request_id))
 582			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
 583				continue;
 584
 585		if (!list_empty(&mhba->ob_data_list)) {
 586			ob_data = (struct mvumi_ob_data *)
 587				list_first_entry(&mhba->ob_data_list,
 588					struct mvumi_ob_data, list);
 589			list_del_init(&ob_data->list);
 590		} else {
 591			ob_data = NULL;
 592			if (cur_obf == 0) {
 593				cur_obf = mhba->list_num_io - 1;
 594				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 595			} else
 596				cur_obf -= 1;
 597			break;
 598		}
 599
 600		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
 601		p_outb_frame->tag = 0xff;
 602
 603		list_add_tail(&ob_data->list, &mhba->free_ob_list);
 604	}
 605	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
 606	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
 607	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
 608}
 609
 610static void mvumi_reset(struct mvumi_hba *mhba)
 611{
 612	struct mvumi_hw_regs *regs = mhba->regs;
 613
 614	iowrite32(0, regs->enpointa_mask_reg);
 615	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
 616		return;
 617
 618	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
 619}
 620
 621static unsigned char mvumi_start(struct mvumi_hba *mhba);
 622
 623static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
 624{
 625	mhba->fw_state = FW_STATE_ABORT;
 626	mvumi_reset(mhba);
 627
 628	if (mvumi_start(mhba))
 629		return FAILED;
 630	else
 631		return SUCCESS;
 632}
 633
 634static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
 635{
 636	struct mvumi_hw_regs *regs = mhba->regs;
 637	u32 tmp;
 638	unsigned long before;
 639	before = jiffies;
 640
 641	iowrite32(0, regs->enpointa_mask_reg);
 642	tmp = ioread32(regs->arm_to_pciea_msg1);
 643	while (tmp != HANDSHAKE_READYSTATE) {
 644		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
 645		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
 646			dev_err(&mhba->pdev->dev,
 647				"FW reset failed [0x%x].\n", tmp);
 648			return FAILED;
 649		}
 650
 651		msleep(500);
 652		rmb();
 653		tmp = ioread32(regs->arm_to_pciea_msg1);
 654	}
 655
 656	return SUCCESS;
 657}
 658
 659static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
 660{
 661	unsigned char i;
 662
 663	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 664		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
 665						&mhba->pci_base[i]);
 666	}
 667}
 668
 669static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
 670{
 671	unsigned char i;
 672
 673	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 674		if (mhba->pci_base[i])
 675			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
 676						mhba->pci_base[i]);
 677	}
 678}
 679
 680static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
 681{
 682	unsigned int ret = 0;
 683	pci_set_master(pdev);
 684
 685	if (IS_DMA64) {
 686		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
 687			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 688	} else
 689		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 690
 691	return ret;
 692}
 693
 694static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
 695{
 696	mhba->fw_state = FW_STATE_ABORT;
 697
 698	iowrite32(0, mhba->regs->reset_enable);
 699	iowrite32(0xf, mhba->regs->reset_request);
 700
 701	iowrite32(0x10, mhba->regs->reset_enable);
 702	iowrite32(0x10, mhba->regs->reset_request);
 703	msleep(100);
 704	pci_disable_device(mhba->pdev);
 705
 706	if (pci_enable_device(mhba->pdev)) {
 707		dev_err(&mhba->pdev->dev, "enable device failed\n");
 708		return FAILED;
 709	}
 710	if (mvumi_pci_set_master(mhba->pdev)) {
 711		dev_err(&mhba->pdev->dev, "set master failed\n");
 712		return FAILED;
 713	}
 714	mvumi_restore_bar_addr(mhba);
 715	if (mvumi_wait_for_fw(mhba) == FAILED)
 716		return FAILED;
 717
 718	return mvumi_wait_for_outstanding(mhba);
 719}
 720
 721static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
 722{
 723	return mvumi_wait_for_outstanding(mhba);
 724}
 725
 726static int mvumi_host_reset(struct scsi_cmnd *scmd)
 727{
 728	struct mvumi_hba *mhba;
 729
 730	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 731
 732	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
 733			scmd->serial_number, scmd->cmnd[0], scmd->retries);
 734
 735	return mhba->instancet->reset_host(mhba);
 736}
 737
 738static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
 739						struct mvumi_cmd *cmd)
 740{
 741	unsigned long flags;
 742
 743	cmd->cmd_status = REQ_STATUS_PENDING;
 744
 745	if (atomic_read(&cmd->sync_cmd)) {
 746		dev_err(&mhba->pdev->dev,
 747			"last blocked cmd not finished, sync_cmd = %d\n",
 748						atomic_read(&cmd->sync_cmd));
 749		BUG_ON(1);
 750		return -1;
 751	}
 752	atomic_inc(&cmd->sync_cmd);
 753	spin_lock_irqsave(mhba->shost->host_lock, flags);
 754	mhba->instancet->fire_cmd(mhba, cmd);
 755	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 756
 757	wait_event_timeout(mhba->int_cmd_wait_q,
 758		(cmd->cmd_status != REQ_STATUS_PENDING),
 759		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
 760
 761	/* command timeout */
 762	if (atomic_read(&cmd->sync_cmd)) {
 763		spin_lock_irqsave(mhba->shost->host_lock, flags);
 764		atomic_dec(&cmd->sync_cmd);
 765		if (mhba->tag_cmd[cmd->frame->tag]) {
 766			mhba->tag_cmd[cmd->frame->tag] = 0;
 767			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
 768							cmd->frame->tag);
 769			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
 770		}
 771		if (!list_empty(&cmd->queue_pointer)) {
 772			dev_warn(&mhba->pdev->dev,
  773				"TIMEOUT: An internal command was not sent!\n");
 774			list_del_init(&cmd->queue_pointer);
 775		} else
 776			atomic_dec(&mhba->fw_outstanding);
 777
 778		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 779	}
 780	return 0;
 781}
 782
 783static void mvumi_release_fw(struct mvumi_hba *mhba)
 784{
 785	mvumi_free_cmds(mhba);
 786	mvumi_release_mem_resource(mhba);
 787	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
 788	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
 789		mhba->handshake_page, mhba->handshake_page_phys);
 790	kfree(mhba->regs);
 791	pci_release_regions(mhba->pdev);
 792}
 793
 794static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
 795{
 796	struct mvumi_cmd *cmd;
 797	struct mvumi_msg_frame *frame;
 798	unsigned char device_id, retry = 0;
 799	unsigned char bitcount = sizeof(unsigned char) * 8;
 800
 801	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
 802		if (!(mhba->target_map[device_id / bitcount] &
 803				(1 << (device_id % bitcount))))
 804			continue;
 805get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
 806		if (!cmd) {
 807			if (retry++ >= 5) {
 808				dev_err(&mhba->pdev->dev, "failed to get memory"
 809					" for internal flush cache cmd for "
 810					"device %d", device_id);
 811				retry = 0;
 812				continue;
 813			} else
 814				goto get_cmd;
 815		}
 816		cmd->scmd = NULL;
 817		cmd->cmd_status = REQ_STATUS_PENDING;
 818		atomic_set(&cmd->sync_cmd, 0);
 819		frame = cmd->frame;
 820		frame->req_function = CL_FUN_SCSI_CMD;
 821		frame->device_id = device_id;
 822		frame->cmd_flag = CMD_FLAG_NON_DATA;
 823		frame->data_transfer_length = 0;
 824		frame->cdb_length = MAX_COMMAND_SIZE;
 825		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
 826		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
 827		frame->cdb[1] = CDB_CORE_MODULE;
 828		frame->cdb[2] = CDB_CORE_SHUTDOWN;
 829
 830		mvumi_issue_blocked_cmd(mhba, cmd);
 831		if (cmd->cmd_status != SAM_STAT_GOOD) {
 832			dev_err(&mhba->pdev->dev,
 833				"device %d flush cache failed, status=0x%x.\n",
 834				device_id, cmd->cmd_status);
 835		}
 836
 837		mvumi_delete_internal_cmd(mhba, cmd);
 838	}
 839	return 0;
 840}
 841
 842static unsigned char
 843mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
 844							unsigned short len)
 845{
 846	unsigned char *ptr;
 847	unsigned char ret = 0, i;
 848
 849	ptr = (unsigned char *) p_header->frame_content;
 850	for (i = 0; i < len; i++) {
 851		ret ^= *ptr;
 852		ptr++;
 853	}
 854
 855	return ret;
 856}
 857
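/*
 * Build one handshake page for the firmware.  frame_length covers only
 * the page payload (the header bytes are excluded) and the checksum is an
 * XOR over that payload, computed by mvumi_calculate_checksum().
 */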
 858static void mvumi_hs_build_page(struct mvumi_hba *mhba,
 859				struct mvumi_hs_header *hs_header)
 860{
 861	struct mvumi_hs_page2 *hs_page2;
 862	struct mvumi_hs_page4 *hs_page4;
 863	struct mvumi_hs_page3 *hs_page3;
 864	struct timeval time;
 865	unsigned int local_time;
 866
 867	switch (hs_header->page_code) {
 868	case HS_PAGE_HOST_INFO:
 869		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
 870		hs_header->frame_length = sizeof(*hs_page2) - 4;
 871		memset(hs_header->frame_content, 0, hs_header->frame_length);
  872		hs_page2->host_type = 3; /* 3 means Linux */
 873		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 874			hs_page2->host_cap = 0x08;/* host dynamic source mode */
 875		hs_page2->host_ver.ver_major = VER_MAJOR;
 876		hs_page2->host_ver.ver_minor = VER_MINOR;
 877		hs_page2->host_ver.ver_oem = VER_OEM;
 878		hs_page2->host_ver.ver_build = VER_BUILD;
 879		hs_page2->system_io_bus = 0;
 880		hs_page2->slot_number = 0;
 881		hs_page2->intr_level = 0;
 882		hs_page2->intr_vector = 0;
 883		do_gettimeofday(&time);
 884		local_time = (unsigned int) (time.tv_sec -
 885						(sys_tz.tz_minuteswest * 60));
 886		hs_page2->seconds_since1970 = local_time;
 887		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 888						hs_header->frame_length);
 889		break;
 890
 891	case HS_PAGE_FIRM_CTL:
 892		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
 893		hs_header->frame_length = sizeof(*hs_page3) - 4;
 894		memset(hs_header->frame_content, 0, hs_header->frame_length);
 895		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 896						hs_header->frame_length);
 897		break;
 898
 899	case HS_PAGE_CL_INFO:
 900		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
 901		hs_header->frame_length = sizeof(*hs_page4) - 4;
 902		memset(hs_header->frame_content, 0, hs_header->frame_length);
 903		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
 904		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
 905
 906		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
 907		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
 908		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
 909		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
 910		if (mhba->hba_capability
 911			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
 912			hs_page4->ob_depth = find_first_bit((unsigned long *)
 913							    &mhba->list_num_io,
 914							    BITS_PER_LONG);
 915			hs_page4->ib_depth = find_first_bit((unsigned long *)
 916							    &mhba->list_num_io,
 917							    BITS_PER_LONG);
 918		} else {
 919			hs_page4->ob_depth = (u8) mhba->list_num_io;
 920			hs_page4->ib_depth = (u8) mhba->list_num_io;
 921		}
 922		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 923						hs_header->frame_length);
 924		break;
 925
 926	default:
 927		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
 928			hs_header->page_code);
 929		break;
 930	}
 931}
 932
 933/**
  934 * mvumi_init_data -	Initialize requested data for FW
 935 * @mhba:			Adapter soft state
 936 */
 937static int mvumi_init_data(struct mvumi_hba *mhba)
 938{
 939	struct mvumi_ob_data *ob_pool;
 940	struct mvumi_res *res_mgnt;
 941	unsigned int tmp_size, offset, i;
 942	void *virmem, *v;
 943	dma_addr_t p;
 944
 945	if (mhba->fw_flag & MVUMI_FW_ALLOC)
 946		return 0;
 947
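	/*
	 * A single uncached allocation is carved up (with alignment padding)
	 * into the inbound list (plus the command frames in dynamic source
	 * mode), the inbound shadow, the outbound shadow and the outbound
	 * list; cached allocations then hold the outbound data pool and the
	 * tag/target maps.
	 */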
 948	tmp_size = mhba->ib_max_size * mhba->max_io;
 949	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 950		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 951
 952	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
 953	tmp_size += 8 + sizeof(u32)*2 + 16;
 954
 955	res_mgnt = mvumi_alloc_mem_resource(mhba,
 956					RESOURCE_UNCACHED_MEMORY, tmp_size);
 957	if (!res_mgnt) {
 958		dev_err(&mhba->pdev->dev,
 959			"failed to allocate memory for inbound list\n");
 960		goto fail_alloc_dma_buf;
 961	}
 962
 963	p = res_mgnt->bus_addr;
 964	v = res_mgnt->virt_addr;
 965	/* ib_list */
 966	offset = round_up(p, 128) - p;
 967	p += offset;
 968	v += offset;
 969	mhba->ib_list = v;
 970	mhba->ib_list_phys = p;
 971	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 972		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 973		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 974		mhba->ib_frame = v;
 975		mhba->ib_frame_phys = p;
 976	}
 977	v += mhba->ib_max_size * mhba->max_io;
 978	p += mhba->ib_max_size * mhba->max_io;
 979
 980	/* ib shadow */
 981	offset = round_up(p, 8) - p;
 982	p += offset;
 983	v += offset;
 984	mhba->ib_shadow = v;
 985	mhba->ib_shadow_phys = p;
 986	p += sizeof(u32)*2;
 987	v += sizeof(u32)*2;
 988	/* ob shadow */
 989	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
 990		offset = round_up(p, 8) - p;
 991		p += offset;
 992		v += offset;
 993		mhba->ob_shadow = v;
 994		mhba->ob_shadow_phys = p;
 995		p += 8;
 996		v += 8;
 997	} else {
 998		offset = round_up(p, 4) - p;
 999		p += offset;
1000		v += offset;
1001		mhba->ob_shadow = v;
1002		mhba->ob_shadow_phys = p;
1003		p += 4;
1004		v += 4;
1005	}
1006
1007	/* ob list */
1008	offset = round_up(p, 128) - p;
1009	p += offset;
1010	v += offset;
1011
1012	mhba->ob_list = v;
1013	mhba->ob_list_phys = p;
1014
1015	/* ob data pool */
1016	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
1017	tmp_size = round_up(tmp_size, 8);
1018
1019	res_mgnt = mvumi_alloc_mem_resource(mhba,
1020				RESOURCE_CACHED_MEMORY, tmp_size);
1021	if (!res_mgnt) {
1022		dev_err(&mhba->pdev->dev,
1023			"failed to allocate memory for outbound data buffer\n");
1024		goto fail_alloc_dma_buf;
1025	}
1026	virmem = res_mgnt->virt_addr;
1027
1028	for (i = mhba->max_io; i != 0; i--) {
1029		ob_pool = (struct mvumi_ob_data *) virmem;
1030		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
1031		virmem += mhba->ob_max_size + sizeof(*ob_pool);
1032	}
1033
1034	tmp_size = sizeof(unsigned short) * mhba->max_io +
1035				sizeof(struct mvumi_cmd *) * mhba->max_io;
1036	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
1037						(sizeof(unsigned char) * 8);
1038
1039	res_mgnt = mvumi_alloc_mem_resource(mhba,
1040				RESOURCE_CACHED_MEMORY, tmp_size);
1041	if (!res_mgnt) {
1042		dev_err(&mhba->pdev->dev,
1043			"failed to allocate memory for tag and target map\n");
1044		goto fail_alloc_dma_buf;
1045	}
1046
1047	virmem = res_mgnt->virt_addr;
1048	mhba->tag_pool.stack = virmem;
1049	mhba->tag_pool.size = mhba->max_io;
1050	tag_init(&mhba->tag_pool, mhba->max_io);
1051	virmem += sizeof(unsigned short) * mhba->max_io;
1052
1053	mhba->tag_cmd = virmem;
1054	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
1055
1056	mhba->target_map = virmem;
1057
1058	mhba->fw_flag |= MVUMI_FW_ALLOC;
1059	return 0;
1060
1061fail_alloc_dma_buf:
1062	mvumi_release_mem_resource(mhba);
1063	return -1;
1064}
1065
1066static int mvumi_hs_process_page(struct mvumi_hba *mhba,
1067				struct mvumi_hs_header *hs_header)
1068{
1069	struct mvumi_hs_page1 *hs_page1;
1070	unsigned char page_checksum;
1071
1072	page_checksum = mvumi_calculate_checksum(hs_header,
1073						hs_header->frame_length);
1074	if (page_checksum != hs_header->checksum) {
1075		dev_err(&mhba->pdev->dev, "checksum error\n");
1076		return -1;
1077	}
1078
1079	switch (hs_header->page_code) {
1080	case HS_PAGE_FIRM_CAP:
1081		hs_page1 = (struct mvumi_hs_page1 *) hs_header;
1082
1083		mhba->max_io = hs_page1->max_io_support;
1084		mhba->list_num_io = hs_page1->cl_inout_list_depth;
1085		mhba->max_transfer_size = hs_page1->max_transfer_size;
1086		mhba->max_target_id = hs_page1->max_devices_support;
1087		mhba->hba_capability = hs_page1->capability;
1088		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
1089		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
1090
1091		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
1092		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
1093
1094		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
1095						hs_page1->fw_ver.ver_build);
1096
1097		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1098			mhba->eot_flag = 22;
1099		else
1100			mhba->eot_flag = 27;
1101		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1102			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
1103		break;
1104	default:
1105		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
1106		return -1;
1107	}
1108	return 0;
1109}
1110
1111/**
1112 * mvumi_handshake -	Move the FW to READY state
1113 * @mhba:				Adapter soft state
1114 *
 1115 * During initialization, the FW can be in any one of several possible
 1116 * states. If the FW is in the operational or waiting-for-handshake state,
 1117 * the driver must take steps to bring it to the ready state. Otherwise,
 1118 * it has to wait for the ready state.
1119 */
1120static int mvumi_handshake(struct mvumi_hba *mhba)
1121{
1122	unsigned int hs_state, tmp, hs_fun;
1123	struct mvumi_hs_header *hs_header;
1124	struct mvumi_hw_regs *regs = mhba->regs;
1125
1126	if (mhba->fw_state == FW_STATE_STARTING)
1127		hs_state = HS_S_START;
1128	else {
1129		tmp = ioread32(regs->arm_to_pciea_msg0);
1130		hs_state = HS_GET_STATE(tmp);
1131		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
1132		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
1133			mhba->fw_state = FW_STATE_STARTING;
1134			return -1;
1135		}
1136	}
1137
1138	hs_fun = 0;
1139	switch (hs_state) {
1140	case HS_S_START:
1141		mhba->fw_state = FW_STATE_HANDSHAKING;
1142		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1143		HS_SET_STATE(hs_fun, HS_S_RESET);
1144		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
1145		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1146		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1147		break;
1148
1149	case HS_S_RESET:
1150		iowrite32(lower_32_bits(mhba->handshake_page_phys),
1151					regs->pciea_to_arm_msg1);
1152		iowrite32(upper_32_bits(mhba->handshake_page_phys),
1153					regs->arm_to_pciea_msg1);
1154		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1155		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
1156		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1157		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1158		break;
1159
1160	case HS_S_PAGE_ADDR:
1161	case HS_S_QUERY_PAGE:
1162	case HS_S_SEND_PAGE:
1163		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
1164		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
1165			mhba->hba_total_pages =
1166			((struct mvumi_hs_page1 *) hs_header)->total_pages;
1167
1168			if (mhba->hba_total_pages == 0)
1169				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1170		}
1171
1172		if (hs_state == HS_S_QUERY_PAGE) {
1173			if (mvumi_hs_process_page(mhba, hs_header)) {
1174				HS_SET_STATE(hs_fun, HS_S_ABORT);
1175				return -1;
1176			}
1177			if (mvumi_init_data(mhba)) {
1178				HS_SET_STATE(hs_fun, HS_S_ABORT);
1179				return -1;
1180			}
1181		} else if (hs_state == HS_S_PAGE_ADDR) {
1182			hs_header->page_code = 0;
1183			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1184		}
1185
1186		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
1187			hs_header->page_code++;
1188			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
1189				mvumi_hs_build_page(mhba, hs_header);
1190				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
1191			} else
1192				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
1193		} else
1194			HS_SET_STATE(hs_fun, HS_S_END);
1195
1196		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1197		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1198		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1199		break;
1200
1201	case HS_S_END:
1202		/* Set communication list ISR */
1203		tmp = ioread32(regs->enpointa_mask_reg);
1204		tmp |= regs->int_comaout | regs->int_comaerr;
1205		iowrite32(tmp, regs->enpointa_mask_reg);
1206		iowrite32(mhba->list_num_io, mhba->ib_shadow);
1207		/* Set InBound List Available count shadow */
1208		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1209					regs->inb_aval_count_basel);
1210		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1211					regs->inb_aval_count_baseh);
1212
1213		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1214			/* Set OutBound List Available count shadow */
1215			iowrite32((mhba->list_num_io-1) |
1216							regs->cl_pointer_toggle,
1217							mhba->ob_shadow);
1218			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1219							regs->outb_copy_basel);
1220			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1221							regs->outb_copy_baseh);
1222		}
1223
1224		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1225							regs->cl_pointer_toggle;
1226		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1227							regs->cl_pointer_toggle;
1228		mhba->fw_state = FW_STATE_STARTED;
1229
1230		break;
1231	default:
1232		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1233								hs_state);
1234		return -1;
1235	}
1236	return 0;
1237}
1238
1239static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1240{
1241	unsigned int isr_status;
1242	unsigned long before;
1243
1244	before = jiffies;
1245	mvumi_handshake(mhba);
1246	do {
1247		isr_status = mhba->instancet->read_fw_status_reg(mhba);
1248
1249		if (mhba->fw_state == FW_STATE_STARTED)
1250			return 0;
1251		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1252			dev_err(&mhba->pdev->dev,
1253				"no handshake response at state 0x%x.\n",
1254				  mhba->fw_state);
1255			dev_err(&mhba->pdev->dev,
1256				"isr : global=0x%x,status=0x%x.\n",
1257					mhba->global_isr, isr_status);
1258			return -1;
1259		}
1260		rmb();
1261		usleep_range(1000, 2000);
1262	} while (!(isr_status & DRBL_HANDSHAKE_ISR));
1263
1264	return 0;
1265}
1266
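/*
 * Drive the whole handshake: reset the message unit until the firmware
 * reports a ready or done signature, then step through
 * mvumi_handshake_event() until the firmware reaches FW_STATE_STARTED.
 */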
1267static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1268{
1269	unsigned int tmp;
1270	unsigned long before;
1271
1272	before = jiffies;
1273	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1274	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1275		if (tmp != HANDSHAKE_READYSTATE)
1276			iowrite32(DRBL_MU_RESET,
1277					mhba->regs->pciea_to_arm_drbl_reg);
1278		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1279			dev_err(&mhba->pdev->dev,
1280				"invalid signature [0x%x].\n", tmp);
1281			return -1;
1282		}
1283		usleep_range(1000, 2000);
1284		rmb();
1285		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1286	}
1287
1288	mhba->fw_state = FW_STATE_STARTING;
1289	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1290	do {
1291		if (mvumi_handshake_event(mhba)) {
1292			dev_err(&mhba->pdev->dev,
1293					"handshake failed at state 0x%x.\n",
1294						mhba->fw_state);
1295			return -1;
1296		}
1297	} while (mhba->fw_state != FW_STATE_STARTED);
1298
1299	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1300
1301	return 0;
1302}
1303
1304static unsigned char mvumi_start(struct mvumi_hba *mhba)
1305{
1306	unsigned int tmp;
1307	struct mvumi_hw_regs *regs = mhba->regs;
1308
1309	/* clear Door bell */
1310	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1311	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1312
1313	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1314	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1315	iowrite32(tmp, regs->enpointa_mask_reg);
1316	msleep(100);
1317	if (mvumi_check_handshake(mhba))
1318		return -1;
1319
1320	return 0;
1321}
1322
1323/**
1324 * mvumi_complete_cmd -	Completes a command
1325 * @mhba:			Adapter soft state
1326 * @cmd:			Command to be completed
1327 */
1328static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1329					struct mvumi_rsp_frame *ob_frame)
1330{
1331	struct scsi_cmnd *scmd = cmd->scmd;
1332
1333	cmd->scmd->SCp.ptr = NULL;
1334	scmd->result = ob_frame->req_status;
1335
1336	switch (ob_frame->req_status) {
1337	case SAM_STAT_GOOD:
1338		scmd->result |= DID_OK << 16;
1339		break;
1340	case SAM_STAT_BUSY:
1341		scmd->result |= DID_BUS_BUSY << 16;
1342		break;
1343	case SAM_STAT_CHECK_CONDITION:
1344		scmd->result |= (DID_OK << 16);
1345		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1346			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1347				sizeof(struct mvumi_sense_data));
1348			scmd->result |=  (DRIVER_SENSE << 24);
1349		}
1350		break;
1351	default:
1352		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1353		break;
1354	}
1355
1356	if (scsi_bufflen(scmd)) {
1357		if (scsi_sg_count(scmd)) {
1358			pci_unmap_sg(mhba->pdev,
1359				scsi_sglist(scmd),
1360				scsi_sg_count(scmd),
1361				(int) scmd->sc_data_direction);
1362		} else {
1363			pci_unmap_single(mhba->pdev,
1364				scmd->SCp.dma_handle,
1365				scsi_bufflen(scmd),
1366				(int) scmd->sc_data_direction);
1367
1368			scmd->SCp.dma_handle = 0;
1369		}
1370	}
1371	cmd->scmd->scsi_done(scmd);
1372	mvumi_return_cmd(mhba, cmd);
1373}
1374
1375static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1376						struct mvumi_cmd *cmd,
1377					struct mvumi_rsp_frame *ob_frame)
1378{
1379	if (atomic_read(&cmd->sync_cmd)) {
1380		cmd->cmd_status = ob_frame->req_status;
1381
1382		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1383				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1384				cmd->data_buf) {
1385			memcpy(cmd->data_buf, ob_frame->payload,
1386					sizeof(struct mvumi_sense_data));
1387		}
1388		atomic_dec(&cmd->sync_cmd);
1389		wake_up(&mhba->int_cmd_wait_q);
1390	}
1391}
1392
1393static void mvumi_show_event(struct mvumi_hba *mhba,
1394			struct mvumi_driver_event *ptr)
1395{
1396	unsigned int i;
1397
1398	dev_warn(&mhba->pdev->dev,
1399		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1400		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
1401	if (ptr->param_count) {
1402		printk(KERN_WARNING "Event param(len 0x%x): ",
1403						ptr->param_count);
1404		for (i = 0; i < ptr->param_count; i++)
1405			printk(KERN_WARNING "0x%x ", ptr->params[i]);
1406
1407		printk(KERN_WARNING "\n");
1408	}
1409
1410	if (ptr->sense_data_length) {
1411		printk(KERN_WARNING "Event sense data(len 0x%x): ",
1412						ptr->sense_data_length);
1413		for (i = 0; i < ptr->sense_data_length; i++)
1414			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
1415		printk(KERN_WARNING "\n");
1416	}
1417}
1418
1419static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1420{
1421	struct scsi_device *sdev;
1422	int ret = -1;
1423
1424	if (status == DEVICE_OFFLINE) {
1425		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1426		if (sdev) {
1427			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1428								sdev->id, 0);
1429			scsi_remove_device(sdev);
1430			scsi_device_put(sdev);
1431			ret = 0;
1432		} else
1433			dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1434									devid);
1435	} else if (status == DEVICE_ONLINE) {
1436		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1437		if (!sdev) {
1438			scsi_add_device(mhba->shost, 0, devid, 0);
1439			dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1440								devid, 0);
1441			ret = 0;
1442		} else {
1443			dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
1444								0, devid, 0);
1445			scsi_device_put(sdev);
1446		}
1447	}
1448	return ret;
1449}
1450
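/*
 * Send a standard INQUIRY to one target and derive a WWID from the reply:
 * 9143 simply uses id + 1, 9580 copies the unique-ID bytes out of the
 * inquiry data.  A return value of 0 means the device did not answer.
 */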
1451static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1452	unsigned int id, struct mvumi_cmd *cmd)
1453{
1454	struct mvumi_msg_frame *frame;
1455	u64 wwid = 0;
1456	int cmd_alloc = 0;
1457	int data_buf_len = 64;
1458
1459	if (!cmd) {
1460		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1461		if (cmd)
1462			cmd_alloc = 1;
1463		else
1464			return 0;
1465	} else {
1466		memset(cmd->data_buf, 0, data_buf_len);
1467	}
1468	cmd->scmd = NULL;
1469	cmd->cmd_status = REQ_STATUS_PENDING;
1470	atomic_set(&cmd->sync_cmd, 0);
1471	frame = cmd->frame;
1472	frame->device_id = (u16) id;
1473	frame->cmd_flag = CMD_FLAG_DATA_IN;
1474	frame->req_function = CL_FUN_SCSI_CMD;
1475	frame->cdb_length = 6;
1476	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1477	memset(frame->cdb, 0, frame->cdb_length);
1478	frame->cdb[0] = INQUIRY;
1479	frame->cdb[4] = frame->data_transfer_length;
1480
1481	mvumi_issue_blocked_cmd(mhba, cmd);
1482
1483	if (cmd->cmd_status == SAM_STAT_GOOD) {
1484		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1485			wwid = id + 1;
1486		else
1487			memcpy((void *)&wwid,
1488			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1489			       MVUMI_INQUIRY_UUID_LEN);
1490		dev_dbg(&mhba->pdev->dev,
1491			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1492	} else {
1493		wwid = 0;
1494	}
1495	if (cmd_alloc)
1496		mvumi_delete_internal_cmd(mhba, cmd);
1497
1498	return wwid;
1499}
1500
1501static void mvumi_detach_devices(struct mvumi_hba *mhba)
1502{
1503	struct mvumi_device *mv_dev = NULL , *dev_next;
1504	struct scsi_device *sdev = NULL;
1505
1506	mutex_lock(&mhba->device_lock);
1507
1508	/* detach Hard Disk */
1509	list_for_each_entry_safe(mv_dev, dev_next,
1510		&mhba->shost_dev_list, list) {
1511		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1512		list_del_init(&mv_dev->list);
1513		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1514			mv_dev->id, mv_dev->wwid);
1515		kfree(mv_dev);
1516	}
1517	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1518		list_del_init(&mv_dev->list);
1519		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1520			mv_dev->id, mv_dev->wwid);
1521		kfree(mv_dev);
1522	}
1523
1524	/* detach virtual device */
1525	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1526		sdev = scsi_device_lookup(mhba->shost, 0,
1527						mhba->max_target_id - 1, 0);
1528
1529	if (sdev) {
1530		scsi_remove_device(sdev);
1531		scsi_device_put(sdev);
1532	}
1533
1534	mutex_unlock(&mhba->device_lock);
1535}
1536
1537static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1538{
1539	struct scsi_device *sdev;
1540
1541	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1542	if (sdev) {
1543		scsi_rescan_device(&sdev->sdev_gendev);
1544		scsi_device_put(sdev);
1545	}
1546}
1547
1548static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1549{
1550	struct mvumi_device *mv_dev = NULL;
1551
1552	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1553		if (mv_dev->wwid == wwid) {
1554			if (mv_dev->id != id) {
1555				dev_err(&mhba->pdev->dev,
1556					"%s has same wwid[%llx] ,"
1557					" but different id[%d %d]\n",
1558					__func__, mv_dev->wwid, mv_dev->id, id);
1559				return -1;
1560			} else {
1561				if (mhba->pdev->device ==
1562						PCI_DEVICE_ID_MARVELL_MV9143)
1563					mvumi_rescan_devices(mhba, id);
1564				return 1;
1565			}
1566		}
1567	}
1568	return 0;
1569}
1570
1571static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1572{
1573	struct mvumi_device *mv_dev = NULL, *dev_next;
1574
1575	list_for_each_entry_safe(mv_dev, dev_next,
1576				&mhba->shost_dev_list, list) {
1577		if (mv_dev->id == id) {
1578			dev_dbg(&mhba->pdev->dev,
1579				"detach device(0:%d:0) wwid(%llx) from HOST\n",
1580				mv_dev->id, mv_dev->wwid);
1581			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1582			list_del_init(&mv_dev->list);
1583			kfree(mv_dev);
1584		}
1585	}
1586}
1587
1588static int mvumi_probe_devices(struct mvumi_hba *mhba)
1589{
1590	int id, maxid;
1591	u64 wwid = 0;
1592	struct mvumi_device *mv_dev = NULL;
1593	struct mvumi_cmd *cmd = NULL;
1594	int found = 0;
1595
1596	cmd = mvumi_create_internal_cmd(mhba, 64);
1597	if (!cmd)
1598		return -1;
1599
1600	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1601		maxid = mhba->max_target_id;
1602	else
1603		maxid = mhba->max_target_id - 1;
1604
1605	for (id = 0; id < maxid; id++) {
1606		wwid = mvumi_inquiry(mhba, id, cmd);
1607		if (!wwid) {
 1608			/* device did not respond, remove it */
 1609			mvumi_remove_devices(mhba, id);
 1610		} else {
 1611			/* device responded, add it */
1612			found = mvumi_match_devices(mhba, id, wwid);
1613			if (!found) {
1614				mvumi_remove_devices(mhba, id);
1615				mv_dev = kzalloc(sizeof(struct mvumi_device),
1616								GFP_KERNEL);
1617				if (!mv_dev) {
1618					dev_err(&mhba->pdev->dev,
1619						"%s alloc mv_dev failed\n",
1620						__func__);
1621					continue;
1622				}
1623				mv_dev->id = id;
1624				mv_dev->wwid = wwid;
1625				mv_dev->sdev = NULL;
1626				INIT_LIST_HEAD(&mv_dev->list);
1627				list_add_tail(&mv_dev->list,
1628					      &mhba->mhba_dev_list);
1629				dev_dbg(&mhba->pdev->dev,
1630					"probe a new device(0:%d:0)"
1631					" wwid(%llx)\n", id, mv_dev->wwid);
1632			} else if (found == -1)
1633				return -1;
1634			else
1635				continue;
1636		}
1637	}
1638
1639	if (cmd)
1640		mvumi_delete_internal_cmd(mhba, cmd);
1641
1642	return 0;
1643}
1644
1645static int mvumi_rescan_bus(void *data)
1646{
1647	int ret = 0;
1648	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
1649	struct mvumi_device *mv_dev = NULL , *dev_next;
1650
1651	while (!kthread_should_stop()) {
1652
1653		set_current_state(TASK_INTERRUPTIBLE);
1654		if (!atomic_read(&mhba->pnp_count))
1655			schedule();
1656		msleep(1000);
1657		atomic_set(&mhba->pnp_count, 0);
1658		__set_current_state(TASK_RUNNING);
1659
1660		mutex_lock(&mhba->device_lock);
1661		ret = mvumi_probe_devices(mhba);
1662		if (!ret) {
1663			list_for_each_entry_safe(mv_dev, dev_next,
1664						 &mhba->mhba_dev_list, list) {
1665				if (mvumi_handle_hotplug(mhba, mv_dev->id,
1666							 DEVICE_ONLINE)) {
1667					dev_err(&mhba->pdev->dev,
 1668						"%s add device(0:%d:0) failed, "
 1669						"wwid(%llx) already exists\n",
1670						__func__,
1671						mv_dev->id, mv_dev->wwid);
1672					list_del_init(&mv_dev->list);
1673					kfree(mv_dev);
1674				} else {
1675					list_move_tail(&mv_dev->list,
1676						       &mhba->shost_dev_list);
1677				}
1678			}
1679		}
1680		mutex_unlock(&mhba->device_lock);
1681	}
1682	return 0;
1683}
1684
1685static void mvumi_proc_msg(struct mvumi_hba *mhba,
1686					struct mvumi_hotplug_event *param)
1687{
1688	u16 size = param->size;
1689	const unsigned long *ar_bitmap;
1690	const unsigned long *re_bitmap;
1691	int index;
1692
1693	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1694		index = -1;
1695		ar_bitmap = (const unsigned long *) param->bitmap;
1696		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1697
1698		mutex_lock(&mhba->sas_discovery_mutex);
1699		do {
1700			index = find_next_zero_bit(ar_bitmap, size, index + 1);
1701			if (index >= size)
1702				break;
1703			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1704		} while (1);
1705
1706		index = -1;
1707		do {
1708			index = find_next_zero_bit(re_bitmap, size, index + 1);
1709			if (index >= size)
1710				break;
1711			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1712		} while (1);
1713		mutex_unlock(&mhba->sas_discovery_mutex);
1714	}
1715}
1716
1717static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1718{
1719	if (msg == APICDB1_EVENT_GETEVENT) {
1720		int i, count;
1721		struct mvumi_driver_event *param = NULL;
1722		struct mvumi_event_req *er = buffer;
1723		count = er->count;
1724		if (count > MAX_EVENTS_RETURNED) {
1725			dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1726					" than max event count[0x%x].\n",
1727					count, MAX_EVENTS_RETURNED);
1728			return;
1729		}
1730		for (i = 0; i < count; i++) {
1731			param = &er->events[i];
1732			mvumi_show_event(mhba, param);
1733		}
1734	} else if (msg == APICDB1_HOST_GETEVENT) {
1735		mvumi_proc_msg(mhba, buffer);
1736	}
1737}
1738
1739static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1740{
1741	struct mvumi_cmd *cmd;
1742	struct mvumi_msg_frame *frame;
1743
1744	cmd = mvumi_create_internal_cmd(mhba, 512);
1745	if (!cmd)
1746		return -1;
1747	cmd->scmd = NULL;
1748	cmd->cmd_status = REQ_STATUS_PENDING;
1749	atomic_set(&cmd->sync_cmd, 0);
1750	frame = cmd->frame;
1751	frame->device_id = 0;
1752	frame->cmd_flag = CMD_FLAG_DATA_IN;
1753	frame->req_function = CL_FUN_SCSI_CMD;
1754	frame->cdb_length = MAX_COMMAND_SIZE;
1755	frame->data_transfer_length = sizeof(struct mvumi_event_req);
1756	memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1757	frame->cdb[0] = APICDB0_EVENT;
1758	frame->cdb[1] = msg;
1759	mvumi_issue_blocked_cmd(mhba, cmd);
1760
1761	if (cmd->cmd_status != SAM_STAT_GOOD)
1762		dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1763							cmd->cmd_status);
1764	else
1765		mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1766
1767	mvumi_delete_internal_cmd(mhba, cmd);
1768	return 0;
1769}
1770
1771static void mvumi_scan_events(struct work_struct *work)
1772{
1773	struct mvumi_events_wq *mu_ev =
1774		container_of(work, struct mvumi_events_wq, work_q);
1775
1776	mvumi_get_event(mu_ev->mhba, mu_ev->event);
1777	kfree(mu_ev);
1778}
1779
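/*
 * Doorbell-driven events: a bus-change doorbell just wakes the rescan
 * thread, while an event-notify doorbell schedules a work item that asks
 * the firmware for its event log via mvumi_get_event().
 */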
1780static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1781{
1782	struct mvumi_events_wq *mu_ev;
1783
1784	while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1785		if (isr_status & DRBL_BUS_CHANGE) {
1786			atomic_inc(&mhba->pnp_count);
1787			wake_up_process(mhba->dm_thread);
1788			isr_status &= ~(DRBL_BUS_CHANGE);
1789			continue;
1790		}
1791
1792		mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1793		if (mu_ev) {
1794			INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1795			mu_ev->mhba = mhba;
1796			mu_ev->event = APICDB1_EVENT_GETEVENT;
1797			isr_status &= ~(DRBL_EVENT_NOTIFY);
1798			mu_ev->param = NULL;
1799			schedule_work(&mu_ev->work_q);
1800		}
1801	}
1802}
1803
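/*
 * Complete the responses that mvumi_receive_ob_list_entry() copied onto
 * free_ob_list: look the command up by its tag, release the tag and finish
 * it as either a SCSI or an internal command, then try to fire any
 * requests still waiting for inbound slots.
 */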
1804static void mvumi_handle_clob(struct mvumi_hba *mhba)
1805{
1806	struct mvumi_rsp_frame *ob_frame;
1807	struct mvumi_cmd *cmd;
1808	struct mvumi_ob_data *pool;
1809
1810	while (!list_empty(&mhba->free_ob_list)) {
1811		pool = list_first_entry(&mhba->free_ob_list,
1812						struct mvumi_ob_data, list);
1813		list_del_init(&pool->list);
1814		list_add_tail(&pool->list, &mhba->ob_data_list);
1815
1816		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1817		cmd = mhba->tag_cmd[ob_frame->tag];
1818
1819		atomic_dec(&mhba->fw_outstanding);
1820		mhba->tag_cmd[ob_frame->tag] = 0;
1821		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1822		if (cmd->scmd)
1823			mvumi_complete_cmd(mhba, cmd, ob_frame);
1824		else
1825			mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1826	}
1827	mhba->instancet->fire_cmd(mhba, NULL);
1828}
1829
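/*
 * Interrupt handler: the per-chip clear_intr() callback acknowledges the
 * interrupt and records its cause in mhba->global_isr/isr_status.
 * Doorbell causes trigger event or handshake handling, the outbound
 * communication-list cause harvests responses, and completed commands are
 * finished through mvumi_handle_clob().
 */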
1830static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1831{
1832	struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1833	unsigned long flags;
1834
1835	spin_lock_irqsave(mhba->shost->host_lock, flags);
1836	if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1837		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1838		return IRQ_NONE;
1839	}
1840
1841	if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1842		if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1843			mvumi_launch_events(mhba, mhba->isr_status);
1844		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1845			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1846			mvumi_handshake(mhba);
1847		}
1848
1849	}
1850
1851	if (mhba->global_isr & mhba->regs->int_comaout)
1852		mvumi_receive_ob_list_entry(mhba);
1853
1854	mhba->global_isr = 0;
1855	mhba->isr_status = 0;
1856	if (mhba->fw_state == FW_STATE_STARTED)
1857		mvumi_handle_clob(mhba);
1858	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1859	return IRQ_HANDLED;
1860}
1861
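/**
 * mvumi_send_command -	Post one command to the inbound list
 * @mhba:		Adapter soft state
 * @cmd:		Command to post
 *
 * Returns MV_QUEUE_COMMAND_RESULT_NO_RESOURCE if the firmware is not
 * started or no tag is free; otherwise claims a tag and an inbound slot
 * and either links the frame (dynamic source mode) or copies it in.
 */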
1862static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1863						struct mvumi_cmd *cmd)
1864{
1865	void *ib_entry;
1866	struct mvumi_msg_frame *ib_frame;
1867	unsigned int frame_len;
1868
1869	ib_frame = cmd->frame;
1870	if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1871		dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1872		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1873	}
1874	if (tag_is_empty(&mhba->tag_pool)) {
1875		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1876		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1877	}
1878	mvumi_get_ib_list_entry(mhba, &ib_entry);
1879
1880	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1881	cmd->frame->request_id = mhba->io_seq++;
1882	cmd->request_id = cmd->frame->request_id;
1883	mhba->tag_cmd[cmd->frame->tag] = cmd;
1884	frame_len = sizeof(*ib_frame) - 4 +
1885				ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1886	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1887		struct mvumi_dyn_list_entry *dle;
1888		dle = ib_entry;
1889		dle->src_low_addr =
1890			cpu_to_le32(lower_32_bits(cmd->frame_phys));
1891		dle->src_high_addr =
1892			cpu_to_le32(upper_32_bits(cmd->frame_phys));
1893		dle->if_length = (frame_len >> 2) & 0xFFF;
1894	} else {
1895		memcpy(ib_entry, ib_frame, frame_len);
1896	}
1897	return MV_QUEUE_COMMAND_RESULT_SENT;
1898}
1899
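/**
 * mvumi_fire_cmd -	Issue queued commands to the firmware
 * @mhba:		Adapter soft state
 * @cmd:		New command to append to the waiting list, may be NULL
 *
 * Sends as many waiting commands as the inbound list can currently take
 * and rings the inbound doorbell once if anything was posted.
 */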
1900static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1901{
1902	unsigned short num_of_cl_sent = 0;
1903	unsigned int count;
1904	enum mvumi_qc_result result;
1905
1906	if (cmd)
1907		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1908	count = mhba->instancet->check_ib_list(mhba);
1909	if (list_empty(&mhba->waiting_req_list) || !count)
1910		return;
1911
1912	do {
1913		cmd = list_first_entry(&mhba->waiting_req_list,
1914				       struct mvumi_cmd, queue_pointer);
1915		list_del_init(&cmd->queue_pointer);
1916		result = mvumi_send_command(mhba, cmd);
1917		switch (result) {
1918		case MV_QUEUE_COMMAND_RESULT_SENT:
1919			num_of_cl_sent++;
1920			break;
1921		case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1922			list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1923			if (num_of_cl_sent > 0)
1924				mvumi_send_ib_list_entry(mhba);
1925
1926			return;
1927		}
1928	} while (!list_empty(&mhba->waiting_req_list) && count--);
1929
1930	if (num_of_cl_sent > 0)
1931		mvumi_send_ib_list_entry(mhba);
1932}
1933
1934/**
1935 * mvumi_enable_intr -	Enables interrupts
1936 * @mhba:		Adapter soft state
1937 */
1938static void mvumi_enable_intr(struct mvumi_hba *mhba)
1939{
1940	unsigned int mask;
1941	struct mvumi_hw_regs *regs = mhba->regs;
1942
1943	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1944	mask = ioread32(regs->enpointa_mask_reg);
1945	mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1946	iowrite32(mask, regs->enpointa_mask_reg);
1947}
1948
1949/**
1950 * mvumi_disable_intr -	Disables interrupts
1951 * @mhba:		Adapter soft state
1952 */
1953static void mvumi_disable_intr(struct mvumi_hba *mhba)
1954{
1955	unsigned int mask;
1956	struct mvumi_hw_regs *regs = mhba->regs;
1957
1958	iowrite32(0, regs->arm_to_pciea_mask_reg);
1959	mask = ioread32(regs->enpointa_mask_reg);
1960	mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1961							regs->int_comaerr);
1962	iowrite32(mask, regs->enpointa_mask_reg);
1963}
1964
1965static int mvumi_clear_intr(void *extend)
1966{
1967	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1968	unsigned int status, isr_status = 0, tmp = 0;
1969	struct mvumi_hw_regs *regs = mhba->regs;
1970
1971	status = ioread32(regs->main_int_cause_reg);
1972	if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1973		return 1;
1974	if (unlikely(status & regs->int_comaerr)) {
1975		tmp = ioread32(regs->outb_isr_cause);
1976		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1977			if (tmp & regs->clic_out_err) {
1978				iowrite32(tmp & regs->clic_out_err,
1979							regs->outb_isr_cause);
1980			}
1981		} else {
1982			if (tmp & (regs->clic_in_err | regs->clic_out_err))
1983				iowrite32(tmp & (regs->clic_in_err |
1984						regs->clic_out_err),
1985						regs->outb_isr_cause);
1986		}
1987		status ^= mhba->regs->int_comaerr;
1988		/* inbound or outbound parity error, command will timeout */
1989	}
1990	if (status & regs->int_comaout) {
1991		tmp = ioread32(regs->outb_isr_cause);
1992		if (tmp & regs->clic_irq)
1993			iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1994	}
1995	if (status & regs->int_dl_cpu2pciea) {
1996		isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1997		if (isr_status)
1998			iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1999	}
2000
2001	mhba->global_isr = status;
2002	mhba->isr_status = isr_status;
2003
2004	return 0;
2005}
2006
2007/**
2008 * mvumi_read_fw_status_reg - returns the current FW status value
2009 * @mhba:		Adapter soft state
2010 */
2011static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
2012{
2013	unsigned int status;
2014
2015	status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
2016	if (status)
2017		iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
2018	return status;
2019}
2020
2021static struct mvumi_instance_template mvumi_instance_9143 = {
2022	.fire_cmd = mvumi_fire_cmd,
2023	.enable_intr = mvumi_enable_intr,
2024	.disable_intr = mvumi_disable_intr,
2025	.clear_intr = mvumi_clear_intr,
2026	.read_fw_status_reg = mvumi_read_fw_status_reg,
2027	.check_ib_list = mvumi_check_ib_list_9143,
2028	.check_ob_list = mvumi_check_ob_list_9143,
2029	.reset_host = mvumi_reset_host_9143,
2030};
2031
2032static struct mvumi_instance_template mvumi_instance_9580 = {
2033	.fire_cmd = mvumi_fire_cmd,
2034	.enable_intr = mvumi_enable_intr,
2035	.disable_intr = mvumi_disable_intr,
2036	.clear_intr = mvumi_clear_intr,
2037	.read_fw_status_reg = mvumi_read_fw_status_reg,
2038	.check_ib_list = mvumi_check_ib_list_9580,
2039	.check_ob_list = mvumi_check_ob_list_9580,
2040	.reset_host = mvumi_reset_host_9580,
2041};
2042
2043static int mvumi_slave_configure(struct scsi_device *sdev)
2044{
2045	struct mvumi_hba *mhba;
2046	unsigned char bitcount = sizeof(unsigned char) * 8;
2047
2048	mhba = (struct mvumi_hba *) sdev->host->hostdata;
2049	if (sdev->id >= mhba->max_target_id)
2050		return -EINVAL;
2051
2052	mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2053	return 0;
2054}
2055
2056/**
2057 * mvumi_build_frame -	Prepares a direct cdb (DCDB) command
2058 * @mhba:		Adapter soft state
2059 * @scmd:		SCSI command
2060 * @cmd:		Command to be prepared in
2061 *
2062 * This function prepares CDB commands. These are typically pass-through
2063 * commands to the devices.
2064 */
2065static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
2066				struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
2067{
2068	struct mvumi_msg_frame *pframe;
2069
2070	cmd->scmd = scmd;
2071	cmd->cmd_status = REQ_STATUS_PENDING;
2072	pframe = cmd->frame;
2073	pframe->device_id = ((unsigned short) scmd->device->id) |
2074				(((unsigned short) scmd->device->lun) << 8);
2075	pframe->cmd_flag = 0;
2076
2077	switch (scmd->sc_data_direction) {
2078	case DMA_NONE:
2079		pframe->cmd_flag |= CMD_FLAG_NON_DATA;
2080		break;
2081	case DMA_FROM_DEVICE:
2082		pframe->cmd_flag |= CMD_FLAG_DATA_IN;
2083		break;
2084	case DMA_TO_DEVICE:
2085		pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
2086		break;
2087	case DMA_BIDIRECTIONAL:
2088	default:
2089		dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2090			"cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2091		goto error;
2092	}
2093
2094	pframe->cdb_length = scmd->cmd_len;
2095	memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
2096	pframe->req_function = CL_FUN_SCSI_CMD;
2097	if (scsi_bufflen(scmd)) {
2098		if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2099			&pframe->sg_counts))
2100			goto error;
2101
2102		pframe->data_transfer_length = scsi_bufflen(scmd);
2103	} else {
2104		pframe->sg_counts = 0;
2105		pframe->data_transfer_length = 0;
2106	}
2107	return 0;
2108
2109error:
2110	scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
2111		SAM_STAT_CHECK_CONDITION;
2112	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
2113									0);
2114	return -1;
2115}
2116
2117/**
2118 * mvumi_queue_command -	Queue entry point
2119 * @shost:			Scsi_Host to which the command is queued
2120 * @scmd:			SCSI command to be queued
2121 */
2122static int mvumi_queue_command(struct Scsi_Host *shost,
2123					struct scsi_cmnd *scmd)
2124{
2125	struct mvumi_cmd *cmd;
2126	struct mvumi_hba *mhba;
2127	unsigned long irq_flags;
2128
2129	spin_lock_irqsave(shost->host_lock, irq_flags);
2130	scsi_cmd_get_serial(shost, scmd);
2131
2132	mhba = (struct mvumi_hba *) shost->hostdata;
2133	scmd->result = 0;
2134	cmd = mvumi_get_cmd(mhba);
2135	if (unlikely(!cmd)) {
2136		spin_unlock_irqrestore(shost->host_lock, irq_flags);
2137		return SCSI_MLQUEUE_HOST_BUSY;
2138	}
2139
2140	if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2141		goto out_return_cmd;
2142
2143	cmd->scmd = scmd;
2144	scmd->SCp.ptr = (char *) cmd;
2145	mhba->instancet->fire_cmd(mhba, cmd);
2146	spin_unlock_irqrestore(shost->host_lock, irq_flags);
2147	return 0;
2148
2149out_return_cmd:
2150	mvumi_return_cmd(mhba, cmd);
2151	scmd->scsi_done(scmd);
2152	spin_unlock_irqrestore(shost->host_lock, irq_flags);
2153	return 0;
2154}
2155
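/**
 * mvumi_timed_out -	SCSI EH timeout callback
 * @scmd:		Timed-out SCSI command
 *
 * Reclaims the tag, unmaps the data buffer, marks the command aborted and
 * returns the driver command packet to the free pool.
 */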
2156static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
2157{
2158	struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
2159	struct Scsi_Host *host = scmd->device->host;
2160	struct mvumi_hba *mhba = shost_priv(host);
2161	unsigned long flags;
2162
2163	spin_lock_irqsave(mhba->shost->host_lock, flags);
2164
2165	if (mhba->tag_cmd[cmd->frame->tag]) {
2166		mhba->tag_cmd[cmd->frame->tag] = 0;
2167		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2168	}
2169	if (!list_empty(&cmd->queue_pointer))
2170		list_del_init(&cmd->queue_pointer);
2171	else
2172		atomic_dec(&mhba->fw_outstanding);
2173
2174	scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
2175	scmd->SCp.ptr = NULL;
2176	if (scsi_bufflen(scmd)) {
2177		if (scsi_sg_count(scmd)) {
2178			pci_unmap_sg(mhba->pdev,
2179				scsi_sglist(scmd),
2180				scsi_sg_count(scmd),
2181				(int)scmd->sc_data_direction);
2182		} else {
2183			pci_unmap_single(mhba->pdev,
2184				scmd->SCp.dma_handle,
2185				scsi_bufflen(scmd),
2186				(int)scmd->sc_data_direction);
2187
2188			scmd->SCp.dma_handle = 0;
2189		}
2190	}
2191	mvumi_return_cmd(mhba, cmd);
2192	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2193
2194	return BLK_EH_NOT_HANDLED;
2195}
2196
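/**
 * mvumi_bios_param -	Report a disk geometry for the BIOS
 * @sdev:		SCSI device
 * @bdev:		Block device
 * @capacity:		Capacity in 512-byte sectors
 * @geom:		Returned heads/sectors/cylinders
 *
 * Uses 64 heads and 32 sectors per track, switching to 255/63 for
 * devices of 1 GB and above.
 */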
2197static int
2198mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2199			sector_t capacity, int geom[])
2200{
2201	int heads, sectors;
2202	sector_t cylinders;
2203	unsigned long tmp;
2204
2205	heads = 64;
2206	sectors = 32;
2207	tmp = heads * sectors;
2208	cylinders = capacity;
2209	sector_div(cylinders, tmp);
2210
2211	if (capacity >= 0x200000) {
2212		heads = 255;
2213		sectors = 63;
2214		tmp = heads * sectors;
2215		cylinders = capacity;
2216		sector_div(cylinders, tmp);
2217	}
2218	geom[0] = heads;
2219	geom[1] = sectors;
2220	geom[2] = cylinders;
2221
2222	return 0;
2223}
2224
2225static struct scsi_host_template mvumi_template = {
2226
2227	.module = THIS_MODULE,
2228	.name = "Marvell Storage Controller",
2229	.slave_configure = mvumi_slave_configure,
2230	.queuecommand = mvumi_queue_command,
2231	.eh_host_reset_handler = mvumi_host_reset,
2232	.bios_param = mvumi_bios_param,
2233	.this_id = -1,
2234};
2235
2236static struct scsi_transport_template mvumi_transport_template = {
2237	.eh_timed_out = mvumi_timed_out,
2238};
2239
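/**
 * mvumi_cfg_hw_reg -	Build the per-chip register map
 * @mhba:		Adapter soft state
 *
 * Selects the BAR holding the message unit and records register offsets
 * and interrupt/doorbell bit layouts for the 9143 and 9580 controllers.
 */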
2240static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2241{
2242	void *base = NULL;
2243	struct mvumi_hw_regs *regs;
2244
2245	switch (mhba->pdev->device) {
2246	case PCI_DEVICE_ID_MARVELL_MV9143:
2247		mhba->mmio = mhba->base_addr[0];
2248		base = mhba->mmio;
2249		if (!mhba->regs) {
2250			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2251			if (mhba->regs == NULL)
2252				return -ENOMEM;
2253		}
2254		regs = mhba->regs;
2255
2256		/* For Arm */
2257		regs->ctrl_sts_reg          = base + 0x20104;
2258		regs->rstoutn_mask_reg      = base + 0x20108;
2259		regs->sys_soft_rst_reg      = base + 0x2010C;
2260		regs->main_int_cause_reg    = base + 0x20200;
2261		regs->enpointa_mask_reg     = base + 0x2020C;
2262		regs->rstoutn_en_reg        = base + 0xF1400;
2263		/* For Doorbell */
2264		regs->pciea_to_arm_drbl_reg = base + 0x20400;
2265		regs->arm_to_pciea_drbl_reg = base + 0x20408;
2266		regs->arm_to_pciea_mask_reg = base + 0x2040C;
2267		regs->pciea_to_arm_msg0     = base + 0x20430;
2268		regs->pciea_to_arm_msg1     = base + 0x20434;
2269		regs->arm_to_pciea_msg0     = base + 0x20438;
2270		regs->arm_to_pciea_msg1     = base + 0x2043C;
2271
2272		/* For Message Unit */
2273
2274		regs->inb_aval_count_basel  = base + 0x508;
2275		regs->inb_aval_count_baseh  = base + 0x50C;
2276		regs->inb_write_pointer     = base + 0x518;
2277		regs->inb_read_pointer      = base + 0x51C;
2278		regs->outb_coal_cfg         = base + 0x568;
2279		regs->outb_copy_basel       = base + 0x5B0;
2280		regs->outb_copy_baseh       = base + 0x5B4;
2281		regs->outb_copy_pointer     = base + 0x544;
2282		regs->outb_read_pointer     = base + 0x548;
2283		regs->outb_isr_cause        = base + 0x560;
2284		regs->outb_coal_cfg         = base + 0x568;
2285		/* Bit setting for HW */
2286		regs->int_comaout           = 1 << 8;
2287		regs->int_comaerr           = 1 << 6;
2288		regs->int_dl_cpu2pciea      = 1 << 1;
2289		regs->cl_pointer_toggle     = 1 << 12;
2290		regs->clic_irq              = 1 << 1;
2291		regs->clic_in_err           = 1 << 8;
2292		regs->clic_out_err          = 1 << 12;
2293		regs->cl_slot_num_mask      = 0xFFF;
2294		regs->int_drbl_int_mask     = 0x3FFFFFFF;
2295		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2296							regs->int_comaerr;
2297		break;
2298	case PCI_DEVICE_ID_MARVELL_MV9580:
2299		mhba->mmio = mhba->base_addr[2];
2300		base = mhba->mmio;
2301		if (!mhba->regs) {
2302			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2303			if (mhba->regs == NULL)
2304				return -ENOMEM;
2305		}
2306		regs = mhba->regs;
2307		/* For Arm */
2308		regs->ctrl_sts_reg          = base + 0x20104;
2309		regs->rstoutn_mask_reg      = base + 0x1010C;
2310		regs->sys_soft_rst_reg      = base + 0x10108;
2311		regs->main_int_cause_reg    = base + 0x10200;
2312		regs->enpointa_mask_reg     = base + 0x1020C;
2313		regs->rstoutn_en_reg        = base + 0xF1400;
2314
2315		/* For Doorbell */
2316		regs->pciea_to_arm_drbl_reg = base + 0x10460;
2317		regs->arm_to_pciea_drbl_reg = base + 0x10480;
2318		regs->arm_to_pciea_mask_reg = base + 0x10484;
2319		regs->pciea_to_arm_msg0     = base + 0x10400;
2320		regs->pciea_to_arm_msg1     = base + 0x10404;
2321		regs->arm_to_pciea_msg0     = base + 0x10420;
2322		regs->arm_to_pciea_msg1     = base + 0x10424;
2323
2324		/* For reset*/
2325		regs->reset_request         = base + 0x10108;
2326		regs->reset_enable          = base + 0x1010c;
2327
2328		/* For Message Unit */
2329		regs->inb_aval_count_basel  = base + 0x4008;
2330		regs->inb_aval_count_baseh  = base + 0x400C;
2331		regs->inb_write_pointer     = base + 0x4018;
2332		regs->inb_read_pointer      = base + 0x401C;
2333		regs->outb_copy_basel       = base + 0x4058;
2334		regs->outb_copy_baseh       = base + 0x405C;
2335		regs->outb_copy_pointer     = base + 0x406C;
2336		regs->outb_read_pointer     = base + 0x4070;
2337		regs->outb_coal_cfg         = base + 0x4080;
2338		regs->outb_isr_cause        = base + 0x4088;
2339		/* Bit setting for HW */
2340		regs->int_comaout           = 1 << 4;
2341		regs->int_dl_cpu2pciea      = 1 << 12;
2342		regs->int_comaerr           = 1 << 29;
2343		regs->cl_pointer_toggle     = 1 << 14;
2344		regs->cl_slot_num_mask      = 0x3FFF;
2345		regs->clic_irq              = 1 << 0;
2346		regs->clic_out_err          = 1 << 1;
2347		regs->int_drbl_int_mask     = 0x3FFFFFFF;
2348		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2349		break;
2350	default:
2351		return -1;
2352		break;
2353	}
2354
2355	return 0;
2356}
2357
2358/**
2359 * mvumi_init_fw -	Initializes the FW
2360 * @mhba:		Adapter soft state
2361 *
2362 * This is the main function for initializing firmware.
2363 */
2364static int mvumi_init_fw(struct mvumi_hba *mhba)
2365{
2366	int ret = 0;
2367
2368	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2369		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2370		return -EBUSY;
2371	}
2372	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2373	if (ret)
2374		goto fail_ioremap;
2375
2376	switch (mhba->pdev->device) {
2377	case PCI_DEVICE_ID_MARVELL_MV9143:
2378		mhba->instancet = &mvumi_instance_9143;
2379		mhba->io_seq = 0;
2380		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2381		mhba->request_id_enabled = 1;
2382		break;
2383	case PCI_DEVICE_ID_MARVELL_MV9580:
2384		mhba->instancet = &mvumi_instance_9580;
2385		mhba->io_seq = 0;
2386		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2387		break;
2388	default:
2389		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2390							mhba->pdev->device);
2391		mhba->instancet = NULL;
2392		ret = -EINVAL;
2393		goto fail_alloc_mem;
2394	}
2395	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
2396							mhba->pdev->device);
2397	ret = mvumi_cfg_hw_reg(mhba);
2398	if (ret) {
2399		dev_err(&mhba->pdev->dev,
2400			"failed to allocate memory for reg\n");
2401		ret = -ENOMEM;
2402		goto fail_alloc_mem;
2403	}
2404	mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
2405						&mhba->handshake_page_phys);
2406	if (!mhba->handshake_page) {
2407		dev_err(&mhba->pdev->dev,
2408			"failed to allocate memory for handshake\n");
2409		ret = -ENOMEM;
2410		goto fail_alloc_page;
2411	}
2412
2413	if (mvumi_start(mhba)) {
2414		ret = -EINVAL;
2415		goto fail_ready_state;
2416	}
2417	ret = mvumi_alloc_cmds(mhba);
2418	if (ret)
2419		goto fail_ready_state;
2420
2421	return 0;
2422
2423fail_ready_state:
2424	mvumi_release_mem_resource(mhba);
2425	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
2426		mhba->handshake_page, mhba->handshake_page_phys);
2427fail_alloc_page:
2428	kfree(mhba->regs);
2429fail_alloc_mem:
2430	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2431fail_ioremap:
2432	pci_release_regions(mhba->pdev);
2433
2434	return ret;
2435}
2436
2437/**
2438 * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
2439 * @mhba:		Adapter soft state
2440 */
2441static int mvumi_io_attach(struct mvumi_hba *mhba)
2442{
2443	struct Scsi_Host *host = mhba->shost;
2444	struct scsi_device *sdev = NULL;
2445	int ret;
2446	unsigned int max_sg = (mhba->ib_max_size + 4 -
2447		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
2448
2449	host->irq = mhba->pdev->irq;
2450	host->unique_id = mhba->unique_id;
2451	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2452	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2453	host->max_sectors = mhba->max_transfer_size / 512;
2454	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2455	host->max_id = mhba->max_target_id;
2456	host->max_cmd_len = MAX_COMMAND_SIZE;
2457	host->transportt = &mvumi_transport_template;
2458
2459	ret = scsi_add_host(host, &mhba->pdev->dev);
2460	if (ret) {
2461		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2462		return ret;
2463	}
2464	mhba->fw_flag |= MVUMI_FW_ATTACH;
2465
2466	mutex_lock(&mhba->sas_discovery_mutex);
2467	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2468		ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2469	else
2470		ret = 0;
2471	if (ret) {
2472		dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2473		mutex_unlock(&mhba->sas_discovery_mutex);
2474		goto fail_add_device;
2475	}
2476
2477	mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2478						mhba, "mvumi_scanthread");
2479	if (IS_ERR(mhba->dm_thread)) {
2480		dev_err(&mhba->pdev->dev,
2481			"failed to create device scan thread\n");
2482		mutex_unlock(&mhba->sas_discovery_mutex);
2483		goto fail_create_thread;
2484	}
2485	atomic_set(&mhba->pnp_count, 1);
2486	wake_up_process(mhba->dm_thread);
2487
2488	mutex_unlock(&mhba->sas_discovery_mutex);
2489	return 0;
2490
2491fail_create_thread:
2492	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2493		sdev = scsi_device_lookup(mhba->shost, 0,
2494						mhba->max_target_id - 1, 0);
2495	if (sdev) {
2496		scsi_remove_device(sdev);
2497		scsi_device_put(sdev);
2498	}
2499fail_add_device:
2500	scsi_remove_host(mhba->shost);
2501	return ret;
2502}
2503
2504/**
2505 * mvumi_probe_one -	PCI hotplug entry point
2506 * @pdev:		PCI device structure
2507 * @id:			PCI ids of supported hotplugged adapter
2508 */
2509static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2510{
2511	struct Scsi_Host *host;
2512	struct mvumi_hba *mhba;
2513	int ret;
2514
2515	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2516			pdev->vendor, pdev->device, pdev->subsystem_vendor,
2517			pdev->subsystem_device);
2518
2519	ret = pci_enable_device(pdev);
2520	if (ret)
2521		return ret;
2522
2523	pci_set_master(pdev);
2524
2525	if (IS_DMA64) {
2526		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2527		if (ret) {
2528			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2529			if (ret)
2530				goto fail_set_dma_mask;
2531		}
2532	} else {
2533		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2534		if (ret)
2535			goto fail_set_dma_mask;
2536	}
2537
2538	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2539	if (!host) {
2540		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
2541		ret = -ENOMEM;
2542		goto fail_alloc_instance;
2543	}
2544	mhba = shost_priv(host);
2545
2546	INIT_LIST_HEAD(&mhba->cmd_pool);
2547	INIT_LIST_HEAD(&mhba->ob_data_list);
2548	INIT_LIST_HEAD(&mhba->free_ob_list);
2549	INIT_LIST_HEAD(&mhba->res_list);
2550	INIT_LIST_HEAD(&mhba->waiting_req_list);
2551	mutex_init(&mhba->device_lock);
2552	INIT_LIST_HEAD(&mhba->mhba_dev_list);
2553	INIT_LIST_HEAD(&mhba->shost_dev_list);
2554	atomic_set(&mhba->fw_outstanding, 0);
2555	init_waitqueue_head(&mhba->int_cmd_wait_q);
2556	mutex_init(&mhba->sas_discovery_mutex);
2557
2558	mhba->pdev = pdev;
2559	mhba->shost = host;
2560	mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
2561
2562	ret = mvumi_init_fw(mhba);
2563	if (ret)
2564		goto fail_init_fw;
2565
2566	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2567				"mvumi", mhba);
2568	if (ret) {
2569		dev_err(&pdev->dev, "failed to register IRQ\n");
2570		goto fail_init_irq;
2571	}
2572
2573	mhba->instancet->enable_intr(mhba);
2574	pci_set_drvdata(pdev, mhba);
2575
2576	ret = mvumi_io_attach(mhba);
2577	if (ret)
2578		goto fail_io_attach;
2579
2580	mvumi_backup_bar_addr(mhba);
2581	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
2582
2583	return 0;
2584
2585fail_io_attach:
2586	mhba->instancet->disable_intr(mhba);
2587	free_irq(mhba->pdev->irq, mhba);
2588fail_init_irq:
2589	mvumi_release_fw(mhba);
2590fail_init_fw:
2591	scsi_host_put(host);
2592
2593fail_alloc_instance:
2594fail_set_dma_mask:
2595	pci_disable_device(pdev);
2596
2597	return ret;
2598}
2599
2600static void mvumi_detach_one(struct pci_dev *pdev)
2601{
2602	struct Scsi_Host *host;
2603	struct mvumi_hba *mhba;
2604
2605	mhba = pci_get_drvdata(pdev);
2606	if (mhba->dm_thread) {
2607		kthread_stop(mhba->dm_thread);
2608		mhba->dm_thread = NULL;
2609	}
2610
2611	mvumi_detach_devices(mhba);
2612	host = mhba->shost;
2613	scsi_remove_host(mhba->shost);
2614	mvumi_flush_cache(mhba);
2615
2616	mhba->instancet->disable_intr(mhba);
2617	free_irq(mhba->pdev->irq, mhba);
2618	mvumi_release_fw(mhba);
2619	scsi_host_put(host);
2620	pci_disable_device(pdev);
2621	dev_dbg(&pdev->dev, "driver is removed!\n");
2622}
2623
2624/**
2625 * mvumi_shutdown -	Shutdown entry point
2626 * @pdev:		PCI device structure
2627 */
2628static void mvumi_shutdown(struct pci_dev *pdev)
2629{
2630	struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2631
2632	mvumi_flush_cache(mhba);
2633}
2634
2635static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2636{
2637	struct mvumi_hba *mhba = NULL;
2638
2639	mhba = pci_get_drvdata(pdev);
2640	mvumi_flush_cache(mhba);
2641
2642	pci_set_drvdata(pdev, mhba);
2643	mhba->instancet->disable_intr(mhba);
2644	free_irq(mhba->pdev->irq, mhba);
2645	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2646	pci_release_regions(pdev);
2647	pci_save_state(pdev);
2648	pci_disable_device(pdev);
2649	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2650
2651	return 0;
2652}
2653
2654static int mvumi_resume(struct pci_dev *pdev)
2655{
2656	int ret;
2657	struct mvumi_hba *mhba = NULL;
2658
2659	mhba = pci_get_drvdata(pdev);
2660
2661	pci_set_power_state(pdev, PCI_D0);
2662	pci_enable_wake(pdev, PCI_D0, 0);
2663	pci_restore_state(pdev);
2664
2665	ret = pci_enable_device(pdev);
2666	if (ret) {
2667		dev_err(&pdev->dev, "enable device failed\n");
2668		return ret;
2669	}
2670	pci_set_master(pdev);
2671	if (IS_DMA64) {
2672		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2673		if (ret) {
2674			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2675			if (ret)
2676				goto fail;
2677		}
2678	} else {
2679		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2680		if (ret)
2681			goto fail;
2682	}
2683	ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2684	if (ret)
2685		goto fail;
2686	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2687	if (ret)
2688		goto release_regions;
2689
2690	if (mvumi_cfg_hw_reg(mhba)) {
2691		ret = -EINVAL;
2692		goto unmap_pci_addr;
2693	}
2694
2695	mhba->mmio = mhba->base_addr[0];
2696	mvumi_reset(mhba);
2697
2698	if (mvumi_start(mhba)) {
2699		ret = -EINVAL;
2700		goto unmap_pci_addr;
2701	}
2702
2703	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2704				"mvumi", mhba);
2705	if (ret) {
2706		dev_err(&pdev->dev, "failed to register IRQ\n");
2707		goto unmap_pci_addr;
2708	}
2709	mhba->instancet->enable_intr(mhba);
2710
2711	return 0;
2712
2713unmap_pci_addr:
2714	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2715release_regions:
2716	pci_release_regions(pdev);
2717fail:
2718	pci_disable_device(pdev);
2719
2720	return ret;
2721}
2722
2723static struct pci_driver mvumi_pci_driver = {
2724
2725	.name = MV_DRIVER_NAME,
2726	.id_table = mvumi_pci_table,
2727	.probe = mvumi_probe_one,
2728	.remove = mvumi_detach_one,
2729	.shutdown = mvumi_shutdown,
2730#ifdef CONFIG_PM
2731	.suspend = mvumi_suspend,
2732	.resume = mvumi_resume,
2733#endif
2734};
2735
2736/**
2737 * mvumi_init - Driver load entry point
2738 */
2739static int __init mvumi_init(void)
2740{
2741	return pci_register_driver(&mvumi_pci_driver);
2742}
2743
2744/**
2745 * mvumi_exit - Driver unload entry point
2746 */
2747static void __exit mvumi_exit(void)
2748{
2749
2750	pci_unregister_driver(&mvumi_pci_driver);
2751}
2752
2753module_init(mvumi_init);
2754module_exit(mvumi_exit);
v4.17
   1/*
   2 * Marvell UMI driver
   3 *
   4 * Copyright 2011 Marvell. <jyli@marvell.com>
   5 *
   6 * This file is licensed under GPLv2.
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public License as
  10 * published by the Free Software Foundation; version 2 of the
  11 * License.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  21 * USA
  22*/
  23
  24#include <linux/kernel.h>
  25#include <linux/module.h>
  26#include <linux/moduleparam.h>
  27#include <linux/init.h>
  28#include <linux/device.h>
  29#include <linux/pci.h>
  30#include <linux/list.h>
  31#include <linux/spinlock.h>
  32#include <linux/interrupt.h>
  33#include <linux/delay.h>
  34#include <linux/ktime.h>
  35#include <linux/blkdev.h>
  36#include <linux/io.h>
  37#include <scsi/scsi.h>
  38#include <scsi/scsi_cmnd.h>
  39#include <scsi/scsi_device.h>
  40#include <scsi/scsi_host.h>
  41#include <scsi/scsi_transport.h>
  42#include <scsi/scsi_eh.h>
  43#include <linux/uaccess.h>
  44#include <linux/kthread.h>
  45
  46#include "mvumi.h"
  47
  48MODULE_LICENSE("GPL");
  49MODULE_AUTHOR("jyli@marvell.com");
  50MODULE_DESCRIPTION("Marvell UMI Driver");
  51
  52static const struct pci_device_id mvumi_pci_table[] = {
  53	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
  54	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
  55	{ 0 }
  56};
  57
  58MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
  59
  60static void tag_init(struct mvumi_tag *st, unsigned short size)
  61{
  62	unsigned short i;
  63	BUG_ON(size != st->size);
  64	st->top = size;
  65	for (i = 0; i < size; i++)
  66		st->stack[i] = size - 1 - i;
  67}
  68
  69static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
  70{
  71	BUG_ON(st->top <= 0);
  72	return st->stack[--st->top];
  73}
  74
  75static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
  76							unsigned short tag)
  77{
  78	BUG_ON(st->top >= st->size);
  79	st->stack[st->top++] = tag;
  80}
  81
  82static bool tag_is_empty(struct mvumi_tag *st)
  83{
  84	if (st->top == 0)
  85		return 1;
  86	else
  87		return 0;
  88}
  89
  90static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
  91{
  92	int i;
  93
  94	for (i = 0; i < MAX_BASE_ADDRESS; i++)
  95		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
  96								addr_array[i])
  97			pci_iounmap(dev, addr_array[i]);
  98}
  99
 100static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
 101{
 102	int i;
 103
 104	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 105		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
 106			addr_array[i] = pci_iomap(dev, i, 0);
 107			if (!addr_array[i]) {
 108				dev_err(&dev->dev, "failed to map Bar[%d]\n",
 109									i);
 110				mvumi_unmap_pci_addr(dev, addr_array);
 111				return -ENOMEM;
 112			}
 113		} else
 114			addr_array[i] = NULL;
 115
 116		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
 117	}
 118
 119	return 0;
 120}
 121
 122static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 123				enum resource_type type, unsigned int size)
 124{
 125	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
 126
 127	if (!res) {
 128		dev_err(&mhba->pdev->dev,
 129			"Failed to allocate memory for resource manager.\n");
 130		return NULL;
 131	}
 132
 133	switch (type) {
 134	case RESOURCE_CACHED_MEMORY:
 135		res->virt_addr = kzalloc(size, GFP_ATOMIC);
 136		if (!res->virt_addr) {
 137			dev_err(&mhba->pdev->dev,
 138				"unable to allocate memory,size = %d.\n", size);
 139			kfree(res);
 140			return NULL;
 141		}
 142		break;
 143
 144	case RESOURCE_UNCACHED_MEMORY:
 145		size = round_up(size, 8);
 146		res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
 147						       &res->bus_addr);
 148		if (!res->virt_addr) {
 149			dev_err(&mhba->pdev->dev,
 150					"unable to allocate consistent mem,"
 151							"size = %d.\n", size);
 152			kfree(res);
 153			return NULL;
 154		}
 155		break;
 156
 157	default:
 158		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
 159		kfree(res);
 160		return NULL;
 161	}
 162
 163	res->type = type;
 164	res->size = size;
 165	INIT_LIST_HEAD(&res->entry);
 166	list_add_tail(&res->entry, &mhba->res_list);
 167
 168	return res;
 169}
 170
 171static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
 172{
 173	struct mvumi_res *res, *tmp;
 174
 175	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
 176		switch (res->type) {
 177		case RESOURCE_UNCACHED_MEMORY:
 178			pci_free_consistent(mhba->pdev, res->size,
 179						res->virt_addr, res->bus_addr);
 180			break;
 181		case RESOURCE_CACHED_MEMORY:
 182			kfree(res->virt_addr);
 183			break;
 184		default:
 185			dev_err(&mhba->pdev->dev,
 186				"unknown resource type %d\n", res->type);
 187			break;
 188		}
 189		list_del(&res->entry);
 190		kfree(res);
 191	}
 192	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
 193}
 194
 195/**
 196 * mvumi_make_sgl -	Prepares SGL
 197 * @mhba:		Adapter soft state
 198 * @scmd:		SCSI command from the mid-layer
 199 * @sgl_p:		SGL to be filled in
 200 * @sg_count:		return the number of SG elements
 201 *
 202 * If successful, this function returns 0. Otherwise, it returns -1.
 203 */
 204static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 205					void *sgl_p, unsigned char *sg_count)
 206{
 207	struct scatterlist *sg;
 208	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
 209	unsigned int i;
 210	unsigned int sgnum = scsi_sg_count(scmd);
 211	dma_addr_t busaddr;
 212
 213	sg = scsi_sglist(scmd);
 214	*sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
 215			       (int) scmd->sc_data_direction);
 216	if (*sg_count > mhba->max_sge) {
 217		dev_err(&mhba->pdev->dev,
 218			"sg count[0x%x] is bigger than max sg[0x%x].\n",
 219			*sg_count, mhba->max_sge);
 220		pci_unmap_sg(mhba->pdev, sg, sgnum,
 221			     (int) scmd->sc_data_direction);
 222		return -1;
 223	}
 224	for (i = 0; i < *sg_count; i++) {
 225		busaddr = sg_dma_address(&sg[i]);
 226		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 227		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 228		m_sg->flags = 0;
 229		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
 230		if ((i + 1) == *sg_count)
 231			m_sg->flags |= 1U << mhba->eot_flag;
 232
 233		sgd_inc(mhba, m_sg);
 234	}
 235
 236	return 0;
 237}
 238
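/**
 * mvumi_internal_cmd_sgl -	Attach a DMA buffer to an internal command
 * @mhba:		Adapter soft state
 * @cmd:		Internal command being built
 * @size:		Buffer size in bytes; 0 means no data phase
 */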
 239static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
 240							unsigned int size)
 241{
 242	struct mvumi_sgl *m_sg;
 243	void *virt_addr;
 244	dma_addr_t phy_addr;
 245
 246	if (size == 0)
 247		return 0;
 248
 249	virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
 250	if (!virt_addr)
 251		return -1;
 252
 253	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 254	cmd->frame->sg_counts = 1;
 255	cmd->data_buf = virt_addr;
 256
 257	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
 258	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
 259	m_sg->flags = 1U << mhba->eot_flag;
 260	sgd_setsz(mhba, m_sg, cpu_to_le32(size));
 261
 262	return 0;
 263}
 264
 265static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
 266				unsigned int buf_size)
 267{
 268	struct mvumi_cmd *cmd;
 269
 270	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 271	if (!cmd) {
 272		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
 273		return NULL;
 274	}
 275	INIT_LIST_HEAD(&cmd->queue_pointer);
 276
 277	cmd->frame = pci_alloc_consistent(mhba->pdev,
 278				mhba->ib_max_size, &cmd->frame_phys);
 279	if (!cmd->frame) {
 280		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
 281			" frame,size = %d.\n", mhba->ib_max_size);
 282		kfree(cmd);
 283		return NULL;
 284	}
 285
 286	if (buf_size) {
 287		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
 288			dev_err(&mhba->pdev->dev, "failed to allocate memory"
 289						" for internal frame\n");
 290			pci_free_consistent(mhba->pdev, mhba->ib_max_size,
 291					cmd->frame, cmd->frame_phys);
 292			kfree(cmd);
 293			return NULL;
 294		}
 295	} else
 296		cmd->frame->sg_counts = 0;
 297
 298	return cmd;
 299}
 300
 301static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
 302						struct mvumi_cmd *cmd)
 303{
 304	struct mvumi_sgl *m_sg;
 305	unsigned int size;
 306	dma_addr_t phy_addr;
 307
 308	if (cmd && cmd->frame) {
 309		if (cmd->frame->sg_counts) {
 310			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 311			sgd_getsz(mhba, m_sg, size);
 312
 313			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
 314				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 315
 316			pci_free_consistent(mhba->pdev, size, cmd->data_buf,
 317								phy_addr);
 318		}
 319		pci_free_consistent(mhba->pdev, mhba->ib_max_size,
 320				cmd->frame, cmd->frame_phys);
 321		kfree(cmd);
 322	}
 323}
 324
 325/**
 326 * mvumi_get_cmd -	Get a command from the free pool
 327 * @mhba:		Adapter soft state
 328 *
 329 * Returns a free command from the pool
 330 */
 331static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
 332{
 333	struct mvumi_cmd *cmd = NULL;
 334
 335	if (likely(!list_empty(&mhba->cmd_pool))) {
 336		cmd = list_entry((&mhba->cmd_pool)->next,
 337				struct mvumi_cmd, queue_pointer);
 338		list_del_init(&cmd->queue_pointer);
 339	} else
 340		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
 341
 342	return cmd;
 343}
 344
 345/**
 346 * mvumi_return_cmd -	Return a cmd to free command pool
 347 * @mhba:		Adapter soft state
 348 * @cmd:		Command packet to be returned to free command pool
 349 */
 350static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
 351						struct mvumi_cmd *cmd)
 352{
 353	cmd->scmd = NULL;
 354	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 355}
 356
 357/**
 358 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 359 * @mhba:		Adapter soft state
 360 */
 361static void mvumi_free_cmds(struct mvumi_hba *mhba)
 362{
 363	struct mvumi_cmd *cmd;
 364
 365	while (!list_empty(&mhba->cmd_pool)) {
 366		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 367							queue_pointer);
 368		list_del(&cmd->queue_pointer);
 369		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 370			kfree(cmd->frame);
 371		kfree(cmd);
 372	}
 373}
 374
 375/**
 376 * mvumi_alloc_cmds -	Allocates the command packets
 377 * @mhba:		Adapter soft state
 378 *
 379 */
 380static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
 381{
 382	int i;
 383	struct mvumi_cmd *cmd;
 384
 385	for (i = 0; i < mhba->max_io; i++) {
 386		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 387		if (!cmd)
 388			goto err_exit;
 389
 390		INIT_LIST_HEAD(&cmd->queue_pointer);
 391		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 392		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 393			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
 394			cmd->frame_phys = mhba->ib_frame_phys
 395						+ i * mhba->ib_max_size;
 396		} else
 397			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
 398		if (!cmd->frame)
 399			goto err_exit;
 400	}
 401	return 0;
 402
 403err_exit:
 404	dev_err(&mhba->pdev->dev,
 405			"failed to allocate memory for cmd[0x%x].\n", i);
 406	while (!list_empty(&mhba->cmd_pool)) {
 407		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 408						queue_pointer);
 409		list_del(&cmd->queue_pointer);
 410		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 411			kfree(cmd->frame);
 412		kfree(cmd);
 413	}
 414	return -ENOMEM;
 415}
 416
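/**
 * mvumi_check_ib_list_9143 -	Count free inbound list slots on the 9143
 * @mhba:		Adapter soft state
 *
 * Returns 0 when the inbound list or the firmware queue is full,
 * otherwise the number of commands the firmware can still accept.
 */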
 417static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
 418{
 419	unsigned int ib_rp_reg;
 420	struct mvumi_hw_regs *regs = mhba->regs;
 421
 422	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
 423
 424	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
 425			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
 426			((ib_rp_reg & regs->cl_pointer_toggle)
 427			 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
 428		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
 429		return 0;
 430	}
 431	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
 432		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
 433		return 0;
 434	} else {
 435		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
 436	}
 437}
 438
 439static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
 440{
 441	unsigned int count;
 442	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
 443		return 0;
 444	count = ioread32(mhba->ib_shadow);
 445	if (count == 0xffff)
 446		return 0;
 447	return count;
 448}
 449
 450static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
 451{
 452	unsigned int cur_ib_entry;
 453
 454	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
 455	cur_ib_entry++;
 456	if (cur_ib_entry >= mhba->list_num_io) {
 457		cur_ib_entry -= mhba->list_num_io;
 458		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
 459	}
 460	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
 461	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
 462	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 463		*ib_entry = mhba->ib_list + cur_ib_entry *
 464				sizeof(struct mvumi_dyn_list_entry);
 465	} else {
 466		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
 467	}
 468	atomic_inc(&mhba->fw_outstanding);
 469}
 470
 471static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
 472{
 473	iowrite32(0xffff, mhba->ib_shadow);
 474	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
 475}
 476
 477static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
 478		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
 479{
 480	unsigned short tag, request_id;
 481
 482	udelay(1);
 483	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 484	request_id = p_outb_frame->request_id;
 485	tag = p_outb_frame->tag;
 486	if (tag > mhba->tag_pool.size) {
 487		dev_err(&mhba->pdev->dev, "ob frame data error\n");
 488		return -1;
 489	}
 490	if (mhba->tag_cmd[tag] == NULL) {
 491		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
 492		return -1;
 493	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
 494						mhba->request_id_enabled) {
 495			dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
 496					"cmd request ID:0x%x\n", request_id,
 497					mhba->tag_cmd[tag]->request_id);
 498			return -1;
 499	}
 500
 501	return 0;
 502}
 503
 504static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
 505			unsigned int *cur_obf, unsigned int *assign_obf_end)
 506{
 507	unsigned int ob_write, ob_write_shadow;
 508	struct mvumi_hw_regs *regs = mhba->regs;
 509
 510	do {
 511		ob_write = ioread32(regs->outb_copy_pointer);
 512		ob_write_shadow = ioread32(mhba->ob_shadow);
 513	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
 514
 515	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 516	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 517
 518	if ((ob_write & regs->cl_pointer_toggle) !=
 519			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
 520		*assign_obf_end += mhba->list_num_io;
 521	}
 522	return 0;
 523}
 524
 525static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
 526			unsigned int *cur_obf, unsigned int *assign_obf_end)
 527{
 528	unsigned int ob_write;
 529	struct mvumi_hw_regs *regs = mhba->regs;
 530
 531	ob_write = ioread32(regs->outb_read_pointer);
 532	ob_write = ioread32(regs->outb_copy_pointer);
 533	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 534	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 535	if (*assign_obf_end < *cur_obf)
 536		*assign_obf_end += mhba->list_num_io;
 537	else if (*assign_obf_end == *cur_obf)
 538		return -1;
 539	return 0;
 540}
 541
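/**
 * mvumi_receive_ob_list_entry -	Drain the outbound (response) list
 * @mhba:		Adapter soft state
 *
 * Copies each valid response frame into a buffer taken from ob_data_list,
 * queues it on free_ob_list for later completion and advances the
 * outbound read pointer.
 */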
 542static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
 543{
 544	unsigned int cur_obf, assign_obf_end, i;
 545	struct mvumi_ob_data *ob_data;
 546	struct mvumi_rsp_frame *p_outb_frame;
 547	struct mvumi_hw_regs *regs = mhba->regs;
 548
 549	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
 550		return;
 551
 552	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
 553		cur_obf++;
 554		if (cur_obf >= mhba->list_num_io) {
 555			cur_obf -= mhba->list_num_io;
 556			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 557		}
 558
 559		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 560
 561		/* Copy pointer may point to entry in outbound list
 562		*  before entry has valid data
 563		*/
 564		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
 565			mhba->tag_cmd[p_outb_frame->tag] == NULL ||
 566			p_outb_frame->request_id !=
 567				mhba->tag_cmd[p_outb_frame->tag]->request_id))
 568			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
 569				continue;
 570
 571		if (!list_empty(&mhba->ob_data_list)) {
 572			ob_data = (struct mvumi_ob_data *)
 573				list_first_entry(&mhba->ob_data_list,
 574					struct mvumi_ob_data, list);
 575			list_del_init(&ob_data->list);
 576		} else {
 577			ob_data = NULL;
 578			if (cur_obf == 0) {
 579				cur_obf = mhba->list_num_io - 1;
 580				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 581			} else
 582				cur_obf -= 1;
 583			break;
 584		}
 585
 586		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
 587		p_outb_frame->tag = 0xff;
 588
 589		list_add_tail(&ob_data->list, &mhba->free_ob_list);
 590	}
 591	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
 592	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
 593	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
 594}
 595
 596static void mvumi_reset(struct mvumi_hba *mhba)
 597{
 598	struct mvumi_hw_regs *regs = mhba->regs;
 599
 600	iowrite32(0, regs->enpointa_mask_reg);
 601	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
 602		return;
 603
 604	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
 605}
 606
 607static unsigned char mvumi_start(struct mvumi_hba *mhba);
 608
 609static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
 610{
 611	mhba->fw_state = FW_STATE_ABORT;
 612	mvumi_reset(mhba);
 613
 614	if (mvumi_start(mhba))
 615		return FAILED;
 616	else
 617		return SUCCESS;
 618}
 619
 620static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
 621{
 622	struct mvumi_hw_regs *regs = mhba->regs;
 623	u32 tmp;
 624	unsigned long before;
 625	before = jiffies;
 626
 627	iowrite32(0, regs->enpointa_mask_reg);
 628	tmp = ioread32(regs->arm_to_pciea_msg1);
 629	while (tmp != HANDSHAKE_READYSTATE) {
 630		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
 631		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
 632			dev_err(&mhba->pdev->dev,
 633				"FW reset failed [0x%x].\n", tmp);
 634			return FAILED;
 635		}
 636
 637		msleep(500);
 638		rmb();
 639		tmp = ioread32(regs->arm_to_pciea_msg1);
 640	}
 641
 642	return SUCCESS;
 643}
 644
 645static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
 646{
 647	unsigned char i;
 648
 649	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 650		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
 651						&mhba->pci_base[i]);
 652	}
 653}
 654
 655static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
 656{
 657	unsigned char i;
 658
 659	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 660		if (mhba->pci_base[i])
 661			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
 662						mhba->pci_base[i]);
 663	}
 664}
 665
 666static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
 667{
 668	unsigned int ret = 0;
 669	pci_set_master(pdev);
 670
 671	if (IS_DMA64) {
 672		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
 673			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 674	} else
 675		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 676
 677	return ret;
 678}
 679
 680static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
 681{
 682	mhba->fw_state = FW_STATE_ABORT;
 683
 684	iowrite32(0, mhba->regs->reset_enable);
 685	iowrite32(0xf, mhba->regs->reset_request);
 686
 687	iowrite32(0x10, mhba->regs->reset_enable);
 688	iowrite32(0x10, mhba->regs->reset_request);
 689	msleep(100);
 690	pci_disable_device(mhba->pdev);
 691
 692	if (pci_enable_device(mhba->pdev)) {
 693		dev_err(&mhba->pdev->dev, "enable device failed\n");
 694		return FAILED;
 695	}
 696	if (mvumi_pci_set_master(mhba->pdev)) {
 697		dev_err(&mhba->pdev->dev, "set master failed\n");
 698		return FAILED;
 699	}
 700	mvumi_restore_bar_addr(mhba);
 701	if (mvumi_wait_for_fw(mhba) == FAILED)
 702		return FAILED;
 703
 704	return mvumi_wait_for_outstanding(mhba);
 705}
 706
 707static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
 708{
 709	return mvumi_wait_for_outstanding(mhba);
 710}
 711
 712static int mvumi_host_reset(struct scsi_cmnd *scmd)
 713{
 714	struct mvumi_hba *mhba;
 715
 716	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 717
 718	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
 719			scmd->serial_number, scmd->cmnd[0], scmd->retries);
 720
 721	return mhba->instancet->reset_host(mhba);
 722}
 723
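/**
 * mvumi_issue_blocked_cmd -	Issue an internal command and wait for it
 * @mhba:		Adapter soft state
 * @cmd:		Internal command to issue
 *
 * Fires the command and waits up to MVUMI_INTERNAL_CMD_WAIT_TIME seconds
 * for completion; on timeout the tag and outstanding count are reclaimed.
 */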
 724static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
 725						struct mvumi_cmd *cmd)
 726{
 727	unsigned long flags;
 728
 729	cmd->cmd_status = REQ_STATUS_PENDING;
 730
 731	if (atomic_read(&cmd->sync_cmd)) {
 732		dev_err(&mhba->pdev->dev,
 733			"last blocked cmd not finished, sync_cmd = %d\n",
 734						atomic_read(&cmd->sync_cmd));
 735		BUG_ON(1);
 736		return -1;
 737	}
 738	atomic_inc(&cmd->sync_cmd);
 739	spin_lock_irqsave(mhba->shost->host_lock, flags);
 740	mhba->instancet->fire_cmd(mhba, cmd);
 741	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 742
 743	wait_event_timeout(mhba->int_cmd_wait_q,
 744		(cmd->cmd_status != REQ_STATUS_PENDING),
 745		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
 746
 747	/* command timeout */
 748	if (atomic_read(&cmd->sync_cmd)) {
 749		spin_lock_irqsave(mhba->shost->host_lock, flags);
 750		atomic_dec(&cmd->sync_cmd);
 751		if (mhba->tag_cmd[cmd->frame->tag]) {
 752			mhba->tag_cmd[cmd->frame->tag] = 0;
 753			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
 754							cmd->frame->tag);
 755			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
 756		}
 757		if (!list_empty(&cmd->queue_pointer)) {
 758			dev_warn(&mhba->pdev->dev,
 759				"TIMEOUT: An internal command was never sent!\n");
 760			list_del_init(&cmd->queue_pointer);
 761		} else
 762			atomic_dec(&mhba->fw_outstanding);
 763
 764		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 765	}
 766	return 0;
 767}
 768
 769static void mvumi_release_fw(struct mvumi_hba *mhba)
 770{
 771	mvumi_free_cmds(mhba);
 772	mvumi_release_mem_resource(mhba);
 773	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
 774	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
 775		mhba->handshake_page, mhba->handshake_page_phys);
 776	kfree(mhba->regs);
 777	pci_release_regions(mhba->pdev);
 778}
 779
 780static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
 781{
 782	struct mvumi_cmd *cmd;
 783	struct mvumi_msg_frame *frame;
 784	unsigned char device_id, retry = 0;
 785	unsigned char bitcount = sizeof(unsigned char) * 8;
 786
 787	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
 788		if (!(mhba->target_map[device_id / bitcount] &
 789				(1 << (device_id % bitcount))))
 790			continue;
 791get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
 792		if (!cmd) {
 793			if (retry++ >= 5) {
 794				dev_err(&mhba->pdev->dev, "failed to get memory"
 795					" for internal flush cache cmd for "
 796					"device %d", device_id);
 797				retry = 0;
 798				continue;
 799			} else
 800				goto get_cmd;
 801		}
 802		cmd->scmd = NULL;
 803		cmd->cmd_status = REQ_STATUS_PENDING;
 804		atomic_set(&cmd->sync_cmd, 0);
 805		frame = cmd->frame;
 806		frame->req_function = CL_FUN_SCSI_CMD;
 807		frame->device_id = device_id;
 808		frame->cmd_flag = CMD_FLAG_NON_DATA;
 809		frame->data_transfer_length = 0;
 810		frame->cdb_length = MAX_COMMAND_SIZE;
 811		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
 812		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
 813		frame->cdb[1] = CDB_CORE_MODULE;
 814		frame->cdb[2] = CDB_CORE_SHUTDOWN;
 815
 816		mvumi_issue_blocked_cmd(mhba, cmd);
 817		if (cmd->cmd_status != SAM_STAT_GOOD) {
 818			dev_err(&mhba->pdev->dev,
 819				"device %d flush cache failed, status=0x%x.\n",
 820				device_id, cmd->cmd_status);
 821		}
 822
 823		mvumi_delete_internal_cmd(mhba, cmd);
 824	}
 825	return 0;
 826}
 827
 828static unsigned char
 829mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
 830							unsigned short len)
 831{
 832	unsigned char *ptr;
 833	unsigned char ret = 0, i;
 834
 835	ptr = (unsigned char *) p_header->frame_content;
 836	for (i = 0; i < len; i++) {
 837		ret ^= *ptr;
 838		ptr++;
 839	}
 840
 841	return ret;
 842}
 843
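/**
 * mvumi_hs_build_page -	Fill in one handshake page for the firmware
 * @mhba:		Adapter soft state
 * @hs_header:		Page to build: host info, firmware control or
 *			circular-list info, checksummed before use
 */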
 844static void mvumi_hs_build_page(struct mvumi_hba *mhba,
 845				struct mvumi_hs_header *hs_header)
 846{
 847	struct mvumi_hs_page2 *hs_page2;
 848	struct mvumi_hs_page4 *hs_page4;
 849	struct mvumi_hs_page3 *hs_page3;
 850	u64 time;
 851	u64 local_time;
 852
 853	switch (hs_header->page_code) {
 854	case HS_PAGE_HOST_INFO:
 855		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
 856		hs_header->frame_length = sizeof(*hs_page2) - 4;
 857		memset(hs_header->frame_content, 0, hs_header->frame_length);
 858		hs_page2->host_type = 3; /* 3 means Linux */
 859		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 860			hs_page2->host_cap = 0x08;/* host dynamic source mode */
 861		hs_page2->host_ver.ver_major = VER_MAJOR;
 862		hs_page2->host_ver.ver_minor = VER_MINOR;
 863		hs_page2->host_ver.ver_oem = VER_OEM;
 864		hs_page2->host_ver.ver_build = VER_BUILD;
 865		hs_page2->system_io_bus = 0;
 866		hs_page2->slot_number = 0;
 867		hs_page2->intr_level = 0;
 868		hs_page2->intr_vector = 0;
 869		time = ktime_get_real_seconds();
 870		local_time = (time - (sys_tz.tz_minuteswest * 60));
 871		hs_page2->seconds_since1970 = local_time;
 872		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 873						hs_header->frame_length);
 874		break;
 875
 876	case HS_PAGE_FIRM_CTL:
 877		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
 878		hs_header->frame_length = sizeof(*hs_page3) - 4;
 879		memset(hs_header->frame_content, 0, hs_header->frame_length);
 880		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 881						hs_header->frame_length);
 882		break;
 883
 884	case HS_PAGE_CL_INFO:
 885		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
 886		hs_header->frame_length = sizeof(*hs_page4) - 4;
 887		memset(hs_header->frame_content, 0, hs_header->frame_length);
 888		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
 889		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
 890
 891		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
 892		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
 893		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
 894		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
 895		if (mhba->hba_capability
 896			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
 897			hs_page4->ob_depth = find_first_bit((unsigned long *)
 898							    &mhba->list_num_io,
 899							    BITS_PER_LONG);
 900			hs_page4->ib_depth = find_first_bit((unsigned long *)
 901							    &mhba->list_num_io,
 902							    BITS_PER_LONG);
 903		} else {
 904			hs_page4->ob_depth = (u8) mhba->list_num_io;
 905			hs_page4->ib_depth = (u8) mhba->list_num_io;
 906		}
 907		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 908						hs_header->frame_length);
 909		break;
 910
 911	default:
 912		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
 913			hs_header->page_code);
 914		break;
 915	}
 916}
 917
 918/**
 919 * mvumi_init_data -	Initialize requested data for FW
 920 * @mhba:			Adapter soft state
 921 */
 922static int mvumi_init_data(struct mvumi_hba *mhba)
 923{
 924	struct mvumi_ob_data *ob_pool;
 925	struct mvumi_res *res_mgnt;
 926	unsigned int tmp_size, offset, i;
 927	void *virmem, *v;
 928	dma_addr_t p;
 929
 930	if (mhba->fw_flag & MVUMI_FW_ALLOC)
 931		return 0;
 932
 933	tmp_size = mhba->ib_max_size * mhba->max_io;
 934	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 935		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 936
 937	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
 938	tmp_size += 8 + sizeof(u32)*2 + 16;
 939
 940	res_mgnt = mvumi_alloc_mem_resource(mhba,
 941					RESOURCE_UNCACHED_MEMORY, tmp_size);
 942	if (!res_mgnt) {
 943		dev_err(&mhba->pdev->dev,
 944			"failed to allocate memory for inbound list\n");
 945		goto fail_alloc_dma_buf;
 946	}
 947
 948	p = res_mgnt->bus_addr;
 949	v = res_mgnt->virt_addr;
 950	/* ib_list */
 951	offset = round_up(p, 128) - p;
 952	p += offset;
 953	v += offset;
 954	mhba->ib_list = v;
 955	mhba->ib_list_phys = p;
 956	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 957		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 958		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 959		mhba->ib_frame = v;
 960		mhba->ib_frame_phys = p;
 961	}
 962	v += mhba->ib_max_size * mhba->max_io;
 963	p += mhba->ib_max_size * mhba->max_io;
 964
 965	/* ib shadow */
 966	offset = round_up(p, 8) - p;
 967	p += offset;
 968	v += offset;
 969	mhba->ib_shadow = v;
 970	mhba->ib_shadow_phys = p;
 971	p += sizeof(u32)*2;
 972	v += sizeof(u32)*2;
 973	/* ob shadow */
 974	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
 975		offset = round_up(p, 8) - p;
 976		p += offset;
 977		v += offset;
 978		mhba->ob_shadow = v;
 979		mhba->ob_shadow_phys = p;
 980		p += 8;
 981		v += 8;
 982	} else {
 983		offset = round_up(p, 4) - p;
 984		p += offset;
 985		v += offset;
 986		mhba->ob_shadow = v;
 987		mhba->ob_shadow_phys = p;
 988		p += 4;
 989		v += 4;
 990	}
 991
 992	/* ob list */
 993	offset = round_up(p, 128) - p;
 994	p += offset;
 995	v += offset;
 996
 997	mhba->ob_list = v;
 998	mhba->ob_list_phys = p;
 999
1000	/* ob data pool */
1001	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
1002	tmp_size = round_up(tmp_size, 8);
1003
1004	res_mgnt = mvumi_alloc_mem_resource(mhba,
1005				RESOURCE_CACHED_MEMORY, tmp_size);
1006	if (!res_mgnt) {
1007		dev_err(&mhba->pdev->dev,
1008			"failed to allocate memory for outbound data buffer\n");
1009		goto fail_alloc_dma_buf;
1010	}
1011	virmem = res_mgnt->virt_addr;
1012
1013	for (i = mhba->max_io; i != 0; i--) {
1014		ob_pool = (struct mvumi_ob_data *) virmem;
1015		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
1016		virmem += mhba->ob_max_size + sizeof(*ob_pool);
1017	}
1018
1019	tmp_size = sizeof(unsigned short) * mhba->max_io +
1020				sizeof(struct mvumi_cmd *) * mhba->max_io;
1021	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
1022						(sizeof(unsigned char) * 8);
1023
1024	res_mgnt = mvumi_alloc_mem_resource(mhba,
1025				RESOURCE_CACHED_MEMORY, tmp_size);
1026	if (!res_mgnt) {
1027		dev_err(&mhba->pdev->dev,
1028			"failed to allocate memory for tag and target map\n");
1029		goto fail_alloc_dma_buf;
1030	}
1031
1032	virmem = res_mgnt->virt_addr;
1033	mhba->tag_pool.stack = virmem;
1034	mhba->tag_pool.size = mhba->max_io;
1035	tag_init(&mhba->tag_pool, mhba->max_io);
1036	virmem += sizeof(unsigned short) * mhba->max_io;
1037
1038	mhba->tag_cmd = virmem;
1039	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
1040
1041	mhba->target_map = virmem;
1042
1043	mhba->fw_flag |= MVUMI_FW_ALLOC;
1044	return 0;
1045
1046fail_alloc_dma_buf:
1047	mvumi_release_mem_resource(mhba);
1048	return -1;
1049}
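/*
 * Illustrative note (not part of the original driver): the buffer carving in
 * mvumi_init_data() pads each region up to its alignment with round_up().
 * With a made-up bus address, the arithmetic works out as:
 *
 *	p      = 0x1008;			   example address only
 *	offset = round_up(p, 128) - p;		   0x1080 - 0x1008 = 0x78
 *	p     += offset;			   inbound list now 128-byte aligned
 *
 * The shadow words are aligned to 8 bytes (4 bytes for the outbound shadow
 * on non-9580 controllers) in the same way, before the outbound list, which
 * is again aligned to 128 bytes.
 */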
1050
1051static int mvumi_hs_process_page(struct mvumi_hba *mhba,
1052				struct mvumi_hs_header *hs_header)
1053{
1054	struct mvumi_hs_page1 *hs_page1;
1055	unsigned char page_checksum;
1056
1057	page_checksum = mvumi_calculate_checksum(hs_header,
1058						hs_header->frame_length);
1059	if (page_checksum != hs_header->checksum) {
1060		dev_err(&mhba->pdev->dev, "checksum error\n");
1061		return -1;
1062	}
1063
1064	switch (hs_header->page_code) {
1065	case HS_PAGE_FIRM_CAP:
1066		hs_page1 = (struct mvumi_hs_page1 *) hs_header;
1067
1068		mhba->max_io = hs_page1->max_io_support;
1069		mhba->list_num_io = hs_page1->cl_inout_list_depth;
1070		mhba->max_transfer_size = hs_page1->max_transfer_size;
1071		mhba->max_target_id = hs_page1->max_devices_support;
1072		mhba->hba_capability = hs_page1->capability;
1073		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
1074		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
1075
1076		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
1077		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
1078
1079		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
1080						hs_page1->fw_ver.ver_build);
1081
1082		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1083			mhba->eot_flag = 22;
1084		else
1085			mhba->eot_flag = 27;
1086		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1087			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
1088		break;
1089	default:
1090		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
1091		return -1;
1092	}
1093	return 0;
1094}
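/*
 * Illustrative note (not part of the original driver): the firmware reports
 * the in/out list entry sizes as a power-of-two exponent, and the driver
 * expands them with (1 << setting) << 2.  For example, a made-up
 * cl_in_max_entry_size of 5 would give:
 *
 *	ib_max_size = (1 << 5) << 2 = 128 bytes per inbound list entry
 *
 * The raw setting itself is kept in ib_max_size_setting so it can be echoed
 * back to the firmware in handshake page 4 (HS_PAGE_CL_INFO).
 */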
1095
1096/**
1097 * mvumi_handshake -	Move the FW to READY state
1098 * @mhba:				Adapter soft state
1099 *
1100 * During initialization, the FW can be in any one of several possible
1101 * states. If the FW is in the operational or waiting-for-handshake state,
1102 * the driver must take steps to bring it to the ready state. Otherwise, it
1103 * has to wait for the ready state.
1104 */
1105static int mvumi_handshake(struct mvumi_hba *mhba)
1106{
1107	unsigned int hs_state, tmp, hs_fun;
1108	struct mvumi_hs_header *hs_header;
1109	struct mvumi_hw_regs *regs = mhba->regs;
1110
1111	if (mhba->fw_state == FW_STATE_STARTING)
1112		hs_state = HS_S_START;
1113	else {
1114		tmp = ioread32(regs->arm_to_pciea_msg0);
1115		hs_state = HS_GET_STATE(tmp);
1116		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
1117		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
1118			mhba->fw_state = FW_STATE_STARTING;
1119			return -1;
1120		}
1121	}
1122
1123	hs_fun = 0;
1124	switch (hs_state) {
1125	case HS_S_START:
1126		mhba->fw_state = FW_STATE_HANDSHAKING;
1127		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1128		HS_SET_STATE(hs_fun, HS_S_RESET);
1129		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
1130		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1131		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1132		break;
1133
1134	case HS_S_RESET:
1135		iowrite32(lower_32_bits(mhba->handshake_page_phys),
1136					regs->pciea_to_arm_msg1);
1137		iowrite32(upper_32_bits(mhba->handshake_page_phys),
1138					regs->arm_to_pciea_msg1);
1139		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1140		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
1141		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1142		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1143		break;
1144
1145	case HS_S_PAGE_ADDR:
1146	case HS_S_QUERY_PAGE:
1147	case HS_S_SEND_PAGE:
1148		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
1149		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
1150			mhba->hba_total_pages =
1151			((struct mvumi_hs_page1 *) hs_header)->total_pages;
1152
1153			if (mhba->hba_total_pages == 0)
1154				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1155		}
1156
1157		if (hs_state == HS_S_QUERY_PAGE) {
1158			if (mvumi_hs_process_page(mhba, hs_header)) {
1159				HS_SET_STATE(hs_fun, HS_S_ABORT);
1160				return -1;
1161			}
1162			if (mvumi_init_data(mhba)) {
1163				HS_SET_STATE(hs_fun, HS_S_ABORT);
1164				return -1;
1165			}
1166		} else if (hs_state == HS_S_PAGE_ADDR) {
1167			hs_header->page_code = 0;
1168			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1169		}
1170
1171		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
1172			hs_header->page_code++;
1173			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
1174				mvumi_hs_build_page(mhba, hs_header);
1175				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
1176			} else
1177				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
1178		} else
1179			HS_SET_STATE(hs_fun, HS_S_END);
1180
1181		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1182		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1183		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1184		break;
1185
1186	case HS_S_END:
1187		/* Set communication list ISR */
1188		tmp = ioread32(regs->enpointa_mask_reg);
1189		tmp |= regs->int_comaout | regs->int_comaerr;
1190		iowrite32(tmp, regs->enpointa_mask_reg);
1191		iowrite32(mhba->list_num_io, mhba->ib_shadow);
1192		/* Set InBound List Available count shadow */
1193		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1194					regs->inb_aval_count_basel);
1195		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1196					regs->inb_aval_count_baseh);
1197
1198		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1199			/* Set OutBound List Available count shadow */
1200			iowrite32((mhba->list_num_io-1) |
1201							regs->cl_pointer_toggle,
1202							mhba->ob_shadow);
1203			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1204							regs->outb_copy_basel);
1205			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1206							regs->outb_copy_baseh);
1207		}
1208
1209		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1210							regs->cl_pointer_toggle;
1211		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1212							regs->cl_pointer_toggle;
1213		mhba->fw_state = FW_STATE_STARTED;
1214
1215		break;
1216	default:
1217		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1218								hs_state);
1219		return -1;
1220	}
1221	return 0;
1222}
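/*
 * Illustrative summary (derived from the switch above, not authoritative):
 * the handshake walks roughly through
 *
 *	HS_S_START      -> announce HANDSHAKE_SIGNATURE, request HS_S_RESET
 *	HS_S_RESET      -> pass the handshake page DMA address, go to
 *	                   HS_S_PAGE_ADDR
 *	HS_S_PAGE_ADDR/
 *	HS_S_QUERY_PAGE/
 *	HS_S_SEND_PAGE  -> exchange pages; page 1 (HS_PAGE_FIRM_CAP) is read
 *	                   and processed, the remaining pages are built by
 *	                   mvumi_hs_build_page() and sent
 *	HS_S_END        -> program the list base addresses/shadows and mark
 *	                   the firmware FW_STATE_STARTED
 *
 * Every state except HS_S_END rings DRBL_HANDSHAKE on pciea_to_arm_drbl_reg,
 * and the next step is driven by the following doorbell interrupt (see
 * mvumi_handshake_event()).
 */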
1223
1224static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1225{
1226	unsigned int isr_status;
1227	unsigned long before;
1228
1229	before = jiffies;
1230	mvumi_handshake(mhba);
1231	do {
1232		isr_status = mhba->instancet->read_fw_status_reg(mhba);
1233
1234		if (mhba->fw_state == FW_STATE_STARTED)
1235			return 0;
1236		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1237			dev_err(&mhba->pdev->dev,
1238				"no handshake response at state 0x%x.\n",
1239				  mhba->fw_state);
1240			dev_err(&mhba->pdev->dev,
1241				"isr : global=0x%x,status=0x%x.\n",
1242					mhba->global_isr, isr_status);
1243			return -1;
1244		}
1245		rmb();
1246		usleep_range(1000, 2000);
1247	} while (!(isr_status & DRBL_HANDSHAKE_ISR));
1248
1249	return 0;
1250}
1251
1252static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1253{
1254	unsigned int tmp;
1255	unsigned long before;
1256
1257	before = jiffies;
1258	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1259	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1260		if (tmp != HANDSHAKE_READYSTATE)
1261			iowrite32(DRBL_MU_RESET,
1262					mhba->regs->pciea_to_arm_drbl_reg);
1263		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1264			dev_err(&mhba->pdev->dev,
1265				"invalid signature [0x%x].\n", tmp);
1266			return -1;
1267		}
1268		usleep_range(1000, 2000);
1269		rmb();
1270		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1271	}
1272
1273	mhba->fw_state = FW_STATE_STARTING;
1274	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1275	do {
1276		if (mvumi_handshake_event(mhba)) {
1277			dev_err(&mhba->pdev->dev,
1278					"handshake failed at state 0x%x.\n",
1279						mhba->fw_state);
1280			return -1;
1281		}
1282	} while (mhba->fw_state != FW_STATE_STARTED);
1283
1284	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1285
1286	return 0;
1287}
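/*
 * Illustrative note (not part of the original driver): both polling loops
 * above use the standard jiffies-based timeout idiom.  Assuming FW_MAX_DELAY
 * is a timeout expressed in seconds (defined in mvumi.h, not shown here),
 * the check reads:
 *
 *	before = jiffies;
 *	...
 *	if (time_after(jiffies, before + FW_MAX_DELAY * HZ))
 *		...	more than FW_MAX_DELAY seconds elapsed, give up
 *
 * time_after() is used instead of a plain comparison so the test stays
 * correct when the jiffies counter wraps around.
 */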
1288
1289static unsigned char mvumi_start(struct mvumi_hba *mhba)
1290{
1291	unsigned int tmp;
1292	struct mvumi_hw_regs *regs = mhba->regs;
1293
1294	/* clear Door bell */
1295	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1296	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1297
1298	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1299	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1300	iowrite32(tmp, regs->enpointa_mask_reg);
1301	msleep(100);
1302	if (mvumi_check_handshake(mhba))
1303		return -1;
1304
1305	return 0;
1306}
1307
1308/**
1309 * mvumi_complete_cmd -	Completes a command
1310 * @mhba:			Adapter soft state
1311 * @cmd:			Command to be completed
     * @ob_frame:			Response frame for the completed command
1312 */
1313static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1314					struct mvumi_rsp_frame *ob_frame)
1315{
1316	struct scsi_cmnd *scmd = cmd->scmd;
1317
1318	cmd->scmd->SCp.ptr = NULL;
1319	scmd->result = ob_frame->req_status;
1320
1321	switch (ob_frame->req_status) {
1322	case SAM_STAT_GOOD:
1323		scmd->result |= DID_OK << 16;
1324		break;
1325	case SAM_STAT_BUSY:
1326		scmd->result |= DID_BUS_BUSY << 16;
1327		break;
1328	case SAM_STAT_CHECK_CONDITION:
1329		scmd->result |= (DID_OK << 16);
1330		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1331			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1332				sizeof(struct mvumi_sense_data));
1333			scmd->result |=  (DRIVER_SENSE << 24);
1334		}
1335		break;
1336	default:
1337		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1338		break;
1339	}
1340
1341	if (scsi_bufflen(scmd))
1342		pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
1343			     scsi_sg_count(scmd),
1344			     (int) scmd->sc_data_direction);
1345	cmd->scmd->scsi_done(scmd);
1346	mvumi_return_cmd(mhba, cmd);
1347}
1348
1349static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1350						struct mvumi_cmd *cmd,
1351					struct mvumi_rsp_frame *ob_frame)
1352{
1353	if (atomic_read(&cmd->sync_cmd)) {
1354		cmd->cmd_status = ob_frame->req_status;
1355
1356		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1357				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1358				cmd->data_buf) {
1359			memcpy(cmd->data_buf, ob_frame->payload,
1360					sizeof(struct mvumi_sense_data));
1361		}
1362		atomic_dec(&cmd->sync_cmd);
1363		wake_up(&mhba->int_cmd_wait_q);
1364	}
1365}
1366
1367static void mvumi_show_event(struct mvumi_hba *mhba,
1368			struct mvumi_driver_event *ptr)
1369{
1370	unsigned int i;
1371
1372	dev_warn(&mhba->pdev->dev,
1373		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1374		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
1375	if (ptr->param_count) {
1376		printk(KERN_WARNING "Event param(len 0x%x): ",
1377						ptr->param_count);
1378		for (i = 0; i < ptr->param_count; i++)
1379			printk(KERN_CONT "0x%x ", ptr->params[i]);
1380
1381		printk(KERN_CONT "\n");
1382	}
1383
1384	if (ptr->sense_data_length) {
1385		printk(KERN_WARNING "Event sense data(len 0x%x): ",
1386						ptr->sense_data_length);
1387		for (i = 0; i < ptr->sense_data_length; i++)
1388			printk(KERN_CONT "0x%x ", ptr->sense_data[i]);
1389		printk(KERN_CONT "\n");
1390	}
1391}
1392
1393static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1394{
1395	struct scsi_device *sdev;
1396	int ret = -1;
1397
1398	if (status == DEVICE_OFFLINE) {
1399		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1400		if (sdev) {
1401			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1402								sdev->id, 0);
1403			scsi_remove_device(sdev);
1404			scsi_device_put(sdev);
1405			ret = 0;
1406		} else
1407			dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1408									devid);
1409	} else if (status == DEVICE_ONLINE) {
1410		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1411		if (!sdev) {
1412			scsi_add_device(mhba->shost, 0, devid, 0);
1413			dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1414								devid, 0);
1415			ret = 0;
1416		} else {
1417			dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
1418								0, devid, 0);
1419			scsi_device_put(sdev);
1420		}
1421	}
1422	return ret;
1423}
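/*
 * Illustrative note (not part of the original driver): scsi_device_lookup()
 * returns the device with a reference held, which is why every successful
 * lookup above is paired with scsi_device_put() once the device has been
 * removed or found to already exist.  Devices are always addressed as
 * (channel 0, target devid, lun 0) on this host.
 */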
1424
1425static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1426	unsigned int id, struct mvumi_cmd *cmd)
1427{
1428	struct mvumi_msg_frame *frame;
1429	u64 wwid = 0;
1430	int cmd_alloc = 0;
1431	int data_buf_len = 64;
1432
1433	if (!cmd) {
1434		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1435		if (cmd)
1436			cmd_alloc = 1;
1437		else
1438			return 0;
1439	} else {
1440		memset(cmd->data_buf, 0, data_buf_len);
1441	}
1442	cmd->scmd = NULL;
1443	cmd->cmd_status = REQ_STATUS_PENDING;
1444	atomic_set(&cmd->sync_cmd, 0);
1445	frame = cmd->frame;
1446	frame->device_id = (u16) id;
1447	frame->cmd_flag = CMD_FLAG_DATA_IN;
1448	frame->req_function = CL_FUN_SCSI_CMD;
1449	frame->cdb_length = 6;
1450	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1451	memset(frame->cdb, 0, frame->cdb_length);
1452	frame->cdb[0] = INQUIRY;
1453	frame->cdb[4] = frame->data_transfer_length;
1454
1455	mvumi_issue_blocked_cmd(mhba, cmd);
1456
1457	if (cmd->cmd_status == SAM_STAT_GOOD) {
1458		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1459			wwid = id + 1;
1460		else
1461			memcpy((void *)&wwid,
1462			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1463			       MVUMI_INQUIRY_UUID_LEN);
1464		dev_dbg(&mhba->pdev->dev,
1465			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1466	} else {
1467		wwid = 0;
1468	}
1469	if (cmd_alloc)
1470		mvumi_delete_internal_cmd(mhba, cmd);
1471
1472	return wwid;
1473}
1474
1475static void mvumi_detach_devices(struct mvumi_hba *mhba)
1476{
1477	struct mvumi_device *mv_dev = NULL , *dev_next;
1478	struct scsi_device *sdev = NULL;
1479
1480	mutex_lock(&mhba->device_lock);
1481
1482	/* detach Hard Disk */
1483	list_for_each_entry_safe(mv_dev, dev_next,
1484		&mhba->shost_dev_list, list) {
1485		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1486		list_del_init(&mv_dev->list);
1487		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1488			mv_dev->id, mv_dev->wwid);
1489		kfree(mv_dev);
1490	}
1491	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1492		list_del_init(&mv_dev->list);
1493		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1494			mv_dev->id, mv_dev->wwid);
1495		kfree(mv_dev);
1496	}
1497
1498	/* detach virtual device */
1499	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1500		sdev = scsi_device_lookup(mhba->shost, 0,
1501						mhba->max_target_id - 1, 0);
1502
1503	if (sdev) {
1504		scsi_remove_device(sdev);
1505		scsi_device_put(sdev);
1506	}
1507
1508	mutex_unlock(&mhba->device_lock);
1509}
1510
1511static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1512{
1513	struct scsi_device *sdev;
1514
1515	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1516	if (sdev) {
1517		scsi_rescan_device(&sdev->sdev_gendev);
1518		scsi_device_put(sdev);
1519	}
1520}
1521
1522static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1523{
1524	struct mvumi_device *mv_dev = NULL;
1525
1526	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1527		if (mv_dev->wwid == wwid) {
1528			if (mv_dev->id != id) {
1529				dev_err(&mhba->pdev->dev,
1530				"%s has same wwid[%llx],"
1531					" but different id[%d %d]\n",
1532					__func__, mv_dev->wwid, mv_dev->id, id);
1533				return -1;
1534			} else {
1535				if (mhba->pdev->device ==
1536						PCI_DEVICE_ID_MARVELL_MV9143)
1537					mvumi_rescan_devices(mhba, id);
1538				return 1;
1539			}
1540		}
1541	}
1542	return 0;
1543}
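/*
 * Illustrative note (not part of the original driver): mvumi_match_devices()
 * is effectively tri-state:
 *
 *	 1  same wwid already known at the same target id (rescanned on a 9143)
 *	 0  wwid not known yet, the caller should add it as a new device
 *	-1  same wwid reported under a different target id, treated as an
 *	    error by mvumi_probe_devices()
 */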
1544
1545static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1546{
1547	struct mvumi_device *mv_dev = NULL, *dev_next;
1548
1549	list_for_each_entry_safe(mv_dev, dev_next,
1550				&mhba->shost_dev_list, list) {
1551		if (mv_dev->id == id) {
1552			dev_dbg(&mhba->pdev->dev,
1553				"detach device(0:%d:0) wwid(%llx) from HOST\n",
1554				mv_dev->id, mv_dev->wwid);
1555			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1556			list_del_init(&mv_dev->list);
1557			kfree(mv_dev);
1558		}
1559	}
1560}
1561
1562static int mvumi_probe_devices(struct mvumi_hba *mhba)
1563{
1564	int id, maxid;
1565	u64 wwid = 0;
1566	struct mvumi_device *mv_dev = NULL;
1567	struct mvumi_cmd *cmd = NULL;
1568	int found = 0;
1569
1570	cmd = mvumi_create_internal_cmd(mhba, 64);
1571	if (!cmd)
1572		return -1;
1573
1574	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1575		maxid = mhba->max_target_id;
1576	else
1577		maxid = mhba->max_target_id - 1;
1578
1579	for (id = 0; id < maxid; id++) {
1580		wwid = mvumi_inquiry(mhba, id, cmd);
1581		if (!wwid) {
1582			/* device no response, remove it */
1583			mvumi_remove_devices(mhba, id);
1584		} else {
1585			/* device response, add it */
1586			found = mvumi_match_devices(mhba, id, wwid);
1587			if (!found) {
1588				mvumi_remove_devices(mhba, id);
1589				mv_dev = kzalloc(sizeof(struct mvumi_device),
1590								GFP_KERNEL);
1591				if (!mv_dev) {
1592					dev_err(&mhba->pdev->dev,
1593						"%s alloc mv_dev failed\n",
1594						__func__);
1595					continue;
1596				}
1597				mv_dev->id = id;
1598				mv_dev->wwid = wwid;
1599				mv_dev->sdev = NULL;
1600				INIT_LIST_HEAD(&mv_dev->list);
1601				list_add_tail(&mv_dev->list,
1602					      &mhba->mhba_dev_list);
1603				dev_dbg(&mhba->pdev->dev,
1604					"probe a new device(0:%d:0)"
1605					" wwid(%llx)\n", id, mv_dev->wwid);
1606			} else if (found == -1)
1607				return -1;
1608			else
1609				continue;
1610		}
1611	}
1612
1613	if (cmd)
1614		mvumi_delete_internal_cmd(mhba, cmd);
1615
1616	return 0;
1617}
1618
1619static int mvumi_rescan_bus(void *data)
1620{
1621	int ret = 0;
1622	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
1623	struct mvumi_device *mv_dev = NULL , *dev_next;
1624
1625	while (!kthread_should_stop()) {
1626
1627		set_current_state(TASK_INTERRUPTIBLE);
1628		if (!atomic_read(&mhba->pnp_count))
1629			schedule();
1630		msleep(1000);
1631		atomic_set(&mhba->pnp_count, 0);
1632		__set_current_state(TASK_RUNNING);
1633
1634		mutex_lock(&mhba->device_lock);
1635		ret = mvumi_probe_devices(mhba);
1636		if (!ret) {
1637			list_for_each_entry_safe(mv_dev, dev_next,
1638						 &mhba->mhba_dev_list, list) {
1639				if (mvumi_handle_hotplug(mhba, mv_dev->id,
1640							 DEVICE_ONLINE)) {
1641					dev_err(&mhba->pdev->dev,
1642						"%s add device(0:%d:0) failed,"
1643						" wwid(%llx) already exists\n",
1644						__func__,
1645						mv_dev->id, mv_dev->wwid);
1646					list_del_init(&mv_dev->list);
1647					kfree(mv_dev);
1648				} else {
1649					list_move_tail(&mv_dev->list,
1650						       &mhba->shost_dev_list);
1651				}
1652			}
1653		}
1654		mutex_unlock(&mhba->device_lock);
1655	}
1656	return 0;
1657}
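/*
 * Illustrative note (not part of the original driver): the rescan thread and
 * the interrupt path cooperate through pnp_count.  mvumi_launch_events()
 * bumps the counter and wakes the thread on a DRBL_BUS_CHANGE doorbell; the
 * thread sleeps while the counter is zero, then waits one extra second,
 * presumably so that a burst of change notifications is coalesced into a
 * single pass, clears the counter and walks all target ids with
 * mvumi_probe_devices().
 */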
1658
1659static void mvumi_proc_msg(struct mvumi_hba *mhba,
1660					struct mvumi_hotplug_event *param)
1661{
1662	u16 size = param->size;
1663	const unsigned long *ar_bitmap;
1664	const unsigned long *re_bitmap;
1665	int index;
1666
1667	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1668		index = -1;
1669		ar_bitmap = (const unsigned long *) param->bitmap;
1670		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1671
1672		mutex_lock(&mhba->sas_discovery_mutex);
1673		do {
1674			index = find_next_zero_bit(ar_bitmap, size, index + 1);
1675			if (index >= size)
1676				break;
1677			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1678		} while (1);
1679
1680		index = -1;
1681		do {
1682			index = find_next_zero_bit(re_bitmap, size, index + 1);
1683			if (index >= size)
1684				break;
1685			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1686		} while (1);
1687		mutex_unlock(&mhba->sas_discovery_mutex);
1688	}
1689}
1690
1691static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1692{
1693	if (msg == APICDB1_EVENT_GETEVENT) {
1694		int i, count;
1695		struct mvumi_driver_event *param = NULL;
1696		struct mvumi_event_req *er = buffer;
1697		count = er->count;
1698		if (count > MAX_EVENTS_RETURNED) {
1699			dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1700					" than max event count[0x%x].\n",
1701					count, MAX_EVENTS_RETURNED);
1702			return;
1703		}
1704		for (i = 0; i < count; i++) {
1705			param = &er->events[i];
1706			mvumi_show_event(mhba, param);
1707		}
1708	} else if (msg == APICDB1_HOST_GETEVENT) {
1709		mvumi_proc_msg(mhba, buffer);
1710	}
1711}
1712
1713static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1714{
1715	struct mvumi_cmd *cmd;
1716	struct mvumi_msg_frame *frame;
1717
1718	cmd = mvumi_create_internal_cmd(mhba, 512);
1719	if (!cmd)
1720		return -1;
1721	cmd->scmd = NULL;
1722	cmd->cmd_status = REQ_STATUS_PENDING;
1723	atomic_set(&cmd->sync_cmd, 0);
1724	frame = cmd->frame;
1725	frame->device_id = 0;
1726	frame->cmd_flag = CMD_FLAG_DATA_IN;
1727	frame->req_function = CL_FUN_SCSI_CMD;
1728	frame->cdb_length = MAX_COMMAND_SIZE;
1729	frame->data_transfer_length = sizeof(struct mvumi_event_req);
1730	memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1731	frame->cdb[0] = APICDB0_EVENT;
1732	frame->cdb[1] = msg;
1733	mvumi_issue_blocked_cmd(mhba, cmd);
1734
1735	if (cmd->cmd_status != SAM_STAT_GOOD)
1736		dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1737							cmd->cmd_status);
1738	else
1739		mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1740
1741	mvumi_delete_internal_cmd(mhba, cmd);
1742	return 0;
1743}
1744
1745static void mvumi_scan_events(struct work_struct *work)
1746{
1747	struct mvumi_events_wq *mu_ev =
1748		container_of(work, struct mvumi_events_wq, work_q);
1749
1750	mvumi_get_event(mu_ev->mhba, mu_ev->event);
1751	kfree(mu_ev);
1752}
1753
1754static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1755{
1756	struct mvumi_events_wq *mu_ev;
1757
1758	while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1759		if (isr_status & DRBL_BUS_CHANGE) {
1760			atomic_inc(&mhba->pnp_count);
1761			wake_up_process(mhba->dm_thread);
1762			isr_status &= ~(DRBL_BUS_CHANGE);
1763			continue;
1764		}
1765
1766		mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1767		if (mu_ev) {
1768			INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1769			mu_ev->mhba = mhba;
1770			mu_ev->event = APICDB1_EVENT_GETEVENT;
1771			isr_status &= ~(DRBL_EVENT_NOTIFY);
1772			mu_ev->param = NULL;
1773			schedule_work(&mu_ev->work_q);
1774		}
1775	}
1776}
1777
1778static void mvumi_handle_clob(struct mvumi_hba *mhba)
1779{
1780	struct mvumi_rsp_frame *ob_frame;
1781	struct mvumi_cmd *cmd;
1782	struct mvumi_ob_data *pool;
1783
1784	while (!list_empty(&mhba->free_ob_list)) {
1785		pool = list_first_entry(&mhba->free_ob_list,
1786						struct mvumi_ob_data, list);
1787		list_del_init(&pool->list);
1788		list_add_tail(&pool->list, &mhba->ob_data_list);
1789
1790		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1791		cmd = mhba->tag_cmd[ob_frame->tag];
1792
1793		atomic_dec(&mhba->fw_outstanding);
1794		mhba->tag_cmd[ob_frame->tag] = NULL;
1795		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1796		if (cmd->scmd)
1797			mvumi_complete_cmd(mhba, cmd, ob_frame);
1798		else
1799			mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1800	}
1801	mhba->instancet->fire_cmd(mhba, NULL);
1802}
1803
1804static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1805{
1806	struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1807	unsigned long flags;
1808
1809	spin_lock_irqsave(mhba->shost->host_lock, flags);
1810	if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1811		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1812		return IRQ_NONE;
1813	}
1814
1815	if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1816		if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1817			mvumi_launch_events(mhba, mhba->isr_status);
1818		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1819			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1820			mvumi_handshake(mhba);
1821		}
1822
1823	}
1824
1825	if (mhba->global_isr & mhba->regs->int_comaout)
1826		mvumi_receive_ob_list_entry(mhba);
1827
1828	mhba->global_isr = 0;
1829	mhba->isr_status = 0;
1830	if (mhba->fw_state == FW_STATE_STARTED)
1831		mvumi_handle_clob(mhba);
1832	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1833	return IRQ_HANDLED;
1834}
1835
1836static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1837						struct mvumi_cmd *cmd)
1838{
1839	void *ib_entry;
1840	struct mvumi_msg_frame *ib_frame;
1841	unsigned int frame_len;
1842
1843	ib_frame = cmd->frame;
1844	if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1845		dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1846		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1847	}
1848	if (tag_is_empty(&mhba->tag_pool)) {
1849		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1850		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1851	}
1852	mvumi_get_ib_list_entry(mhba, &ib_entry);
1853
1854	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1855	cmd->frame->request_id = mhba->io_seq++;
1856	cmd->request_id = cmd->frame->request_id;
1857	mhba->tag_cmd[cmd->frame->tag] = cmd;
1858	frame_len = sizeof(*ib_frame) - 4 +
1859				ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1860	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1861		struct mvumi_dyn_list_entry *dle;
1862		dle = ib_entry;
1863		dle->src_low_addr =
1864			cpu_to_le32(lower_32_bits(cmd->frame_phys));
1865		dle->src_high_addr =
1866			cpu_to_le32(upper_32_bits(cmd->frame_phys));
1867		dle->if_length = (frame_len >> 2) & 0xFFF;
1868	} else {
1869		memcpy(ib_entry, ib_frame, frame_len);
1870	}
1871	return MV_QUEUE_COMMAND_RESULT_SENT;
1872}
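/*
 * Illustrative note (not part of the original driver): frame_len above is
 * the message header plus one mvumi_sgl per scatter entry; the "- 4"
 * presumably excludes a placeholder payload field declared in the frame
 * structure (mvumi.h, not shown here).  When the firmware supports dynamic
 * source entries, only a descriptor goes into the inbound list; e.g. for a
 * made-up frame_len of 64 bytes:
 *
 *	dle->if_length = (64 >> 2) & 0xFFF = 16	   length in 32-bit words
 *
 * and the firmware fetches the frame itself from cmd->frame_phys.
 */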
1873
1874static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1875{
1876	unsigned short num_of_cl_sent = 0;
1877	unsigned int count;
1878	enum mvumi_qc_result result;
1879
1880	if (cmd)
1881		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1882	count = mhba->instancet->check_ib_list(mhba);
1883	if (list_empty(&mhba->waiting_req_list) || !count)
1884		return;
1885
1886	do {
1887		cmd = list_first_entry(&mhba->waiting_req_list,
1888				       struct mvumi_cmd, queue_pointer);
1889		list_del_init(&cmd->queue_pointer);
1890		result = mvumi_send_command(mhba, cmd);
1891		switch (result) {
1892		case MV_QUEUE_COMMAND_RESULT_SENT:
1893			num_of_cl_sent++;
1894			break;
1895		case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1896			list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1897			if (num_of_cl_sent > 0)
1898				mvumi_send_ib_list_entry(mhba);
1899
1900			return;
1901		}
1902	} while (!list_empty(&mhba->waiting_req_list) && count--);
1903
1904	if (num_of_cl_sent > 0)
1905		mvumi_send_ib_list_entry(mhba);
1906}
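/*
 * Illustrative note (not part of the original driver): commands are first
 * queued on waiting_req_list and then drained here in a batch.  Only one
 * doorbell (mvumi_send_ib_list_entry()) is issued per batch, after the last
 * command has been copied into the inbound list, rather than one per
 * command.
 */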
1907
1908/**
1909 * mvumi_enable_intr -	Enables interrupts
1910 * @mhba:		Adapter soft state
1911 */
1912static void mvumi_enable_intr(struct mvumi_hba *mhba)
1913{
1914	unsigned int mask;
1915	struct mvumi_hw_regs *regs = mhba->regs;
1916
1917	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1918	mask = ioread32(regs->enpointa_mask_reg);
1919	mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1920	iowrite32(mask, regs->enpointa_mask_reg);
1921}
1922
1923/**
1924 * mvumi_disable_intr -	Disables interrupts
1925 * @mhba:		Adapter soft state
1926 */
1927static void mvumi_disable_intr(struct mvumi_hba *mhba)
1928{
1929	unsigned int mask;
1930	struct mvumi_hw_regs *regs = mhba->regs;
1931
1932	iowrite32(0, regs->arm_to_pciea_mask_reg);
1933	mask = ioread32(regs->enpointa_mask_reg);
1934	mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1935							regs->int_comaerr);
1936	iowrite32(mask, regs->enpointa_mask_reg);
1937}
1938
1939static int mvumi_clear_intr(void *extend)
1940{
1941	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1942	unsigned int status, isr_status = 0, tmp = 0;
1943	struct mvumi_hw_regs *regs = mhba->regs;
1944
1945	status = ioread32(regs->main_int_cause_reg);
1946	if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1947		return 1;
1948	if (unlikely(status & regs->int_comaerr)) {
1949		tmp = ioread32(regs->outb_isr_cause);
1950		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1951			if (tmp & regs->clic_out_err) {
1952				iowrite32(tmp & regs->clic_out_err,
1953							regs->outb_isr_cause);
1954			}
1955		} else {
1956			if (tmp & (regs->clic_in_err | regs->clic_out_err))
1957				iowrite32(tmp & (regs->clic_in_err |
1958						regs->clic_out_err),
1959						regs->outb_isr_cause);
1960		}
1961		status ^= mhba->regs->int_comaerr;
1962		/* inbound or outbound parity error, command will timeout */
1963	}
1964	if (status & regs->int_comaout) {
1965		tmp = ioread32(regs->outb_isr_cause);
1966		if (tmp & regs->clic_irq)
1967			iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1968	}
1969	if (status & regs->int_dl_cpu2pciea) {
1970		isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1971		if (isr_status)
1972			iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1973	}
1974
1975	mhba->global_isr = status;
1976	mhba->isr_status = isr_status;
1977
1978	return 0;
1979}
1980
1981/**
1982 * mvumi_read_fw_status_reg - returns the current FW status value
1983 * @mhba:		Adapter soft state
1984 */
1985static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
1986{
1987	unsigned int status;
1988
1989	status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
1990	if (status)
1991		iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
1992	return status;
1993}
1994
1995static struct mvumi_instance_template mvumi_instance_9143 = {
1996	.fire_cmd = mvumi_fire_cmd,
1997	.enable_intr = mvumi_enable_intr,
1998	.disable_intr = mvumi_disable_intr,
1999	.clear_intr = mvumi_clear_intr,
2000	.read_fw_status_reg = mvumi_read_fw_status_reg,
2001	.check_ib_list = mvumi_check_ib_list_9143,
2002	.check_ob_list = mvumi_check_ob_list_9143,
2003	.reset_host = mvumi_reset_host_9143,
2004};
2005
2006static struct mvumi_instance_template mvumi_instance_9580 = {
2007	.fire_cmd = mvumi_fire_cmd,
2008	.enable_intr = mvumi_enable_intr,
2009	.disable_intr = mvumi_disable_intr,
2010	.clear_intr = mvumi_clear_intr,
2011	.read_fw_status_reg = mvumi_read_fw_status_reg,
2012	.check_ib_list = mvumi_check_ib_list_9580,
2013	.check_ob_list = mvumi_check_ob_list_9580,
2014	.reset_host = mvumi_reset_host_9580,
2015};
2016
2017static int mvumi_slave_configure(struct scsi_device *sdev)
2018{
2019	struct mvumi_hba *mhba;
2020	unsigned char bitcount = sizeof(unsigned char) * 8;
2021
2022	mhba = (struct mvumi_hba *) sdev->host->hostdata;
2023	if (sdev->id >= mhba->max_target_id)
2024		return -EINVAL;
2025
2026	mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2027	return 0;
2028}
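/*
 * Illustrative note (not part of the original driver): target_map is a
 * plain byte-wise bitmap of configured target ids.  With bitcount == 8, a
 * made-up sdev->id of 10 sets:
 *
 *	target_map[10 / 8] |= 1 << (10 % 8);	   i.e. target_map[1] |= 0x04
 */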
2029
2030/**
2031 * mvumi_build_frame -	Prepares a direct cdb (DCDB) command
2032 * @mhba:		Adapter soft state
2033 * @scmd:		SCSI command
2034 * @cmd:		Command to be prepared in
2035 *
2036 * This function prepares CDB commands. These are typically pass-through
2037 * commands to the devices.
2038 */
2039static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
2040				struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
2041{
2042	struct mvumi_msg_frame *pframe;
2043
2044	cmd->scmd = scmd;
2045	cmd->cmd_status = REQ_STATUS_PENDING;
2046	pframe = cmd->frame;
2047	pframe->device_id = ((unsigned short) scmd->device->id) |
2048				(((unsigned short) scmd->device->lun) << 8);
2049	pframe->cmd_flag = 0;
2050
2051	switch (scmd->sc_data_direction) {
2052	case DMA_NONE:
2053		pframe->cmd_flag |= CMD_FLAG_NON_DATA;
2054		break;
2055	case DMA_FROM_DEVICE:
2056		pframe->cmd_flag |= CMD_FLAG_DATA_IN;
2057		break;
2058	case DMA_TO_DEVICE:
2059		pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
2060		break;
2061	case DMA_BIDIRECTIONAL:
2062	default:
2063		dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2064			"cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2065		goto error;
2066	}
2067
2068	pframe->cdb_length = scmd->cmd_len;
2069	memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
2070	pframe->req_function = CL_FUN_SCSI_CMD;
2071	if (scsi_bufflen(scmd)) {
2072		if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2073			&pframe->sg_counts))
2074			goto error;
2075
2076		pframe->data_transfer_length = scsi_bufflen(scmd);
2077	} else {
2078		pframe->sg_counts = 0;
2079		pframe->data_transfer_length = 0;
2080	}
2081	return 0;
2082
2083error:
2084	scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
2085		SAM_STAT_CHECK_CONDITION;
2086	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
2087									0);
2088	return -1;
2089}
2090
2091/**
2092 * mvumi_queue_command -	Queue entry point
2093 * @shost:			Scsi Host to which the command is queued
2094 * @scmd:			SCSI command to be queued
2095 */
2096static int mvumi_queue_command(struct Scsi_Host *shost,
2097					struct scsi_cmnd *scmd)
2098{
2099	struct mvumi_cmd *cmd;
2100	struct mvumi_hba *mhba;
2101	unsigned long irq_flags;
2102
2103	spin_lock_irqsave(shost->host_lock, irq_flags);
2104	scsi_cmd_get_serial(shost, scmd);
2105
2106	mhba = (struct mvumi_hba *) shost->hostdata;
2107	scmd->result = 0;
2108	cmd = mvumi_get_cmd(mhba);
2109	if (unlikely(!cmd)) {
2110		spin_unlock_irqrestore(shost->host_lock, irq_flags);
2111		return SCSI_MLQUEUE_HOST_BUSY;
2112	}
2113
2114	if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2115		goto out_return_cmd;
2116
2117	cmd->scmd = scmd;
2118	scmd->SCp.ptr = (char *) cmd;
2119	mhba->instancet->fire_cmd(mhba, cmd);
2120	spin_unlock_irqrestore(shost->host_lock, irq_flags);
2121	return 0;
2122
2123out_return_cmd:
2124	mvumi_return_cmd(mhba, cmd);
2125	scmd->scsi_done(scmd);
2126	spin_unlock_irqrestore(shost->host_lock, irq_flags);
2127	return 0;
2128}
2129
2130static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
2131{
2132	struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
2133	struct Scsi_Host *host = scmd->device->host;
2134	struct mvumi_hba *mhba = shost_priv(host);
2135	unsigned long flags;
2136
2137	spin_lock_irqsave(mhba->shost->host_lock, flags);
2138
2139	if (mhba->tag_cmd[cmd->frame->tag]) {
2140		mhba->tag_cmd[cmd->frame->tag] = NULL;
2141		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2142	}
2143	if (!list_empty(&cmd->queue_pointer))
2144		list_del_init(&cmd->queue_pointer);
2145	else
2146		atomic_dec(&mhba->fw_outstanding);
2147
2148	scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
2149	scmd->SCp.ptr = NULL;
2150	if (scsi_bufflen(scmd)) {
2151		pci_unmap_sg(mhba->pdev, scsi_sglist(scmd),
2152			     scsi_sg_count(scmd),
2153			     (int)scmd->sc_data_direction);
2154	}
2155	mvumi_return_cmd(mhba, cmd);
2156	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2157
2158	return BLK_EH_NOT_HANDLED;
2159}
2160
2161static int
2162mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2163			sector_t capacity, int geom[])
2164{
2165	int heads, sectors;
2166	sector_t cylinders;
2167	unsigned long tmp;
2168
2169	heads = 64;
2170	sectors = 32;
2171	tmp = heads * sectors;
2172	cylinders = capacity;
2173	sector_div(cylinders, tmp);
2174
2175	if (capacity >= 0x200000) {
2176		heads = 255;
2177		sectors = 63;
2178		tmp = heads * sectors;
2179		cylinders = capacity;
2180		sector_div(cylinders, tmp);
2181	}
2182	geom[0] = heads;
2183	geom[1] = sectors;
2184	geom[2] = cylinders;
2185
2186	return 0;
2187}
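/*
 * Illustrative note (not part of the original driver): the reported BIOS
 * geometry is the usual 64/32 mapping for small disks and 255/63 for disks
 * of 0x200000 sectors (1 GiB at 512 bytes/sector) or more.  For example, a
 * made-up capacity of exactly 0x200000 sectors gives:
 *
 *	heads = 255, sectors = 63
 *	cylinders = 2097152 / (255 * 63) = 130	   (integer division)
 */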
2188
2189static struct scsi_host_template mvumi_template = {
2190
2191	.module = THIS_MODULE,
2192	.name = "Marvell Storage Controller",
2193	.slave_configure = mvumi_slave_configure,
2194	.queuecommand = mvumi_queue_command,
2195	.eh_timed_out = mvumi_timed_out,
2196	.eh_host_reset_handler = mvumi_host_reset,
2197	.bios_param = mvumi_bios_param,
2198	.this_id = -1,
2199};
2200
2201static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2202{
2203	void *base = NULL;
2204	struct mvumi_hw_regs *regs;
2205
2206	switch (mhba->pdev->device) {
2207	case PCI_DEVICE_ID_MARVELL_MV9143:
2208		mhba->mmio = mhba->base_addr[0];
2209		base = mhba->mmio;
2210		if (!mhba->regs) {
2211			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2212			if (mhba->regs == NULL)
2213				return -ENOMEM;
2214		}
2215		regs = mhba->regs;
2216
2217		/* For Arm */
2218		regs->ctrl_sts_reg          = base + 0x20104;
2219		regs->rstoutn_mask_reg      = base + 0x20108;
2220		regs->sys_soft_rst_reg      = base + 0x2010C;
2221		regs->main_int_cause_reg    = base + 0x20200;
2222		regs->enpointa_mask_reg     = base + 0x2020C;
2223		regs->rstoutn_en_reg        = base + 0xF1400;
2224		/* For Doorbell */
2225		regs->pciea_to_arm_drbl_reg = base + 0x20400;
2226		regs->arm_to_pciea_drbl_reg = base + 0x20408;
2227		regs->arm_to_pciea_mask_reg = base + 0x2040C;
2228		regs->pciea_to_arm_msg0     = base + 0x20430;
2229		regs->pciea_to_arm_msg1     = base + 0x20434;
2230		regs->arm_to_pciea_msg0     = base + 0x20438;
2231		regs->arm_to_pciea_msg1     = base + 0x2043C;
2232
2233		/* For Message Unit */
2234
2235		regs->inb_aval_count_basel  = base + 0x508;
2236		regs->inb_aval_count_baseh  = base + 0x50C;
2237		regs->inb_write_pointer     = base + 0x518;
2238		regs->inb_read_pointer      = base + 0x51C;
2239		regs->outb_coal_cfg         = base + 0x568;
2240		regs->outb_copy_basel       = base + 0x5B0;
2241		regs->outb_copy_baseh       = base + 0x5B4;
2242		regs->outb_copy_pointer     = base + 0x544;
2243		regs->outb_read_pointer     = base + 0x548;
2244		regs->outb_isr_cause        = base + 0x560;
2245		regs->outb_coal_cfg         = base + 0x568;
2246		/* Bit setting for HW */
2247		regs->int_comaout           = 1 << 8;
2248		regs->int_comaerr           = 1 << 6;
2249		regs->int_dl_cpu2pciea      = 1 << 1;
2250		regs->cl_pointer_toggle     = 1 << 12;
2251		regs->clic_irq              = 1 << 1;
2252		regs->clic_in_err           = 1 << 8;
2253		regs->clic_out_err          = 1 << 12;
2254		regs->cl_slot_num_mask      = 0xFFF;
2255		regs->int_drbl_int_mask     = 0x3FFFFFFF;
2256		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2257							regs->int_comaerr;
2258		break;
2259	case PCI_DEVICE_ID_MARVELL_MV9580:
2260		mhba->mmio = mhba->base_addr[2];
2261		base = mhba->mmio;
2262		if (!mhba->regs) {
2263			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2264			if (mhba->regs == NULL)
2265				return -ENOMEM;
2266		}
2267		regs = mhba->regs;
2268		/* For Arm */
2269		regs->ctrl_sts_reg          = base + 0x20104;
2270		regs->rstoutn_mask_reg      = base + 0x1010C;
2271		regs->sys_soft_rst_reg      = base + 0x10108;
2272		regs->main_int_cause_reg    = base + 0x10200;
2273		regs->enpointa_mask_reg     = base + 0x1020C;
2274		regs->rstoutn_en_reg        = base + 0xF1400;
2275
2276		/* For Doorbell */
2277		regs->pciea_to_arm_drbl_reg = base + 0x10460;
2278		regs->arm_to_pciea_drbl_reg = base + 0x10480;
2279		regs->arm_to_pciea_mask_reg = base + 0x10484;
2280		regs->pciea_to_arm_msg0     = base + 0x10400;
2281		regs->pciea_to_arm_msg1     = base + 0x10404;
2282		regs->arm_to_pciea_msg0     = base + 0x10420;
2283		regs->arm_to_pciea_msg1     = base + 0x10424;
2284
2285		/* For reset*/
2286		regs->reset_request         = base + 0x10108;
2287		regs->reset_enable          = base + 0x1010c;
2288
2289		/* For Message Unit */
2290		regs->inb_aval_count_basel  = base + 0x4008;
2291		regs->inb_aval_count_baseh  = base + 0x400C;
2292		regs->inb_write_pointer     = base + 0x4018;
2293		regs->inb_read_pointer      = base + 0x401C;
2294		regs->outb_copy_basel       = base + 0x4058;
2295		regs->outb_copy_baseh       = base + 0x405C;
2296		regs->outb_copy_pointer     = base + 0x406C;
2297		regs->outb_read_pointer     = base + 0x4070;
2298		regs->outb_coal_cfg         = base + 0x4080;
2299		regs->outb_isr_cause        = base + 0x4088;
2300		/* Bit setting for HW */
2301		regs->int_comaout           = 1 << 4;
2302		regs->int_dl_cpu2pciea      = 1 << 12;
2303		regs->int_comaerr           = 1 << 29;
2304		regs->cl_pointer_toggle     = 1 << 14;
2305		regs->cl_slot_num_mask      = 0x3FFF;
2306		regs->clic_irq              = 1 << 0;
2307		regs->clic_out_err          = 1 << 1;
2308		regs->int_drbl_int_mask     = 0x3FFFFFFF;
2309		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2310		break;
2311	default:
2312		return -1;
2314	}
2315
2316	return 0;
2317}
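/*
 * Illustrative note (not part of the original driver): the two supported
 * controllers expose the same logical register set at different locations.
 * The 9143 maps everything through BAR 0 (doorbells around offset 0x20400,
 * message unit around 0x500), while the 9580 uses BAR 2 (doorbells around
 * 0x10400, message unit around 0x4000) and additionally fills in the
 * reset_request/reset_enable registers.
 */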
2318
2319/**
2320 * mvumi_init_fw -	Initializes the FW
2321 * @mhba:		Adapter soft state
2322 *
2323 * This is the main function for initializing firmware.
2324 */
2325static int mvumi_init_fw(struct mvumi_hba *mhba)
2326{
2327	int ret = 0;
2328
2329	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2330		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2331		return -EBUSY;
2332	}
2333	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2334	if (ret)
2335		goto fail_ioremap;
2336
2337	switch (mhba->pdev->device) {
2338	case PCI_DEVICE_ID_MARVELL_MV9143:
2339		mhba->instancet = &mvumi_instance_9143;
2340		mhba->io_seq = 0;
2341		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2342		mhba->request_id_enabled = 1;
2343		break;
2344	case PCI_DEVICE_ID_MARVELL_MV9580:
2345		mhba->instancet = &mvumi_instance_9580;
2346		mhba->io_seq = 0;
2347		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2348		break;
2349	default:
2350		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2351							mhba->pdev->device);
2352		mhba->instancet = NULL;
2353		ret = -EINVAL;
2354		goto fail_alloc_mem;
2355	}
2356	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
2357							mhba->pdev->device);
2358	ret = mvumi_cfg_hw_reg(mhba);
2359	if (ret) {
2360		dev_err(&mhba->pdev->dev,
2361			"failed to allocate memory for reg\n");
2362		ret = -ENOMEM;
2363		goto fail_alloc_mem;
2364	}
2365	mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
2366						&mhba->handshake_page_phys);
2367	if (!mhba->handshake_page) {
2368		dev_err(&mhba->pdev->dev,
2369			"failed to allocate memory for handshake\n");
2370		ret = -ENOMEM;
2371		goto fail_alloc_page;
2372	}
2373
2374	if (mvumi_start(mhba)) {
2375		ret = -EINVAL;
2376		goto fail_ready_state;
2377	}
2378	ret = mvumi_alloc_cmds(mhba);
2379	if (ret)
2380		goto fail_ready_state;
2381
2382	return 0;
2383
2384fail_ready_state:
2385	mvumi_release_mem_resource(mhba);
2386	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
2387		mhba->handshake_page, mhba->handshake_page_phys);
2388fail_alloc_page:
2389	kfree(mhba->regs);
2390fail_alloc_mem:
2391	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2392fail_ioremap:
2393	pci_release_regions(mhba->pdev);
2394
2395	return ret;
2396}
2397
2398/**
2399 * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
2400 * @mhba:		Adapter soft state
2401 */
2402static int mvumi_io_attach(struct mvumi_hba *mhba)
2403{
2404	struct Scsi_Host *host = mhba->shost;
2405	struct scsi_device *sdev = NULL;
2406	int ret;
2407	unsigned int max_sg = (mhba->ib_max_size + 4 -
2408		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
2409
2410	host->irq = mhba->pdev->irq;
2411	host->unique_id = mhba->unique_id;
2412	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2413	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2414	host->max_sectors = mhba->max_transfer_size / 512;
2415	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2416	host->max_id = mhba->max_target_id;
2417	host->max_cmd_len = MAX_COMMAND_SIZE;
2418
2419	ret = scsi_add_host(host, &mhba->pdev->dev);
2420	if (ret) {
2421		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2422		return ret;
2423	}
2424	mhba->fw_flag |= MVUMI_FW_ATTACH;
2425
2426	mutex_lock(&mhba->sas_discovery_mutex);
2427	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2428		ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2429	else
2430		ret = 0;
2431	if (ret) {
2432		dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2433		mutex_unlock(&mhba->sas_discovery_mutex);
2434		goto fail_add_device;
2435	}
2436
2437	mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2438						mhba, "mvumi_scanthread");
2439	if (IS_ERR(mhba->dm_thread)) {
2440		dev_err(&mhba->pdev->dev,
2441			"failed to create device scan thread\n");
2442		mutex_unlock(&mhba->sas_discovery_mutex);
2443		goto fail_create_thread;
2444	}
2445	atomic_set(&mhba->pnp_count, 1);
2446	wake_up_process(mhba->dm_thread);
2447
2448	mutex_unlock(&mhba->sas_discovery_mutex);
2449	return 0;
2450
2451fail_create_thread:
2452	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2453		sdev = scsi_device_lookup(mhba->shost, 0,
2454						mhba->max_target_id - 1, 0);
2455	if (sdev) {
2456		scsi_remove_device(sdev);
2457		scsi_device_put(sdev);
2458	}
2459fail_add_device:
2460	scsi_remove_host(mhba->shost);
2461	return ret;
2462}
2463
2464/**
2465 * mvumi_probe_one -	PCI hotplug entry point
2466 * @pdev:		PCI device structure
2467 * @id:			PCI ids of supported hotplugged adapter
2468 */
2469static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2470{
2471	struct Scsi_Host *host;
2472	struct mvumi_hba *mhba;
2473	int ret;
2474
2475	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2476			pdev->vendor, pdev->device, pdev->subsystem_vendor,
2477			pdev->subsystem_device);
2478
2479	ret = pci_enable_device(pdev);
2480	if (ret)
2481		return ret;
2482
2483	pci_set_master(pdev);
2484
2485	if (IS_DMA64) {
2486		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2487		if (ret) {
2488			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2489			if (ret)
2490				goto fail_set_dma_mask;
2491		}
2492	} else {
2493		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2494		if (ret)
2495			goto fail_set_dma_mask;
2496	}
2497
2498	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2499	if (!host) {
2500		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
2501		ret = -ENOMEM;
2502		goto fail_alloc_instance;
2503	}
2504	mhba = shost_priv(host);
2505
2506	INIT_LIST_HEAD(&mhba->cmd_pool);
2507	INIT_LIST_HEAD(&mhba->ob_data_list);
2508	INIT_LIST_HEAD(&mhba->free_ob_list);
2509	INIT_LIST_HEAD(&mhba->res_list);
2510	INIT_LIST_HEAD(&mhba->waiting_req_list);
2511	mutex_init(&mhba->device_lock);
2512	INIT_LIST_HEAD(&mhba->mhba_dev_list);
2513	INIT_LIST_HEAD(&mhba->shost_dev_list);
2514	atomic_set(&mhba->fw_outstanding, 0);
2515	init_waitqueue_head(&mhba->int_cmd_wait_q);
2516	mutex_init(&mhba->sas_discovery_mutex);
2517
2518	mhba->pdev = pdev;
2519	mhba->shost = host;
2520	mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
2521
2522	ret = mvumi_init_fw(mhba);
2523	if (ret)
2524		goto fail_init_fw;
2525
2526	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2527				"mvumi", mhba);
2528	if (ret) {
2529		dev_err(&pdev->dev, "failed to register IRQ\n");
2530		goto fail_init_irq;
2531	}
2532
2533	mhba->instancet->enable_intr(mhba);
2534	pci_set_drvdata(pdev, mhba);
2535
2536	ret = mvumi_io_attach(mhba);
2537	if (ret)
2538		goto fail_io_attach;
2539
2540	mvumi_backup_bar_addr(mhba);
2541	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
2542
2543	return 0;
2544
2545fail_io_attach:
2546	mhba->instancet->disable_intr(mhba);
2547	free_irq(mhba->pdev->irq, mhba);
2548fail_init_irq:
2549	mvumi_release_fw(mhba);
2550fail_init_fw:
2551	scsi_host_put(host);
2552
2553fail_alloc_instance:
2554fail_set_dma_mask:
2555	pci_disable_device(pdev);
2556
2557	return ret;
2558}
2559
2560static void mvumi_detach_one(struct pci_dev *pdev)
2561{
2562	struct Scsi_Host *host;
2563	struct mvumi_hba *mhba;
2564
2565	mhba = pci_get_drvdata(pdev);
2566	if (mhba->dm_thread) {
2567		kthread_stop(mhba->dm_thread);
2568		mhba->dm_thread = NULL;
2569	}
2570
2571	mvumi_detach_devices(mhba);
2572	host = mhba->shost;
2573	scsi_remove_host(mhba->shost);
2574	mvumi_flush_cache(mhba);
2575
2576	mhba->instancet->disable_intr(mhba);
2577	free_irq(mhba->pdev->irq, mhba);
2578	mvumi_release_fw(mhba);
2579	scsi_host_put(host);
2580	pci_disable_device(pdev);
2581	dev_dbg(&pdev->dev, "driver is removed!\n");
2582}
2583
2584/**
2585 * mvumi_shutdown -	Shutdown entry point
2586 * @pdev:		PCI device structure
2587 */
2588static void mvumi_shutdown(struct pci_dev *pdev)
2589{
2590	struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2591
2592	mvumi_flush_cache(mhba);
2593}
2594
2595static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2596{
2597	struct mvumi_hba *mhba = NULL;
2598
2599	mhba = pci_get_drvdata(pdev);
2600	mvumi_flush_cache(mhba);
2601
2602	pci_set_drvdata(pdev, mhba);
2603	mhba->instancet->disable_intr(mhba);
2604	free_irq(mhba->pdev->irq, mhba);
2605	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2606	pci_release_regions(pdev);
2607	pci_save_state(pdev);
2608	pci_disable_device(pdev);
2609	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2610
2611	return 0;
2612}
2613
2614static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
2615{
2616	int ret;
2617	struct mvumi_hba *mhba = NULL;
2618
2619	mhba = pci_get_drvdata(pdev);
2620
2621	pci_set_power_state(pdev, PCI_D0);
2622	pci_enable_wake(pdev, PCI_D0, 0);
2623	pci_restore_state(pdev);
2624
2625	ret = pci_enable_device(pdev);
2626	if (ret) {
2627		dev_err(&pdev->dev, "enable device failed\n");
2628		return ret;
2629	}
2630	pci_set_master(pdev);
2631	if (IS_DMA64) {
2632		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2633		if (ret) {
2634			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2635			if (ret)
2636				goto fail;
2637		}
2638	} else {
2639		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2640		if (ret)
2641			goto fail;
2642	}
2643	ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2644	if (ret)
2645		goto fail;
2646	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2647	if (ret)
2648		goto release_regions;
2649
2650	if (mvumi_cfg_hw_reg(mhba)) {
2651		ret = -EINVAL;
2652		goto unmap_pci_addr;
2653	}
2654
2655	mhba->mmio = mhba->base_addr[0];
2656	mvumi_reset(mhba);
2657
2658	if (mvumi_start(mhba)) {
2659		ret = -EINVAL;
2660		goto unmap_pci_addr;
2661	}
2662
2663	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2664				"mvumi", mhba);
2665	if (ret) {
2666		dev_err(&pdev->dev, "failed to register IRQ\n");
2667		goto unmap_pci_addr;
2668	}
2669	mhba->instancet->enable_intr(mhba);
2670
2671	return 0;
2672
2673unmap_pci_addr:
2674	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2675release_regions:
2676	pci_release_regions(pdev);
2677fail:
2678	pci_disable_device(pdev);
2679
2680	return ret;
2681}
2682
2683static struct pci_driver mvumi_pci_driver = {
2684
2685	.name = MV_DRIVER_NAME,
2686	.id_table = mvumi_pci_table,
2687	.probe = mvumi_probe_one,
2688	.remove = mvumi_detach_one,
2689	.shutdown = mvumi_shutdown,
2690#ifdef CONFIG_PM
2691	.suspend = mvumi_suspend,
2692	.resume = mvumi_resume,
2693#endif
2694};
2695
2696/**
2697 * mvumi_init - Driver load entry point
2698 */
2699static int __init mvumi_init(void)
2700{
2701	return pci_register_driver(&mvumi_pci_driver);
2702}
2703
2704/**
2705 * mvumi_exit - Driver unload entry point
2706 */
2707static void __exit mvumi_exit(void)
2708{
2709
2710	pci_unregister_driver(&mvumi_pci_driver);
2711}
2712
2713module_init(mvumi_init);
2714module_exit(mvumi_exit);