   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Marvell UMI driver
   4 *
   5 * Copyright 2011 Marvell. <jyli@marvell.com>
   6*/
   7
   8#include <linux/kernel.h>
   9#include <linux/module.h>
  10#include <linux/moduleparam.h>
  11#include <linux/init.h>
  12#include <linux/device.h>
  13#include <linux/pci.h>
  14#include <linux/list.h>
  15#include <linux/spinlock.h>
  16#include <linux/interrupt.h>
  17#include <linux/delay.h>
  18#include <linux/ktime.h>
  19#include <linux/blkdev.h>
  20#include <linux/io.h>
  21#include <scsi/scsi.h>
  22#include <scsi/scsi_cmnd.h>
  23#include <scsi/scsi_device.h>
  24#include <scsi/scsi_host.h>
  25#include <scsi/scsi_transport.h>
  26#include <scsi/scsi_eh.h>
  27#include <linux/uaccess.h>
  28#include <linux/kthread.h>
  29
  30#include "mvumi.h"
  31
  32MODULE_LICENSE("GPL");
  33MODULE_AUTHOR("jyli@marvell.com");
  34MODULE_DESCRIPTION("Marvell UMI Driver");
  35
  36static const struct pci_device_id mvumi_pci_table[] = {
  37	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
  38	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
  39	{ 0 }
  40};
  41
  42MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
  43
  44static void tag_init(struct mvumi_tag *st, unsigned short size)
  45{
  46	unsigned short i;
  47	BUG_ON(size != st->size);
  48	st->top = size;
  49	for (i = 0; i < size; i++)
  50		st->stack[i] = size - 1 - i;
  51}
  52
  53static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
  54{
  55	BUG_ON(st->top <= 0);
  56	return st->stack[--st->top];
  57}
  58
  59static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
  60							unsigned short tag)
  61{
  62	BUG_ON(st->top >= st->size);
  63	st->stack[st->top++] = tag;
  64}
  65
  66static bool tag_is_empty(struct mvumi_tag *st)
  67{
  68	if (st->top == 0)
  69		return true;
  70	else
  71		return false;
  72}
  73
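/*
 * Illustrative note (not part of the original source): the tag pool above is a
 * simple LIFO stack.  tag_init() fills stack[] with the values size-1 .. 0 and
 * sets top = size, tag_get_one() pops stack[--top], and tag_release_one()
 * pushes the tag back at stack[top++].  A rough usage sketch, mirroring how
 * the driver pairs tags with commands:
 *
 *	unsigned short tag = tag_get_one(mhba, &mhba->tag_pool);
 *	mhba->tag_cmd[tag] = cmd;
 *	// ... fire the command and wait for the outbound frame ...
 *	mhba->tag_cmd[tag] = NULL;
 *	tag_release_one(mhba, &mhba->tag_pool, tag);
 */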
  74static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
  75{
  76	int i;
  77
  78	for (i = 0; i < MAX_BASE_ADDRESS; i++)
  79		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
  80								addr_array[i])
  81			pci_iounmap(dev, addr_array[i]);
  82}
  83
  84static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
  85{
  86	int i;
  87
  88	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
  89		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
  90			addr_array[i] = pci_iomap(dev, i, 0);
  91			if (!addr_array[i]) {
  92				dev_err(&dev->dev, "failed to map Bar[%d]\n",
  93									i);
  94				mvumi_unmap_pci_addr(dev, addr_array);
  95				return -ENOMEM;
  96			}
  97		} else
  98			addr_array[i] = NULL;
  99
 100		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
 101	}
 102
 103	return 0;
 104}
 105
 106static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 107				enum resource_type type, unsigned int size)
 108{
 109	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
 110
 111	if (!res) {
 112		dev_err(&mhba->pdev->dev,
 113			"Failed to allocate memory for resource manager.\n");
 114		return NULL;
 115	}
 116
 117	switch (type) {
 118	case RESOURCE_CACHED_MEMORY:
 119		res->virt_addr = kzalloc(size, GFP_ATOMIC);
 120		if (!res->virt_addr) {
 121			dev_err(&mhba->pdev->dev,
 122				"unable to allocate memory,size = %d.\n", size);
 123			kfree(res);
 124			return NULL;
 125		}
 126		break;
 127
 128	case RESOURCE_UNCACHED_MEMORY:
 129		size = round_up(size, 8);
 130		res->virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size,
 131						    &res->bus_addr,
 132						    GFP_KERNEL);
 133		if (!res->virt_addr) {
 134			dev_err(&mhba->pdev->dev,
 135					"unable to allocate consistent mem,"
 136							"size = %d.\n", size);
 137			kfree(res);
 138			return NULL;
 139		}
 140		break;
 141
 142	default:
 143		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
 144		kfree(res);
 145		return NULL;
 146	}
 147
 148	res->type = type;
 149	res->size = size;
 150	INIT_LIST_HEAD(&res->entry);
 151	list_add_tail(&res->entry, &mhba->res_list);
 152
 153	return res;
 154}
 155
 156static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
 157{
 158	struct mvumi_res *res, *tmp;
 159
 160	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
 161		switch (res->type) {
 162		case RESOURCE_UNCACHED_MEMORY:
 163			dma_free_coherent(&mhba->pdev->dev, res->size,
 164						res->virt_addr, res->bus_addr);
 165			break;
 166		case RESOURCE_CACHED_MEMORY:
 167			kfree(res->virt_addr);
 168			break;
 169		default:
 170			dev_err(&mhba->pdev->dev,
 171				"unknown resource type %d\n", res->type);
 172			break;
 173		}
 174		list_del(&res->entry);
 175		kfree(res);
 176	}
 177	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
 178}
 179
 180/**
 181 * mvumi_make_sgl -	Prepares SGL
 182 * @mhba:		Adapter soft state
 183 * @scmd:		SCSI command from the mid-layer
 184 * @sgl_p:		SGL to be filled in
 185 * @sg_count:		return the number of SG elements
 186 *
 187 * If successful, this function returns 0. Otherwise, it returns -1.
 188 */
 189static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 190					void *sgl_p, unsigned char *sg_count)
 191{
 192	struct scatterlist *sg;
 193	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
 194	unsigned int i;
 195	unsigned int sgnum = scsi_sg_count(scmd);
 196	dma_addr_t busaddr;
 197
 198	*sg_count = dma_map_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
 199			       scmd->sc_data_direction);
 200	if (*sg_count > mhba->max_sge) {
 201		dev_err(&mhba->pdev->dev,
 202			"sg count[0x%x] is bigger than max sg[0x%x].\n",
 203			*sg_count, mhba->max_sge);
 204		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd), sgnum,
 205			     scmd->sc_data_direction);
 206		return -1;
 207	}
 208	scsi_for_each_sg(scmd, sg, *sg_count, i) {
 209		busaddr = sg_dma_address(sg);
 210		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 211		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 212		m_sg->flags = 0;
 213		sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(sg)));
 214		if ((i + 1) == *sg_count)
 215			m_sg->flags |= 1U << mhba->eot_flag;
 216
 217		sgd_inc(mhba, m_sg);
 218	}
 219
 220	return 0;
 221}
 222
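/*
 * Illustrative note (not in the original source): each hardware SG element
 * built above carries the 64-bit DMA address split into baseaddr_l/baseaddr_h,
 * a length written through sgd_setsz(), and a flags word.  Only the last
 * element of the list sets the end-of-table bit (1 << mhba->eot_flag, where
 * eot_flag is 22 for compact-SG capable firmware and 27 otherwise, see
 * mvumi_hs_process_page()).
 */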
 223static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
 224							unsigned int size)
 225{
 226	struct mvumi_sgl *m_sg;
 227	void *virt_addr;
 228	dma_addr_t phy_addr;
 229
 230	if (size == 0)
 231		return 0;
 232
 233	virt_addr = dma_alloc_coherent(&mhba->pdev->dev, size, &phy_addr,
 234				       GFP_KERNEL);
 235	if (!virt_addr)
 236		return -1;
 237
 238	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 239	cmd->frame->sg_counts = 1;
 240	cmd->data_buf = virt_addr;
 241
 242	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
 243	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
 244	m_sg->flags = 1U << mhba->eot_flag;
 245	sgd_setsz(mhba, m_sg, cpu_to_le32(size));
 246
 247	return 0;
 248}
 249
 250static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
 251				unsigned int buf_size)
 252{
 253	struct mvumi_cmd *cmd;
 254
 255	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 256	if (!cmd) {
 257		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
 258		return NULL;
 259	}
 260	INIT_LIST_HEAD(&cmd->queue_pointer);
 261
 262	cmd->frame = dma_alloc_coherent(&mhba->pdev->dev, mhba->ib_max_size,
 263			&cmd->frame_phys, GFP_KERNEL);
 264	if (!cmd->frame) {
 265		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
 266			" frame,size = %d.\n", mhba->ib_max_size);
 267		kfree(cmd);
 268		return NULL;
 269	}
 270
 271	if (buf_size) {
 272		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
 273			dev_err(&mhba->pdev->dev, "failed to allocate memory"
 274						" for internal frame\n");
 275			dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
 276					cmd->frame, cmd->frame_phys);
 277			kfree(cmd);
 278			return NULL;
 279		}
 280	} else
 281		cmd->frame->sg_counts = 0;
 282
 283	return cmd;
 284}
 285
 286static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
 287						struct mvumi_cmd *cmd)
 288{
 289	struct mvumi_sgl *m_sg;
 290	unsigned int size;
 291	dma_addr_t phy_addr;
 292
 293	if (cmd && cmd->frame) {
 294		if (cmd->frame->sg_counts) {
 295			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 296			sgd_getsz(mhba, m_sg, size);
 297
 298			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
 299				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 300
 301			dma_free_coherent(&mhba->pdev->dev, size, cmd->data_buf,
 302								phy_addr);
 303		}
 304		dma_free_coherent(&mhba->pdev->dev, mhba->ib_max_size,
 305				cmd->frame, cmd->frame_phys);
 306		kfree(cmd);
 307	}
 308}
 309
 310/**
 311 * mvumi_get_cmd -	Get a command from the free pool
 312 * @mhba:		Adapter soft state
 313 *
 314 * Returns a free command from the pool
 315 */
 316static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
 317{
 318	struct mvumi_cmd *cmd = NULL;
 319
 320	if (likely(!list_empty(&mhba->cmd_pool))) {
 321		cmd = list_entry((&mhba->cmd_pool)->next,
 322				struct mvumi_cmd, queue_pointer);
 323		list_del_init(&cmd->queue_pointer);
 324	} else
 325		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
 326
 327	return cmd;
 328}
 329
 330/**
 331 * mvumi_return_cmd -	Return a cmd to free command pool
 332 * @mhba:		Adapter soft state
 333 * @cmd:		Command packet to be returned to free command pool
 334 */
 335static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
 336						struct mvumi_cmd *cmd)
 337{
 338	cmd->scmd = NULL;
 339	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 340}
 341
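/*
 * Usage sketch (illustrative, not part of the driver source): commands cycle
 * between the free pool and the hardware.  Roughly:
 *
 *	cmd = mvumi_get_cmd(mhba);		// pop from mhba->cmd_pool
 *	cmd->scmd = scmd;			// attach the SCSI command
 *	mhba->instancet->fire_cmd(mhba, cmd);	// queue to the inbound list
 *	// ... completion path (mvumi_complete_cmd) eventually calls ...
 *	mvumi_return_cmd(mhba, cmd);		// push back onto cmd_pool
 */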
 342/**
 343 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 344 * @mhba:		Adapter soft state
 345 */
 346static void mvumi_free_cmds(struct mvumi_hba *mhba)
 347{
 348	struct mvumi_cmd *cmd;
 349
 350	while (!list_empty(&mhba->cmd_pool)) {
 351		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 352							queue_pointer);
 353		list_del(&cmd->queue_pointer);
 354		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 355			kfree(cmd->frame);
 356		kfree(cmd);
 357	}
 358}
 359
 360/**
 361 * mvumi_alloc_cmds -	Allocates the command packets
 362 * @mhba:		Adapter soft state
 363 *
 364 */
 365static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
 366{
 367	int i;
 368	struct mvumi_cmd *cmd;
 369
 370	for (i = 0; i < mhba->max_io; i++) {
 371		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 372		if (!cmd)
 373			goto err_exit;
 374
 375		INIT_LIST_HEAD(&cmd->queue_pointer);
 376		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 377		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 378			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
 379			cmd->frame_phys = mhba->ib_frame_phys
 380						+ i * mhba->ib_max_size;
 381		} else
 382			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
 383		if (!cmd->frame)
 384			goto err_exit;
 385	}
 386	return 0;
 387
 388err_exit:
 389	dev_err(&mhba->pdev->dev,
 390			"failed to allocate memory for cmd[0x%x].\n", i);
 391	while (!list_empty(&mhba->cmd_pool)) {
 392		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 393						queue_pointer);
 394		list_del(&cmd->queue_pointer);
 395		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 396			kfree(cmd->frame);
 397		kfree(cmd);
 398	}
 399	return -ENOMEM;
 400}
 401
 402static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
 403{
 404	unsigned int ib_rp_reg;
 405	struct mvumi_hw_regs *regs = mhba->regs;
 406
 407	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
 408
 409	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
 410			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
 411			((ib_rp_reg & regs->cl_pointer_toggle)
 412			 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
 413		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
 414		return 0;
 415	}
 416	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
 417		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
 418		return 0;
 419	} else {
 420		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
 421	}
 422}
 423
 424static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
 425{
 426	unsigned int count;
 427	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
 428		return 0;
 429	count = ioread32(mhba->ib_shadow);
 430	if (count == 0xffff)
 431		return 0;
 432	return count;
 433}
 434
 435static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
 436{
 437	unsigned int cur_ib_entry;
 438
 439	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
 440	cur_ib_entry++;
 441	if (cur_ib_entry >= mhba->list_num_io) {
 442		cur_ib_entry -= mhba->list_num_io;
 443		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
 444	}
 445	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
 446	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
 447	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 448		*ib_entry = mhba->ib_list + cur_ib_entry *
 449				sizeof(struct mvumi_dyn_list_entry);
 450	} else {
 451		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
 452	}
 453	atomic_inc(&mhba->fw_outstanding);
 454}
 455
 456static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
 457{
 458	iowrite32(0xffff, mhba->ib_shadow);
 459	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
 460}
 461
 462static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
 463		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
 464{
 465	unsigned short tag, request_id;
 466
 467	udelay(1);
 468	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 469	request_id = p_outb_frame->request_id;
 470	tag = p_outb_frame->tag;
 471	if (tag > mhba->tag_pool.size) {
 472		dev_err(&mhba->pdev->dev, "ob frame data error\n");
 473		return -1;
 474	}
 475	if (mhba->tag_cmd[tag] == NULL) {
 476		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
 477		return -1;
 478	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
 479						mhba->request_id_enabled) {
 480			dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
 481					"cmd request ID:0x%x\n", request_id,
 482					mhba->tag_cmd[tag]->request_id);
 483			return -1;
 484	}
 485
 486	return 0;
 487}
 488
 489static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
 490			unsigned int *cur_obf, unsigned int *assign_obf_end)
 491{
 492	unsigned int ob_write, ob_write_shadow;
 493	struct mvumi_hw_regs *regs = mhba->regs;
 494
 495	do {
 496		ob_write = ioread32(regs->outb_copy_pointer);
 497		ob_write_shadow = ioread32(mhba->ob_shadow);
 498	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
 499
 500	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 501	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 502
 503	if ((ob_write & regs->cl_pointer_toggle) !=
 504			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
 505		*assign_obf_end += mhba->list_num_io;
 506	}
 507	return 0;
 508}
 509
 510static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
 511			unsigned int *cur_obf, unsigned int *assign_obf_end)
 512{
 513	unsigned int ob_write;
 514	struct mvumi_hw_regs *regs = mhba->regs;
 515
 516	ob_write = ioread32(regs->outb_read_pointer);
 517	ob_write = ioread32(regs->outb_copy_pointer);
 518	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 519	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 520	if (*assign_obf_end < *cur_obf)
 521		*assign_obf_end += mhba->list_num_io;
 522	else if (*assign_obf_end == *cur_obf)
 523		return -1;
 524	return 0;
 525}
 526
 527static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
 528{
 529	unsigned int cur_obf, assign_obf_end, i;
 530	struct mvumi_ob_data *ob_data;
 531	struct mvumi_rsp_frame *p_outb_frame;
 532	struct mvumi_hw_regs *regs = mhba->regs;
 533
 534	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
 535		return;
 536
 537	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
 538		cur_obf++;
 539		if (cur_obf >= mhba->list_num_io) {
 540			cur_obf -= mhba->list_num_io;
 541			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 542		}
 543
 544		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 545
 546		/* Copy pointer may point to entry in outbound list
 547		 * before entry has valid data
 548		 */
 549		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
 550			mhba->tag_cmd[p_outb_frame->tag] == NULL ||
 551			p_outb_frame->request_id !=
 552				mhba->tag_cmd[p_outb_frame->tag]->request_id))
 553			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
 554				continue;
 555
 556		if (!list_empty(&mhba->ob_data_list)) {
 557			ob_data = (struct mvumi_ob_data *)
 558				list_first_entry(&mhba->ob_data_list,
 559					struct mvumi_ob_data, list);
 560			list_del_init(&ob_data->list);
 561		} else {
 562			ob_data = NULL;
 563			if (cur_obf == 0) {
 564				cur_obf = mhba->list_num_io - 1;
 565				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 566			} else
 567				cur_obf -= 1;
 568			break;
 569		}
 570
 571		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
 572		p_outb_frame->tag = 0xff;
 573
 574		list_add_tail(&ob_data->list, &mhba->free_ob_list);
 575	}
 576	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
 577	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
 578	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
 579}
 580
 581static void mvumi_reset(struct mvumi_hba *mhba)
 582{
 583	struct mvumi_hw_regs *regs = mhba->regs;
 584
 585	iowrite32(0, regs->enpointa_mask_reg);
 586	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
 587		return;
 588
 589	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
 590}
 591
 592static unsigned char mvumi_start(struct mvumi_hba *mhba);
 593
 594static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
 595{
 596	mhba->fw_state = FW_STATE_ABORT;
 597	mvumi_reset(mhba);
 598
 599	if (mvumi_start(mhba))
 600		return FAILED;
 601	else
 602		return SUCCESS;
 603}
 604
 605static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
 606{
 607	struct mvumi_hw_regs *regs = mhba->regs;
 608	u32 tmp;
 609	unsigned long before;
 610	before = jiffies;
 611
 612	iowrite32(0, regs->enpointa_mask_reg);
 613	tmp = ioread32(regs->arm_to_pciea_msg1);
 614	while (tmp != HANDSHAKE_READYSTATE) {
 615		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
 616		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
 617			dev_err(&mhba->pdev->dev,
 618				"FW reset failed [0x%x].\n", tmp);
 619			return FAILED;
 620		}
 621
 622		msleep(500);
 623		rmb();
 624		tmp = ioread32(regs->arm_to_pciea_msg1);
 625	}
 626
 627	return SUCCESS;
 628}
 629
 630static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
 631{
 632	unsigned char i;
 633
 634	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 635		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
 636						&mhba->pci_base[i]);
 637	}
 638}
 639
 640static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
 641{
 642	unsigned char i;
 643
 644	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 645		if (mhba->pci_base[i])
 646			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
 647						mhba->pci_base[i]);
 648	}
 649}
 650
 651static int mvumi_pci_set_master(struct pci_dev *pdev)
 652{
 653	int ret = 0;
 654
 655	pci_set_master(pdev);
 656
 657	if (IS_DMA64) {
 658		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
 659			ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 660	} else
 661		ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
 662
 663	return ret;
 664}
 665
 666static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
 667{
 668	mhba->fw_state = FW_STATE_ABORT;
 669
 670	iowrite32(0, mhba->regs->reset_enable);
 671	iowrite32(0xf, mhba->regs->reset_request);
 672
 673	iowrite32(0x10, mhba->regs->reset_enable);
 674	iowrite32(0x10, mhba->regs->reset_request);
 675	msleep(100);
 676	pci_disable_device(mhba->pdev);
 677
 678	if (pci_enable_device(mhba->pdev)) {
 679		dev_err(&mhba->pdev->dev, "enable device failed\n");
 680		return FAILED;
 681	}
 682	if (mvumi_pci_set_master(mhba->pdev)) {
 683		dev_err(&mhba->pdev->dev, "set master failed\n");
 684		return FAILED;
 685	}
 686	mvumi_restore_bar_addr(mhba);
 687	if (mvumi_wait_for_fw(mhba) == FAILED)
 688		return FAILED;
 689
 690	return mvumi_wait_for_outstanding(mhba);
 691}
 692
 693static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
 694{
 695	return mvumi_wait_for_outstanding(mhba);
 696}
 697
 698static int mvumi_host_reset(struct scsi_cmnd *scmd)
 699{
 700	struct mvumi_hba *mhba;
 701
 702	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 703
 704	scmd_printk(KERN_NOTICE, scmd, "RESET -%u cmd=%x retries=%x\n",
 705			scsi_cmd_to_rq(scmd)->tag, scmd->cmnd[0], scmd->retries);
 706
 707	return mhba->instancet->reset_host(mhba);
 708}
 709
 710static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
 711						struct mvumi_cmd *cmd)
 712{
 713	unsigned long flags;
 714
 715	cmd->cmd_status = REQ_STATUS_PENDING;
 716
 717	if (atomic_read(&cmd->sync_cmd)) {
 718		dev_err(&mhba->pdev->dev,
 719			"last blocked cmd not finished, sync_cmd = %d\n",
 720						atomic_read(&cmd->sync_cmd));
 721		BUG_ON(1);
 722		return -1;
 723	}
 724	atomic_inc(&cmd->sync_cmd);
 725	spin_lock_irqsave(mhba->shost->host_lock, flags);
 726	mhba->instancet->fire_cmd(mhba, cmd);
 727	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 728
 729	wait_event_timeout(mhba->int_cmd_wait_q,
 730		(cmd->cmd_status != REQ_STATUS_PENDING),
 731		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
 732
 733	/* command timeout */
 734	if (atomic_read(&cmd->sync_cmd)) {
 735		spin_lock_irqsave(mhba->shost->host_lock, flags);
 736		atomic_dec(&cmd->sync_cmd);
 737		if (mhba->tag_cmd[cmd->frame->tag]) {
 738			mhba->tag_cmd[cmd->frame->tag] = NULL;
 739			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
 740							cmd->frame->tag);
 741			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
 742		}
 743		if (!list_empty(&cmd->queue_pointer)) {
 744			dev_warn(&mhba->pdev->dev,
 745				"TIMEOUT: an internal command was not sent!\n");
 746			list_del_init(&cmd->queue_pointer);
 747		} else
 748			atomic_dec(&mhba->fw_outstanding);
 749
 750		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 751	}
 752	return 0;
 753}
 754
 755static void mvumi_release_fw(struct mvumi_hba *mhba)
 756{
 757	mvumi_free_cmds(mhba);
 758	mvumi_release_mem_resource(mhba);
 759	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
 760	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
 761		mhba->handshake_page, mhba->handshake_page_phys);
 762	kfree(mhba->regs);
 763	pci_release_regions(mhba->pdev);
 764}
 765
 766static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
 767{
 768	struct mvumi_cmd *cmd;
 769	struct mvumi_msg_frame *frame;
 770	unsigned char device_id, retry = 0;
 771	unsigned char bitcount = sizeof(unsigned char) * 8;
 772
 773	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
 774		if (!(mhba->target_map[device_id / bitcount] &
 775				(1 << (device_id % bitcount))))
 776			continue;
 777get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
 778		if (!cmd) {
 779			if (retry++ >= 5) {
 780				dev_err(&mhba->pdev->dev, "failed to get memory"
 781					" for internal flush cache cmd for "
 782					"device %d", device_id);
 783				retry = 0;
 784				continue;
 785			} else
 786				goto get_cmd;
 787		}
 788		cmd->scmd = NULL;
 789		cmd->cmd_status = REQ_STATUS_PENDING;
 790		atomic_set(&cmd->sync_cmd, 0);
 791		frame = cmd->frame;
 792		frame->req_function = CL_FUN_SCSI_CMD;
 793		frame->device_id = device_id;
 794		frame->cmd_flag = CMD_FLAG_NON_DATA;
 795		frame->data_transfer_length = 0;
 796		frame->cdb_length = MAX_COMMAND_SIZE;
 797		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
 798		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
 799		frame->cdb[1] = CDB_CORE_MODULE;
 800		frame->cdb[2] = CDB_CORE_SHUTDOWN;
 801
 802		mvumi_issue_blocked_cmd(mhba, cmd);
 803		if (cmd->cmd_status != SAM_STAT_GOOD) {
 804			dev_err(&mhba->pdev->dev,
 805				"device %d flush cache failed, status=0x%x.\n",
 806				device_id, cmd->cmd_status);
 807		}
 808
 809		mvumi_delete_internal_cmd(mhba, cmd);
 810	}
 811	return 0;
 812}
 813
 814static unsigned char
 815mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
 816							unsigned short len)
 817{
 818	unsigned char *ptr;
 819	unsigned char ret = 0, i;
 820
 821	ptr = (unsigned char *) p_header->frame_content;
 822	for (i = 0; i < len; i++) {
 823		ret ^= *ptr;
 824		ptr++;
 825	}
 826
 827	return ret;
 828}
 829
 830static void mvumi_hs_build_page(struct mvumi_hba *mhba,
 831				struct mvumi_hs_header *hs_header)
 832{
 833	struct mvumi_hs_page2 *hs_page2;
 834	struct mvumi_hs_page4 *hs_page4;
 835	struct mvumi_hs_page3 *hs_page3;
 836	u64 time;
 837	u64 local_time;
 838
 839	switch (hs_header->page_code) {
 840	case HS_PAGE_HOST_INFO:
 841		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
 842		hs_header->frame_length = sizeof(*hs_page2) - 4;
 843		memset(hs_header->frame_content, 0, hs_header->frame_length);
 844		hs_page2->host_type = 3; /* 3 means Linux */
 845		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 846			hs_page2->host_cap = 0x08;/* host dynamic source mode */
 847		hs_page2->host_ver.ver_major = VER_MAJOR;
 848		hs_page2->host_ver.ver_minor = VER_MINOR;
 849		hs_page2->host_ver.ver_oem = VER_OEM;
 850		hs_page2->host_ver.ver_build = VER_BUILD;
 851		hs_page2->system_io_bus = 0;
 852		hs_page2->slot_number = 0;
 853		hs_page2->intr_level = 0;
 854		hs_page2->intr_vector = 0;
 855		time = ktime_get_real_seconds();
 856		local_time = (time - (sys_tz.tz_minuteswest * 60));
 857		hs_page2->seconds_since1970 = local_time;
 858		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 859						hs_header->frame_length);
 860		break;
 861
 862	case HS_PAGE_FIRM_CTL:
 863		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
 864		hs_header->frame_length = sizeof(*hs_page3) - 4;
 865		memset(hs_header->frame_content, 0, hs_header->frame_length);
 866		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 867						hs_header->frame_length);
 868		break;
 869
 870	case HS_PAGE_CL_INFO:
 871		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
 872		hs_header->frame_length = sizeof(*hs_page4) - 4;
 873		memset(hs_header->frame_content, 0, hs_header->frame_length);
 874		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
 875		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
 876
 877		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
 878		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
 879		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
 880		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
 881		if (mhba->hba_capability
 882			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
 883			hs_page4->ob_depth = find_first_bit((unsigned long *)
 884							    &mhba->list_num_io,
 885							    BITS_PER_LONG);
 886			hs_page4->ib_depth = find_first_bit((unsigned long *)
 887							    &mhba->list_num_io,
 888							    BITS_PER_LONG);
 889		} else {
 890			hs_page4->ob_depth = (u8) mhba->list_num_io;
 891			hs_page4->ib_depth = (u8) mhba->list_num_io;
 892		}
 893		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 894						hs_header->frame_length);
 895		break;
 896
 897	default:
 898		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
 899			hs_header->page_code);
 900		break;
 901	}
 902}
 903
 904/**
 905 * mvumi_init_data -	Initialize requested data for FW
 906 * @mhba:			Adapter soft state
 907 */
 908static int mvumi_init_data(struct mvumi_hba *mhba)
 909{
 910	struct mvumi_ob_data *ob_pool;
 911	struct mvumi_res *res_mgnt;
 912	unsigned int tmp_size, offset, i;
 913	void *virmem, *v;
 914	dma_addr_t p;
 915
 916	if (mhba->fw_flag & MVUMI_FW_ALLOC)
 917		return 0;
 918
 919	tmp_size = mhba->ib_max_size * mhba->max_io;
 920	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 921		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 922
 923	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
 924	tmp_size += 8 + sizeof(u32)*2 + 16;
 925
 926	res_mgnt = mvumi_alloc_mem_resource(mhba,
 927					RESOURCE_UNCACHED_MEMORY, tmp_size);
 928	if (!res_mgnt) {
 929		dev_err(&mhba->pdev->dev,
 930			"failed to allocate memory for inbound list\n");
 931		goto fail_alloc_dma_buf;
 932	}
 933
 934	p = res_mgnt->bus_addr;
 935	v = res_mgnt->virt_addr;
 936	/* ib_list */
 937	offset = round_up(p, 128) - p;
 938	p += offset;
 939	v += offset;
 940	mhba->ib_list = v;
 941	mhba->ib_list_phys = p;
 942	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 943		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 944		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 945		mhba->ib_frame = v;
 946		mhba->ib_frame_phys = p;
 947	}
 948	v += mhba->ib_max_size * mhba->max_io;
 949	p += mhba->ib_max_size * mhba->max_io;
 950
 951	/* ib shadow */
 952	offset = round_up(p, 8) - p;
 953	p += offset;
 954	v += offset;
 955	mhba->ib_shadow = v;
 956	mhba->ib_shadow_phys = p;
 957	p += sizeof(u32)*2;
 958	v += sizeof(u32)*2;
 959	/* ob shadow */
 960	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
 961		offset = round_up(p, 8) - p;
 962		p += offset;
 963		v += offset;
 964		mhba->ob_shadow = v;
 965		mhba->ob_shadow_phys = p;
 966		p += 8;
 967		v += 8;
 968	} else {
 969		offset = round_up(p, 4) - p;
 970		p += offset;
 971		v += offset;
 972		mhba->ob_shadow = v;
 973		mhba->ob_shadow_phys = p;
 974		p += 4;
 975		v += 4;
 976	}
 977
 978	/* ob list */
 979	offset = round_up(p, 128) - p;
 980	p += offset;
 981	v += offset;
 982
 983	mhba->ob_list = v;
 984	mhba->ob_list_phys = p;
 985
 986	/* ob data pool */
 987	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
 988	tmp_size = round_up(tmp_size, 8);
 989
 990	res_mgnt = mvumi_alloc_mem_resource(mhba,
 991				RESOURCE_CACHED_MEMORY, tmp_size);
 992	if (!res_mgnt) {
 993		dev_err(&mhba->pdev->dev,
 994			"failed to allocate memory for outbound data buffer\n");
 995		goto fail_alloc_dma_buf;
 996	}
 997	virmem = res_mgnt->virt_addr;
 998
 999	for (i = mhba->max_io; i != 0; i--) {
1000		ob_pool = (struct mvumi_ob_data *) virmem;
1001		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
1002		virmem += mhba->ob_max_size + sizeof(*ob_pool);
1003	}
1004
1005	tmp_size = sizeof(unsigned short) * mhba->max_io +
1006				sizeof(struct mvumi_cmd *) * mhba->max_io;
1007	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
1008						(sizeof(unsigned char) * 8);
1009
1010	res_mgnt = mvumi_alloc_mem_resource(mhba,
1011				RESOURCE_CACHED_MEMORY, tmp_size);
1012	if (!res_mgnt) {
1013		dev_err(&mhba->pdev->dev,
1014			"failed to allocate memory for tag and target map\n");
1015		goto fail_alloc_dma_buf;
1016	}
1017
1018	virmem = res_mgnt->virt_addr;
1019	mhba->tag_pool.stack = virmem;
1020	mhba->tag_pool.size = mhba->max_io;
1021	tag_init(&mhba->tag_pool, mhba->max_io);
1022	virmem += sizeof(unsigned short) * mhba->max_io;
1023
1024	mhba->tag_cmd = virmem;
1025	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
1026
1027	mhba->target_map = virmem;
1028
1029	mhba->fw_flag |= MVUMI_FW_ALLOC;
1030	return 0;
1031
1032fail_alloc_dma_buf:
1033	mvumi_release_mem_resource(mhba);
1034	return -1;
1035}
1036
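/*
 * Layout sketch (illustrative, not part of the original source) of the single
 * uncached region carved up by mvumi_init_data() above:
 *
 *	[128-byte aligned] inbound list (plus dynamic-source entries and
 *	                   per-command frames when DYN_SRC is supported)
 *	[8-byte aligned]   inbound shadow counter (2 x u32)
 *	[8 or 4 bytes]     outbound shadow (8 bytes on 9580, 4 on 9143)
 *	[128-byte aligned] outbound list
 *
 * A second, cached allocation holds the outbound data pool, and a third holds
 * the tag stack, the tag_cmd[] lookup table and the target bitmap.
 */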
1037static int mvumi_hs_process_page(struct mvumi_hba *mhba,
1038				struct mvumi_hs_header *hs_header)
1039{
1040	struct mvumi_hs_page1 *hs_page1;
1041	unsigned char page_checksum;
1042
1043	page_checksum = mvumi_calculate_checksum(hs_header,
1044						hs_header->frame_length);
1045	if (page_checksum != hs_header->checksum) {
1046		dev_err(&mhba->pdev->dev, "checksum error\n");
1047		return -1;
1048	}
1049
1050	switch (hs_header->page_code) {
1051	case HS_PAGE_FIRM_CAP:
1052		hs_page1 = (struct mvumi_hs_page1 *) hs_header;
1053
1054		mhba->max_io = hs_page1->max_io_support;
1055		mhba->list_num_io = hs_page1->cl_inout_list_depth;
1056		mhba->max_transfer_size = hs_page1->max_transfer_size;
1057		mhba->max_target_id = hs_page1->max_devices_support;
1058		mhba->hba_capability = hs_page1->capability;
1059		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
1060		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
1061
1062		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
1063		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
1064
1065		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
1066						hs_page1->fw_ver.ver_build);
1067
1068		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1069			mhba->eot_flag = 22;
1070		else
1071			mhba->eot_flag = 27;
1072		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1073			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
1074		break;
1075	default:
1076		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
1077		return -1;
1078	}
1079	return 0;
1080}
1081
1082/**
1083 * mvumi_handshake -	Move the FW to READY state
1084 * @mhba:				Adapter soft state
1085 *
1086 * During initialization, the FW can be in any one of several possible
1087 * states. If the FW is in the operational or waiting-for-handshake state,
1088 * the driver must take steps to bring it to the ready state. Otherwise,
1089 * it has to wait for the ready state.
1090 */
1091static int mvumi_handshake(struct mvumi_hba *mhba)
1092{
1093	unsigned int hs_state, tmp, hs_fun;
1094	struct mvumi_hs_header *hs_header;
1095	struct mvumi_hw_regs *regs = mhba->regs;
1096
1097	if (mhba->fw_state == FW_STATE_STARTING)
1098		hs_state = HS_S_START;
1099	else {
1100		tmp = ioread32(regs->arm_to_pciea_msg0);
1101		hs_state = HS_GET_STATE(tmp);
1102		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
1103		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
1104			mhba->fw_state = FW_STATE_STARTING;
1105			return -1;
1106		}
1107	}
1108
1109	hs_fun = 0;
1110	switch (hs_state) {
1111	case HS_S_START:
1112		mhba->fw_state = FW_STATE_HANDSHAKING;
1113		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1114		HS_SET_STATE(hs_fun, HS_S_RESET);
1115		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
1116		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1117		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1118		break;
1119
1120	case HS_S_RESET:
1121		iowrite32(lower_32_bits(mhba->handshake_page_phys),
1122					regs->pciea_to_arm_msg1);
1123		iowrite32(upper_32_bits(mhba->handshake_page_phys),
1124					regs->arm_to_pciea_msg1);
1125		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1126		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
1127		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1128		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1129		break;
1130
1131	case HS_S_PAGE_ADDR:
1132	case HS_S_QUERY_PAGE:
1133	case HS_S_SEND_PAGE:
1134		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
1135		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
1136			mhba->hba_total_pages =
1137			((struct mvumi_hs_page1 *) hs_header)->total_pages;
1138
1139			if (mhba->hba_total_pages == 0)
1140				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1141		}
1142
1143		if (hs_state == HS_S_QUERY_PAGE) {
1144			if (mvumi_hs_process_page(mhba, hs_header)) {
1145				HS_SET_STATE(hs_fun, HS_S_ABORT);
1146				return -1;
1147			}
1148			if (mvumi_init_data(mhba)) {
1149				HS_SET_STATE(hs_fun, HS_S_ABORT);
1150				return -1;
1151			}
1152		} else if (hs_state == HS_S_PAGE_ADDR) {
1153			hs_header->page_code = 0;
1154			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1155		}
1156
1157		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
1158			hs_header->page_code++;
1159			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
1160				mvumi_hs_build_page(mhba, hs_header);
1161				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
1162			} else
1163				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
1164		} else
1165			HS_SET_STATE(hs_fun, HS_S_END);
1166
1167		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1168		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1169		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1170		break;
1171
1172	case HS_S_END:
1173		/* Set communication list ISR */
1174		tmp = ioread32(regs->enpointa_mask_reg);
1175		tmp |= regs->int_comaout | regs->int_comaerr;
1176		iowrite32(tmp, regs->enpointa_mask_reg);
1177		iowrite32(mhba->list_num_io, mhba->ib_shadow);
1178		/* Set InBound List Available count shadow */
1179		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1180					regs->inb_aval_count_basel);
1181		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1182					regs->inb_aval_count_baseh);
1183
1184		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1185			/* Set OutBound List Available count shadow */
1186			iowrite32((mhba->list_num_io-1) |
1187							regs->cl_pointer_toggle,
1188							mhba->ob_shadow);
1189			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1190							regs->outb_copy_basel);
1191			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1192							regs->outb_copy_baseh);
1193		}
1194
1195		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1196							regs->cl_pointer_toggle;
1197		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1198							regs->cl_pointer_toggle;
1199		mhba->fw_state = FW_STATE_STARTED;
1200
1201		break;
1202	default:
1203		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1204								hs_state);
1205		return -1;
1206	}
1207	return 0;
1208}
1209
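/*
 * State-machine sketch (illustrative, not in the original source) of the
 * handshake driven above; each intermediate step rings the DRBL_HANDSHAKE
 * doorbell:
 *
 *	HS_S_START      -> advertise HANDSHAKE_SIGNATURE, request HS_S_RESET
 *	HS_S_RESET      -> pass the handshake page DMA address, request
 *	                   HS_S_PAGE_ADDR
 *	HS_S_PAGE_ADDR / HS_S_QUERY_PAGE / HS_S_SEND_PAGE
 *	                -> walk the handshake pages: process HS_PAGE_FIRM_CAP,
 *	                   build the host/firmware/list pages, until all
 *	                   hba_total_pages have been exchanged
 *	HS_S_END        -> program list base addresses and shadows, mark the
 *	                   firmware FW_STATE_STARTED
 */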
1210static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1211{
1212	unsigned int isr_status;
1213	unsigned long before;
1214
1215	before = jiffies;
1216	mvumi_handshake(mhba);
1217	do {
1218		isr_status = mhba->instancet->read_fw_status_reg(mhba);
1219
1220		if (mhba->fw_state == FW_STATE_STARTED)
1221			return 0;
1222		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1223			dev_err(&mhba->pdev->dev,
1224				"no handshake response at state 0x%x.\n",
1225				  mhba->fw_state);
1226			dev_err(&mhba->pdev->dev,
1227				"isr : global=0x%x,status=0x%x.\n",
1228					mhba->global_isr, isr_status);
1229			return -1;
1230		}
1231		rmb();
1232		usleep_range(1000, 2000);
1233	} while (!(isr_status & DRBL_HANDSHAKE_ISR));
1234
1235	return 0;
1236}
1237
1238static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1239{
1240	unsigned int tmp;
1241	unsigned long before;
1242
1243	before = jiffies;
1244	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1245	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1246		if (tmp != HANDSHAKE_READYSTATE)
1247			iowrite32(DRBL_MU_RESET,
1248					mhba->regs->pciea_to_arm_drbl_reg);
1249		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1250			dev_err(&mhba->pdev->dev,
1251				"invalid signature [0x%x].\n", tmp);
1252			return -1;
1253		}
1254		usleep_range(1000, 2000);
1255		rmb();
1256		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1257	}
1258
1259	mhba->fw_state = FW_STATE_STARTING;
1260	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1261	do {
1262		if (mvumi_handshake_event(mhba)) {
1263			dev_err(&mhba->pdev->dev,
1264					"handshake failed at state 0x%x.\n",
1265						mhba->fw_state);
1266			return -1;
1267		}
1268	} while (mhba->fw_state != FW_STATE_STARTED);
1269
1270	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1271
1272	return 0;
1273}
1274
1275static unsigned char mvumi_start(struct mvumi_hba *mhba)
1276{
1277	unsigned int tmp;
1278	struct mvumi_hw_regs *regs = mhba->regs;
1279
1280	/* clear Door bell */
1281	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1282	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1283
1284	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1285	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1286	iowrite32(tmp, regs->enpointa_mask_reg);
1287	msleep(100);
1288	if (mvumi_check_handshake(mhba))
1289		return -1;
1290
1291	return 0;
1292}
1293
1294/**
1295 * mvumi_complete_cmd -	Completes a command
1296 * @mhba:			Adapter soft state
1297 * @cmd:			Command to be completed
1298 * @ob_frame:			Command response
1299 */
1300static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1301					struct mvumi_rsp_frame *ob_frame)
1302{
1303	struct scsi_cmnd *scmd = cmd->scmd;
1304
1305	mvumi_priv(cmd->scmd)->cmd_priv = NULL;
1306	scmd->result = ob_frame->req_status;
1307
1308	switch (ob_frame->req_status) {
1309	case SAM_STAT_GOOD:
1310		scmd->result |= DID_OK << 16;
1311		break;
1312	case SAM_STAT_BUSY:
1313		scmd->result |= DID_BUS_BUSY << 16;
1314		break;
1315	case SAM_STAT_CHECK_CONDITION:
1316		scmd->result |= (DID_OK << 16);
1317		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1318			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1319				sizeof(struct mvumi_sense_data));
1320		}
1321		break;
1322	default:
1323		scmd->result |= (DID_ABORT << 16);
1324		break;
1325	}
1326
1327	if (scsi_bufflen(scmd))
1328		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
1329			     scsi_sg_count(scmd),
1330			     scmd->sc_data_direction);
1331	scsi_done(scmd);
1332	mvumi_return_cmd(mhba, cmd);
1333}
1334
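/*
 * Note (illustrative, not part of the original source): the firmware status in
 * ob_frame->req_status is mapped to the SCSI mid-layer result as follows -
 * SAM_STAT_GOOD and SAM_STAT_CHECK_CONDITION complete with DID_OK (sense data
 * is copied for check conditions), SAM_STAT_BUSY becomes DID_BUS_BUSY, and
 * anything else is reported as DID_ABORT.
 */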
1335static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1336						struct mvumi_cmd *cmd,
1337					struct mvumi_rsp_frame *ob_frame)
1338{
1339	if (atomic_read(&cmd->sync_cmd)) {
1340		cmd->cmd_status = ob_frame->req_status;
1341
1342		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1343				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1344				cmd->data_buf) {
1345			memcpy(cmd->data_buf, ob_frame->payload,
1346					sizeof(struct mvumi_sense_data));
1347		}
1348		atomic_dec(&cmd->sync_cmd);
1349		wake_up(&mhba->int_cmd_wait_q);
1350	}
1351}
1352
1353static void mvumi_show_event(struct mvumi_hba *mhba,
1354			struct mvumi_driver_event *ptr)
1355{
1356	unsigned int i;
1357
1358	dev_warn(&mhba->pdev->dev,
1359		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1360		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
1361	if (ptr->param_count) {
1362		printk(KERN_WARNING "Event param(len 0x%x): ",
1363						ptr->param_count);
1364		for (i = 0; i < ptr->param_count; i++)
1365			printk(KERN_WARNING "0x%x ", ptr->params[i]);
1366
1367		printk(KERN_WARNING "\n");
1368	}
1369
1370	if (ptr->sense_data_length) {
1371		printk(KERN_WARNING "Event sense data(len 0x%x): ",
1372						ptr->sense_data_length);
1373		for (i = 0; i < ptr->sense_data_length; i++)
1374			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
1375		printk(KERN_WARNING "\n");
1376	}
1377}
1378
1379static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1380{
1381	struct scsi_device *sdev;
1382	int ret = -1;
1383
1384	if (status == DEVICE_OFFLINE) {
1385		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1386		if (sdev) {
1387			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1388								sdev->id, 0);
1389			scsi_remove_device(sdev);
1390			scsi_device_put(sdev);
1391			ret = 0;
1392		} else
1393			dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1394									devid);
1395	} else if (status == DEVICE_ONLINE) {
1396		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1397		if (!sdev) {
1398			scsi_add_device(mhba->shost, 0, devid, 0);
1399			dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1400								devid, 0);
1401			ret = 0;
1402		} else {
1403			dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
1404								0, devid, 0);
1405			scsi_device_put(sdev);
1406		}
1407	}
1408	return ret;
1409}
1410
1411static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1412	unsigned int id, struct mvumi_cmd *cmd)
1413{
1414	struct mvumi_msg_frame *frame;
1415	u64 wwid = 0;
1416	int cmd_alloc = 0;
1417	int data_buf_len = 64;
1418
1419	if (!cmd) {
1420		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1421		if (cmd)
1422			cmd_alloc = 1;
1423		else
1424			return 0;
1425	} else {
1426		memset(cmd->data_buf, 0, data_buf_len);
1427	}
1428	cmd->scmd = NULL;
1429	cmd->cmd_status = REQ_STATUS_PENDING;
1430	atomic_set(&cmd->sync_cmd, 0);
1431	frame = cmd->frame;
1432	frame->device_id = (u16) id;
1433	frame->cmd_flag = CMD_FLAG_DATA_IN;
1434	frame->req_function = CL_FUN_SCSI_CMD;
1435	frame->cdb_length = 6;
1436	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1437	memset(frame->cdb, 0, frame->cdb_length);
1438	frame->cdb[0] = INQUIRY;
1439	frame->cdb[4] = frame->data_transfer_length;
1440
1441	mvumi_issue_blocked_cmd(mhba, cmd);
1442
1443	if (cmd->cmd_status == SAM_STAT_GOOD) {
1444		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1445			wwid = id + 1;
1446		else
1447			memcpy((void *)&wwid,
1448			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1449			       MVUMI_INQUIRY_UUID_LEN);
1450		dev_dbg(&mhba->pdev->dev,
1451			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1452	} else {
1453		wwid = 0;
1454	}
1455	if (cmd_alloc)
1456		mvumi_delete_internal_cmd(mhba, cmd);
1457
1458	return wwid;
1459}
1460
1461static void mvumi_detach_devices(struct mvumi_hba *mhba)
1462{
1463	struct mvumi_device *mv_dev = NULL , *dev_next;
1464	struct scsi_device *sdev = NULL;
1465
1466	mutex_lock(&mhba->device_lock);
1467
1468	/* detach Hard Disk */
1469	list_for_each_entry_safe(mv_dev, dev_next,
1470		&mhba->shost_dev_list, list) {
1471		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1472		list_del_init(&mv_dev->list);
1473		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1474			mv_dev->id, mv_dev->wwid);
1475		kfree(mv_dev);
1476	}
1477	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1478		list_del_init(&mv_dev->list);
1479		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1480			mv_dev->id, mv_dev->wwid);
1481		kfree(mv_dev);
1482	}
1483
1484	/* detach virtual device */
1485	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1486		sdev = scsi_device_lookup(mhba->shost, 0,
1487						mhba->max_target_id - 1, 0);
1488
1489	if (sdev) {
1490		scsi_remove_device(sdev);
1491		scsi_device_put(sdev);
1492	}
1493
1494	mutex_unlock(&mhba->device_lock);
1495}
1496
1497static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1498{
1499	struct scsi_device *sdev;
1500
1501	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1502	if (sdev) {
1503		scsi_rescan_device(sdev);
1504		scsi_device_put(sdev);
1505	}
1506}
1507
1508static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1509{
1510	struct mvumi_device *mv_dev = NULL;
1511
1512	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1513		if (mv_dev->wwid == wwid) {
1514			if (mv_dev->id != id) {
1515				dev_err(&mhba->pdev->dev,
1516					"%s has same wwid[%llx], "
1517					"but different id[%d %d]\n",
1518					__func__, mv_dev->wwid, mv_dev->id, id);
1519				return -1;
1520			} else {
1521				if (mhba->pdev->device ==
1522						PCI_DEVICE_ID_MARVELL_MV9143)
1523					mvumi_rescan_devices(mhba, id);
1524				return 1;
1525			}
1526		}
1527	}
1528	return 0;
1529}
1530
1531static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1532{
1533	struct mvumi_device *mv_dev = NULL, *dev_next;
1534
1535	list_for_each_entry_safe(mv_dev, dev_next,
1536				&mhba->shost_dev_list, list) {
1537		if (mv_dev->id == id) {
1538			dev_dbg(&mhba->pdev->dev,
1539				"detach device(0:%d:0) wwid(%llx) from HOST\n",
1540				mv_dev->id, mv_dev->wwid);
1541			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1542			list_del_init(&mv_dev->list);
1543			kfree(mv_dev);
1544		}
1545	}
1546}
1547
1548static int mvumi_probe_devices(struct mvumi_hba *mhba)
1549{
1550	int id, maxid;
1551	u64 wwid = 0;
1552	struct mvumi_device *mv_dev = NULL;
1553	struct mvumi_cmd *cmd = NULL;
1554	int found = 0;
1555
1556	cmd = mvumi_create_internal_cmd(mhba, 64);
1557	if (!cmd)
1558		return -1;
1559
1560	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1561		maxid = mhba->max_target_id;
1562	else
1563		maxid = mhba->max_target_id - 1;
1564
1565	for (id = 0; id < maxid; id++) {
1566		wwid = mvumi_inquiry(mhba, id, cmd);
1567		if (!wwid) {
1568			/* device did not respond, remove it */
1569			mvumi_remove_devices(mhba, id);
1570		} else {
1571			/* device responded, add it */
1572			found = mvumi_match_devices(mhba, id, wwid);
1573			if (!found) {
1574				mvumi_remove_devices(mhba, id);
1575				mv_dev = kzalloc(sizeof(struct mvumi_device),
1576								GFP_KERNEL);
1577				if (!mv_dev) {
1578					dev_err(&mhba->pdev->dev,
1579						"%s alloc mv_dev failed\n",
1580						__func__);
1581					continue;
1582				}
1583				mv_dev->id = id;
1584				mv_dev->wwid = wwid;
1585				mv_dev->sdev = NULL;
1586				INIT_LIST_HEAD(&mv_dev->list);
1587				list_add_tail(&mv_dev->list,
1588					      &mhba->mhba_dev_list);
1589				dev_dbg(&mhba->pdev->dev,
1590					"probe a new device(0:%d:0)"
1591					" wwid(%llx)\n", id, mv_dev->wwid);
1592			} else if (found == -1)
1593				return -1;
1594			else
1595				continue;
1596		}
1597	}
1598
1599	if (cmd)
1600		mvumi_delete_internal_cmd(mhba, cmd);
1601
1602	return 0;
1603}
1604
1605static int mvumi_rescan_bus(void *data)
1606{
1607	int ret = 0;
1608	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
1609	struct mvumi_device *mv_dev = NULL , *dev_next;
1610
1611	while (!kthread_should_stop()) {
1612
1613		set_current_state(TASK_INTERRUPTIBLE);
1614		if (!atomic_read(&mhba->pnp_count))
1615			schedule();
1616		msleep(1000);
1617		atomic_set(&mhba->pnp_count, 0);
1618		__set_current_state(TASK_RUNNING);
1619
1620		mutex_lock(&mhba->device_lock);
1621		ret = mvumi_probe_devices(mhba);
1622		if (!ret) {
1623			list_for_each_entry_safe(mv_dev, dev_next,
1624						 &mhba->mhba_dev_list, list) {
1625				if (mvumi_handle_hotplug(mhba, mv_dev->id,
1626							 DEVICE_ONLINE)) {
1627					dev_err(&mhba->pdev->dev,
1628						"%s add device(0:%d:0) failed, "
1629						"wwid(%llx) already exists\n",
1630						__func__,
1631						mv_dev->id, mv_dev->wwid);
1632					list_del_init(&mv_dev->list);
1633					kfree(mv_dev);
1634				} else {
1635					list_move_tail(&mv_dev->list,
1636						       &mhba->shost_dev_list);
1637				}
1638			}
1639		}
1640		mutex_unlock(&mhba->device_lock);
1641	}
1642	return 0;
1643}
1644
1645static void mvumi_proc_msg(struct mvumi_hba *mhba,
1646					struct mvumi_hotplug_event *param)
1647{
1648	u16 size = param->size;
1649	const unsigned long *ar_bitmap;
1650	const unsigned long *re_bitmap;
1651	int index;
1652
1653	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1654		index = -1;
1655		ar_bitmap = (const unsigned long *) param->bitmap;
1656		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1657
1658		mutex_lock(&mhba->sas_discovery_mutex);
1659		do {
1660			index = find_next_zero_bit(ar_bitmap, size, index + 1);
1661			if (index >= size)
1662				break;
1663			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1664		} while (1);
1665
1666		index = -1;
1667		do {
1668			index = find_next_zero_bit(re_bitmap, size, index + 1);
1669			if (index >= size)
1670				break;
1671			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1672		} while (1);
1673		mutex_unlock(&mhba->sas_discovery_mutex);
1674	}
1675}
1676
1677static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1678{
1679	if (msg == APICDB1_EVENT_GETEVENT) {
1680		int i, count;
1681		struct mvumi_driver_event *param = NULL;
1682		struct mvumi_event_req *er = buffer;
1683		count = er->count;
1684		if (count > MAX_EVENTS_RETURNED) {
1685			dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1686					" than max event count[0x%x].\n",
1687					count, MAX_EVENTS_RETURNED);
1688			return;
1689		}
1690		for (i = 0; i < count; i++) {
1691			param = &er->events[i];
1692			mvumi_show_event(mhba, param);
1693		}
1694	} else if (msg == APICDB1_HOST_GETEVENT) {
1695		mvumi_proc_msg(mhba, buffer);
1696	}
1697}
1698
1699static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1700{
1701	struct mvumi_cmd *cmd;
1702	struct mvumi_msg_frame *frame;
1703
1704	cmd = mvumi_create_internal_cmd(mhba, 512);
1705	if (!cmd)
1706		return -1;
1707	cmd->scmd = NULL;
1708	cmd->cmd_status = REQ_STATUS_PENDING;
1709	atomic_set(&cmd->sync_cmd, 0);
1710	frame = cmd->frame;
1711	frame->device_id = 0;
1712	frame->cmd_flag = CMD_FLAG_DATA_IN;
1713	frame->req_function = CL_FUN_SCSI_CMD;
1714	frame->cdb_length = MAX_COMMAND_SIZE;
1715	frame->data_transfer_length = sizeof(struct mvumi_event_req);
1716	memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1717	frame->cdb[0] = APICDB0_EVENT;
1718	frame->cdb[1] = msg;
1719	mvumi_issue_blocked_cmd(mhba, cmd);
1720
1721	if (cmd->cmd_status != SAM_STAT_GOOD)
1722		dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1723							cmd->cmd_status);
1724	else
1725		mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1726
1727	mvumi_delete_internal_cmd(mhba, cmd);
1728	return 0;
1729}
1730
1731static void mvumi_scan_events(struct work_struct *work)
1732{
1733	struct mvumi_events_wq *mu_ev =
1734		container_of(work, struct mvumi_events_wq, work_q);
1735
1736	mvumi_get_event(mu_ev->mhba, mu_ev->event);
1737	kfree(mu_ev);
1738}
1739
1740static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1741{
1742	struct mvumi_events_wq *mu_ev;
1743
1744	while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1745		if (isr_status & DRBL_BUS_CHANGE) {
1746			atomic_inc(&mhba->pnp_count);
1747			wake_up_process(mhba->dm_thread);
1748			isr_status &= ~(DRBL_BUS_CHANGE);
1749			continue;
1750		}
1751
1752		mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1753		if (mu_ev) {
1754			INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1755			mu_ev->mhba = mhba;
1756			mu_ev->event = APICDB1_EVENT_GETEVENT;
1757			isr_status &= ~(DRBL_EVENT_NOTIFY);
1758			mu_ev->param = NULL;
1759			schedule_work(&mu_ev->work_q);
1760		}
1761	}
1762}
1763
1764static void mvumi_handle_clob(struct mvumi_hba *mhba)
1765{
1766	struct mvumi_rsp_frame *ob_frame;
1767	struct mvumi_cmd *cmd;
1768	struct mvumi_ob_data *pool;
1769
1770	while (!list_empty(&mhba->free_ob_list)) {
1771		pool = list_first_entry(&mhba->free_ob_list,
1772						struct mvumi_ob_data, list);
1773		list_del_init(&pool->list);
1774		list_add_tail(&pool->list, &mhba->ob_data_list);
1775
1776		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1777		cmd = mhba->tag_cmd[ob_frame->tag];
1778
1779		atomic_dec(&mhba->fw_outstanding);
1780		mhba->tag_cmd[ob_frame->tag] = NULL;
1781		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1782		if (cmd->scmd)
1783			mvumi_complete_cmd(mhba, cmd, ob_frame);
1784		else
1785			mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1786	}
1787	mhba->instancet->fire_cmd(mhba, NULL);
1788}
1789
1790static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1791{
1792	struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1793	unsigned long flags;
1794
1795	spin_lock_irqsave(mhba->shost->host_lock, flags);
1796	if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1797		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1798		return IRQ_NONE;
1799	}
1800
1801	if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1802		if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1803			mvumi_launch_events(mhba, mhba->isr_status);
1804		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1805			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1806			mvumi_handshake(mhba);
1807		}
1808
1809	}
1810
1811	if (mhba->global_isr & mhba->regs->int_comaout)
1812		mvumi_receive_ob_list_entry(mhba);
1813
1814	mhba->global_isr = 0;
1815	mhba->isr_status = 0;
1816	if (mhba->fw_state == FW_STATE_STARTED)
1817		mvumi_handle_clob(mhba);
1818	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1819	return IRQ_HANDLED;
1820}
1821
1822static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1823						struct mvumi_cmd *cmd)
1824{
1825	void *ib_entry;
1826	struct mvumi_msg_frame *ib_frame;
1827	unsigned int frame_len;
1828
1829	ib_frame = cmd->frame;
1830	if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1831		dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1832		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1833	}
1834	if (tag_is_empty(&mhba->tag_pool)) {
1835		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1836		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1837	}
1838	mvumi_get_ib_list_entry(mhba, &ib_entry);
1839
1840	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1841	cmd->frame->request_id = mhba->io_seq++;
1842	cmd->request_id = cmd->frame->request_id;
1843	mhba->tag_cmd[cmd->frame->tag] = cmd;
1844	frame_len = sizeof(*ib_frame) +
1845				ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1846	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1847		struct mvumi_dyn_list_entry *dle;
1848		dle = ib_entry;
1849		dle->src_low_addr =
1850			cpu_to_le32(lower_32_bits(cmd->frame_phys));
1851		dle->src_high_addr =
1852			cpu_to_le32(upper_32_bits(cmd->frame_phys));
1853		dle->if_length = (frame_len >> 2) & 0xFFF;
1854	} else {
1855		memcpy(ib_entry, ib_frame, frame_len);
1856	}
1857	return MV_QUEUE_COMMAND_RESULT_SENT;
1858}
1859
1860static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1861{
1862	unsigned short num_of_cl_sent = 0;
1863	unsigned int count;
1864	enum mvumi_qc_result result;
1865
1866	if (cmd)
1867		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1868	count = mhba->instancet->check_ib_list(mhba);
1869	if (list_empty(&mhba->waiting_req_list) || !count)
1870		return;
1871
1872	do {
1873		cmd = list_first_entry(&mhba->waiting_req_list,
1874				       struct mvumi_cmd, queue_pointer);
1875		list_del_init(&cmd->queue_pointer);
1876		result = mvumi_send_command(mhba, cmd);
1877		switch (result) {
1878		case MV_QUEUE_COMMAND_RESULT_SENT:
1879			num_of_cl_sent++;
1880			break;
1881		case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1882			list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1883			if (num_of_cl_sent > 0)
1884				mvumi_send_ib_list_entry(mhba);
1885
1886			return;
1887		}
1888	} while (!list_empty(&mhba->waiting_req_list) && count--);
1889
1890	if (num_of_cl_sent > 0)
1891		mvumi_send_ib_list_entry(mhba);
1892}
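/*
 * Summary of the submission path above: mvumi_fire_cmd() queues the
 * command on waiting_req_list, asks ->check_ib_list() how many inbound
 * slots the firmware can currently accept, and then drains the wait list
 * through mvumi_send_command().  Each successful send takes one tag from
 * the LIFO tag pool and one inbound list entry; in dynamic-source mode
 * (HS_CAPABILITY_SUPPORT_DYN_SRC) only a descriptor pointing at the
 * command's frame is written into the slot, otherwise the whole frame is
 * copied.  The inbound doorbell (mvumi_send_ib_list_entry) is rung once
 * per batch, after the loop, rather than once per command.
 */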
1893
1894/**
1895 * mvumi_enable_intr -	Enables interrupts
1896 * @mhba:		Adapter soft state
1897 */
1898static void mvumi_enable_intr(struct mvumi_hba *mhba)
1899{
1900	unsigned int mask;
1901	struct mvumi_hw_regs *regs = mhba->regs;
1902
1903	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1904	mask = ioread32(regs->enpointa_mask_reg);
1905	mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1906	iowrite32(mask, regs->enpointa_mask_reg);
1907}
1908
1909/**
1910 * mvumi_disable_intr -	Disables interrupts
1911 * @mhba:		Adapter soft state
1912 */
1913static void mvumi_disable_intr(struct mvumi_hba *mhba)
1914{
1915	unsigned int mask;
1916	struct mvumi_hw_regs *regs = mhba->regs;
1917
1918	iowrite32(0, regs->arm_to_pciea_mask_reg);
1919	mask = ioread32(regs->enpointa_mask_reg);
1920	mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1921							regs->int_comaerr);
1922	iowrite32(mask, regs->enpointa_mask_reg);
1923}
1924
1925static int mvumi_clear_intr(void *extend)
1926{
1927	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1928	unsigned int status, isr_status = 0, tmp = 0;
1929	struct mvumi_hw_regs *regs = mhba->regs;
1930
1931	status = ioread32(regs->main_int_cause_reg);
1932	if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1933		return 1;
1934	if (unlikely(status & regs->int_comaerr)) {
1935		tmp = ioread32(regs->outb_isr_cause);
1936		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1937			if (tmp & regs->clic_out_err) {
1938				iowrite32(tmp & regs->clic_out_err,
1939							regs->outb_isr_cause);
1940			}
1941		} else {
1942			if (tmp & (regs->clic_in_err | regs->clic_out_err))
1943				iowrite32(tmp & (regs->clic_in_err |
1944						regs->clic_out_err),
1945						regs->outb_isr_cause);
1946		}
1947		status ^= mhba->regs->int_comaerr;
1948		/* inbound or outbound parity error, command will timeout */
1949	}
1950	if (status & regs->int_comaout) {
1951		tmp = ioread32(regs->outb_isr_cause);
1952		if (tmp & regs->clic_irq)
1953			iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1954	}
1955	if (status & regs->int_dl_cpu2pciea) {
1956		isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1957		if (isr_status)
1958			iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1959	}
1960
1961	mhba->global_isr = status;
1962	mhba->isr_status = isr_status;
1963
1964	return 0;
1965}
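/*
 * mvumi_clear_intr() is the ->clear_intr hook called at the top of the
 * ISR: it reads the main interrupt cause register, returns 1 when the
 * interrupt is not ours (or the register reads back all ones), acks the
 * individual cause bits (outbound completion, in/outbound parity errors,
 * doorbell) and latches the raw values into mhba->global_isr and
 * mhba->isr_status for mvumi_isr_handler() to act on with the host lock
 * still held.
 */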
1966
1967/**
1968 * mvumi_read_fw_status_reg - returns the current FW status value
1969 * @mhba:		Adapter soft state
1970 */
1971static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
1972{
1973	unsigned int status;
1974
1975	status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
1976	if (status)
1977		iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
1978	return status;
1979}
1980
1981static struct mvumi_instance_template mvumi_instance_9143 = {
1982	.fire_cmd = mvumi_fire_cmd,
1983	.enable_intr = mvumi_enable_intr,
1984	.disable_intr = mvumi_disable_intr,
1985	.clear_intr = mvumi_clear_intr,
1986	.read_fw_status_reg = mvumi_read_fw_status_reg,
1987	.check_ib_list = mvumi_check_ib_list_9143,
1988	.check_ob_list = mvumi_check_ob_list_9143,
1989	.reset_host = mvumi_reset_host_9143,
1990};
1991
1992static struct mvumi_instance_template mvumi_instance_9580 = {
1993	.fire_cmd = mvumi_fire_cmd,
1994	.enable_intr = mvumi_enable_intr,
1995	.disable_intr = mvumi_disable_intr,
1996	.clear_intr = mvumi_clear_intr,
1997	.read_fw_status_reg = mvumi_read_fw_status_reg,
1998	.check_ib_list = mvumi_check_ib_list_9580,
1999	.check_ob_list = mvumi_check_ob_list_9580,
2000	.reset_host = mvumi_reset_host_9580,
2001};
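/*
 * The 9143 and 9580 templates share every generic hook (fire_cmd,
 * enable/disable/clear_intr, read_fw_status_reg) and differ only in how
 * the inbound/outbound lists are polled and how the host is reset, which
 * is where the two controllers' register layouts and reset sequences
 * diverge.
 */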
2002
2003static int mvumi_slave_configure(struct scsi_device *sdev)
2004{
2005	struct mvumi_hba *mhba;
2006	unsigned char bitcount = sizeof(unsigned char) * 8;
2007
2008	mhba = (struct mvumi_hba *) sdev->host->hostdata;
2009	if (sdev->id >= mhba->max_target_id)
2010		return -EINVAL;
2011
2012	mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2013	return 0;
2014}
2015
2016/**
2017 * mvumi_build_frame -	Prepares a direct cdb (DCDB) command
2018 * @mhba:		Adapter soft state
2019 * @scmd:		SCSI command
2020 * @cmd:		Command to be prepared in
2021 *
2022 * This function prepares CDB commands. These are typically pass-through
2023 * commands to the devices.
2024 */
2025static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
2026				struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
2027{
2028	struct mvumi_msg_frame *pframe;
2029
2030	cmd->scmd = scmd;
2031	cmd->cmd_status = REQ_STATUS_PENDING;
2032	pframe = cmd->frame;
2033	pframe->device_id = ((unsigned short) scmd->device->id) |
2034				(((unsigned short) scmd->device->lun) << 8);
2035	pframe->cmd_flag = 0;
2036
2037	switch (scmd->sc_data_direction) {
2038	case DMA_NONE:
2039		pframe->cmd_flag |= CMD_FLAG_NON_DATA;
2040		break;
2041	case DMA_FROM_DEVICE:
2042		pframe->cmd_flag |= CMD_FLAG_DATA_IN;
2043		break;
2044	case DMA_TO_DEVICE:
2045		pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
2046		break;
2047	case DMA_BIDIRECTIONAL:
2048	default:
2049		dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2050			"cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2051		goto error;
2052	}
2053
2054	pframe->cdb_length = scmd->cmd_len;
2055	memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
2056	pframe->req_function = CL_FUN_SCSI_CMD;
2057	if (scsi_bufflen(scmd)) {
2058		if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2059			&pframe->sg_counts))
2060			goto error;
2061
2062		pframe->data_transfer_length = scsi_bufflen(scmd);
2063	} else {
2064		pframe->sg_counts = 0;
2065		pframe->data_transfer_length = 0;
2066	}
2067	return 0;
2068
2069error:
2070	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x24, 0);
2071	return -1;
2072}
2073
2074/**
2075 * mvumi_queue_command -	Queue entry point
2076 * @shost:			Scsi host to queue command on
2077 * @scmd:			SCSI command to be queued
 
2078 */
2079static int mvumi_queue_command(struct Scsi_Host *shost,
2080					struct scsi_cmnd *scmd)
2081{
2082	struct mvumi_cmd *cmd;
2083	struct mvumi_hba *mhba;
2084	unsigned long irq_flags;
2085
2086	spin_lock_irqsave(shost->host_lock, irq_flags);
 
2087
2088	mhba = (struct mvumi_hba *) shost->hostdata;
2089	scmd->result = 0;
2090	cmd = mvumi_get_cmd(mhba);
2091	if (unlikely(!cmd)) {
2092		spin_unlock_irqrestore(shost->host_lock, irq_flags);
2093		return SCSI_MLQUEUE_HOST_BUSY;
2094	}
2095
2096	if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2097		goto out_return_cmd;
2098
2099	cmd->scmd = scmd;
2100	mvumi_priv(scmd)->cmd_priv = cmd;
2101	mhba->instancet->fire_cmd(mhba, cmd);
2102	spin_unlock_irqrestore(shost->host_lock, irq_flags);
2103	return 0;
2104
2105out_return_cmd:
2106	mvumi_return_cmd(mhba, cmd);
2107	scsi_done(scmd);
2108	spin_unlock_irqrestore(shost->host_lock, irq_flags);
2109	return 0;
2110}
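/*
 * The queuecommand path above runs entirely under the host lock: a driver
 * command is taken from the preallocated pool, the firmware frame is
 * built (device id, data-direction flag, CDB copy, SG list) and the
 * command is handed to ->fire_cmd.  When the pool is empty the midlayer
 * is told SCSI_MLQUEUE_HOST_BUSY; when frame building fails the command
 * is completed immediately with the ILLEGAL_REQUEST sense set up in
 * mvumi_build_frame().
 */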
2111
2112static enum scsi_timeout_action mvumi_timed_out(struct scsi_cmnd *scmd)
2113{
2114	struct mvumi_cmd *cmd = mvumi_priv(scmd)->cmd_priv;
2115	struct Scsi_Host *host = scmd->device->host;
2116	struct mvumi_hba *mhba = shost_priv(host);
2117	unsigned long flags;
2118
2119	spin_lock_irqsave(mhba->shost->host_lock, flags);
2120
2121	if (mhba->tag_cmd[cmd->frame->tag]) {
2122		mhba->tag_cmd[cmd->frame->tag] = NULL;
2123		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2124	}
2125	if (!list_empty(&cmd->queue_pointer))
2126		list_del_init(&cmd->queue_pointer);
2127	else
2128		atomic_dec(&mhba->fw_outstanding);
2129
2130	scmd->result = (DID_ABORT << 16);
2131	mvumi_priv(scmd)->cmd_priv = NULL;
2132	if (scsi_bufflen(scmd)) {
2133		dma_unmap_sg(&mhba->pdev->dev, scsi_sglist(scmd),
2134			     scsi_sg_count(scmd),
2135			     scmd->sc_data_direction);
2136	}
2137	mvumi_return_cmd(mhba, cmd);
2138	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2139
2140	return SCSI_EH_NOT_HANDLED;
2141}
2142
2143static int
2144mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2145			sector_t capacity, int geom[])
2146{
2147	int heads, sectors;
2148	sector_t cylinders;
2149	unsigned long tmp;
2150
2151	heads = 64;
2152	sectors = 32;
2153	tmp = heads * sectors;
2154	cylinders = capacity;
2155	sector_div(cylinders, tmp);
2156
2157	if (capacity >= 0x200000) {
2158		heads = 255;
2159		sectors = 63;
2160		tmp = heads * sectors;
2161		cylinders = capacity;
2162		sector_div(cylinders, tmp);
2163	}
2164	geom[0] = heads;
2165	geom[1] = sectors;
2166	geom[2] = cylinders;
2167
2168	return 0;
2169}
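/*
 * Worked example of the geometry translation above: a 512 MiB disk
 * (0x100000 sectors, below the 0x200000 threshold) reports 64 heads and
 * 32 sectors per track, so cylinders = 1048576 / (64 * 32) = 512.  From
 * 0x200000 sectors (1 GiB) upwards the code switches to 255 heads and
 * 63 sectors per track, e.g. cylinders = 2097152 / (255 * 63) = 130 for
 * exactly 0x200000 sectors.
 */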
2170
2171static const struct scsi_host_template mvumi_template = {
2172
2173	.module = THIS_MODULE,
2174	.name = "Marvell Storage Controller",
2175	.slave_configure = mvumi_slave_configure,
2176	.queuecommand = mvumi_queue_command,
2177	.eh_timed_out = mvumi_timed_out,
2178	.eh_host_reset_handler = mvumi_host_reset,
2179	.bios_param = mvumi_bios_param,
2180	.dma_boundary = PAGE_SIZE - 1,
2181	.this_id = -1,
2182	.cmd_size = sizeof(struct mvumi_cmd_priv),
2183	};
2184
2185static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2186{
2187	void *base = NULL;
2188	struct mvumi_hw_regs *regs;
2189
2190	switch (mhba->pdev->device) {
2191	case PCI_DEVICE_ID_MARVELL_MV9143:
2192		mhba->mmio = mhba->base_addr[0];
2193		base = mhba->mmio;
2194		if (!mhba->regs) {
2195			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2196			if (mhba->regs == NULL)
2197				return -ENOMEM;
2198		}
2199		regs = mhba->regs;
2200
2201		/* For Arm */
2202		regs->ctrl_sts_reg          = base + 0x20104;
2203		regs->rstoutn_mask_reg      = base + 0x20108;
2204		regs->sys_soft_rst_reg      = base + 0x2010C;
2205		regs->main_int_cause_reg    = base + 0x20200;
2206		regs->enpointa_mask_reg     = base + 0x2020C;
2207		regs->rstoutn_en_reg        = base + 0xF1400;
2208		/* For Doorbell */
2209		regs->pciea_to_arm_drbl_reg = base + 0x20400;
2210		regs->arm_to_pciea_drbl_reg = base + 0x20408;
2211		regs->arm_to_pciea_mask_reg = base + 0x2040C;
2212		regs->pciea_to_arm_msg0     = base + 0x20430;
2213		regs->pciea_to_arm_msg1     = base + 0x20434;
2214		regs->arm_to_pciea_msg0     = base + 0x20438;
2215		regs->arm_to_pciea_msg1     = base + 0x2043C;
2216
2217		/* For Message Unit */
2218
2219		regs->inb_aval_count_basel  = base + 0x508;
2220		regs->inb_aval_count_baseh  = base + 0x50C;
2221		regs->inb_write_pointer     = base + 0x518;
2222		regs->inb_read_pointer      = base + 0x51C;
2223		regs->outb_coal_cfg         = base + 0x568;
2224		regs->outb_copy_basel       = base + 0x5B0;
2225		regs->outb_copy_baseh       = base + 0x5B4;
2226		regs->outb_copy_pointer     = base + 0x544;
2227		regs->outb_read_pointer     = base + 0x548;
2228		regs->outb_isr_cause        = base + 0x560;
2229		regs->outb_coal_cfg         = base + 0x568;
2230		/* Bit setting for HW */
2231		regs->int_comaout           = 1 << 8;
2232		regs->int_comaerr           = 1 << 6;
2233		regs->int_dl_cpu2pciea      = 1 << 1;
2234		regs->cl_pointer_toggle     = 1 << 12;
2235		regs->clic_irq              = 1 << 1;
2236		regs->clic_in_err           = 1 << 8;
2237		regs->clic_out_err          = 1 << 12;
2238		regs->cl_slot_num_mask      = 0xFFF;
2239		regs->int_drbl_int_mask     = 0x3FFFFFFF;
2240		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2241							regs->int_comaerr;
2242		break;
2243	case PCI_DEVICE_ID_MARVELL_MV9580:
2244		mhba->mmio = mhba->base_addr[2];
2245		base = mhba->mmio;
2246		if (!mhba->regs) {
2247			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2248			if (mhba->regs == NULL)
2249				return -ENOMEM;
2250		}
2251		regs = mhba->regs;
2252		/* For Arm */
2253		regs->ctrl_sts_reg          = base + 0x20104;
2254		regs->rstoutn_mask_reg      = base + 0x1010C;
2255		regs->sys_soft_rst_reg      = base + 0x10108;
2256		regs->main_int_cause_reg    = base + 0x10200;
2257		regs->enpointa_mask_reg     = base + 0x1020C;
2258		regs->rstoutn_en_reg        = base + 0xF1400;
2259
2260		/* For Doorbell */
2261		regs->pciea_to_arm_drbl_reg = base + 0x10460;
2262		regs->arm_to_pciea_drbl_reg = base + 0x10480;
2263		regs->arm_to_pciea_mask_reg = base + 0x10484;
2264		regs->pciea_to_arm_msg0     = base + 0x10400;
2265		regs->pciea_to_arm_msg1     = base + 0x10404;
2266		regs->arm_to_pciea_msg0     = base + 0x10420;
2267		regs->arm_to_pciea_msg1     = base + 0x10424;
2268
2269		/* For reset*/
2270		regs->reset_request         = base + 0x10108;
2271		regs->reset_enable          = base + 0x1010c;
2272
2273		/* For Message Unit */
2274		regs->inb_aval_count_basel  = base + 0x4008;
2275		regs->inb_aval_count_baseh  = base + 0x400C;
2276		regs->inb_write_pointer     = base + 0x4018;
2277		regs->inb_read_pointer      = base + 0x401C;
2278		regs->outb_copy_basel       = base + 0x4058;
2279		regs->outb_copy_baseh       = base + 0x405C;
2280		regs->outb_copy_pointer     = base + 0x406C;
2281		regs->outb_read_pointer     = base + 0x4070;
2282		regs->outb_coal_cfg         = base + 0x4080;
2283		regs->outb_isr_cause        = base + 0x4088;
2284		/* Bit setting for HW */
2285		regs->int_comaout           = 1 << 4;
2286		regs->int_dl_cpu2pciea      = 1 << 12;
2287		regs->int_comaerr           = 1 << 29;
2288		regs->cl_pointer_toggle     = 1 << 14;
2289		regs->cl_slot_num_mask      = 0x3FFF;
2290		regs->clic_irq              = 1 << 0;
2291		regs->clic_out_err          = 1 << 1;
2292		regs->int_drbl_int_mask     = 0x3FFFFFFF;
2293		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2294		break;
2295	default:
2296		return -1;
 
2297	}
2298
2299	return 0;
2300}
2301
2302/**
2303 * mvumi_init_fw -	Initializes the FW
2304 * @mhba:		Adapter soft state
2305 *
2306 * This is the main function for initializing firmware.
2307 */
2308static int mvumi_init_fw(struct mvumi_hba *mhba)
2309{
2310	int ret = 0;
2311
2312	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2313		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2314		return -EBUSY;
2315	}
2316	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2317	if (ret)
2318		goto fail_ioremap;
2319
2320	switch (mhba->pdev->device) {
2321	case PCI_DEVICE_ID_MARVELL_MV9143:
2322		mhba->instancet = &mvumi_instance_9143;
2323		mhba->io_seq = 0;
2324		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2325		mhba->request_id_enabled = 1;
2326		break;
2327	case PCI_DEVICE_ID_MARVELL_MV9580:
2328		mhba->instancet = &mvumi_instance_9580;
2329		mhba->io_seq = 0;
2330		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2331		break;
2332	default:
2333		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2334							mhba->pdev->device);
2335		mhba->instancet = NULL;
2336		ret = -EINVAL;
2337		goto fail_alloc_mem;
2338	}
2339	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
2340							mhba->pdev->device);
2341	ret = mvumi_cfg_hw_reg(mhba);
2342	if (ret) {
2343		dev_err(&mhba->pdev->dev,
2344			"failed to allocate memory for reg\n");
2345		ret = -ENOMEM;
2346		goto fail_alloc_mem;
2347	}
2348	mhba->handshake_page = dma_alloc_coherent(&mhba->pdev->dev,
2349			HSP_MAX_SIZE, &mhba->handshake_page_phys, GFP_KERNEL);
2350	if (!mhba->handshake_page) {
2351		dev_err(&mhba->pdev->dev,
2352			"failed to allocate memory for handshake\n");
2353		ret = -ENOMEM;
2354		goto fail_alloc_page;
2355	}
2356
2357	if (mvumi_start(mhba)) {
2358		ret = -EINVAL;
2359		goto fail_ready_state;
2360	}
2361	ret = mvumi_alloc_cmds(mhba);
2362	if (ret)
2363		goto fail_ready_state;
2364
2365	return 0;
2366
2367fail_ready_state:
2368	mvumi_release_mem_resource(mhba);
2369	dma_free_coherent(&mhba->pdev->dev, HSP_MAX_SIZE,
2370		mhba->handshake_page, mhba->handshake_page_phys);
2371fail_alloc_page:
2372	kfree(mhba->regs);
2373fail_alloc_mem:
2374	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2375fail_ioremap:
2376	pci_release_regions(mhba->pdev);
2377
2378	return ret;
2379}
2380
2381/**
2382 * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
2383 * @mhba:		Adapter soft state
2384 */
2385static int mvumi_io_attach(struct mvumi_hba *mhba)
2386{
2387	struct Scsi_Host *host = mhba->shost;
2388	struct scsi_device *sdev = NULL;
2389	int ret;
2390	unsigned int max_sg = (mhba->ib_max_size -
2391		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
2392
2393	host->irq = mhba->pdev->irq;
2394	host->unique_id = mhba->unique_id;
2395	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2396	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2397	host->max_sectors = mhba->max_transfer_size / 512;
2398	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2399	host->max_id = mhba->max_target_id;
2400	host->max_cmd_len = MAX_COMMAND_SIZE;
 
2401
2402	ret = scsi_add_host(host, &mhba->pdev->dev);
2403	if (ret) {
2404		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2405		return ret;
2406	}
2407	mhba->fw_flag |= MVUMI_FW_ATTACH;
2408
2409	mutex_lock(&mhba->sas_discovery_mutex);
2410	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2411		ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2412	else
2413		ret = 0;
2414	if (ret) {
2415		dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2416		mutex_unlock(&mhba->sas_discovery_mutex);
2417		goto fail_add_device;
2418	}
2419
2420	mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2421						mhba, "mvumi_scanthread");
2422	if (IS_ERR(mhba->dm_thread)) {
2423		dev_err(&mhba->pdev->dev,
2424			"failed to create device scan thread\n");
2425		ret = PTR_ERR(mhba->dm_thread);
2426		mutex_unlock(&mhba->sas_discovery_mutex);
2427		goto fail_create_thread;
2428	}
2429	atomic_set(&mhba->pnp_count, 1);
2430	wake_up_process(mhba->dm_thread);
2431
2432	mutex_unlock(&mhba->sas_discovery_mutex);
2433	return 0;
2434
2435fail_create_thread:
2436	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2437		sdev = scsi_device_lookup(mhba->shost, 0,
2438						mhba->max_target_id - 1, 0);
2439	if (sdev) {
2440		scsi_remove_device(sdev);
2441		scsi_device_put(sdev);
2442	}
2443fail_add_device:
2444	scsi_remove_host(mhba->shost);
2445	return ret;
2446}
2447
2448/**
2449 * mvumi_probe_one -	PCI hotplug entry point
2450 * @pdev:		PCI device structure
2451 * @id:			PCI ids of supported hotplugged adapter
2452 */
2453static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2454{
2455	struct Scsi_Host *host;
2456	struct mvumi_hba *mhba;
2457	int ret;
2458
2459	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2460			pdev->vendor, pdev->device, pdev->subsystem_vendor,
2461			pdev->subsystem_device);
2462
2463	ret = pci_enable_device(pdev);
2464	if (ret)
2465		return ret;
2466
2467	ret = mvumi_pci_set_master(pdev);
2468	if (ret)
2469		goto fail_set_dma_mask;
2470
2471	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2472	if (!host) {
2473		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
2474		ret = -ENOMEM;
2475		goto fail_alloc_instance;
2476	}
2477	mhba = shost_priv(host);
2478
2479	INIT_LIST_HEAD(&mhba->cmd_pool);
2480	INIT_LIST_HEAD(&mhba->ob_data_list);
2481	INIT_LIST_HEAD(&mhba->free_ob_list);
2482	INIT_LIST_HEAD(&mhba->res_list);
2483	INIT_LIST_HEAD(&mhba->waiting_req_list);
2484	mutex_init(&mhba->device_lock);
2485	INIT_LIST_HEAD(&mhba->mhba_dev_list);
2486	INIT_LIST_HEAD(&mhba->shost_dev_list);
2487	atomic_set(&mhba->fw_outstanding, 0);
2488	init_waitqueue_head(&mhba->int_cmd_wait_q);
2489	mutex_init(&mhba->sas_discovery_mutex);
2490
2491	mhba->pdev = pdev;
2492	mhba->shost = host;
2493	mhba->unique_id = pci_dev_id(pdev);
2494
2495	ret = mvumi_init_fw(mhba);
2496	if (ret)
2497		goto fail_init_fw;
2498
2499	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2500				"mvumi", mhba);
2501	if (ret) {
2502		dev_err(&pdev->dev, "failed to register IRQ\n");
2503		goto fail_init_irq;
2504	}
2505
2506	mhba->instancet->enable_intr(mhba);
2507	pci_set_drvdata(pdev, mhba);
2508
2509	ret = mvumi_io_attach(mhba);
2510	if (ret)
2511		goto fail_io_attach;
2512
2513	mvumi_backup_bar_addr(mhba);
2514	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
2515
2516	return 0;
2517
2518fail_io_attach:
2519	mhba->instancet->disable_intr(mhba);
2520	free_irq(mhba->pdev->irq, mhba);
2521fail_init_irq:
2522	mvumi_release_fw(mhba);
2523fail_init_fw:
2524	scsi_host_put(host);
2525
2526fail_alloc_instance:
2527fail_set_dma_mask:
2528	pci_disable_device(pdev);
2529
2530	return ret;
2531}
2532
2533static void mvumi_detach_one(struct pci_dev *pdev)
2534{
2535	struct Scsi_Host *host;
2536	struct mvumi_hba *mhba;
2537
2538	mhba = pci_get_drvdata(pdev);
2539	if (mhba->dm_thread) {
2540		kthread_stop(mhba->dm_thread);
2541		mhba->dm_thread = NULL;
2542	}
2543
2544	mvumi_detach_devices(mhba);
2545	host = mhba->shost;
2546	scsi_remove_host(mhba->shost);
2547	mvumi_flush_cache(mhba);
2548
2549	mhba->instancet->disable_intr(mhba);
2550	free_irq(mhba->pdev->irq, mhba);
2551	mvumi_release_fw(mhba);
2552	scsi_host_put(host);
2553	pci_disable_device(pdev);
2554	dev_dbg(&pdev->dev, "driver is removed!\n");
2555}
2556
2557/**
2558 * mvumi_shutdown -	Shutdown entry point
2559 * @pdev:		PCI device structure
2560 */
2561static void mvumi_shutdown(struct pci_dev *pdev)
2562{
2563	struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2564
2565	mvumi_flush_cache(mhba);
2566}
2567
2568static int __maybe_unused mvumi_suspend(struct device *dev)
2569{
2570	struct pci_dev *pdev = to_pci_dev(dev);
2571	struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2572
2573	mvumi_flush_cache(mhba);
2574
2575	mhba->instancet->disable_intr(mhba);
2576	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2577
2578	return 0;
2579}
2580
2581static int __maybe_unused mvumi_resume(struct device *dev)
2582{
2583	int ret;
2584	struct pci_dev *pdev = to_pci_dev(dev);
2585	struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2586
2587	ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
2588	if (ret)
2589		goto fail;
2590	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2591	if (ret)
2592		goto release_regions;
2593
2594	if (mvumi_cfg_hw_reg(mhba)) {
2595		ret = -EINVAL;
2596		goto unmap_pci_addr;
2597	}
2598
2599	mhba->mmio = mhba->base_addr[0];
2600	mvumi_reset(mhba);
2601
2602	if (mvumi_start(mhba)) {
2603		ret = -EINVAL;
2604		goto unmap_pci_addr;
2605	}
2606
2607	mhba->instancet->enable_intr(mhba);
2608
2609	return 0;
2610
2611unmap_pci_addr:
2612	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2613release_regions:
2614	pci_release_regions(pdev);
2615fail:
 
2616
2617	return ret;
2618}
2619
2620static SIMPLE_DEV_PM_OPS(mvumi_pm_ops, mvumi_suspend, mvumi_resume);
2621
2622static struct pci_driver mvumi_pci_driver = {
2623
2624	.name = MV_DRIVER_NAME,
2625	.id_table = mvumi_pci_table,
2626	.probe = mvumi_probe_one,
2627	.remove = mvumi_detach_one,
2628	.shutdown = mvumi_shutdown,
2629	.driver.pm = &mvumi_pm_ops,
2630	};
2631
2632	module_pci_driver(mvumi_pci_driver);

v4.10.11
 
   1/*
   2 * Marvell UMI driver
   3 *
   4 * Copyright 2011 Marvell. <jyli@marvell.com>
   5 *
   6 * This file is licensed under GPLv2.
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public License as
  10 * published by the Free Software Foundation; version 2 of the
  11 * License.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  21 * USA
  22*/
  23
  24#include <linux/kernel.h>
  25#include <linux/module.h>
  26#include <linux/moduleparam.h>
  27#include <linux/init.h>
  28#include <linux/device.h>
  29#include <linux/pci.h>
  30#include <linux/list.h>
  31#include <linux/spinlock.h>
  32#include <linux/interrupt.h>
  33#include <linux/delay.h>
  34#include <linux/ktime.h>
  35#include <linux/blkdev.h>
  36#include <linux/io.h>
  37#include <scsi/scsi.h>
  38#include <scsi/scsi_cmnd.h>
  39#include <scsi/scsi_device.h>
  40#include <scsi/scsi_host.h>
  41#include <scsi/scsi_transport.h>
  42#include <scsi/scsi_eh.h>
  43#include <linux/uaccess.h>
  44#include <linux/kthread.h>
  45
  46#include "mvumi.h"
  47
  48MODULE_LICENSE("GPL");
  49MODULE_AUTHOR("jyli@marvell.com");
  50MODULE_DESCRIPTION("Marvell UMI Driver");
  51
  52static const struct pci_device_id mvumi_pci_table[] = {
  53	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
  54	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
  55	{ 0 }
  56};
  57
  58MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
  59
  60static void tag_init(struct mvumi_tag *st, unsigned short size)
  61{
  62	unsigned short i;
  63	BUG_ON(size != st->size);
  64	st->top = size;
  65	for (i = 0; i < size; i++)
  66		st->stack[i] = size - 1 - i;
  67}
  68
  69static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
  70{
  71	BUG_ON(st->top <= 0);
  72	return st->stack[--st->top];
  73}
  74
  75static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
  76							unsigned short tag)
  77{
  78	BUG_ON(st->top >= st->size);
  79	st->stack[st->top++] = tag;
  80}
  81
  82static bool tag_is_empty(struct mvumi_tag *st)
  83{
  84	if (st->top == 0)
  85		return 1;
  86	else
  87		return 0;
  88}
  89
  90static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
  91{
  92	int i;
  93
  94	for (i = 0; i < MAX_BASE_ADDRESS; i++)
  95		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
  96								addr_array[i])
  97			pci_iounmap(dev, addr_array[i]);
  98}
  99
 100static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
 101{
 102	int i;
 103
 104	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 105		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
 106			addr_array[i] = pci_iomap(dev, i, 0);
 107			if (!addr_array[i]) {
 108				dev_err(&dev->dev, "failed to map Bar[%d]\n",
 109									i);
 110				mvumi_unmap_pci_addr(dev, addr_array);
 111				return -ENOMEM;
 112			}
 113		} else
 114			addr_array[i] = NULL;
 115
 116		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
 117	}
 118
 119	return 0;
 120}
 121
 122static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 123				enum resource_type type, unsigned int size)
 124{
 125	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
 126
 127	if (!res) {
 128		dev_err(&mhba->pdev->dev,
 129			"Failed to allocate memory for resource manager.\n");
 130		return NULL;
 131	}
 132
 133	switch (type) {
 134	case RESOURCE_CACHED_MEMORY:
 135		res->virt_addr = kzalloc(size, GFP_ATOMIC);
 136		if (!res->virt_addr) {
 137			dev_err(&mhba->pdev->dev,
 138				"unable to allocate memory,size = %d.\n", size);
 139			kfree(res);
 140			return NULL;
 141		}
 142		break;
 143
 144	case RESOURCE_UNCACHED_MEMORY:
 145		size = round_up(size, 8);
 146		res->virt_addr = pci_zalloc_consistent(mhba->pdev, size,
 147						       &res->bus_addr);
 
 148		if (!res->virt_addr) {
 149			dev_err(&mhba->pdev->dev,
 150					"unable to allocate consistent mem,"
 151							"size = %d.\n", size);
 152			kfree(res);
 153			return NULL;
 154		}
 155		break;
 156
 157	default:
 158		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
 159		kfree(res);
 160		return NULL;
 161	}
 162
 163	res->type = type;
 164	res->size = size;
 165	INIT_LIST_HEAD(&res->entry);
 166	list_add_tail(&res->entry, &mhba->res_list);
 167
 168	return res;
 169}
 170
 171static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
 172{
 173	struct mvumi_res *res, *tmp;
 174
 175	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
 176		switch (res->type) {
 177		case RESOURCE_UNCACHED_MEMORY:
 178			pci_free_consistent(mhba->pdev, res->size,
 179						res->virt_addr, res->bus_addr);
 180			break;
 181		case RESOURCE_CACHED_MEMORY:
 182			kfree(res->virt_addr);
 183			break;
 184		default:
 185			dev_err(&mhba->pdev->dev,
 186				"unknown resource type %d\n", res->type);
 187			break;
 188		}
 189		list_del(&res->entry);
 190		kfree(res);
 191	}
 192	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
 193}
 194
 195/**
 196 * mvumi_make_sgl -	Prepares SGL
 197 * @mhba:		Adapter soft state
 198 * @scmd:		SCSI command from the mid-layer
 199 * @sgl_p:		SGL to be filled in
 200 * @sg_count:		return the number of SG elements
 201 *
 202 * If successful, this function returns 0; otherwise, it returns -1.
 203 */
 204static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 205					void *sgl_p, unsigned char *sg_count)
 206{
 207	struct scatterlist *sg;
 208	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
 209	unsigned int i;
 210	unsigned int sgnum = scsi_sg_count(scmd);
 211	dma_addr_t busaddr;
 212
 213	if (sgnum) {
 214		sg = scsi_sglist(scmd);
 215		*sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
 216				(int) scmd->sc_data_direction);
 217		if (*sg_count > mhba->max_sge) {
 218			dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
 219						"than max sg[0x%x].\n",
 220						*sg_count, mhba->max_sge);
 221			return -1;
 222		}
 223		for (i = 0; i < *sg_count; i++) {
 224			busaddr = sg_dma_address(&sg[i]);
 225			m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 226			m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 227			m_sg->flags = 0;
 228			sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
 229			if ((i + 1) == *sg_count)
 230				m_sg->flags |= 1U << mhba->eot_flag;
 231
 232			sgd_inc(mhba, m_sg);
 233		}
 234	} else {
 235		scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
 236			pci_map_single(mhba->pdev, scsi_sglist(scmd),
 237				scsi_bufflen(scmd),
 238				(int) scmd->sc_data_direction)
 239			: 0;
 240		busaddr = scmd->SCp.dma_handle;
 241		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 242		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 243		m_sg->flags = 1U << mhba->eot_flag;
 244		sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
 245		*sg_count = 1;
 246	}
 247
 248	return 0;
 249}
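/*
 * Note on the SG list built above: every descriptor carries the 64-bit
 * DMA address split into baseaddr_l/baseaddr_h, and the last descriptor
 * is flagged with an end-of-table bit at position mhba->eot_flag (bit 22
 * when the firmware advertises HS_CAPABILITY_SUPPORT_COMPACT_SG, bit 27
 * otherwise, as chosen in mvumi_hs_process_page()).  Commands without a
 * scatterlist still get a single descriptor, so *sg_count is always set.
 */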
 250
 251static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
 252							unsigned int size)
 253{
 254	struct mvumi_sgl *m_sg;
 255	void *virt_addr;
 256	dma_addr_t phy_addr;
 257
 258	if (size == 0)
 259		return 0;
 260
 261	virt_addr = pci_zalloc_consistent(mhba->pdev, size, &phy_addr);
 
 262	if (!virt_addr)
 263		return -1;
 264
 265	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 266	cmd->frame->sg_counts = 1;
 267	cmd->data_buf = virt_addr;
 268
 269	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
 270	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
 271	m_sg->flags = 1U << mhba->eot_flag;
 272	sgd_setsz(mhba, m_sg, cpu_to_le32(size));
 273
 274	return 0;
 275}
 276
 277static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
 278				unsigned int buf_size)
 279{
 280	struct mvumi_cmd *cmd;
 281
 282	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 283	if (!cmd) {
 284		dev_err(&mhba->pdev->dev, "failed to create a internal cmd\n");
 285		return NULL;
 286	}
 287	INIT_LIST_HEAD(&cmd->queue_pointer);
 288
 289	cmd->frame = pci_alloc_consistent(mhba->pdev,
 290				mhba->ib_max_size, &cmd->frame_phys);
 291	if (!cmd->frame) {
 292		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
 293			" frame,size = %d.\n", mhba->ib_max_size);
 294		kfree(cmd);
 295		return NULL;
 296	}
 297
 298	if (buf_size) {
 299		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
 300			dev_err(&mhba->pdev->dev, "failed to allocate memory"
 301						" for internal frame\n");
 302			pci_free_consistent(mhba->pdev, mhba->ib_max_size,
 303					cmd->frame, cmd->frame_phys);
 304			kfree(cmd);
 305			return NULL;
 306		}
 307	} else
 308		cmd->frame->sg_counts = 0;
 309
 310	return cmd;
 311}
 312
 313static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
 314						struct mvumi_cmd *cmd)
 315{
 316	struct mvumi_sgl *m_sg;
 317	unsigned int size;
 318	dma_addr_t phy_addr;
 319
 320	if (cmd && cmd->frame) {
 321		if (cmd->frame->sg_counts) {
 322			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 323			sgd_getsz(mhba, m_sg, size);
 324
 325			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
 326				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 327
 328			pci_free_consistent(mhba->pdev, size, cmd->data_buf,
 329								phy_addr);
 330		}
 331		pci_free_consistent(mhba->pdev, mhba->ib_max_size,
 332				cmd->frame, cmd->frame_phys);
 333		kfree(cmd);
 334	}
 335}
 336
 337/**
 338 * mvumi_get_cmd -	Get a command from the free pool
 339 * @mhba:		Adapter soft state
 340 *
 341 * Returns a free command from the pool
 342 */
 343static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
 344{
 345	struct mvumi_cmd *cmd = NULL;
 346
 347	if (likely(!list_empty(&mhba->cmd_pool))) {
 348		cmd = list_entry((&mhba->cmd_pool)->next,
 349				struct mvumi_cmd, queue_pointer);
 350		list_del_init(&cmd->queue_pointer);
 351	} else
 352		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
 353
 354	return cmd;
 355}
 356
 357/**
 358 * mvumi_return_cmd -	Return a cmd to free command pool
 359 * @mhba:		Adapter soft state
 360 * @cmd:		Command packet to be returned to free command pool
 361 */
 362static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
 363						struct mvumi_cmd *cmd)
 364{
 365	cmd->scmd = NULL;
 366	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 367}
 368
 369/**
 370 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 371 * @mhba:		Adapter soft state
 372 */
 373static void mvumi_free_cmds(struct mvumi_hba *mhba)
 374{
 375	struct mvumi_cmd *cmd;
 376
 377	while (!list_empty(&mhba->cmd_pool)) {
 378		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 379							queue_pointer);
 380		list_del(&cmd->queue_pointer);
 381		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 382			kfree(cmd->frame);
 383		kfree(cmd);
 384	}
 385}
 386
 387/**
 388 * mvumi_alloc_cmds -	Allocates the command packets
 389 * @mhba:		Adapter soft state
 390 *
 391 */
 392static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
 393{
 394	int i;
 395	struct mvumi_cmd *cmd;
 396
 397	for (i = 0; i < mhba->max_io; i++) {
 398		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 399		if (!cmd)
 400			goto err_exit;
 401
 402		INIT_LIST_HEAD(&cmd->queue_pointer);
 403		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 404		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 405			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
 406			cmd->frame_phys = mhba->ib_frame_phys
 407						+ i * mhba->ib_max_size;
 408		} else
 409			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
 410		if (!cmd->frame)
 411			goto err_exit;
 412	}
 413	return 0;
 414
 415err_exit:
 416	dev_err(&mhba->pdev->dev,
 417			"failed to allocate memory for cmd[0x%x].\n", i);
 418	while (!list_empty(&mhba->cmd_pool)) {
 419		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 420						queue_pointer);
 421		list_del(&cmd->queue_pointer);
 422		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 423			kfree(cmd->frame);
 424		kfree(cmd);
 425	}
 426	return -ENOMEM;
 427}
 428
 429static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
 430{
 431	unsigned int ib_rp_reg;
 432	struct mvumi_hw_regs *regs = mhba->regs;
 433
 434	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
 435
 436	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
 437			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
 438			((ib_rp_reg & regs->cl_pointer_toggle)
 439			 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
 440		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
 441		return 0;
 442	}
 443	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
 444		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
 445		return 0;
 446	} else {
 447		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
 448	}
 449}
 450
 451static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
 452{
 453	unsigned int count;
 454	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
 455		return 0;
 456	count = ioread32(mhba->ib_shadow);
 457	if (count == 0xffff)
 458		return 0;
 459	return count;
 460}
 461
 462static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
 463{
 464	unsigned int cur_ib_entry;
 465
 466	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
 467	cur_ib_entry++;
 468	if (cur_ib_entry >= mhba->list_num_io) {
 469		cur_ib_entry -= mhba->list_num_io;
 470		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
 471	}
 472	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
 473	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
 474	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 475		*ib_entry = mhba->ib_list + cur_ib_entry *
 476				sizeof(struct mvumi_dyn_list_entry);
 477	} else {
 478		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
 479	}
 480	atomic_inc(&mhba->fw_outstanding);
 481}
 482
 483static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
 484{
 485	iowrite32(0xffff, mhba->ib_shadow);
 486	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
 487}
 488
 489static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
 490		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
 491{
 492	unsigned short tag, request_id;
 493
 494	udelay(1);
 495	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 496	request_id = p_outb_frame->request_id;
 497	tag = p_outb_frame->tag;
 498	if (tag > mhba->tag_pool.size) {
 499		dev_err(&mhba->pdev->dev, "ob frame data error\n");
 500		return -1;
 501	}
 502	if (mhba->tag_cmd[tag] == NULL) {
 503		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
 504		return -1;
 505	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
 506						mhba->request_id_enabled) {
 507			dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
 508					"cmd request ID:0x%x\n", request_id,
 509					mhba->tag_cmd[tag]->request_id);
 510			return -1;
 511	}
 512
 513	return 0;
 514}
 515
 516static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
 517			unsigned int *cur_obf, unsigned int *assign_obf_end)
 518{
 519	unsigned int ob_write, ob_write_shadow;
 520	struct mvumi_hw_regs *regs = mhba->regs;
 521
 522	do {
 523		ob_write = ioread32(regs->outb_copy_pointer);
 524		ob_write_shadow = ioread32(mhba->ob_shadow);
 525	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
 526
 527	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 528	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 529
 530	if ((ob_write & regs->cl_pointer_toggle) !=
 531			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
 532		*assign_obf_end += mhba->list_num_io;
 533	}
 534	return 0;
 535}
 536
 537static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
 538			unsigned int *cur_obf, unsigned int *assign_obf_end)
 539{
 540	unsigned int ob_write;
 541	struct mvumi_hw_regs *regs = mhba->regs;
 542
 543	ob_write = ioread32(regs->outb_read_pointer);
 544	ob_write = ioread32(regs->outb_copy_pointer);
 545	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 546	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 547	if (*assign_obf_end < *cur_obf)
 548		*assign_obf_end += mhba->list_num_io;
 549	else if (*assign_obf_end == *cur_obf)
 550		return -1;
 551	return 0;
 552}
 553
 554static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
 555{
 556	unsigned int cur_obf, assign_obf_end, i;
 557	struct mvumi_ob_data *ob_data;
 558	struct mvumi_rsp_frame *p_outb_frame;
 559	struct mvumi_hw_regs *regs = mhba->regs;
 560
 561	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
 562		return;
 563
 564	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
 565		cur_obf++;
 566		if (cur_obf >= mhba->list_num_io) {
 567			cur_obf -= mhba->list_num_io;
 568			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 569		}
 570
 571		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 572
 573		/* Copy pointer may point to entry in outbound list
 574		*  before entry has valid data
 575		*/
 576		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
 577			mhba->tag_cmd[p_outb_frame->tag] == NULL ||
 578			p_outb_frame->request_id !=
 579				mhba->tag_cmd[p_outb_frame->tag]->request_id))
 580			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
 581				continue;
 582
 583		if (!list_empty(&mhba->ob_data_list)) {
 584			ob_data = (struct mvumi_ob_data *)
 585				list_first_entry(&mhba->ob_data_list,
 586					struct mvumi_ob_data, list);
 587			list_del_init(&ob_data->list);
 588		} else {
 589			ob_data = NULL;
 590			if (cur_obf == 0) {
 591				cur_obf = mhba->list_num_io - 1;
 592				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 593			} else
 594				cur_obf -= 1;
 595			break;
 596		}
 597
 598		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
 599		p_outb_frame->tag = 0xff;
 600
 601		list_add_tail(&ob_data->list, &mhba->free_ob_list);
 602	}
 603	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
 604	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
 605	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
 606}
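/*
 * The outbound list is consumed as a ring: the low bits of ob_cur_slot
 * hold the slot index (cl_slot_num_mask) and one extra bit
 * (cl_pointer_toggle) is flipped every time the index wraps past
 * list_num_io.  Each valid response frame is copied into a mvumi_ob_data
 * buffer and parked on free_ob_list; mvumi_handle_clob() later walks that
 * list, releases the tag and completes the matching SCSI or internal
 * command.
 */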
 607
 608static void mvumi_reset(struct mvumi_hba *mhba)
 609{
 610	struct mvumi_hw_regs *regs = mhba->regs;
 611
 612	iowrite32(0, regs->enpointa_mask_reg);
 613	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
 614		return;
 615
 616	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
 617}
 618
 619static unsigned char mvumi_start(struct mvumi_hba *mhba);
 620
 621static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
 622{
 623	mhba->fw_state = FW_STATE_ABORT;
 624	mvumi_reset(mhba);
 625
 626	if (mvumi_start(mhba))
 627		return FAILED;
 628	else
 629		return SUCCESS;
 630}
 631
 632static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
 633{
 634	struct mvumi_hw_regs *regs = mhba->regs;
 635	u32 tmp;
 636	unsigned long before;
 637	before = jiffies;
 638
 639	iowrite32(0, regs->enpointa_mask_reg);
 640	tmp = ioread32(regs->arm_to_pciea_msg1);
 641	while (tmp != HANDSHAKE_READYSTATE) {
 642		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
 643		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
 644			dev_err(&mhba->pdev->dev,
 645				"FW reset failed [0x%x].\n", tmp);
 646			return FAILED;
 647		}
 648
 649		msleep(500);
 650		rmb();
 651		tmp = ioread32(regs->arm_to_pciea_msg1);
 652	}
 653
 654	return SUCCESS;
 655}
 656
 657static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
 658{
 659	unsigned char i;
 660
 661	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 662		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
 663						&mhba->pci_base[i]);
 664	}
 665}
 666
 667static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
 668{
 669	unsigned char i;
 670
 671	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 672		if (mhba->pci_base[i])
 673			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
 674						mhba->pci_base[i]);
 675	}
 676}
 677
 678static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
 679{
 680	unsigned int ret = 0;
 
 681	pci_set_master(pdev);
 682
 683	if (IS_DMA64) {
 684		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
 685			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 686	} else
 687		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 688
 689	return ret;
 690}
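/*
 * DMA mask selection: when the build supports 64-bit DMA (IS_DMA64) the
 * driver first asks for a 64-bit mask and falls back to a 32-bit mask if
 * that fails; otherwise it requests the 32-bit mask directly.
 */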
 691
 692static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
 693{
 694	mhba->fw_state = FW_STATE_ABORT;
 695
 696	iowrite32(0, mhba->regs->reset_enable);
 697	iowrite32(0xf, mhba->regs->reset_request);
 698
 699	iowrite32(0x10, mhba->regs->reset_enable);
 700	iowrite32(0x10, mhba->regs->reset_request);
 701	msleep(100);
 702	pci_disable_device(mhba->pdev);
 703
 704	if (pci_enable_device(mhba->pdev)) {
 705		dev_err(&mhba->pdev->dev, "enable device failed\n");
 706		return FAILED;
 707	}
 708	if (mvumi_pci_set_master(mhba->pdev)) {
 709		dev_err(&mhba->pdev->dev, "set master failed\n");
 710		return FAILED;
 711	}
 712	mvumi_restore_bar_addr(mhba);
 713	if (mvumi_wait_for_fw(mhba) == FAILED)
 714		return FAILED;
 715
 716	return mvumi_wait_for_outstanding(mhba);
 717}
 718
 719static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
 720{
 721	return mvumi_wait_for_outstanding(mhba);
 722}
 723
 724static int mvumi_host_reset(struct scsi_cmnd *scmd)
 725{
 726	struct mvumi_hba *mhba;
 727
 728	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 729
 730	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
 731			scmd->serial_number, scmd->cmnd[0], scmd->retries);
 732
 733	return mhba->instancet->reset_host(mhba);
 734}
 735
 736static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
 737						struct mvumi_cmd *cmd)
 738{
 739	unsigned long flags;
 740
 741	cmd->cmd_status = REQ_STATUS_PENDING;
 742
 743	if (atomic_read(&cmd->sync_cmd)) {
 744		dev_err(&mhba->pdev->dev,
 745			"last blocked cmd not finished, sync_cmd = %d\n",
 746						atomic_read(&cmd->sync_cmd));
 747		BUG_ON(1);
 748		return -1;
 749	}
 750	atomic_inc(&cmd->sync_cmd);
 751	spin_lock_irqsave(mhba->shost->host_lock, flags);
 752	mhba->instancet->fire_cmd(mhba, cmd);
 753	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 754
 755	wait_event_timeout(mhba->int_cmd_wait_q,
 756		(cmd->cmd_status != REQ_STATUS_PENDING),
 757		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
 758
 759	/* command timeout */
 760	if (atomic_read(&cmd->sync_cmd)) {
 761		spin_lock_irqsave(mhba->shost->host_lock, flags);
 762		atomic_dec(&cmd->sync_cmd);
 763		if (mhba->tag_cmd[cmd->frame->tag]) {
 764			mhba->tag_cmd[cmd->frame->tag] = 0;
 765			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
 766							cmd->frame->tag);
 767			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
 768		}
 769		if (!list_empty(&cmd->queue_pointer)) {
 770			dev_warn(&mhba->pdev->dev,
 771				"TIMEOUT:A internal command doesn't send!\n");
 772			list_del_init(&cmd->queue_pointer);
 773		} else
 774			atomic_dec(&mhba->fw_outstanding);
 775
 776		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 777	}
 778	return 0;
 779}
 780
 781static void mvumi_release_fw(struct mvumi_hba *mhba)
 782{
 783	mvumi_free_cmds(mhba);
 784	mvumi_release_mem_resource(mhba);
 785	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
 786	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
 787		mhba->handshake_page, mhba->handshake_page_phys);
 788	kfree(mhba->regs);
 789	pci_release_regions(mhba->pdev);
 790}
 791
 792static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
 793{
 794	struct mvumi_cmd *cmd;
 795	struct mvumi_msg_frame *frame;
 796	unsigned char device_id, retry = 0;
 797	unsigned char bitcount = sizeof(unsigned char) * 8;
 798
 799	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
 800		if (!(mhba->target_map[device_id / bitcount] &
 801				(1 << (device_id % bitcount))))
 802			continue;
 803get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
 804		if (!cmd) {
 805			if (retry++ >= 5) {
 806				dev_err(&mhba->pdev->dev, "failed to get memory"
 807					" for internal flush cache cmd for "
 808					"device %d", device_id);
 809				retry = 0;
 810				continue;
 811			} else
 812				goto get_cmd;
 813		}
 814		cmd->scmd = NULL;
 815		cmd->cmd_status = REQ_STATUS_PENDING;
 816		atomic_set(&cmd->sync_cmd, 0);
 817		frame = cmd->frame;
 818		frame->req_function = CL_FUN_SCSI_CMD;
 819		frame->device_id = device_id;
 820		frame->cmd_flag = CMD_FLAG_NON_DATA;
 821		frame->data_transfer_length = 0;
 822		frame->cdb_length = MAX_COMMAND_SIZE;
 823		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
 824		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
 825		frame->cdb[1] = CDB_CORE_MODULE;
 826		frame->cdb[2] = CDB_CORE_SHUTDOWN;
 827
 828		mvumi_issue_blocked_cmd(mhba, cmd);
 829		if (cmd->cmd_status != SAM_STAT_GOOD) {
 830			dev_err(&mhba->pdev->dev,
 831				"device %d flush cache failed, status=0x%x.\n",
 832				device_id, cmd->cmd_status);
 833		}
 834
 835		mvumi_delete_internal_cmd(mhba, cmd);
 836	}
 837	return 0;
 838}
 839
 840static unsigned char
 841mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
 842							unsigned short len)
 843{
 844	unsigned char *ptr;
 845	unsigned char ret = 0, i;
 846
 847	ptr = (unsigned char *) p_header->frame_content;
 848	for (i = 0; i < len; i++) {
 849		ret ^= *ptr;
 850		ptr++;
 851	}
 852
 853	return ret;
 854}
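/*
 * The handshake checksum is simply the XOR of the len bytes of
 * frame_content; mvumi_hs_build_page() stamps it into outgoing pages and
 * mvumi_hs_process_page() recomputes it to validate the pages written
 * back by the firmware.
 */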
 855
 856static void mvumi_hs_build_page(struct mvumi_hba *mhba,
 857				struct mvumi_hs_header *hs_header)
 858{
 859	struct mvumi_hs_page2 *hs_page2;
 860	struct mvumi_hs_page4 *hs_page4;
 861	struct mvumi_hs_page3 *hs_page3;
 862	u64 time;
 863	u64 local_time;
 864
 865	switch (hs_header->page_code) {
 866	case HS_PAGE_HOST_INFO:
 867		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
 868		hs_header->frame_length = sizeof(*hs_page2) - 4;
 869		memset(hs_header->frame_content, 0, hs_header->frame_length);
 870		hs_page2->host_type = 3; /* 3 mean linux*/
 871		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 872			hs_page2->host_cap = 0x08;/* host dynamic source mode */
 873		hs_page2->host_ver.ver_major = VER_MAJOR;
 874		hs_page2->host_ver.ver_minor = VER_MINOR;
 875		hs_page2->host_ver.ver_oem = VER_OEM;
 876		hs_page2->host_ver.ver_build = VER_BUILD;
 877		hs_page2->system_io_bus = 0;
 878		hs_page2->slot_number = 0;
 879		hs_page2->intr_level = 0;
 880		hs_page2->intr_vector = 0;
 881		time = ktime_get_real_seconds();
 882		local_time = (time - (sys_tz.tz_minuteswest * 60));
 883		hs_page2->seconds_since1970 = local_time;
 884		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 885						hs_header->frame_length);
 886		break;
 887
 888	case HS_PAGE_FIRM_CTL:
 889		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
 890		hs_header->frame_length = sizeof(*hs_page3) - 4;
 891		memset(hs_header->frame_content, 0, hs_header->frame_length);
 892		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 893						hs_header->frame_length);
 894		break;
 895
 896	case HS_PAGE_CL_INFO:
 897		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
 898		hs_header->frame_length = sizeof(*hs_page4) - 4;
 899		memset(hs_header->frame_content, 0, hs_header->frame_length);
 900		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
 901		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
 902
 903		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
 904		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
 905		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
 906		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
 907		if (mhba->hba_capability
 908			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
 909			hs_page4->ob_depth = find_first_bit((unsigned long *)
 910							    &mhba->list_num_io,
 911							    BITS_PER_LONG);
 912			hs_page4->ib_depth = find_first_bit((unsigned long *)
 913							    &mhba->list_num_io,
 914							    BITS_PER_LONG);
 915		} else {
 916			hs_page4->ob_depth = (u8) mhba->list_num_io;
 917			hs_page4->ib_depth = (u8) mhba->list_num_io;
 918		}
 919		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 920						hs_header->frame_length);
 921		break;
 922
 923	default:
 924		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
 925			hs_header->page_code);
 926		break;
 927	}
 928}
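/*
 * Three page types are built for the handshake: HS_PAGE_HOST_INFO
 * describes the host (OS type 3 for Linux, driver version, local time
 * and, when supported, the dynamic-source capability bit),
 * HS_PAGE_FIRM_CTL is sent with an empty payload, and HS_PAGE_CL_INFO
 * hands the firmware the DMA addresses, entry sizes and depth of the
 * inbound/outbound lists (the depth is passed as a bit index, i.e.
 * log2(list_num_io), when the new page layout is in use).  Every page is
 * finished with the XOR checksum from mvumi_calculate_checksum().
 */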
 929
 930/**
 931 * mvumi_init_data -	Initialize requested data for FW
 932 * @mhba:			Adapter soft state
 933 */
 934static int mvumi_init_data(struct mvumi_hba *mhba)
 935{
 936	struct mvumi_ob_data *ob_pool;
 937	struct mvumi_res *res_mgnt;
 938	unsigned int tmp_size, offset, i;
 939	void *virmem, *v;
 940	dma_addr_t p;
 941
 942	if (mhba->fw_flag & MVUMI_FW_ALLOC)
 943		return 0;
 944
 945	tmp_size = mhba->ib_max_size * mhba->max_io;
 946	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 947		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 948
 949	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
 950	tmp_size += 8 + sizeof(u32)*2 + 16;
 951
 952	res_mgnt = mvumi_alloc_mem_resource(mhba,
 953					RESOURCE_UNCACHED_MEMORY, tmp_size);
 954	if (!res_mgnt) {
 955		dev_err(&mhba->pdev->dev,
 956			"failed to allocate memory for inbound list\n");
 957		goto fail_alloc_dma_buf;
 958	}
 959
 960	p = res_mgnt->bus_addr;
 961	v = res_mgnt->virt_addr;
 962	/* ib_list */
 963	offset = round_up(p, 128) - p;
 964	p += offset;
 965	v += offset;
 966	mhba->ib_list = v;
 967	mhba->ib_list_phys = p;
 968	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 969		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 970		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 971		mhba->ib_frame = v;
 972		mhba->ib_frame_phys = p;
 973	}
 974	v += mhba->ib_max_size * mhba->max_io;
 975	p += mhba->ib_max_size * mhba->max_io;
 976
 977	/* ib shadow */
 978	offset = round_up(p, 8) - p;
 979	p += offset;
 980	v += offset;
 981	mhba->ib_shadow = v;
 982	mhba->ib_shadow_phys = p;
 983	p += sizeof(u32)*2;
 984	v += sizeof(u32)*2;
 985	/* ob shadow */
 986	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
 987		offset = round_up(p, 8) - p;
 988		p += offset;
 989		v += offset;
 990		mhba->ob_shadow = v;
 991		mhba->ob_shadow_phys = p;
 992		p += 8;
 993		v += 8;
 994	} else {
 995		offset = round_up(p, 4) - p;
 996		p += offset;
 997		v += offset;
 998		mhba->ob_shadow = v;
 999		mhba->ob_shadow_phys = p;
1000		p += 4;
1001		v += 4;
1002	}
1003
1004	/* ob list */
1005	offset = round_up(p, 128) - p;
1006	p += offset;
1007	v += offset;
1008
1009	mhba->ob_list = v;
1010	mhba->ob_list_phys = p;
1011
1012	/* ob data pool */
1013	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
1014	tmp_size = round_up(tmp_size, 8);
1015
1016	res_mgnt = mvumi_alloc_mem_resource(mhba,
1017				RESOURCE_CACHED_MEMORY, tmp_size);
1018	if (!res_mgnt) {
1019		dev_err(&mhba->pdev->dev,
1020			"failed to allocate memory for outbound data buffer\n");
1021		goto fail_alloc_dma_buf;
1022	}
1023	virmem = res_mgnt->virt_addr;
1024
1025	for (i = mhba->max_io; i != 0; i--) {
1026		ob_pool = (struct mvumi_ob_data *) virmem;
1027		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
1028		virmem += mhba->ob_max_size + sizeof(*ob_pool);
1029	}
1030
1031	tmp_size = sizeof(unsigned short) * mhba->max_io +
1032				sizeof(struct mvumi_cmd *) * mhba->max_io;
1033	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
1034						(sizeof(unsigned char) * 8);
1035
1036	res_mgnt = mvumi_alloc_mem_resource(mhba,
1037				RESOURCE_CACHED_MEMORY, tmp_size);
1038	if (!res_mgnt) {
1039		dev_err(&mhba->pdev->dev,
1040			"failed to allocate memory for tag and target map\n");
1041		goto fail_alloc_dma_buf;
1042	}
1043
1044	virmem = res_mgnt->virt_addr;
1045	mhba->tag_pool.stack = virmem;
1046	mhba->tag_pool.size = mhba->max_io;
1047	tag_init(&mhba->tag_pool, mhba->max_io);
1048	virmem += sizeof(unsigned short) * mhba->max_io;
1049
1050	mhba->tag_cmd = virmem;
1051	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
1052
1053	mhba->target_map = virmem;
1054
1055	mhba->fw_flag |= MVUMI_FW_ALLOC;
1056	return 0;
1057
1058fail_alloc_dma_buf:
1059	mvumi_release_mem_resource(mhba);
1060	return -1;
1061}
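/*
 * Layout carved out of the single uncached allocation above: the inbound
 * list at a 128-byte boundary (in dynamic-source mode it holds the
 * descriptors and is immediately followed by the frame area), the inbound
 * shadow (two u32s), the outbound shadow (8 bytes on the 9580, 4 bytes
 * otherwise) and the outbound list at another 128-byte boundary.  A
 * separate cached allocation provides the outbound data pool, and a third
 * one holds the tag stack, the tag-to-command table and the target
 * bitmap.
 */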
1062
1063static int mvumi_hs_process_page(struct mvumi_hba *mhba,
1064				struct mvumi_hs_header *hs_header)
1065{
1066	struct mvumi_hs_page1 *hs_page1;
1067	unsigned char page_checksum;
1068
1069	page_checksum = mvumi_calculate_checksum(hs_header,
1070						hs_header->frame_length);
1071	if (page_checksum != hs_header->checksum) {
1072		dev_err(&mhba->pdev->dev, "checksum error\n");
1073		return -1;
1074	}
1075
1076	switch (hs_header->page_code) {
1077	case HS_PAGE_FIRM_CAP:
1078		hs_page1 = (struct mvumi_hs_page1 *) hs_header;
1079
1080		mhba->max_io = hs_page1->max_io_support;
1081		mhba->list_num_io = hs_page1->cl_inout_list_depth;
1082		mhba->max_transfer_size = hs_page1->max_transfer_size;
1083		mhba->max_target_id = hs_page1->max_devices_support;
1084		mhba->hba_capability = hs_page1->capability;
1085		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
1086		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
1087
1088		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
1089		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
1090
1091		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
1092						hs_page1->fw_ver.ver_build);
1093
1094		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1095			mhba->eot_flag = 22;
1096		else
1097			mhba->eot_flag = 27;
1098		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1099			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
1100		break;
1101	default:
1102		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
1103		return -1;
1104	}
1105	return 0;
1106}
1107
1108/**
1109 * mvumi_handshake -	Move the FW to READY state
1110 * @mhba:				Adapter soft state
1111 *
 1112 * During initialization, the FW can be in any one of several possible
 1113 * states. If the FW is in the operational or waiting-for-handshake state,
 1114 * the driver must take steps to bring it to the ready state. Otherwise,
 1115 * it has to wait for the ready state.
1116 */
1117static int mvumi_handshake(struct mvumi_hba *mhba)
1118{
1119	unsigned int hs_state, tmp, hs_fun;
1120	struct mvumi_hs_header *hs_header;
1121	struct mvumi_hw_regs *regs = mhba->regs;
1122
1123	if (mhba->fw_state == FW_STATE_STARTING)
1124		hs_state = HS_S_START;
1125	else {
1126		tmp = ioread32(regs->arm_to_pciea_msg0);
1127		hs_state = HS_GET_STATE(tmp);
1128		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
1129		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
1130			mhba->fw_state = FW_STATE_STARTING;
1131			return -1;
1132		}
1133	}
1134
1135	hs_fun = 0;
1136	switch (hs_state) {
1137	case HS_S_START:
1138		mhba->fw_state = FW_STATE_HANDSHAKING;
1139		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1140		HS_SET_STATE(hs_fun, HS_S_RESET);
1141		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
1142		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1143		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1144		break;
1145
1146	case HS_S_RESET:
1147		iowrite32(lower_32_bits(mhba->handshake_page_phys),
1148					regs->pciea_to_arm_msg1);
1149		iowrite32(upper_32_bits(mhba->handshake_page_phys),
1150					regs->arm_to_pciea_msg1);
1151		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1152		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
1153		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1154		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1155		break;
1156
1157	case HS_S_PAGE_ADDR:
1158	case HS_S_QUERY_PAGE:
1159	case HS_S_SEND_PAGE:
1160		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
1161		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
1162			mhba->hba_total_pages =
1163			((struct mvumi_hs_page1 *) hs_header)->total_pages;
1164
1165			if (mhba->hba_total_pages == 0)
1166				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1167		}
1168
1169		if (hs_state == HS_S_QUERY_PAGE) {
1170			if (mvumi_hs_process_page(mhba, hs_header)) {
1171				HS_SET_STATE(hs_fun, HS_S_ABORT);
1172				return -1;
1173			}
1174			if (mvumi_init_data(mhba)) {
1175				HS_SET_STATE(hs_fun, HS_S_ABORT);
1176				return -1;
1177			}
1178		} else if (hs_state == HS_S_PAGE_ADDR) {
1179			hs_header->page_code = 0;
1180			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1181		}
1182
1183		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
1184			hs_header->page_code++;
1185			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
1186				mvumi_hs_build_page(mhba, hs_header);
1187				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
1188			} else
1189				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
1190		} else
1191			HS_SET_STATE(hs_fun, HS_S_END);
1192
1193		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1194		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1195		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1196		break;
1197
1198	case HS_S_END:
1199		/* Set communication list ISR */
1200		tmp = ioread32(regs->enpointa_mask_reg);
1201		tmp |= regs->int_comaout | regs->int_comaerr;
1202		iowrite32(tmp, regs->enpointa_mask_reg);
1203		iowrite32(mhba->list_num_io, mhba->ib_shadow);
1204		/* Set InBound List Available count shadow */
1205		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1206					regs->inb_aval_count_basel);
1207		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1208					regs->inb_aval_count_baseh);
1209
1210		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1211			/* Set OutBound List Available count shadow */
1212			iowrite32((mhba->list_num_io-1) |
1213							regs->cl_pointer_toggle,
1214							mhba->ob_shadow);
1215			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1216							regs->outb_copy_basel);
1217			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1218							regs->outb_copy_baseh);
1219		}
1220
1221		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1222							regs->cl_pointer_toggle;
1223		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1224							regs->cl_pointer_toggle;
1225		mhba->fw_state = FW_STATE_STARTED;
1226
1227		break;
1228	default:
1229		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1230								hs_state);
1231		return -1;
1232	}
1233	return 0;
1234}
1235
1236static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1237{
1238	unsigned int isr_status;
1239	unsigned long before;
1240
1241	before = jiffies;
1242	mvumi_handshake(mhba);
1243	do {
1244		isr_status = mhba->instancet->read_fw_status_reg(mhba);
1245
1246		if (mhba->fw_state == FW_STATE_STARTED)
1247			return 0;
1248		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1249			dev_err(&mhba->pdev->dev,
1250				"no handshake response at state 0x%x.\n",
1251				  mhba->fw_state);
1252			dev_err(&mhba->pdev->dev,
1253				"isr : global=0x%x,status=0x%x.\n",
1254					mhba->global_isr, isr_status);
1255			return -1;
1256		}
1257		rmb();
1258		usleep_range(1000, 2000);
1259	} while (!(isr_status & DRBL_HANDSHAKE_ISR));
1260
1261	return 0;
1262}
1263
1264static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1265{
1266	unsigned int tmp;
1267	unsigned long before;
1268
1269	before = jiffies;
1270	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1271	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1272		if (tmp != HANDSHAKE_READYSTATE)
1273			iowrite32(DRBL_MU_RESET,
1274					mhba->regs->pciea_to_arm_drbl_reg);
1275		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1276			dev_err(&mhba->pdev->dev,
1277				"invalid signature [0x%x].\n", tmp);
1278			return -1;
1279		}
1280		usleep_range(1000, 2000);
1281		rmb();
1282		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1283	}
1284
1285	mhba->fw_state = FW_STATE_STARTING;
1286	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1287	do {
1288		if (mvumi_handshake_event(mhba)) {
1289			dev_err(&mhba->pdev->dev,
1290					"handshake failed at state 0x%x.\n",
1291						mhba->fw_state);
1292			return -1;
1293		}
1294	} while (mhba->fw_state != FW_STATE_STARTED);
1295
1296	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1297
1298	return 0;
1299}
1300
1301static unsigned char mvumi_start(struct mvumi_hba *mhba)
1302{
1303	unsigned int tmp;
1304	struct mvumi_hw_regs *regs = mhba->regs;
1305
1306	/* clear Door bell */
1307	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1308	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1309
1310	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1311	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1312	iowrite32(tmp, regs->enpointa_mask_reg);
1313	msleep(100);
1314	if (mvumi_check_handshake(mhba))
1315		return -1;
1316
1317	return 0;
1318}
1319
1320/**
1321 * mvumi_complete_cmd -	Completes a command
1322 * @mhba:			Adapter soft state
 1323 * @cmd:			Command to be completed
 * @ob_frame:		Outbound response frame returned by the FW
1324 */
1325static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1326					struct mvumi_rsp_frame *ob_frame)
1327{
1328	struct scsi_cmnd *scmd = cmd->scmd;
1329
1330	cmd->scmd->SCp.ptr = NULL;
1331	scmd->result = ob_frame->req_status;
1332
1333	switch (ob_frame->req_status) {
1334	case SAM_STAT_GOOD:
1335		scmd->result |= DID_OK << 16;
1336		break;
1337	case SAM_STAT_BUSY:
1338		scmd->result |= DID_BUS_BUSY << 16;
1339		break;
1340	case SAM_STAT_CHECK_CONDITION:
1341		scmd->result |= (DID_OK << 16);
1342		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1343			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1344				sizeof(struct mvumi_sense_data));
1345			scmd->result |=  (DRIVER_SENSE << 24);
1346		}
1347		break;
1348	default:
1349		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1350		break;
1351	}
1352
1353	if (scsi_bufflen(scmd)) {
1354		if (scsi_sg_count(scmd)) {
1355			pci_unmap_sg(mhba->pdev,
1356				scsi_sglist(scmd),
1357				scsi_sg_count(scmd),
1358				(int) scmd->sc_data_direction);
1359		} else {
1360			pci_unmap_single(mhba->pdev,
1361				scmd->SCp.dma_handle,
1362				scsi_bufflen(scmd),
1363				(int) scmd->sc_data_direction);
1364
1365			scmd->SCp.dma_handle = 0;
1366		}
1367	}
1368	cmd->scmd->scsi_done(scmd);
1369	mvumi_return_cmd(mhba, cmd);
1370}
1371
1372static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1373						struct mvumi_cmd *cmd,
1374					struct mvumi_rsp_frame *ob_frame)
1375{
1376	if (atomic_read(&cmd->sync_cmd)) {
1377		cmd->cmd_status = ob_frame->req_status;
1378
1379		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1380				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1381				cmd->data_buf) {
1382			memcpy(cmd->data_buf, ob_frame->payload,
1383					sizeof(struct mvumi_sense_data));
1384		}
1385		atomic_dec(&cmd->sync_cmd);
1386		wake_up(&mhba->int_cmd_wait_q);
1387	}
1388}
1389
1390static void mvumi_show_event(struct mvumi_hba *mhba,
1391			struct mvumi_driver_event *ptr)
1392{
1393	unsigned int i;
1394
1395	dev_warn(&mhba->pdev->dev,
1396		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1397		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
1398	if (ptr->param_count) {
1399		printk(KERN_WARNING "Event param(len 0x%x): ",
1400						ptr->param_count);
1401		for (i = 0; i < ptr->param_count; i++)
1402			printk(KERN_WARNING "0x%x ", ptr->params[i]);
1403
1404		printk(KERN_WARNING "\n");
1405	}
1406
1407	if (ptr->sense_data_length) {
1408		printk(KERN_WARNING "Event sense data(len 0x%x): ",
1409						ptr->sense_data_length);
1410		for (i = 0; i < ptr->sense_data_length; i++)
1411			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
1412		printk(KERN_WARNING "\n");
1413	}
1414}
1415
1416static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1417{
1418	struct scsi_device *sdev;
1419	int ret = -1;
1420
1421	if (status == DEVICE_OFFLINE) {
1422		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1423		if (sdev) {
1424			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1425								sdev->id, 0);
1426			scsi_remove_device(sdev);
1427			scsi_device_put(sdev);
1428			ret = 0;
1429		} else
1430			dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1431									devid);
1432	} else if (status == DEVICE_ONLINE) {
1433		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1434		if (!sdev) {
1435			scsi_add_device(mhba->shost, 0, devid, 0);
1436			dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1437								devid, 0);
1438			ret = 0;
1439		} else {
1440			dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
1441								0, devid, 0);
1442			scsi_device_put(sdev);
1443		}
1444	}
1445	return ret;
1446}
1447
1448static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1449	unsigned int id, struct mvumi_cmd *cmd)
1450{
1451	struct mvumi_msg_frame *frame;
1452	u64 wwid = 0;
1453	int cmd_alloc = 0;
1454	int data_buf_len = 64;
1455
1456	if (!cmd) {
1457		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1458		if (cmd)
1459			cmd_alloc = 1;
1460		else
1461			return 0;
1462	} else {
1463		memset(cmd->data_buf, 0, data_buf_len);
1464	}
1465	cmd->scmd = NULL;
1466	cmd->cmd_status = REQ_STATUS_PENDING;
1467	atomic_set(&cmd->sync_cmd, 0);
1468	frame = cmd->frame;
1469	frame->device_id = (u16) id;
1470	frame->cmd_flag = CMD_FLAG_DATA_IN;
1471	frame->req_function = CL_FUN_SCSI_CMD;
1472	frame->cdb_length = 6;
1473	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1474	memset(frame->cdb, 0, frame->cdb_length);
1475	frame->cdb[0] = INQUIRY;
1476	frame->cdb[4] = frame->data_transfer_length;
1477
1478	mvumi_issue_blocked_cmd(mhba, cmd);
1479
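	/*
	 * 9143: synthesize the wwid from the target id;
	 * 9580: take it from the UUID in the inquiry data.
	 */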
1480	if (cmd->cmd_status == SAM_STAT_GOOD) {
1481		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1482			wwid = id + 1;
1483		else
1484			memcpy((void *)&wwid,
1485			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1486			       MVUMI_INQUIRY_UUID_LEN);
1487		dev_dbg(&mhba->pdev->dev,
1488			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1489	} else {
1490		wwid = 0;
1491	}
1492	if (cmd_alloc)
1493		mvumi_delete_internal_cmd(mhba, cmd);
1494
1495	return wwid;
1496}
1497
1498static void mvumi_detach_devices(struct mvumi_hba *mhba)
1499{
1500	struct mvumi_device *mv_dev = NULL , *dev_next;
1501	struct scsi_device *sdev = NULL;
1502
1503	mutex_lock(&mhba->device_lock);
1504
1505	/* detach Hard Disk */
1506	list_for_each_entry_safe(mv_dev, dev_next,
1507		&mhba->shost_dev_list, list) {
1508		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1509		list_del_init(&mv_dev->list);
1510		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1511			mv_dev->id, mv_dev->wwid);
1512		kfree(mv_dev);
1513	}
1514	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1515		list_del_init(&mv_dev->list);
1516		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1517			mv_dev->id, mv_dev->wwid);
1518		kfree(mv_dev);
1519	}
1520
1521	/* detach virtual device */
1522	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1523		sdev = scsi_device_lookup(mhba->shost, 0,
1524						mhba->max_target_id - 1, 0);
1525
1526	if (sdev) {
1527		scsi_remove_device(sdev);
1528		scsi_device_put(sdev);
1529	}
1530
1531	mutex_unlock(&mhba->device_lock);
1532}
1533
1534static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1535{
1536	struct scsi_device *sdev;
1537
1538	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1539	if (sdev) {
1540		scsi_rescan_device(&sdev->sdev_gendev);
1541		scsi_device_put(sdev);
1542	}
1543}
1544
1545static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1546{
1547	struct mvumi_device *mv_dev = NULL;
1548
1549	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1550		if (mv_dev->wwid == wwid) {
1551			if (mv_dev->id != id) {
1552				dev_err(&mhba->pdev->dev,
 1553					"%s has same wwid[%llx],"
1554					" but different id[%d %d]\n",
1555					__func__, mv_dev->wwid, mv_dev->id, id);
1556				return -1;
1557			} else {
1558				if (mhba->pdev->device ==
1559						PCI_DEVICE_ID_MARVELL_MV9143)
1560					mvumi_rescan_devices(mhba, id);
1561				return 1;
1562			}
1563		}
1564	}
1565	return 0;
1566}
1567
1568static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1569{
1570	struct mvumi_device *mv_dev = NULL, *dev_next;
1571
1572	list_for_each_entry_safe(mv_dev, dev_next,
1573				&mhba->shost_dev_list, list) {
1574		if (mv_dev->id == id) {
1575			dev_dbg(&mhba->pdev->dev,
1576				"detach device(0:%d:0) wwid(%llx) from HOST\n",
1577				mv_dev->id, mv_dev->wwid);
1578			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1579			list_del_init(&mv_dev->list);
1580			kfree(mv_dev);
1581		}
1582	}
1583}
1584
1585static int mvumi_probe_devices(struct mvumi_hba *mhba)
1586{
1587	int id, maxid;
1588	u64 wwid = 0;
1589	struct mvumi_device *mv_dev = NULL;
1590	struct mvumi_cmd *cmd = NULL;
1591	int found = 0;
1592
1593	cmd = mvumi_create_internal_cmd(mhba, 64);
1594	if (!cmd)
1595		return -1;
1596
1597	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1598		maxid = mhba->max_target_id;
1599	else
1600		maxid = mhba->max_target_id - 1;
1601
1602	for (id = 0; id < maxid; id++) {
1603		wwid = mvumi_inquiry(mhba, id, cmd);
1604		if (!wwid) {
1605			/* device no response, remove it */
1606			mvumi_remove_devices(mhba, id);
1607		} else {
1608			/* device response, add it */
1609			found = mvumi_match_devices(mhba, id, wwid);
1610			if (!found) {
1611				mvumi_remove_devices(mhba, id);
1612				mv_dev = kzalloc(sizeof(struct mvumi_device),
1613								GFP_KERNEL);
1614				if (!mv_dev) {
1615					dev_err(&mhba->pdev->dev,
1616						"%s alloc mv_dev failed\n",
1617						__func__);
1618					continue;
1619				}
1620				mv_dev->id = id;
1621				mv_dev->wwid = wwid;
1622				mv_dev->sdev = NULL;
1623				INIT_LIST_HEAD(&mv_dev->list);
1624				list_add_tail(&mv_dev->list,
1625					      &mhba->mhba_dev_list);
1626				dev_dbg(&mhba->pdev->dev,
1627					"probe a new device(0:%d:0)"
1628					" wwid(%llx)\n", id, mv_dev->wwid);
1629			} else if (found == -1)
1630				return -1;
1631			else
1632				continue;
1633		}
1634	}
1635
1636	if (cmd)
1637		mvumi_delete_internal_cmd(mhba, cmd);
1638
1639	return 0;
1640}
1641
1642static int mvumi_rescan_bus(void *data)
1643{
1644	int ret = 0;
1645	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
1646	struct mvumi_device *mv_dev = NULL , *dev_next;
1647
1648	while (!kthread_should_stop()) {
1649
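		/*
		 * Sleep until a hotplug event bumps pnp_count, then settle
		 * for a second before rescanning the bus.
		 */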
1650		set_current_state(TASK_INTERRUPTIBLE);
1651		if (!atomic_read(&mhba->pnp_count))
1652			schedule();
1653		msleep(1000);
1654		atomic_set(&mhba->pnp_count, 0);
1655		__set_current_state(TASK_RUNNING);
1656
1657		mutex_lock(&mhba->device_lock);
1658		ret = mvumi_probe_devices(mhba);
1659		if (!ret) {
1660			list_for_each_entry_safe(mv_dev, dev_next,
1661						 &mhba->mhba_dev_list, list) {
1662				if (mvumi_handle_hotplug(mhba, mv_dev->id,
1663							 DEVICE_ONLINE)) {
1664					dev_err(&mhba->pdev->dev,
 1665					"%s add device(0:%d:0) failed, "
 1666						"wwid(%llx) already exists\n",
1667						__func__,
1668						mv_dev->id, mv_dev->wwid);
1669					list_del_init(&mv_dev->list);
1670					kfree(mv_dev);
1671				} else {
1672					list_move_tail(&mv_dev->list,
1673						       &mhba->shost_dev_list);
1674				}
1675			}
1676		}
1677		mutex_unlock(&mhba->device_lock);
1678	}
1679	return 0;
1680}
1681
1682static void mvumi_proc_msg(struct mvumi_hba *mhba,
1683					struct mvumi_hotplug_event *param)
1684{
1685	u16 size = param->size;
1686	const unsigned long *ar_bitmap;
1687	const unsigned long *re_bitmap;
1688	int index;
1689
1690	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1691		index = -1;
1692		ar_bitmap = (const unsigned long *) param->bitmap;
1693		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1694
1695		mutex_lock(&mhba->sas_discovery_mutex);
1696		do {
1697			index = find_next_zero_bit(ar_bitmap, size, index + 1);
1698			if (index >= size)
1699				break;
1700			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1701		} while (1);
1702
1703		index = -1;
1704		do {
1705			index = find_next_zero_bit(re_bitmap, size, index + 1);
1706			if (index >= size)
1707				break;
1708			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1709		} while (1);
1710		mutex_unlock(&mhba->sas_discovery_mutex);
1711	}
1712}
1713
1714static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1715{
1716	if (msg == APICDB1_EVENT_GETEVENT) {
1717		int i, count;
1718		struct mvumi_driver_event *param = NULL;
1719		struct mvumi_event_req *er = buffer;
1720		count = er->count;
1721		if (count > MAX_EVENTS_RETURNED) {
1722			dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1723					" than max event count[0x%x].\n",
1724					count, MAX_EVENTS_RETURNED);
1725			return;
1726		}
1727		for (i = 0; i < count; i++) {
1728			param = &er->events[i];
1729			mvumi_show_event(mhba, param);
1730		}
1731	} else if (msg == APICDB1_HOST_GETEVENT) {
1732		mvumi_proc_msg(mhba, buffer);
1733	}
1734}
1735
1736static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1737{
1738	struct mvumi_cmd *cmd;
1739	struct mvumi_msg_frame *frame;
1740
1741	cmd = mvumi_create_internal_cmd(mhba, 512);
1742	if (!cmd)
1743		return -1;
1744	cmd->scmd = NULL;
1745	cmd->cmd_status = REQ_STATUS_PENDING;
1746	atomic_set(&cmd->sync_cmd, 0);
1747	frame = cmd->frame;
1748	frame->device_id = 0;
1749	frame->cmd_flag = CMD_FLAG_DATA_IN;
1750	frame->req_function = CL_FUN_SCSI_CMD;
1751	frame->cdb_length = MAX_COMMAND_SIZE;
1752	frame->data_transfer_length = sizeof(struct mvumi_event_req);
1753	memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1754	frame->cdb[0] = APICDB0_EVENT;
1755	frame->cdb[1] = msg;
1756	mvumi_issue_blocked_cmd(mhba, cmd);
1757
1758	if (cmd->cmd_status != SAM_STAT_GOOD)
1759		dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1760							cmd->cmd_status);
1761	else
1762		mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1763
1764	mvumi_delete_internal_cmd(mhba, cmd);
1765	return 0;
1766}
1767
1768static void mvumi_scan_events(struct work_struct *work)
1769{
1770	struct mvumi_events_wq *mu_ev =
1771		container_of(work, struct mvumi_events_wq, work_q);
1772
1773	mvumi_get_event(mu_ev->mhba, mu_ev->event);
1774	kfree(mu_ev);
1775}
1776
1777static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1778{
1779	struct mvumi_events_wq *mu_ev;
1780
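	/*
	 * A bus-change doorbell just kicks the rescan thread; other event
	 * notifications are fetched from a workqueue item.
	 */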
1781	while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1782		if (isr_status & DRBL_BUS_CHANGE) {
1783			atomic_inc(&mhba->pnp_count);
1784			wake_up_process(mhba->dm_thread);
1785			isr_status &= ~(DRBL_BUS_CHANGE);
1786			continue;
1787		}
1788
1789		mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1790		if (mu_ev) {
1791			INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1792			mu_ev->mhba = mhba;
1793			mu_ev->event = APICDB1_EVENT_GETEVENT;
1794			isr_status &= ~(DRBL_EVENT_NOTIFY);
1795			mu_ev->param = NULL;
1796			schedule_work(&mu_ev->work_q);
1797		}
1798	}
1799}
1800
1801static void mvumi_handle_clob(struct mvumi_hba *mhba)
1802{
1803	struct mvumi_rsp_frame *ob_frame;
1804	struct mvumi_cmd *cmd;
1805	struct mvumi_ob_data *pool;
1806
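	/*
	 * Drain completed outbound frames: recycle the data buffer,
	 * release the tag and complete the owning command.
	 */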
1807	while (!list_empty(&mhba->free_ob_list)) {
1808		pool = list_first_entry(&mhba->free_ob_list,
1809						struct mvumi_ob_data, list);
1810		list_del_init(&pool->list);
1811		list_add_tail(&pool->list, &mhba->ob_data_list);
1812
1813		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1814		cmd = mhba->tag_cmd[ob_frame->tag];
1815
1816		atomic_dec(&mhba->fw_outstanding);
1817		mhba->tag_cmd[ob_frame->tag] = 0;
1818		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1819		if (cmd->scmd)
1820			mvumi_complete_cmd(mhba, cmd, ob_frame);
1821		else
1822			mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1823	}
1824	mhba->instancet->fire_cmd(mhba, NULL);
1825}
1826
1827static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1828{
1829	struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1830	unsigned long flags;
1831
1832	spin_lock_irqsave(mhba->shost->host_lock, flags);
1833	if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1834		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1835		return IRQ_NONE;
1836	}
1837
1838	if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1839		if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1840			mvumi_launch_events(mhba, mhba->isr_status);
1841		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1842			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1843			mvumi_handshake(mhba);
1844		}
1845
1846	}
1847
1848	if (mhba->global_isr & mhba->regs->int_comaout)
1849		mvumi_receive_ob_list_entry(mhba);
1850
1851	mhba->global_isr = 0;
1852	mhba->isr_status = 0;
1853	if (mhba->fw_state == FW_STATE_STARTED)
1854		mvumi_handle_clob(mhba);
1855	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1856	return IRQ_HANDLED;
1857}
1858
1859static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1860						struct mvumi_cmd *cmd)
1861{
1862	void *ib_entry;
1863	struct mvumi_msg_frame *ib_frame;
1864	unsigned int frame_len;
1865
1866	ib_frame = cmd->frame;
1867	if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1868		dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1869		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1870	}
1871	if (tag_is_empty(&mhba->tag_pool)) {
1872		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1873		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1874	}
1875	mvumi_get_ib_list_entry(mhba, &ib_entry);
1876
1877	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1878	cmd->frame->request_id = mhba->io_seq++;
1879	cmd->request_id = cmd->frame->request_id;
1880	mhba->tag_cmd[cmd->frame->tag] = cmd;
1881	frame_len = sizeof(*ib_frame) - 4 +
1882				ib_frame->sg_counts * sizeof(struct mvumi_sgl);
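	/*
	 * With dynamic source support the inbound slot only gets a
	 * descriptor (frame address and length); otherwise the whole
	 * frame is copied into the inbound list entry.
	 */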
1883	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1884		struct mvumi_dyn_list_entry *dle;
1885		dle = ib_entry;
1886		dle->src_low_addr =
1887			cpu_to_le32(lower_32_bits(cmd->frame_phys));
1888		dle->src_high_addr =
1889			cpu_to_le32(upper_32_bits(cmd->frame_phys));
1890		dle->if_length = (frame_len >> 2) & 0xFFF;
1891	} else {
1892		memcpy(ib_entry, ib_frame, frame_len);
1893	}
1894	return MV_QUEUE_COMMAND_RESULT_SENT;
1895}
1896
1897static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1898{
1899	unsigned short num_of_cl_sent = 0;
1900	unsigned int count;
1901	enum mvumi_qc_result result;
1902
1903	if (cmd)
1904		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1905	count = mhba->instancet->check_ib_list(mhba);
1906	if (list_empty(&mhba->waiting_req_list) || !count)
1907		return;
1908
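	/*
	 * Send as many waiting commands as there are free inbound slots,
	 * then notify the FW once for the whole batch.
	 */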
1909	do {
1910		cmd = list_first_entry(&mhba->waiting_req_list,
1911				       struct mvumi_cmd, queue_pointer);
1912		list_del_init(&cmd->queue_pointer);
1913		result = mvumi_send_command(mhba, cmd);
1914		switch (result) {
1915		case MV_QUEUE_COMMAND_RESULT_SENT:
1916			num_of_cl_sent++;
1917			break;
1918		case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1919			list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1920			if (num_of_cl_sent > 0)
1921				mvumi_send_ib_list_entry(mhba);
1922
1923			return;
1924		}
1925	} while (!list_empty(&mhba->waiting_req_list) && count--);
1926
1927	if (num_of_cl_sent > 0)
1928		mvumi_send_ib_list_entry(mhba);
1929}
1930
1931/**
1932 * mvumi_enable_intr -	Enables interrupts
1933 * @mhba:		Adapter soft state
1934 */
1935static void mvumi_enable_intr(struct mvumi_hba *mhba)
1936{
1937	unsigned int mask;
1938	struct mvumi_hw_regs *regs = mhba->regs;
1939
1940	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1941	mask = ioread32(regs->enpointa_mask_reg);
1942	mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1943	iowrite32(mask, regs->enpointa_mask_reg);
1944}
1945
1946/**
 1947 * mvumi_disable_intr -	Disables interrupts
1948 * @mhba:		Adapter soft state
1949 */
1950static void mvumi_disable_intr(struct mvumi_hba *mhba)
1951{
1952	unsigned int mask;
1953	struct mvumi_hw_regs *regs = mhba->regs;
1954
1955	iowrite32(0, regs->arm_to_pciea_mask_reg);
1956	mask = ioread32(regs->enpointa_mask_reg);
1957	mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1958							regs->int_comaerr);
1959	iowrite32(mask, regs->enpointa_mask_reg);
1960}
1961
1962static int mvumi_clear_intr(void *extend)
1963{
1964	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1965	unsigned int status, isr_status = 0, tmp = 0;
1966	struct mvumi_hw_regs *regs = mhba->regs;
1967
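	/*
	 * Latch and acknowledge the pending interrupt causes; the ISR
	 * acts on the cached global_isr/isr_status values afterwards.
	 */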
1968	status = ioread32(regs->main_int_cause_reg);
1969	if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1970		return 1;
1971	if (unlikely(status & regs->int_comaerr)) {
1972		tmp = ioread32(regs->outb_isr_cause);
1973		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1974			if (tmp & regs->clic_out_err) {
1975				iowrite32(tmp & regs->clic_out_err,
1976							regs->outb_isr_cause);
1977			}
1978		} else {
1979			if (tmp & (regs->clic_in_err | regs->clic_out_err))
1980				iowrite32(tmp & (regs->clic_in_err |
1981						regs->clic_out_err),
1982						regs->outb_isr_cause);
1983		}
1984		status ^= mhba->regs->int_comaerr;
1985		/* inbound or outbound parity error, command will timeout */
1986	}
1987	if (status & regs->int_comaout) {
1988		tmp = ioread32(regs->outb_isr_cause);
1989		if (tmp & regs->clic_irq)
1990			iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1991	}
1992	if (status & regs->int_dl_cpu2pciea) {
1993		isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1994		if (isr_status)
1995			iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1996	}
1997
1998	mhba->global_isr = status;
1999	mhba->isr_status = isr_status;
2000
2001	return 0;
2002}
2003
2004/**
2005 * mvumi_read_fw_status_reg - returns the current FW status value
2006 * @mhba:		Adapter soft state
2007 */
2008static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
2009{
2010	unsigned int status;
2011
2012	status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
2013	if (status)
2014		iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
2015	return status;
2016}
2017
2018static struct mvumi_instance_template mvumi_instance_9143 = {
2019	.fire_cmd = mvumi_fire_cmd,
2020	.enable_intr = mvumi_enable_intr,
2021	.disable_intr = mvumi_disable_intr,
2022	.clear_intr = mvumi_clear_intr,
2023	.read_fw_status_reg = mvumi_read_fw_status_reg,
2024	.check_ib_list = mvumi_check_ib_list_9143,
2025	.check_ob_list = mvumi_check_ob_list_9143,
2026	.reset_host = mvumi_reset_host_9143,
2027};
2028
2029static struct mvumi_instance_template mvumi_instance_9580 = {
2030	.fire_cmd = mvumi_fire_cmd,
2031	.enable_intr = mvumi_enable_intr,
2032	.disable_intr = mvumi_disable_intr,
2033	.clear_intr = mvumi_clear_intr,
2034	.read_fw_status_reg = mvumi_read_fw_status_reg,
2035	.check_ib_list = mvumi_check_ib_list_9580,
2036	.check_ob_list = mvumi_check_ob_list_9580,
2037	.reset_host = mvumi_reset_host_9580,
2038};
2039
2040static int mvumi_slave_configure(struct scsi_device *sdev)
2041{
2042	struct mvumi_hba *mhba;
2043	unsigned char bitcount = sizeof(unsigned char) * 8;
2044
2045	mhba = (struct mvumi_hba *) sdev->host->hostdata;
2046	if (sdev->id >= mhba->max_target_id)
2047		return -EINVAL;
2048
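	/* mark this target as present in the driver's target bitmap */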
2049	mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2050	return 0;
2051}
2052
2053/**
2054 * mvumi_build_frame -	Prepares a direct cdb (DCDB) command
2055 * @mhba:		Adapter soft state
2056 * @scmd:		SCSI command
2057 * @cmd:		Command to be prepared in
2058 *
 2059 * This function prepares CDB commands. These are typically pass-through
2060 * commands to the devices.
2061 */
2062static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
2063				struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
2064{
2065	struct mvumi_msg_frame *pframe;
2066
2067	cmd->scmd = scmd;
2068	cmd->cmd_status = REQ_STATUS_PENDING;
2069	pframe = cmd->frame;
2070	pframe->device_id = ((unsigned short) scmd->device->id) |
2071				(((unsigned short) scmd->device->lun) << 8);
2072	pframe->cmd_flag = 0;
2073
2074	switch (scmd->sc_data_direction) {
2075	case DMA_NONE:
2076		pframe->cmd_flag |= CMD_FLAG_NON_DATA;
2077		break;
2078	case DMA_FROM_DEVICE:
2079		pframe->cmd_flag |= CMD_FLAG_DATA_IN;
2080		break;
2081	case DMA_TO_DEVICE:
2082		pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
2083		break;
2084	case DMA_BIDIRECTIONAL:
2085	default:
2086		dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2087			"cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2088		goto error;
2089	}
2090
2091	pframe->cdb_length = scmd->cmd_len;
2092	memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
2093	pframe->req_function = CL_FUN_SCSI_CMD;
2094	if (scsi_bufflen(scmd)) {
2095		if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2096			&pframe->sg_counts))
2097			goto error;
2098
2099		pframe->data_transfer_length = scsi_bufflen(scmd);
2100	} else {
2101		pframe->sg_counts = 0;
2102		pframe->data_transfer_length = 0;
2103	}
2104	return 0;
2105
2106error:
2107	scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
2108		SAM_STAT_CHECK_CONDITION;
2109	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
2110									0);
2111	return -1;
2112}
2113
2114/**
 2115 * mvumi_queue_command -	Queue entry point
 * @shost:			Scsi_Host to which the command is queued
 2116 * @scmd:			SCSI command to be queued
2118 */
2119static int mvumi_queue_command(struct Scsi_Host *shost,
2120					struct scsi_cmnd *scmd)
2121{
2122	struct mvumi_cmd *cmd;
2123	struct mvumi_hba *mhba;
2124	unsigned long irq_flags;
2125
2126	spin_lock_irqsave(shost->host_lock, irq_flags);
2127	scsi_cmd_get_serial(shost, scmd);
2128
2129	mhba = (struct mvumi_hba *) shost->hostdata;
2130	scmd->result = 0;
2131	cmd = mvumi_get_cmd(mhba);
2132	if (unlikely(!cmd)) {
2133		spin_unlock_irqrestore(shost->host_lock, irq_flags);
2134		return SCSI_MLQUEUE_HOST_BUSY;
2135	}
2136
2137	if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2138		goto out_return_cmd;
2139
2140	cmd->scmd = scmd;
2141	scmd->SCp.ptr = (char *) cmd;
2142	mhba->instancet->fire_cmd(mhba, cmd);
2143	spin_unlock_irqrestore(shost->host_lock, irq_flags);
2144	return 0;
2145
2146out_return_cmd:
2147	mvumi_return_cmd(mhba, cmd);
2148	scmd->scsi_done(scmd);
2149	spin_unlock_irqrestore(shost->host_lock, irq_flags);
2150	return 0;
2151}
2152
2153static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
2154{
2155	struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
2156	struct Scsi_Host *host = scmd->device->host;
2157	struct mvumi_hba *mhba = shost_priv(host);
2158	unsigned long flags;
2159
2160	spin_lock_irqsave(mhba->shost->host_lock, flags);
2161
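	/* drop the tag, unmap the data buffer and mark the command aborted */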
2162	if (mhba->tag_cmd[cmd->frame->tag]) {
2163		mhba->tag_cmd[cmd->frame->tag] = 0;
2164		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2165	}
2166	if (!list_empty(&cmd->queue_pointer))
2167		list_del_init(&cmd->queue_pointer);
2168	else
2169		atomic_dec(&mhba->fw_outstanding);
2170
2171	scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
2172	scmd->SCp.ptr = NULL;
2173	if (scsi_bufflen(scmd)) {
2174		if (scsi_sg_count(scmd)) {
2175			pci_unmap_sg(mhba->pdev,
2176				scsi_sglist(scmd),
2177				scsi_sg_count(scmd),
2178				(int)scmd->sc_data_direction);
2179		} else {
2180			pci_unmap_single(mhba->pdev,
2181				scmd->SCp.dma_handle,
2182				scsi_bufflen(scmd),
2183				(int)scmd->sc_data_direction);
2184
2185			scmd->SCp.dma_handle = 0;
2186		}
2187	}
2188	mvumi_return_cmd(mhba, cmd);
2189	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2190
2191	return BLK_EH_NOT_HANDLED;
2192}
2193
2194static int
2195mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2196			sector_t capacity, int geom[])
2197{
2198	int heads, sectors;
2199	sector_t cylinders;
2200	unsigned long tmp;
2201
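	/* 64/32 geometry by default; 255/63 for disks >= 1GB (0x200000 sectors) */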
2202	heads = 64;
2203	sectors = 32;
2204	tmp = heads * sectors;
2205	cylinders = capacity;
2206	sector_div(cylinders, tmp);
2207
2208	if (capacity >= 0x200000) {
2209		heads = 255;
2210		sectors = 63;
2211		tmp = heads * sectors;
2212		cylinders = capacity;
2213		sector_div(cylinders, tmp);
2214	}
2215	geom[0] = heads;
2216	geom[1] = sectors;
2217	geom[2] = cylinders;
2218
2219	return 0;
2220}
2221
2222static struct scsi_host_template mvumi_template = {
2223
2224	.module = THIS_MODULE,
2225	.name = "Marvell Storage Controller",
2226	.slave_configure = mvumi_slave_configure,
 2227	.queuecommand = mvumi_queue_command,
 2228	.eh_host_reset_handler = mvumi_host_reset,
 2229	.bios_param = mvumi_bios_param,
 2230	.this_id = -1,
2231};
2232
2233static struct scsi_transport_template mvumi_transport_template = {
2234	.eh_timed_out = mvumi_timed_out,
2235};
2236
2237static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2238{
2239	void *base = NULL;
2240	struct mvumi_hw_regs *regs;
2241
2242	switch (mhba->pdev->device) {
2243	case PCI_DEVICE_ID_MARVELL_MV9143:
2244		mhba->mmio = mhba->base_addr[0];
2245		base = mhba->mmio;
2246		if (!mhba->regs) {
2247			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2248			if (mhba->regs == NULL)
2249				return -ENOMEM;
2250		}
2251		regs = mhba->regs;
2252
2253		/* For Arm */
2254		regs->ctrl_sts_reg          = base + 0x20104;
2255		regs->rstoutn_mask_reg      = base + 0x20108;
2256		regs->sys_soft_rst_reg      = base + 0x2010C;
2257		regs->main_int_cause_reg    = base + 0x20200;
2258		regs->enpointa_mask_reg     = base + 0x2020C;
2259		regs->rstoutn_en_reg        = base + 0xF1400;
2260		/* For Doorbell */
2261		regs->pciea_to_arm_drbl_reg = base + 0x20400;
2262		regs->arm_to_pciea_drbl_reg = base + 0x20408;
2263		regs->arm_to_pciea_mask_reg = base + 0x2040C;
2264		regs->pciea_to_arm_msg0     = base + 0x20430;
2265		regs->pciea_to_arm_msg1     = base + 0x20434;
2266		regs->arm_to_pciea_msg0     = base + 0x20438;
2267		regs->arm_to_pciea_msg1     = base + 0x2043C;
2268
2269		/* For Message Unit */
2270
2271		regs->inb_aval_count_basel  = base + 0x508;
2272		regs->inb_aval_count_baseh  = base + 0x50C;
2273		regs->inb_write_pointer     = base + 0x518;
2274		regs->inb_read_pointer      = base + 0x51C;
2275		regs->outb_coal_cfg         = base + 0x568;
2276		regs->outb_copy_basel       = base + 0x5B0;
2277		regs->outb_copy_baseh       = base + 0x5B4;
2278		regs->outb_copy_pointer     = base + 0x544;
2279		regs->outb_read_pointer     = base + 0x548;
2280		regs->outb_isr_cause        = base + 0x560;
2281		regs->outb_coal_cfg         = base + 0x568;
2282		/* Bit setting for HW */
2283		regs->int_comaout           = 1 << 8;
2284		regs->int_comaerr           = 1 << 6;
2285		regs->int_dl_cpu2pciea      = 1 << 1;
2286		regs->cl_pointer_toggle     = 1 << 12;
2287		regs->clic_irq              = 1 << 1;
2288		regs->clic_in_err           = 1 << 8;
2289		regs->clic_out_err          = 1 << 12;
2290		regs->cl_slot_num_mask      = 0xFFF;
2291		regs->int_drbl_int_mask     = 0x3FFFFFFF;
2292		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2293							regs->int_comaerr;
2294		break;
2295	case PCI_DEVICE_ID_MARVELL_MV9580:
2296		mhba->mmio = mhba->base_addr[2];
2297		base = mhba->mmio;
2298		if (!mhba->regs) {
2299			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2300			if (mhba->regs == NULL)
2301				return -ENOMEM;
2302		}
2303		regs = mhba->regs;
2304		/* For Arm */
2305		regs->ctrl_sts_reg          = base + 0x20104;
2306		regs->rstoutn_mask_reg      = base + 0x1010C;
2307		regs->sys_soft_rst_reg      = base + 0x10108;
2308		regs->main_int_cause_reg    = base + 0x10200;
2309		regs->enpointa_mask_reg     = base + 0x1020C;
2310		regs->rstoutn_en_reg        = base + 0xF1400;
2311
2312		/* For Doorbell */
2313		regs->pciea_to_arm_drbl_reg = base + 0x10460;
2314		regs->arm_to_pciea_drbl_reg = base + 0x10480;
2315		regs->arm_to_pciea_mask_reg = base + 0x10484;
2316		regs->pciea_to_arm_msg0     = base + 0x10400;
2317		regs->pciea_to_arm_msg1     = base + 0x10404;
2318		regs->arm_to_pciea_msg0     = base + 0x10420;
2319		regs->arm_to_pciea_msg1     = base + 0x10424;
2320
2321		/* For reset*/
2322		regs->reset_request         = base + 0x10108;
2323		regs->reset_enable          = base + 0x1010c;
2324
2325		/* For Message Unit */
2326		regs->inb_aval_count_basel  = base + 0x4008;
2327		regs->inb_aval_count_baseh  = base + 0x400C;
2328		regs->inb_write_pointer     = base + 0x4018;
2329		regs->inb_read_pointer      = base + 0x401C;
2330		regs->outb_copy_basel       = base + 0x4058;
2331		regs->outb_copy_baseh       = base + 0x405C;
2332		regs->outb_copy_pointer     = base + 0x406C;
2333		regs->outb_read_pointer     = base + 0x4070;
2334		regs->outb_coal_cfg         = base + 0x4080;
2335		regs->outb_isr_cause        = base + 0x4088;
2336		/* Bit setting for HW */
2337		regs->int_comaout           = 1 << 4;
2338		regs->int_dl_cpu2pciea      = 1 << 12;
2339		regs->int_comaerr           = 1 << 29;
2340		regs->cl_pointer_toggle     = 1 << 14;
2341		regs->cl_slot_num_mask      = 0x3FFF;
2342		regs->clic_irq              = 1 << 0;
2343		regs->clic_out_err          = 1 << 1;
2344		regs->int_drbl_int_mask     = 0x3FFFFFFF;
2345		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2346		break;
2347	default:
2348		return -1;
2349		break;
2350	}
2351
2352	return 0;
2353}
2354
2355/**
2356 * mvumi_init_fw -	Initializes the FW
2357 * @mhba:		Adapter soft state
2358 *
2359 * This is the main function for initializing firmware.
2360 */
2361static int mvumi_init_fw(struct mvumi_hba *mhba)
2362{
2363	int ret = 0;
2364
2365	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2366		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2367		return -EBUSY;
2368	}
2369	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2370	if (ret)
2371		goto fail_ioremap;
2372
2373	switch (mhba->pdev->device) {
2374	case PCI_DEVICE_ID_MARVELL_MV9143:
2375		mhba->instancet = &mvumi_instance_9143;
2376		mhba->io_seq = 0;
2377		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2378		mhba->request_id_enabled = 1;
2379		break;
2380	case PCI_DEVICE_ID_MARVELL_MV9580:
2381		mhba->instancet = &mvumi_instance_9580;
2382		mhba->io_seq = 0;
2383		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2384		break;
2385	default:
2386		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2387							mhba->pdev->device);
2388		mhba->instancet = NULL;
2389		ret = -EINVAL;
2390		goto fail_alloc_mem;
2391	}
2392	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
2393							mhba->pdev->device);
2394	ret = mvumi_cfg_hw_reg(mhba);
2395	if (ret) {
2396		dev_err(&mhba->pdev->dev,
2397			"failed to allocate memory for reg\n");
2398		ret = -ENOMEM;
2399		goto fail_alloc_mem;
2400	}
2401	mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
2402						&mhba->handshake_page_phys);
2403	if (!mhba->handshake_page) {
2404		dev_err(&mhba->pdev->dev,
2405			"failed to allocate memory for handshake\n");
2406		ret = -ENOMEM;
2407		goto fail_alloc_page;
2408	}
2409
2410	if (mvumi_start(mhba)) {
2411		ret = -EINVAL;
2412		goto fail_ready_state;
2413	}
2414	ret = mvumi_alloc_cmds(mhba);
2415	if (ret)
2416		goto fail_ready_state;
2417
2418	return 0;
2419
2420fail_ready_state:
2421	mvumi_release_mem_resource(mhba);
2422	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
2423		mhba->handshake_page, mhba->handshake_page_phys);
2424fail_alloc_page:
2425	kfree(mhba->regs);
2426fail_alloc_mem:
2427	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2428fail_ioremap:
2429	pci_release_regions(mhba->pdev);
2430
2431	return ret;
2432}
2433
2434/**
2435 * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
2436 * @mhba:		Adapter soft state
2437 */
2438static int mvumi_io_attach(struct mvumi_hba *mhba)
2439{
2440	struct Scsi_Host *host = mhba->shost;
2441	struct scsi_device *sdev = NULL;
2442	int ret;
2443	unsigned int max_sg = (mhba->ib_max_size + 4 -
2444		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
2445
2446	host->irq = mhba->pdev->irq;
2447	host->unique_id = mhba->unique_id;
2448	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2449	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2450	host->max_sectors = mhba->max_transfer_size / 512;
2451	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2452	host->max_id = mhba->max_target_id;
2453	host->max_cmd_len = MAX_COMMAND_SIZE;
2454	host->transportt = &mvumi_transport_template;
2455
2456	ret = scsi_add_host(host, &mhba->pdev->dev);
2457	if (ret) {
2458		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2459		return ret;
2460	}
2461	mhba->fw_flag |= MVUMI_FW_ATTACH;
2462
2463	mutex_lock(&mhba->sas_discovery_mutex);
2464	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2465		ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2466	else
2467		ret = 0;
2468	if (ret) {
2469		dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2470		mutex_unlock(&mhba->sas_discovery_mutex);
2471		goto fail_add_device;
2472	}
2473
2474	mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2475						mhba, "mvumi_scanthread");
2476	if (IS_ERR(mhba->dm_thread)) {
2477		dev_err(&mhba->pdev->dev,
 2478			"failed to create device scan thread\n");
		ret = PTR_ERR(mhba->dm_thread);
2479		mutex_unlock(&mhba->sas_discovery_mutex);
2480		goto fail_create_thread;
2481	}
2482	atomic_set(&mhba->pnp_count, 1);
2483	wake_up_process(mhba->dm_thread);
2484
2485	mutex_unlock(&mhba->sas_discovery_mutex);
2486	return 0;
2487
2488fail_create_thread:
2489	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2490		sdev = scsi_device_lookup(mhba->shost, 0,
2491						mhba->max_target_id - 1, 0);
2492	if (sdev) {
2493		scsi_remove_device(sdev);
2494		scsi_device_put(sdev);
2495	}
2496fail_add_device:
2497	scsi_remove_host(mhba->shost);
2498	return ret;
2499}
2500
2501/**
2502 * mvumi_probe_one -	PCI hotplug entry point
2503 * @pdev:		PCI device structure
2504 * @id:			PCI ids of supported hotplugged adapter
2505 */
2506static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2507{
2508	struct Scsi_Host *host;
2509	struct mvumi_hba *mhba;
2510	int ret;
2511
2512	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2513			pdev->vendor, pdev->device, pdev->subsystem_vendor,
2514			pdev->subsystem_device);
2515
2516	ret = pci_enable_device(pdev);
2517	if (ret)
2518		return ret;
2519
2520	pci_set_master(pdev);
2521
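	/* prefer a 64-bit DMA mask, fall back to 32-bit if that fails */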
2522	if (IS_DMA64) {
2523		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2524		if (ret) {
2525			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2526			if (ret)
2527				goto fail_set_dma_mask;
2528		}
2529	} else {
2530		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2531		if (ret)
2532			goto fail_set_dma_mask;
2533	}
2534
2535	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2536	if (!host) {
2537		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
2538		ret = -ENOMEM;
2539		goto fail_alloc_instance;
2540	}
2541	mhba = shost_priv(host);
2542
2543	INIT_LIST_HEAD(&mhba->cmd_pool);
2544	INIT_LIST_HEAD(&mhba->ob_data_list);
2545	INIT_LIST_HEAD(&mhba->free_ob_list);
2546	INIT_LIST_HEAD(&mhba->res_list);
2547	INIT_LIST_HEAD(&mhba->waiting_req_list);
2548	mutex_init(&mhba->device_lock);
2549	INIT_LIST_HEAD(&mhba->mhba_dev_list);
2550	INIT_LIST_HEAD(&mhba->shost_dev_list);
2551	atomic_set(&mhba->fw_outstanding, 0);
2552	init_waitqueue_head(&mhba->int_cmd_wait_q);
2553	mutex_init(&mhba->sas_discovery_mutex);
2554
2555	mhba->pdev = pdev;
2556	mhba->shost = host;
2557	mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
2558
2559	ret = mvumi_init_fw(mhba);
2560	if (ret)
2561		goto fail_init_fw;
2562
2563	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2564				"mvumi", mhba);
2565	if (ret) {
2566		dev_err(&pdev->dev, "failed to register IRQ\n");
2567		goto fail_init_irq;
2568	}
2569
2570	mhba->instancet->enable_intr(mhba);
2571	pci_set_drvdata(pdev, mhba);
2572
2573	ret = mvumi_io_attach(mhba);
2574	if (ret)
2575		goto fail_io_attach;
2576
2577	mvumi_backup_bar_addr(mhba);
2578	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
2579
2580	return 0;
2581
2582fail_io_attach:
2583	mhba->instancet->disable_intr(mhba);
2584	free_irq(mhba->pdev->irq, mhba);
2585fail_init_irq:
2586	mvumi_release_fw(mhba);
2587fail_init_fw:
2588	scsi_host_put(host);
2589
2590fail_alloc_instance:
2591fail_set_dma_mask:
2592	pci_disable_device(pdev);
2593
2594	return ret;
2595}
2596
2597static void mvumi_detach_one(struct pci_dev *pdev)
2598{
2599	struct Scsi_Host *host;
2600	struct mvumi_hba *mhba;
2601
2602	mhba = pci_get_drvdata(pdev);
2603	if (mhba->dm_thread) {
2604		kthread_stop(mhba->dm_thread);
2605		mhba->dm_thread = NULL;
2606	}
2607
2608	mvumi_detach_devices(mhba);
2609	host = mhba->shost;
2610	scsi_remove_host(mhba->shost);
2611	mvumi_flush_cache(mhba);
2612
2613	mhba->instancet->disable_intr(mhba);
2614	free_irq(mhba->pdev->irq, mhba);
2615	mvumi_release_fw(mhba);
2616	scsi_host_put(host);
2617	pci_disable_device(pdev);
2618	dev_dbg(&pdev->dev, "driver is removed!\n");
2619}
2620
2621/**
2622 * mvumi_shutdown -	Shutdown entry point
 2623 * @pdev:		PCI device structure
2624 */
2625static void mvumi_shutdown(struct pci_dev *pdev)
2626{
2627	struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2628
2629	mvumi_flush_cache(mhba);
2630}
2631
2632static int __maybe_unused mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2633{
 2634	struct mvumi_hba *mhba = NULL;
 2635
2636	mhba = pci_get_drvdata(pdev);
2637	mvumi_flush_cache(mhba);
2638
2639	pci_set_drvdata(pdev, mhba);
2640	mhba->instancet->disable_intr(mhba);
2641	free_irq(mhba->pdev->irq, mhba);
2642	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2643	pci_release_regions(pdev);
2644	pci_save_state(pdev);
2645	pci_disable_device(pdev);
2646	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2647
2648	return 0;
2649}
2650
2651static int __maybe_unused mvumi_resume(struct pci_dev *pdev)
2652{
2653	int ret;
2654	struct mvumi_hba *mhba = NULL;
2655
2656	mhba = pci_get_drvdata(pdev);
2657
2658	pci_set_power_state(pdev, PCI_D0);
2659	pci_enable_wake(pdev, PCI_D0, 0);
2660	pci_restore_state(pdev);
2661
2662	ret = pci_enable_device(pdev);
2663	if (ret) {
2664		dev_err(&pdev->dev, "enable device failed\n");
2665		return ret;
2666	}
2667	pci_set_master(pdev);
2668	if (IS_DMA64) {
2669		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2670		if (ret) {
2671			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2672			if (ret)
2673				goto fail;
2674		}
2675	} else {
2676		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2677		if (ret)
2678			goto fail;
2679	}
2680	ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2681	if (ret)
2682		goto fail;
2683	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2684	if (ret)
2685		goto release_regions;
2686
2687	if (mvumi_cfg_hw_reg(mhba)) {
2688		ret = -EINVAL;
2689		goto unmap_pci_addr;
2690	}
2691
2692	mhba->mmio = mhba->base_addr[0];
2693	mvumi_reset(mhba);
2694
2695	if (mvumi_start(mhba)) {
2696		ret = -EINVAL;
2697		goto unmap_pci_addr;
2698	}
2699
2700	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2701				"mvumi", mhba);
2702	if (ret) {
2703		dev_err(&pdev->dev, "failed to register IRQ\n");
2704		goto unmap_pci_addr;
2705	}
2706	mhba->instancet->enable_intr(mhba);
2707
2708	return 0;
2709
2710unmap_pci_addr:
2711	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2712release_regions:
2713	pci_release_regions(pdev);
2714fail:
2715	pci_disable_device(pdev);
2716
2717	return ret;
2718}
 2719
 2720static struct pci_driver mvumi_pci_driver = {
2721
2722	.name = MV_DRIVER_NAME,
2723	.id_table = mvumi_pci_table,
2724	.probe = mvumi_probe_one,
2725	.remove = mvumi_detach_one,
2726	.shutdown = mvumi_shutdown,
2727#ifdef CONFIG_PM
2728	.suspend = mvumi_suspend,
2729	.resume = mvumi_resume,
2730#endif
2731};
2732
2733/**
2734 * mvumi_init - Driver load entry point
2735 */
2736static int __init mvumi_init(void)
2737{
2738	return pci_register_driver(&mvumi_pci_driver);
2739}
2740
2741/**
2742 * mvumi_exit - Driver unload entry point
2743 */
2744static void __exit mvumi_exit(void)
2745{
2746
2747	pci_unregister_driver(&mvumi_pci_driver);
2748}
2749
2750module_init(mvumi_init);
2751module_exit(mvumi_exit);