   1/*
   2 * Marvell UMI driver
   3 *
   4 * Copyright 2011 Marvell. <jyli@marvell.com>
   5 *
   6 * This file is licensed under GPLv2.
   7 *
   8 * This program is free software; you can redistribute it and/or
   9 * modify it under the terms of the GNU General Public License as
  10 * published by the Free Software Foundation; version 2 of the
  11 * License.
  12 *
  13 * This program is distributed in the hope that it will be useful,
  14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  16 * General Public License for more details.
  17 *
  18 * You should have received a copy of the GNU General Public License
  19 * along with this program; if not, write to the Free Software
  20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  21 * USA
  22*/
  23
  24#include <linux/kernel.h>
  25#include <linux/module.h>
  26#include <linux/moduleparam.h>
  27#include <linux/init.h>
  28#include <linux/device.h>
  29#include <linux/pci.h>
  30#include <linux/list.h>
  31#include <linux/spinlock.h>
  32#include <linux/interrupt.h>
  33#include <linux/delay.h>
  34#include <linux/blkdev.h>
  35#include <linux/io.h>
  36#include <scsi/scsi.h>
  37#include <scsi/scsi_cmnd.h>
  38#include <scsi/scsi_device.h>
  39#include <scsi/scsi_host.h>
  40#include <scsi/scsi_transport.h>
  41#include <scsi/scsi_eh.h>
  42#include <linux/uaccess.h>
  43#include <linux/kthread.h>
  44
  45#include "mvumi.h"
  46
  47MODULE_LICENSE("GPL");
  48MODULE_AUTHOR("jyli@marvell.com");
  49MODULE_DESCRIPTION("Marvell UMI Driver");
  50
  51static DEFINE_PCI_DEVICE_TABLE(mvumi_pci_table) = {
  52	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9143) },
  53	{ PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, PCI_DEVICE_ID_MARVELL_MV9580) },
  54	{ 0 }
  55};
  56
  57MODULE_DEVICE_TABLE(pci, mvumi_pci_table);
  58
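/*
 * Tag pool helpers: the tag pool is a simple LIFO stack of free tag values.
 * tag_init() fills the stack with every tag, tag_get_one() pops a free tag
 * and tag_release_one() pushes it back; the tag indexes mhba->tag_cmd[] so an
 * outbound (response) frame can be matched back to its originating command.
 */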
  59static void tag_init(struct mvumi_tag *st, unsigned short size)
  60{
  61	unsigned short i;
  62	BUG_ON(size != st->size);
  63	st->top = size;
  64	for (i = 0; i < size; i++)
  65		st->stack[i] = size - 1 - i;
  66}
  67
  68static unsigned short tag_get_one(struct mvumi_hba *mhba, struct mvumi_tag *st)
  69{
  70	BUG_ON(st->top <= 0);
  71	return st->stack[--st->top];
  72}
  73
  74static void tag_release_one(struct mvumi_hba *mhba, struct mvumi_tag *st,
  75							unsigned short tag)
  76{
  77	BUG_ON(st->top >= st->size);
  78	st->stack[st->top++] = tag;
  79}
  80
  81static bool tag_is_empty(struct mvumi_tag *st)
  82{
  83	if (st->top == 0)
  84		return 1;
  85	else
  86		return 0;
  87}
  88
  89static void mvumi_unmap_pci_addr(struct pci_dev *dev, void **addr_array)
  90{
  91	int i;
  92
  93	for (i = 0; i < MAX_BASE_ADDRESS; i++)
  94		if ((pci_resource_flags(dev, i) & IORESOURCE_MEM) &&
  95								addr_array[i])
  96			pci_iounmap(dev, addr_array[i]);
  97}
  98
  99static int mvumi_map_pci_addr(struct pci_dev *dev, void **addr_array)
 100{
 101	int i;
 102
 103	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 104		if (pci_resource_flags(dev, i) & IORESOURCE_MEM) {
 105			addr_array[i] = pci_iomap(dev, i, 0);
 106			if (!addr_array[i]) {
 107				dev_err(&dev->dev, "failed to map Bar[%d]\n",
 108									i);
 109				mvumi_unmap_pci_addr(dev, addr_array);
 110				return -ENOMEM;
 111			}
 112		} else
 113			addr_array[i] = NULL;
 114
 115		dev_dbg(&dev->dev, "Bar %d : %p.\n", i, addr_array[i]);
 116	}
 117
 118	return 0;
 119}
 120
 121static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 122				enum resource_type type, unsigned int size)
 123{
 124	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_ATOMIC);
 125
 126	if (!res) {
 127		dev_err(&mhba->pdev->dev,
 128			"Failed to allocate memory for resource manager.\n");
 129		return NULL;
 130	}
 131
 132	switch (type) {
 133	case RESOURCE_CACHED_MEMORY:
 134		res->virt_addr = kzalloc(size, GFP_ATOMIC);
 135		if (!res->virt_addr) {
 136			dev_err(&mhba->pdev->dev,
 137				"unable to allocate memory,size = %d.\n", size);
 138			kfree(res);
 139			return NULL;
 140		}
 141		break;
 142
 143	case RESOURCE_UNCACHED_MEMORY:
 144		size = round_up(size, 8);
 145		res->virt_addr = pci_alloc_consistent(mhba->pdev, size,
 146							&res->bus_addr);
 147		if (!res->virt_addr) {
 148			dev_err(&mhba->pdev->dev,
 149					"unable to allocate consistent mem,"
 150							"size = %d.\n", size);
 151			kfree(res);
 152			return NULL;
 153		}
 154		memset(res->virt_addr, 0, size);
 155		break;
 156
 157	default:
 158		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
 159		kfree(res);
 160		return NULL;
 161	}
 162
 163	res->type = type;
 164	res->size = size;
 165	INIT_LIST_HEAD(&res->entry);
 166	list_add_tail(&res->entry, &mhba->res_list);
 167
 168	return res;
 169}
 170
 171static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
 172{
 173	struct mvumi_res *res, *tmp;
 174
 175	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
 176		switch (res->type) {
 177		case RESOURCE_UNCACHED_MEMORY:
 178			pci_free_consistent(mhba->pdev, res->size,
 179						res->virt_addr, res->bus_addr);
 180			break;
 181		case RESOURCE_CACHED_MEMORY:
 182			kfree(res->virt_addr);
 183			break;
 184		default:
 185			dev_err(&mhba->pdev->dev,
 186				"unknown resource type %d\n", res->type);
 187			break;
 188		}
 189		list_del(&res->entry);
 190		kfree(res);
 191	}
 192	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
 193}
 194
 195/**
  196 * mvumi_make_sgl -	Prepares SGL
  197 * @mhba:		Adapter soft state
  198 * @scmd:		SCSI command from the mid-layer
  199 * @sgl_p:		SGL to be filled in
  200 * @sg_count:		return the number of SG elements
  201 *
  202 * If successful, this function returns 0. Otherwise, it returns -1.
 203 */
 204static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 205					void *sgl_p, unsigned char *sg_count)
 206{
 207	struct scatterlist *sg;
 208	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
 209	unsigned int i;
 210	unsigned int sgnum = scsi_sg_count(scmd);
 211	dma_addr_t busaddr;
 212
 213	if (sgnum) {
 214		sg = scsi_sglist(scmd);
 215		*sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
 216				(int) scmd->sc_data_direction);
 217		if (*sg_count > mhba->max_sge) {
 218			dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
 219						"than max sg[0x%x].\n",
 220						*sg_count, mhba->max_sge);
 221			return -1;
 222		}
 223		for (i = 0; i < *sg_count; i++) {
 224			busaddr = sg_dma_address(&sg[i]);
 225			m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 226			m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 227			m_sg->flags = 0;
 228			sgd_setsz(mhba, m_sg, cpu_to_le32(sg_dma_len(&sg[i])));
 229			if ((i + 1) == *sg_count)
 230				m_sg->flags |= 1U << mhba->eot_flag;
 231
 232			sgd_inc(mhba, m_sg);
 233		}
 234	} else {
 235		scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
 236			pci_map_single(mhba->pdev, scsi_sglist(scmd),
 237				scsi_bufflen(scmd),
 238				(int) scmd->sc_data_direction)
 239			: 0;
 240		busaddr = scmd->SCp.dma_handle;
 241		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 242		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 243		m_sg->flags = 1U << mhba->eot_flag;
 244		sgd_setsz(mhba, m_sg, cpu_to_le32(scsi_bufflen(scmd)));
 245		*sg_count = 1;
 246	}
 247
 248	return 0;
 249}
 250
 251static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
 252							unsigned int size)
 253{
 254	struct mvumi_sgl *m_sg;
 255	void *virt_addr;
 256	dma_addr_t phy_addr;
 257
 258	if (size == 0)
 259		return 0;
 260
 261	virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr);
 262	if (!virt_addr)
 263		return -1;
 264
 265	memset(virt_addr, 0, size);
 266
 267	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 268	cmd->frame->sg_counts = 1;
 269	cmd->data_buf = virt_addr;
 270
 271	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
 272	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
 273	m_sg->flags = 1U << mhba->eot_flag;
 274	sgd_setsz(mhba, m_sg, cpu_to_le32(size));
 275
 276	return 0;
 277}
 278
 279static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
 280				unsigned int buf_size)
 281{
 282	struct mvumi_cmd *cmd;
 283
 284	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 285	if (!cmd) {
  286		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
 287		return NULL;
 288	}
 289	INIT_LIST_HEAD(&cmd->queue_pointer);
 290
 291	cmd->frame = pci_alloc_consistent(mhba->pdev,
 292				mhba->ib_max_size, &cmd->frame_phys);
 293	if (!cmd->frame) {
 294		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
 295			" frame,size = %d.\n", mhba->ib_max_size);
 296		kfree(cmd);
 297		return NULL;
 298	}
 299
 300	if (buf_size) {
 301		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
 302			dev_err(&mhba->pdev->dev, "failed to allocate memory"
 303						" for internal frame\n");
 304			pci_free_consistent(mhba->pdev, mhba->ib_max_size,
 305					cmd->frame, cmd->frame_phys);
 306			kfree(cmd);
 307			return NULL;
 308		}
 309	} else
 310		cmd->frame->sg_counts = 0;
 311
 312	return cmd;
 313}
 314
 315static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
 316						struct mvumi_cmd *cmd)
 317{
 318	struct mvumi_sgl *m_sg;
 319	unsigned int size;
 320	dma_addr_t phy_addr;
 321
 322	if (cmd && cmd->frame) {
 323		if (cmd->frame->sg_counts) {
 324			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 325			sgd_getsz(mhba, m_sg, size);
 326
 327			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
 328				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 329
 330			pci_free_consistent(mhba->pdev, size, cmd->data_buf,
 331								phy_addr);
 332		}
 333		pci_free_consistent(mhba->pdev, mhba->ib_max_size,
 334				cmd->frame, cmd->frame_phys);
 335		kfree(cmd);
 336	}
 337}
 338
 339/**
 340 * mvumi_get_cmd -	Get a command from the free pool
 341 * @mhba:		Adapter soft state
 342 *
 343 * Returns a free command from the pool
 344 */
 345static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
 346{
 347	struct mvumi_cmd *cmd = NULL;
 348
 349	if (likely(!list_empty(&mhba->cmd_pool))) {
 350		cmd = list_entry((&mhba->cmd_pool)->next,
 351				struct mvumi_cmd, queue_pointer);
 352		list_del_init(&cmd->queue_pointer);
 353	} else
 354		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
 355
 356	return cmd;
 357}
 358
 359/**
 360 * mvumi_return_cmd -	Return a cmd to free command pool
 361 * @mhba:		Adapter soft state
 362 * @cmd:		Command packet to be returned to free command pool
 363 */
 364static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
 365						struct mvumi_cmd *cmd)
 366{
 367	cmd->scmd = NULL;
 368	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 369}
 370
 371/**
 372 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 373 * @mhba:		Adapter soft state
 374 */
 375static void mvumi_free_cmds(struct mvumi_hba *mhba)
 376{
 377	struct mvumi_cmd *cmd;
 378
 379	while (!list_empty(&mhba->cmd_pool)) {
 380		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 381							queue_pointer);
 382		list_del(&cmd->queue_pointer);
 383		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 384			kfree(cmd->frame);
 385		kfree(cmd);
 386	}
 387}
 388
 389/**
 390 * mvumi_alloc_cmds -	Allocates the command packets
 391 * @mhba:		Adapter soft state
 392 *
 393 */
 394static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
 395{
 396	int i;
 397	struct mvumi_cmd *cmd;
 398
 399	for (i = 0; i < mhba->max_io; i++) {
 400		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 401		if (!cmd)
 402			goto err_exit;
 403
 404		INIT_LIST_HEAD(&cmd->queue_pointer);
 405		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 406		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 407			cmd->frame = mhba->ib_frame + i * mhba->ib_max_size;
 408			cmd->frame_phys = mhba->ib_frame_phys
 409						+ i * mhba->ib_max_size;
 410		} else
 411			cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
 412		if (!cmd->frame)
 413			goto err_exit;
 414	}
 415	return 0;
 416
 417err_exit:
 418	dev_err(&mhba->pdev->dev,
 419			"failed to allocate memory for cmd[0x%x].\n", i);
 420	while (!list_empty(&mhba->cmd_pool)) {
 421		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 422						queue_pointer);
 423		list_del(&cmd->queue_pointer);
 424		if (!(mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC))
 425			kfree(cmd->frame);
 426		kfree(cmd);
 427	}
 428	return -ENOMEM;
 429}
 430
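/*
 * Inbound-list space accounting. The driver and firmware exchange requests
 * and responses through circular "communication lists": the inbound list
 * carries request frames to the firmware, the outbound list carries response
 * frames back. Slot pointers carry a toggle bit (cl_pointer_toggle) on top of
 * the slot number so a full list can be told apart from an empty one after
 * wrap-around. The 9143 variant checks the firmware's inbound read pointer
 * for a list-full condition and otherwise reports max_io minus the
 * outstanding I/O count; the 9580 variant reads an available-entry count
 * from ib_shadow.
 */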
 431static unsigned int mvumi_check_ib_list_9143(struct mvumi_hba *mhba)
 432{
 433	unsigned int ib_rp_reg;
 434	struct mvumi_hw_regs *regs = mhba->regs;
 435
 436	ib_rp_reg = ioread32(mhba->regs->inb_read_pointer);
 437
 438	if (unlikely(((ib_rp_reg & regs->cl_slot_num_mask) ==
 439			(mhba->ib_cur_slot & regs->cl_slot_num_mask)) &&
 440			((ib_rp_reg & regs->cl_pointer_toggle)
 441			 != (mhba->ib_cur_slot & regs->cl_pointer_toggle)))) {
 442		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
 443		return 0;
 444	}
 445	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
 446		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
 447		return 0;
 448	} else {
 449		return mhba->max_io - atomic_read(&mhba->fw_outstanding);
 450	}
 451}
 452
 453static unsigned int mvumi_check_ib_list_9580(struct mvumi_hba *mhba)
 454{
 455	unsigned int count;
 456	if (atomic_read(&mhba->fw_outstanding) >= (mhba->max_io - 1))
 457		return 0;
 458	count = ioread32(mhba->ib_shadow);
 459	if (count == 0xffff)
 460		return 0;
 461	return count;
 462}
 463
 464static void mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
 465{
 466	unsigned int cur_ib_entry;
 467
 468	cur_ib_entry = mhba->ib_cur_slot & mhba->regs->cl_slot_num_mask;
 469	cur_ib_entry++;
 470	if (cur_ib_entry >= mhba->list_num_io) {
 471		cur_ib_entry -= mhba->list_num_io;
 472		mhba->ib_cur_slot ^= mhba->regs->cl_pointer_toggle;
 473	}
 474	mhba->ib_cur_slot &= ~mhba->regs->cl_slot_num_mask;
 475	mhba->ib_cur_slot |= (cur_ib_entry & mhba->regs->cl_slot_num_mask);
 476	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 477		*ib_entry = mhba->ib_list + cur_ib_entry *
 478				sizeof(struct mvumi_dyn_list_entry);
 479	} else {
 480		*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
 481	}
 482	atomic_inc(&mhba->fw_outstanding);
 483}
 484
 485static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
 486{
 487	iowrite32(0xffff, mhba->ib_shadow);
 488	iowrite32(mhba->ib_cur_slot, mhba->regs->inb_write_pointer);
 489}
 490
 491static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
 492		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
 493{
 494	unsigned short tag, request_id;
 495
 496	udelay(1);
 497	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 498	request_id = p_outb_frame->request_id;
 499	tag = p_outb_frame->tag;
 500	if (tag > mhba->tag_pool.size) {
 501		dev_err(&mhba->pdev->dev, "ob frame data error\n");
 502		return -1;
 503	}
 504	if (mhba->tag_cmd[tag] == NULL) {
 505		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
 506		return -1;
 507	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
 508						mhba->request_id_enabled) {
 509			dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
 510					"cmd request ID:0x%x\n", request_id,
 511					mhba->tag_cmd[tag]->request_id);
 512			return -1;
 513	}
 514
 515	return 0;
 516}
 517
 518static int mvumi_check_ob_list_9143(struct mvumi_hba *mhba,
 519			unsigned int *cur_obf, unsigned int *assign_obf_end)
 520{
 521	unsigned int ob_write, ob_write_shadow;
 522	struct mvumi_hw_regs *regs = mhba->regs;
 523
 524	do {
 525		ob_write = ioread32(regs->outb_copy_pointer);
 526		ob_write_shadow = ioread32(mhba->ob_shadow);
 527	} while ((ob_write & regs->cl_slot_num_mask) != ob_write_shadow);
 528
 529	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 530	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 531
 532	if ((ob_write & regs->cl_pointer_toggle) !=
 533			(mhba->ob_cur_slot & regs->cl_pointer_toggle)) {
 534		*assign_obf_end += mhba->list_num_io;
 535	}
 536	return 0;
 537}
 538
 539static int mvumi_check_ob_list_9580(struct mvumi_hba *mhba,
 540			unsigned int *cur_obf, unsigned int *assign_obf_end)
 541{
 542	unsigned int ob_write;
 543	struct mvumi_hw_regs *regs = mhba->regs;
 544
 545	ob_write = ioread32(regs->outb_read_pointer);
 546	ob_write = ioread32(regs->outb_copy_pointer);
 547	*cur_obf = mhba->ob_cur_slot & mhba->regs->cl_slot_num_mask;
 548	*assign_obf_end = ob_write & mhba->regs->cl_slot_num_mask;
 549	if (*assign_obf_end < *cur_obf)
 550		*assign_obf_end += mhba->list_num_io;
 551	else if (*assign_obf_end == *cur_obf)
 552		return -1;
 553	return 0;
 554}
 555
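/*
 * Drain the outbound list: walk from the current slot up to the slot the
 * firmware has copied responses into, validate each frame's tag/request_id
 * (mvumi_check_ob_frame() re-checks entries the firmware may not have
 * finished writing yet), copy every valid frame into a driver-owned
 * mvumi_ob_data buffer queued on free_ob_list, and finally acknowledge
 * consumption by writing the updated read pointer back to the controller.
 */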
 556static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
 557{
 558	unsigned int cur_obf, assign_obf_end, i;
 559	struct mvumi_ob_data *ob_data;
 560	struct mvumi_rsp_frame *p_outb_frame;
 561	struct mvumi_hw_regs *regs = mhba->regs;
 562
 563	if (mhba->instancet->check_ob_list(mhba, &cur_obf, &assign_obf_end))
 564		return;
 565
 566	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
 567		cur_obf++;
 568		if (cur_obf >= mhba->list_num_io) {
 569			cur_obf -= mhba->list_num_io;
 570			mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 571		}
 572
 573		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 574
 575		/* Copy pointer may point to entry in outbound list
 576		*  before entry has valid data
 577		*/
 578		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
 579			mhba->tag_cmd[p_outb_frame->tag] == NULL ||
 580			p_outb_frame->request_id !=
 581				mhba->tag_cmd[p_outb_frame->tag]->request_id))
 582			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
 583				continue;
 584
 585		if (!list_empty(&mhba->ob_data_list)) {
 586			ob_data = (struct mvumi_ob_data *)
 587				list_first_entry(&mhba->ob_data_list,
 588					struct mvumi_ob_data, list);
 589			list_del_init(&ob_data->list);
 590		} else {
 591			ob_data = NULL;
 592			if (cur_obf == 0) {
 593				cur_obf = mhba->list_num_io - 1;
 594				mhba->ob_cur_slot ^= regs->cl_pointer_toggle;
 595			} else
 596				cur_obf -= 1;
 597			break;
 598		}
 599
 600		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
 601		p_outb_frame->tag = 0xff;
 602
 603		list_add_tail(&ob_data->list, &mhba->free_ob_list);
 604	}
 605	mhba->ob_cur_slot &= ~regs->cl_slot_num_mask;
 606	mhba->ob_cur_slot |= (cur_obf & regs->cl_slot_num_mask);
 607	iowrite32(mhba->ob_cur_slot, regs->outb_read_pointer);
 608}
 609
 610static void mvumi_reset(struct mvumi_hba *mhba)
 611{
 612	struct mvumi_hw_regs *regs = mhba->regs;
 613
 614	iowrite32(0, regs->enpointa_mask_reg);
 615	if (ioread32(regs->arm_to_pciea_msg1) != HANDSHAKE_DONESTATE)
 616		return;
 617
 618	iowrite32(DRBL_SOFT_RESET, regs->pciea_to_arm_drbl_reg);
 619}
 620
 621static unsigned char mvumi_start(struct mvumi_hba *mhba);
 622
 623static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
 624{
 625	mhba->fw_state = FW_STATE_ABORT;
 626	mvumi_reset(mhba);
 627
 628	if (mvumi_start(mhba))
 629		return FAILED;
 630	else
 631		return SUCCESS;
 632}
 633
 634static int mvumi_wait_for_fw(struct mvumi_hba *mhba)
 635{
 636	struct mvumi_hw_regs *regs = mhba->regs;
 637	u32 tmp;
 638	unsigned long before;
 639	before = jiffies;
 640
 641	iowrite32(0, regs->enpointa_mask_reg);
 642	tmp = ioread32(regs->arm_to_pciea_msg1);
 643	while (tmp != HANDSHAKE_READYSTATE) {
 644		iowrite32(DRBL_MU_RESET, regs->pciea_to_arm_drbl_reg);
 645		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
 646			dev_err(&mhba->pdev->dev,
 647				"FW reset failed [0x%x].\n", tmp);
 648			return FAILED;
 649		}
 650
 651		msleep(500);
 652		rmb();
 653		tmp = ioread32(regs->arm_to_pciea_msg1);
 654	}
 655
 656	return SUCCESS;
 657}
 658
 659static void mvumi_backup_bar_addr(struct mvumi_hba *mhba)
 660{
 661	unsigned char i;
 662
 663	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 664		pci_read_config_dword(mhba->pdev, 0x10 + i * 4,
 665						&mhba->pci_base[i]);
 666	}
 667}
 668
 669static void mvumi_restore_bar_addr(struct mvumi_hba *mhba)
 670{
 671	unsigned char i;
 672
 673	for (i = 0; i < MAX_BASE_ADDRESS; i++) {
 674		if (mhba->pci_base[i])
 675			pci_write_config_dword(mhba->pdev, 0x10 + i * 4,
 676						mhba->pci_base[i]);
 677	}
 678}
 679
 680static unsigned int mvumi_pci_set_master(struct pci_dev *pdev)
 681{
 682	unsigned int ret = 0;
 683	pci_set_master(pdev);
 684
 685	if (IS_DMA64) {
 686		if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
 687			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 688	} else
 689		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
 690
 691	return ret;
 692}
 693
 694static int mvumi_reset_host_9580(struct mvumi_hba *mhba)
 695{
 696	mhba->fw_state = FW_STATE_ABORT;
 697
 698	iowrite32(0, mhba->regs->reset_enable);
 699	iowrite32(0xf, mhba->regs->reset_request);
 700
 701	iowrite32(0x10, mhba->regs->reset_enable);
 702	iowrite32(0x10, mhba->regs->reset_request);
 703	msleep(100);
 704	pci_disable_device(mhba->pdev);
 705
 706	if (pci_enable_device(mhba->pdev)) {
 707		dev_err(&mhba->pdev->dev, "enable device failed\n");
 708		return FAILED;
 709	}
 710	if (mvumi_pci_set_master(mhba->pdev)) {
 711		dev_err(&mhba->pdev->dev, "set master failed\n");
 712		return FAILED;
 713	}
 714	mvumi_restore_bar_addr(mhba);
 715	if (mvumi_wait_for_fw(mhba) == FAILED)
 716		return FAILED;
 717
 718	return mvumi_wait_for_outstanding(mhba);
 719}
 720
 721static int mvumi_reset_host_9143(struct mvumi_hba *mhba)
 722{
 723	return mvumi_wait_for_outstanding(mhba);
 724}
 725
 726static int mvumi_host_reset(struct scsi_cmnd *scmd)
 727{
 728	struct mvumi_hba *mhba;
 729
 730	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 731
 732	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
 733			scmd->serial_number, scmd->cmnd[0], scmd->retries);
 734
 735	return mhba->instancet->reset_host(mhba);
 736}
 737
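/*
 * Issue an internal command synchronously: mark it pending, fire it while
 * holding the host lock, then sleep on int_cmd_wait_q until the ISR completes
 * it or MVUMI_INTERNAL_CMD_WAIT_TIME seconds elapse. On timeout the tag is
 * released and the command is taken back off the waiting list (or the
 * outstanding counter is dropped if it was already sent to the firmware).
 */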
 738static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
 739						struct mvumi_cmd *cmd)
 740{
 741	unsigned long flags;
 742
 743	cmd->cmd_status = REQ_STATUS_PENDING;
 744
 745	if (atomic_read(&cmd->sync_cmd)) {
 746		dev_err(&mhba->pdev->dev,
 747			"last blocked cmd not finished, sync_cmd = %d\n",
 748						atomic_read(&cmd->sync_cmd));
 749		BUG_ON(1);
 750		return -1;
 751	}
 752	atomic_inc(&cmd->sync_cmd);
 753	spin_lock_irqsave(mhba->shost->host_lock, flags);
 754	mhba->instancet->fire_cmd(mhba, cmd);
 755	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 756
 757	wait_event_timeout(mhba->int_cmd_wait_q,
 758		(cmd->cmd_status != REQ_STATUS_PENDING),
 759		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
 760
 761	/* command timeout */
 762	if (atomic_read(&cmd->sync_cmd)) {
 763		spin_lock_irqsave(mhba->shost->host_lock, flags);
 764		atomic_dec(&cmd->sync_cmd);
 765		if (mhba->tag_cmd[cmd->frame->tag]) {
 766			mhba->tag_cmd[cmd->frame->tag] = 0;
 767			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
 768							cmd->frame->tag);
 769			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
 770		}
 771		if (!list_empty(&cmd->queue_pointer)) {
 772			dev_warn(&mhba->pdev->dev,
  773				"TIMEOUT: An internal command was not sent!\n");
 774			list_del_init(&cmd->queue_pointer);
 775		} else
 776			atomic_dec(&mhba->fw_outstanding);
 777
 778		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 779	}
 780	return 0;
 781}
 782
 783static void mvumi_release_fw(struct mvumi_hba *mhba)
 784{
 785	mvumi_free_cmds(mhba);
 786	mvumi_release_mem_resource(mhba);
 787	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
 788	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
 789		mhba->handshake_page, mhba->handshake_page_phys);
 790	kfree(mhba->regs);
 791	pci_release_regions(mhba->pdev);
 792}
 793
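/*
 * For every target present in target_map, build an internal non-data command
 * carrying the Marvell vendor-specific CDB (SCSI_CMD_MARVELL_SPECIFIC /
 * CDB_CORE_MODULE / CDB_CORE_SHUTDOWN) and issue it synchronously so the
 * firmware flushes that device's cache; command allocation is retried several
 * times per device before giving up.
 */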
 794static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
 795{
 796	struct mvumi_cmd *cmd;
 797	struct mvumi_msg_frame *frame;
 798	unsigned char device_id, retry = 0;
 799	unsigned char bitcount = sizeof(unsigned char) * 8;
 800
 801	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
 802		if (!(mhba->target_map[device_id / bitcount] &
 803				(1 << (device_id % bitcount))))
 804			continue;
 805get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
 806		if (!cmd) {
 807			if (retry++ >= 5) {
 808				dev_err(&mhba->pdev->dev, "failed to get memory"
 809					" for internal flush cache cmd for "
 810					"device %d", device_id);
 811				retry = 0;
 812				continue;
 813			} else
 814				goto get_cmd;
 815		}
 816		cmd->scmd = NULL;
 817		cmd->cmd_status = REQ_STATUS_PENDING;
 818		atomic_set(&cmd->sync_cmd, 0);
 819		frame = cmd->frame;
 820		frame->req_function = CL_FUN_SCSI_CMD;
 821		frame->device_id = device_id;
 822		frame->cmd_flag = CMD_FLAG_NON_DATA;
 823		frame->data_transfer_length = 0;
 824		frame->cdb_length = MAX_COMMAND_SIZE;
 825		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
 826		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
 827		frame->cdb[1] = CDB_CORE_MODULE;
 828		frame->cdb[2] = CDB_CORE_SHUTDOWN;
 829
 830		mvumi_issue_blocked_cmd(mhba, cmd);
 831		if (cmd->cmd_status != SAM_STAT_GOOD) {
 832			dev_err(&mhba->pdev->dev,
 833				"device %d flush cache failed, status=0x%x.\n",
 834				device_id, cmd->cmd_status);
 835		}
 836
 837		mvumi_delete_internal_cmd(mhba, cmd);
 838	}
 839	return 0;
 840}
 841
 842static unsigned char
 843mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
 844							unsigned short len)
 845{
 846	unsigned char *ptr;
 847	unsigned char ret = 0, i;
 848
 849	ptr = (unsigned char *) p_header->frame_content;
 850	for (i = 0; i < len; i++) {
 851		ret ^= *ptr;
 852		ptr++;
 853	}
 854
 855	return ret;
 856}
 857
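/*
 * Build one handshake page in the shared handshake buffer. Page 2 carries
 * host information (OS type, driver version, current time), page 3 is the
 * firmware control page (left zeroed here), and page 4 describes the
 * communication lists (base addresses, entry sizes and depths of the in- and
 * outbound lists). Every page is protected by an XOR checksum over its
 * payload, computed by mvumi_calculate_checksum() above.
 */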
 858static void mvumi_hs_build_page(struct mvumi_hba *mhba,
 859				struct mvumi_hs_header *hs_header)
 860{
 861	struct mvumi_hs_page2 *hs_page2;
 862	struct mvumi_hs_page4 *hs_page4;
 863	struct mvumi_hs_page3 *hs_page3;
 864	struct timeval time;
 865	unsigned int local_time;
 866
 867	switch (hs_header->page_code) {
 868	case HS_PAGE_HOST_INFO:
 869		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
 870		hs_header->frame_length = sizeof(*hs_page2) - 4;
 871		memset(hs_header->frame_content, 0, hs_header->frame_length);
  872		hs_page2->host_type = 3; /* 3 means Linux */
 873		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 874			hs_page2->host_cap = 0x08;/* host dynamic source mode */
 875		hs_page2->host_ver.ver_major = VER_MAJOR;
 876		hs_page2->host_ver.ver_minor = VER_MINOR;
 877		hs_page2->host_ver.ver_oem = VER_OEM;
 878		hs_page2->host_ver.ver_build = VER_BUILD;
 879		hs_page2->system_io_bus = 0;
 880		hs_page2->slot_number = 0;
 881		hs_page2->intr_level = 0;
 882		hs_page2->intr_vector = 0;
 883		do_gettimeofday(&time);
 884		local_time = (unsigned int) (time.tv_sec -
 885						(sys_tz.tz_minuteswest * 60));
 886		hs_page2->seconds_since1970 = local_time;
 887		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 888						hs_header->frame_length);
 889		break;
 890
 891	case HS_PAGE_FIRM_CTL:
 892		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
 893		hs_header->frame_length = sizeof(*hs_page3) - 4;
 894		memset(hs_header->frame_content, 0, hs_header->frame_length);
 895		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 896						hs_header->frame_length);
 897		break;
 898
 899	case HS_PAGE_CL_INFO:
 900		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
 901		hs_header->frame_length = sizeof(*hs_page4) - 4;
 902		memset(hs_header->frame_content, 0, hs_header->frame_length);
 903		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
 904		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
 905
 906		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
 907		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
 908		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
 909		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
 910		if (mhba->hba_capability
 911			& HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF) {
 912			hs_page4->ob_depth = find_first_bit((unsigned long *)
 913							    &mhba->list_num_io,
 914							    BITS_PER_LONG);
 915			hs_page4->ib_depth = find_first_bit((unsigned long *)
 916							    &mhba->list_num_io,
 917							    BITS_PER_LONG);
 918		} else {
 919			hs_page4->ob_depth = (u8) mhba->list_num_io;
 920			hs_page4->ib_depth = (u8) mhba->list_num_io;
 921		}
 922		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 923						hs_header->frame_length);
 924		break;
 925
 926	default:
 927		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
 928			hs_header->page_code);
 929		break;
 930	}
 931}
 932
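/*
 * mvumi_init_data() carves a single DMA-coherent region into the inbound
 * list (plus, in dynamic source mode, the per-command frames), the in/out
 * shadow words and the outbound list, then allocates cached pools for the
 * outbound data copies, the tag stack, the tag-to-command table and the
 * target bitmap.
 */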
 933/**
  934 * mvumi_init_data -	Initialize requested data for FW
 935 * @mhba:			Adapter soft state
 936 */
 937static int mvumi_init_data(struct mvumi_hba *mhba)
 938{
 939	struct mvumi_ob_data *ob_pool;
 940	struct mvumi_res *res_mgnt;
 941	unsigned int tmp_size, offset, i;
 942	void *virmem, *v;
 943	dma_addr_t p;
 944
 945	if (mhba->fw_flag & MVUMI_FW_ALLOC)
 946		return 0;
 947
 948	tmp_size = mhba->ib_max_size * mhba->max_io;
 949	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC)
 950		tmp_size += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 951
 952	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
 953	tmp_size += 8 + sizeof(u32)*2 + 16;
 954
 955	res_mgnt = mvumi_alloc_mem_resource(mhba,
 956					RESOURCE_UNCACHED_MEMORY, tmp_size);
 957	if (!res_mgnt) {
 958		dev_err(&mhba->pdev->dev,
 959			"failed to allocate memory for inbound list\n");
 960		goto fail_alloc_dma_buf;
 961	}
 962
 963	p = res_mgnt->bus_addr;
 964	v = res_mgnt->virt_addr;
 965	/* ib_list */
 966	offset = round_up(p, 128) - p;
 967	p += offset;
 968	v += offset;
 969	mhba->ib_list = v;
 970	mhba->ib_list_phys = p;
 971	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
 972		v += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 973		p += sizeof(struct mvumi_dyn_list_entry) * mhba->max_io;
 974		mhba->ib_frame = v;
 975		mhba->ib_frame_phys = p;
 976	}
 977	v += mhba->ib_max_size * mhba->max_io;
 978	p += mhba->ib_max_size * mhba->max_io;
 979
 980	/* ib shadow */
 981	offset = round_up(p, 8) - p;
 982	p += offset;
 983	v += offset;
 984	mhba->ib_shadow = v;
 985	mhba->ib_shadow_phys = p;
 986	p += sizeof(u32)*2;
 987	v += sizeof(u32)*2;
 988	/* ob shadow */
 989	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
 990		offset = round_up(p, 8) - p;
 991		p += offset;
 992		v += offset;
 993		mhba->ob_shadow = v;
 994		mhba->ob_shadow_phys = p;
 995		p += 8;
 996		v += 8;
 997	} else {
 998		offset = round_up(p, 4) - p;
 999		p += offset;
1000		v += offset;
1001		mhba->ob_shadow = v;
1002		mhba->ob_shadow_phys = p;
1003		p += 4;
1004		v += 4;
1005	}
1006
1007	/* ob list */
1008	offset = round_up(p, 128) - p;
1009	p += offset;
1010	v += offset;
1011
1012	mhba->ob_list = v;
1013	mhba->ob_list_phys = p;
1014
1015	/* ob data pool */
1016	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
1017	tmp_size = round_up(tmp_size, 8);
1018
1019	res_mgnt = mvumi_alloc_mem_resource(mhba,
1020				RESOURCE_CACHED_MEMORY, tmp_size);
1021	if (!res_mgnt) {
1022		dev_err(&mhba->pdev->dev,
1023			"failed to allocate memory for outbound data buffer\n");
1024		goto fail_alloc_dma_buf;
1025	}
1026	virmem = res_mgnt->virt_addr;
1027
1028	for (i = mhba->max_io; i != 0; i--) {
1029		ob_pool = (struct mvumi_ob_data *) virmem;
1030		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
1031		virmem += mhba->ob_max_size + sizeof(*ob_pool);
1032	}
1033
1034	tmp_size = sizeof(unsigned short) * mhba->max_io +
1035				sizeof(struct mvumi_cmd *) * mhba->max_io;
1036	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
1037						(sizeof(unsigned char) * 8);
1038
1039	res_mgnt = mvumi_alloc_mem_resource(mhba,
1040				RESOURCE_CACHED_MEMORY, tmp_size);
1041	if (!res_mgnt) {
1042		dev_err(&mhba->pdev->dev,
1043			"failed to allocate memory for tag and target map\n");
1044		goto fail_alloc_dma_buf;
1045	}
1046
1047	virmem = res_mgnt->virt_addr;
1048	mhba->tag_pool.stack = virmem;
1049	mhba->tag_pool.size = mhba->max_io;
1050	tag_init(&mhba->tag_pool, mhba->max_io);
1051	virmem += sizeof(unsigned short) * mhba->max_io;
1052
1053	mhba->tag_cmd = virmem;
1054	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
1055
1056	mhba->target_map = virmem;
1057
1058	mhba->fw_flag |= MVUMI_FW_ALLOC;
1059	return 0;
1060
1061fail_alloc_dma_buf:
1062	mvumi_release_mem_resource(mhba);
1063	return -1;
1064}
1065
1066static int mvumi_hs_process_page(struct mvumi_hba *mhba,
1067				struct mvumi_hs_header *hs_header)
1068{
1069	struct mvumi_hs_page1 *hs_page1;
1070	unsigned char page_checksum;
1071
1072	page_checksum = mvumi_calculate_checksum(hs_header,
1073						hs_header->frame_length);
1074	if (page_checksum != hs_header->checksum) {
1075		dev_err(&mhba->pdev->dev, "checksum error\n");
1076		return -1;
1077	}
1078
1079	switch (hs_header->page_code) {
1080	case HS_PAGE_FIRM_CAP:
1081		hs_page1 = (struct mvumi_hs_page1 *) hs_header;
1082
1083		mhba->max_io = hs_page1->max_io_support;
1084		mhba->list_num_io = hs_page1->cl_inout_list_depth;
1085		mhba->max_transfer_size = hs_page1->max_transfer_size;
1086		mhba->max_target_id = hs_page1->max_devices_support;
1087		mhba->hba_capability = hs_page1->capability;
1088		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
1089		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
1090
1091		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
1092		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
1093
1094		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
1095						hs_page1->fw_ver.ver_build);
1096
1097		if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_COMPACT_SG)
1098			mhba->eot_flag = 22;
1099		else
1100			mhba->eot_flag = 27;
1101		if (mhba->hba_capability & HS_CAPABILITY_NEW_PAGE_IO_DEPTH_DEF)
1102			mhba->list_num_io = 1 << hs_page1->cl_inout_list_depth;
1103		break;
1104	default:
1105		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
1106		return -1;
1107	}
1108	return 0;
1109}
1110
1111/**
1112 * mvumi_handshake -	Move the FW to READY state
1113 * @mhba:				Adapter soft state
1114 *
 1115 * During initialization, the FW can be in any one of several possible
 1116 * states. If the FW is in the operational or waiting-for-handshake states,
 1117 * the driver must take steps to bring it to the ready state. Otherwise, it
 1118 * has to wait for the ready state.
1119 */
1120static int mvumi_handshake(struct mvumi_hba *mhba)
1121{
1122	unsigned int hs_state, tmp, hs_fun;
1123	struct mvumi_hs_header *hs_header;
1124	struct mvumi_hw_regs *regs = mhba->regs;
1125
1126	if (mhba->fw_state == FW_STATE_STARTING)
1127		hs_state = HS_S_START;
1128	else {
1129		tmp = ioread32(regs->arm_to_pciea_msg0);
1130		hs_state = HS_GET_STATE(tmp);
1131		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
1132		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
1133			mhba->fw_state = FW_STATE_STARTING;
1134			return -1;
1135		}
1136	}
1137
1138	hs_fun = 0;
1139	switch (hs_state) {
1140	case HS_S_START:
1141		mhba->fw_state = FW_STATE_HANDSHAKING;
1142		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1143		HS_SET_STATE(hs_fun, HS_S_RESET);
1144		iowrite32(HANDSHAKE_SIGNATURE, regs->pciea_to_arm_msg1);
1145		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1146		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1147		break;
1148
1149	case HS_S_RESET:
1150		iowrite32(lower_32_bits(mhba->handshake_page_phys),
1151					regs->pciea_to_arm_msg1);
1152		iowrite32(upper_32_bits(mhba->handshake_page_phys),
1153					regs->arm_to_pciea_msg1);
1154		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1155		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
1156		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1157		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1158		break;
1159
1160	case HS_S_PAGE_ADDR:
1161	case HS_S_QUERY_PAGE:
1162	case HS_S_SEND_PAGE:
1163		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
1164		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
1165			mhba->hba_total_pages =
1166			((struct mvumi_hs_page1 *) hs_header)->total_pages;
1167
1168			if (mhba->hba_total_pages == 0)
1169				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1170		}
1171
1172		if (hs_state == HS_S_QUERY_PAGE) {
1173			if (mvumi_hs_process_page(mhba, hs_header)) {
1174				HS_SET_STATE(hs_fun, HS_S_ABORT);
1175				return -1;
1176			}
1177			if (mvumi_init_data(mhba)) {
1178				HS_SET_STATE(hs_fun, HS_S_ABORT);
1179				return -1;
1180			}
1181		} else if (hs_state == HS_S_PAGE_ADDR) {
1182			hs_header->page_code = 0;
1183			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
1184		}
1185
1186		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
1187			hs_header->page_code++;
1188			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
1189				mvumi_hs_build_page(mhba, hs_header);
1190				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
1191			} else
1192				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
1193		} else
1194			HS_SET_STATE(hs_fun, HS_S_END);
1195
1196		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1197		iowrite32(hs_fun, regs->pciea_to_arm_msg0);
1198		iowrite32(DRBL_HANDSHAKE, regs->pciea_to_arm_drbl_reg);
1199		break;
1200
1201	case HS_S_END:
1202		/* Set communication list ISR */
1203		tmp = ioread32(regs->enpointa_mask_reg);
1204		tmp |= regs->int_comaout | regs->int_comaerr;
1205		iowrite32(tmp, regs->enpointa_mask_reg);
1206		iowrite32(mhba->list_num_io, mhba->ib_shadow);
1207		/* Set InBound List Available count shadow */
1208		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1209					regs->inb_aval_count_basel);
1210		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1211					regs->inb_aval_count_baseh);
1212
1213		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143) {
1214			/* Set OutBound List Available count shadow */
1215			iowrite32((mhba->list_num_io-1) |
1216							regs->cl_pointer_toggle,
1217							mhba->ob_shadow);
1218			iowrite32(lower_32_bits(mhba->ob_shadow_phys),
1219							regs->outb_copy_basel);
1220			iowrite32(upper_32_bits(mhba->ob_shadow_phys),
1221							regs->outb_copy_baseh);
1222		}
1223
1224		mhba->ib_cur_slot = (mhba->list_num_io - 1) |
1225							regs->cl_pointer_toggle;
1226		mhba->ob_cur_slot = (mhba->list_num_io - 1) |
1227							regs->cl_pointer_toggle;
1228		mhba->fw_state = FW_STATE_STARTED;
1229
1230		break;
1231	default:
1232		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1233								hs_state);
1234		return -1;
1235	}
1236	return 0;
1237}
1238
1239static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1240{
1241	unsigned int isr_status;
1242	unsigned long before;
1243
1244	before = jiffies;
1245	mvumi_handshake(mhba);
1246	do {
1247		isr_status = mhba->instancet->read_fw_status_reg(mhba);
1248
1249		if (mhba->fw_state == FW_STATE_STARTED)
1250			return 0;
1251		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1252			dev_err(&mhba->pdev->dev,
1253				"no handshake response at state 0x%x.\n",
1254				  mhba->fw_state);
1255			dev_err(&mhba->pdev->dev,
1256				"isr : global=0x%x,status=0x%x.\n",
1257					mhba->global_isr, isr_status);
1258			return -1;
1259		}
1260		rmb();
1261		usleep_range(1000, 2000);
1262	} while (!(isr_status & DRBL_HANDSHAKE_ISR));
1263
1264	return 0;
1265}
1266
1267static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1268{
1269	unsigned int tmp;
1270	unsigned long before;
1271
1272	before = jiffies;
1273	tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1274	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1275		if (tmp != HANDSHAKE_READYSTATE)
1276			iowrite32(DRBL_MU_RESET,
1277					mhba->regs->pciea_to_arm_drbl_reg);
1278		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1279			dev_err(&mhba->pdev->dev,
1280				"invalid signature [0x%x].\n", tmp);
1281			return -1;
1282		}
1283		usleep_range(1000, 2000);
1284		rmb();
1285		tmp = ioread32(mhba->regs->arm_to_pciea_msg1);
1286	}
1287
1288	mhba->fw_state = FW_STATE_STARTING;
1289	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1290	do {
1291		if (mvumi_handshake_event(mhba)) {
1292			dev_err(&mhba->pdev->dev,
1293					"handshake failed at state 0x%x.\n",
1294						mhba->fw_state);
1295			return -1;
1296		}
1297	} while (mhba->fw_state != FW_STATE_STARTED);
1298
1299	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1300
1301	return 0;
1302}
1303
1304static unsigned char mvumi_start(struct mvumi_hba *mhba)
1305{
1306	unsigned int tmp;
1307	struct mvumi_hw_regs *regs = mhba->regs;
1308
1309	/* clear Door bell */
1310	tmp = ioread32(regs->arm_to_pciea_drbl_reg);
1311	iowrite32(tmp, regs->arm_to_pciea_drbl_reg);
1312
1313	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1314	tmp = ioread32(regs->enpointa_mask_reg) | regs->int_dl_cpu2pciea;
1315	iowrite32(tmp, regs->enpointa_mask_reg);
1316	msleep(100);
1317	if (mvumi_check_handshake(mhba))
1318		return -1;
1319
1320	return 0;
1321}
1322
1323/**
1324 * mvumi_complete_cmd -	Completes a command
1325 * @mhba:			Adapter soft state
1326 * @cmd:			Command to be completed
1327 */
1328static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1329					struct mvumi_rsp_frame *ob_frame)
1330{
1331	struct scsi_cmnd *scmd = cmd->scmd;
1332
1333	cmd->scmd->SCp.ptr = NULL;
1334	scmd->result = ob_frame->req_status;
1335
1336	switch (ob_frame->req_status) {
1337	case SAM_STAT_GOOD:
1338		scmd->result |= DID_OK << 16;
1339		break;
1340	case SAM_STAT_BUSY:
1341		scmd->result |= DID_BUS_BUSY << 16;
1342		break;
1343	case SAM_STAT_CHECK_CONDITION:
1344		scmd->result |= (DID_OK << 16);
1345		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1346			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1347				sizeof(struct mvumi_sense_data));
1348			scmd->result |=  (DRIVER_SENSE << 24);
1349		}
1350		break;
1351	default:
1352		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1353		break;
1354	}
1355
1356	if (scsi_bufflen(scmd)) {
1357		if (scsi_sg_count(scmd)) {
1358			pci_unmap_sg(mhba->pdev,
1359				scsi_sglist(scmd),
1360				scsi_sg_count(scmd),
1361				(int) scmd->sc_data_direction);
1362		} else {
1363			pci_unmap_single(mhba->pdev,
1364				scmd->SCp.dma_handle,
1365				scsi_bufflen(scmd),
1366				(int) scmd->sc_data_direction);
1367
1368			scmd->SCp.dma_handle = 0;
1369		}
1370	}
1371	cmd->scmd->scsi_done(scmd);
1372	mvumi_return_cmd(mhba, cmd);
1373}
1374
1375static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1376						struct mvumi_cmd *cmd,
1377					struct mvumi_rsp_frame *ob_frame)
1378{
1379	if (atomic_read(&cmd->sync_cmd)) {
1380		cmd->cmd_status = ob_frame->req_status;
1381
1382		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1383				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1384				cmd->data_buf) {
1385			memcpy(cmd->data_buf, ob_frame->payload,
1386					sizeof(struct mvumi_sense_data));
1387		}
1388		atomic_dec(&cmd->sync_cmd);
1389		wake_up(&mhba->int_cmd_wait_q);
1390	}
1391}
1392
1393static void mvumi_show_event(struct mvumi_hba *mhba,
1394			struct mvumi_driver_event *ptr)
1395{
1396	unsigned int i;
1397
1398	dev_warn(&mhba->pdev->dev,
1399		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1400		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
1401	if (ptr->param_count) {
1402		printk(KERN_WARNING "Event param(len 0x%x): ",
1403						ptr->param_count);
1404		for (i = 0; i < ptr->param_count; i++)
1405			printk(KERN_WARNING "0x%x ", ptr->params[i]);
1406
1407		printk(KERN_WARNING "\n");
1408	}
1409
1410	if (ptr->sense_data_length) {
1411		printk(KERN_WARNING "Event sense data(len 0x%x): ",
1412						ptr->sense_data_length);
1413		for (i = 0; i < ptr->sense_data_length; i++)
1414			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
1415		printk(KERN_WARNING "\n");
1416	}
1417}
1418
1419static int mvumi_handle_hotplug(struct mvumi_hba *mhba, u16 devid, int status)
1420{
1421	struct scsi_device *sdev;
1422	int ret = -1;
1423
1424	if (status == DEVICE_OFFLINE) {
1425		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1426		if (sdev) {
1427			dev_dbg(&mhba->pdev->dev, "remove disk %d-%d-%d.\n", 0,
1428								sdev->id, 0);
1429			scsi_remove_device(sdev);
1430			scsi_device_put(sdev);
1431			ret = 0;
1432		} else
1433			dev_err(&mhba->pdev->dev, " no disk[%d] to remove\n",
1434									devid);
1435	} else if (status == DEVICE_ONLINE) {
1436		sdev = scsi_device_lookup(mhba->shost, 0, devid, 0);
1437		if (!sdev) {
1438			scsi_add_device(mhba->shost, 0, devid, 0);
1439			dev_dbg(&mhba->pdev->dev, " add disk %d-%d-%d.\n", 0,
1440								devid, 0);
1441			ret = 0;
1442		} else {
1443			dev_err(&mhba->pdev->dev, " don't add disk %d-%d-%d.\n",
1444								0, devid, 0);
1445			scsi_device_put(sdev);
1446		}
1447	}
1448	return ret;
1449}
1450
1451static u64 mvumi_inquiry(struct mvumi_hba *mhba,
1452	unsigned int id, struct mvumi_cmd *cmd)
1453{
1454	struct mvumi_msg_frame *frame;
1455	u64 wwid = 0;
1456	int cmd_alloc = 0;
1457	int data_buf_len = 64;
1458
1459	if (!cmd) {
1460		cmd = mvumi_create_internal_cmd(mhba, data_buf_len);
1461		if (cmd)
1462			cmd_alloc = 1;
1463		else
1464			return 0;
1465	} else {
1466		memset(cmd->data_buf, 0, data_buf_len);
1467	}
1468	cmd->scmd = NULL;
1469	cmd->cmd_status = REQ_STATUS_PENDING;
1470	atomic_set(&cmd->sync_cmd, 0);
1471	frame = cmd->frame;
1472	frame->device_id = (u16) id;
1473	frame->cmd_flag = CMD_FLAG_DATA_IN;
1474	frame->req_function = CL_FUN_SCSI_CMD;
1475	frame->cdb_length = 6;
1476	frame->data_transfer_length = MVUMI_INQUIRY_LENGTH;
1477	memset(frame->cdb, 0, frame->cdb_length);
1478	frame->cdb[0] = INQUIRY;
1479	frame->cdb[4] = frame->data_transfer_length;
1480
1481	mvumi_issue_blocked_cmd(mhba, cmd);
1482
1483	if (cmd->cmd_status == SAM_STAT_GOOD) {
1484		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1485			wwid = id + 1;
1486		else
1487			memcpy((void *)&wwid,
1488			       (cmd->data_buf + MVUMI_INQUIRY_UUID_OFF),
1489			       MVUMI_INQUIRY_UUID_LEN);
1490		dev_dbg(&mhba->pdev->dev,
1491			"inquiry device(0:%d:0) wwid(%llx)\n", id, wwid);
1492	} else {
1493		wwid = 0;
1494	}
1495	if (cmd_alloc)
1496		mvumi_delete_internal_cmd(mhba, cmd);
1497
1498	return wwid;
1499}
1500
1501static void mvumi_detach_devices(struct mvumi_hba *mhba)
1502{
1503	struct mvumi_device *mv_dev = NULL , *dev_next;
1504	struct scsi_device *sdev = NULL;
1505
1506	mutex_lock(&mhba->device_lock);
1507
1508	/* detach Hard Disk */
1509	list_for_each_entry_safe(mv_dev, dev_next,
1510		&mhba->shost_dev_list, list) {
1511		mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1512		list_del_init(&mv_dev->list);
1513		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1514			mv_dev->id, mv_dev->wwid);
1515		kfree(mv_dev);
1516	}
1517	list_for_each_entry_safe(mv_dev, dev_next, &mhba->mhba_dev_list, list) {
1518		list_del_init(&mv_dev->list);
1519		dev_dbg(&mhba->pdev->dev, "release device(0:%d:0) wwid(%llx)\n",
1520			mv_dev->id, mv_dev->wwid);
1521		kfree(mv_dev);
1522	}
1523
1524	/* detach virtual device */
1525	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
1526		sdev = scsi_device_lookup(mhba->shost, 0,
1527						mhba->max_target_id - 1, 0);
1528
1529	if (sdev) {
1530		scsi_remove_device(sdev);
1531		scsi_device_put(sdev);
1532	}
1533
1534	mutex_unlock(&mhba->device_lock);
1535}
1536
1537static void mvumi_rescan_devices(struct mvumi_hba *mhba, int id)
1538{
1539	struct scsi_device *sdev;
1540
1541	sdev = scsi_device_lookup(mhba->shost, 0, id, 0);
1542	if (sdev) {
1543		scsi_rescan_device(&sdev->sdev_gendev);
1544		scsi_device_put(sdev);
1545	}
1546}
1547
1548static int mvumi_match_devices(struct mvumi_hba *mhba, int id, u64 wwid)
1549{
1550	struct mvumi_device *mv_dev = NULL;
1551
1552	list_for_each_entry(mv_dev, &mhba->shost_dev_list, list) {
1553		if (mv_dev->wwid == wwid) {
1554			if (mv_dev->id != id) {
1555				dev_err(&mhba->pdev->dev,
1556					"%s has same wwid[%llx] ,"
1557					" but different id[%d %d]\n",
1558					__func__, mv_dev->wwid, mv_dev->id, id);
1559				return -1;
1560			} else {
1561				if (mhba->pdev->device ==
1562						PCI_DEVICE_ID_MARVELL_MV9143)
1563					mvumi_rescan_devices(mhba, id);
1564				return 1;
1565			}
1566		}
1567	}
1568	return 0;
1569}
1570
1571static void mvumi_remove_devices(struct mvumi_hba *mhba, int id)
1572{
1573	struct mvumi_device *mv_dev = NULL, *dev_next;
1574
1575	list_for_each_entry_safe(mv_dev, dev_next,
1576				&mhba->shost_dev_list, list) {
1577		if (mv_dev->id == id) {
1578			dev_dbg(&mhba->pdev->dev,
1579				"detach device(0:%d:0) wwid(%llx) from HOST\n",
1580				mv_dev->id, mv_dev->wwid);
1581			mvumi_handle_hotplug(mhba, mv_dev->id, DEVICE_OFFLINE);
1582			list_del_init(&mv_dev->list);
1583			kfree(mv_dev);
1584		}
1585	}
1586}
1587
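/*
 * Probe every possible target ID with an internal INQUIRY and use the result
 * as a world-wide ID: on 9143 the WWID is simply id + 1, otherwise it is read
 * from the inquiry payload. Targets that stop answering are removed; newly
 * discovered ones are queued on mhba_dev_list so the rescan thread can
 * register them with the SCSI midlayer.
 */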
1588static int mvumi_probe_devices(struct mvumi_hba *mhba)
1589{
1590	int id, maxid;
1591	u64 wwid = 0;
1592	struct mvumi_device *mv_dev = NULL;
1593	struct mvumi_cmd *cmd = NULL;
1594	int found = 0;
1595
1596	cmd = mvumi_create_internal_cmd(mhba, 64);
1597	if (!cmd)
1598		return -1;
1599
1600	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9143)
1601		maxid = mhba->max_target_id;
1602	else
1603		maxid = mhba->max_target_id - 1;
1604
1605	for (id = 0; id < maxid; id++) {
1606		wwid = mvumi_inquiry(mhba, id, cmd);
1607		if (!wwid) {
1608			/* device no response, remove it */
1609			mvumi_remove_devices(mhba, id);
1610		} else {
1611			/* device response, add it */
1612			found = mvumi_match_devices(mhba, id, wwid);
1613			if (!found) {
1614				mvumi_remove_devices(mhba, id);
1615				mv_dev = kzalloc(sizeof(struct mvumi_device),
1616								GFP_KERNEL);
1617				if (!mv_dev) {
1618					dev_err(&mhba->pdev->dev,
1619						"%s alloc mv_dev failed\n",
1620						__func__);
1621					continue;
1622				}
1623				mv_dev->id = id;
1624				mv_dev->wwid = wwid;
1625				mv_dev->sdev = NULL;
1626				INIT_LIST_HEAD(&mv_dev->list);
1627				list_add_tail(&mv_dev->list,
1628					      &mhba->mhba_dev_list);
1629				dev_dbg(&mhba->pdev->dev,
1630					"probe a new device(0:%d:0)"
1631					" wwid(%llx)\n", id, mv_dev->wwid);
1632			} else if (found == -1)
1633				return -1;
1634			else
1635				continue;
1636		}
1637	}
1638
1639	if (cmd)
1640		mvumi_delete_internal_cmd(mhba, cmd);
1641
1642	return 0;
1643}
1644
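/*
 * Hotplug worker thread: it sleeps until a DRBL_BUS_CHANGE doorbell bumps
 * pnp_count (see mvumi_launch_events()), then re-probes the targets and
 * either hot-adds newly discovered devices to the SCSI host or drops the
 * ones that could not be added.
 */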
1645static int mvumi_rescan_bus(void *data)
1646{
1647	int ret = 0;
1648	struct mvumi_hba *mhba = (struct mvumi_hba *) data;
1649	struct mvumi_device *mv_dev = NULL , *dev_next;
1650
1651	while (!kthread_should_stop()) {
1652
1653		set_current_state(TASK_INTERRUPTIBLE);
1654		if (!atomic_read(&mhba->pnp_count))
1655			schedule();
1656		msleep(1000);
1657		atomic_set(&mhba->pnp_count, 0);
1658		__set_current_state(TASK_RUNNING);
1659
1660		mutex_lock(&mhba->device_lock);
1661		ret = mvumi_probe_devices(mhba);
1662		if (!ret) {
1663			list_for_each_entry_safe(mv_dev, dev_next,
1664						 &mhba->mhba_dev_list, list) {
1665				if (mvumi_handle_hotplug(mhba, mv_dev->id,
1666							 DEVICE_ONLINE)) {
1667					dev_err(&mhba->pdev->dev,
 1668						"%s add device(0:%d:0) failed, "
 1669						"wwid(%llx) already exists\n",
1670						__func__,
1671						mv_dev->id, mv_dev->wwid);
1672					list_del_init(&mv_dev->list);
1673					kfree(mv_dev);
1674				} else {
1675					list_move_tail(&mv_dev->list,
1676						       &mhba->shost_dev_list);
1677				}
1678			}
1679		}
1680		mutex_unlock(&mhba->device_lock);
1681	}
1682	return 0;
1683}
1684
1685static void mvumi_proc_msg(struct mvumi_hba *mhba,
1686					struct mvumi_hotplug_event *param)
1687{
1688	u16 size = param->size;
1689	const unsigned long *ar_bitmap;
1690	const unsigned long *re_bitmap;
1691	int index;
1692
1693	if (mhba->fw_flag & MVUMI_FW_ATTACH) {
1694		index = -1;
1695		ar_bitmap = (const unsigned long *) param->bitmap;
1696		re_bitmap = (const unsigned long *) &param->bitmap[size >> 3];
1697
1698		mutex_lock(&mhba->sas_discovery_mutex);
1699		do {
1700			index = find_next_zero_bit(ar_bitmap, size, index + 1);
1701			if (index >= size)
1702				break;
1703			mvumi_handle_hotplug(mhba, index, DEVICE_ONLINE);
1704		} while (1);
1705
1706		index = -1;
1707		do {
1708			index = find_next_zero_bit(re_bitmap, size, index + 1);
1709			if (index >= size)
1710				break;
1711			mvumi_handle_hotplug(mhba, index, DEVICE_OFFLINE);
1712		} while (1);
1713		mutex_unlock(&mhba->sas_discovery_mutex);
1714	}
1715}
1716
1717static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1718{
1719	if (msg == APICDB1_EVENT_GETEVENT) {
1720		int i, count;
1721		struct mvumi_driver_event *param = NULL;
1722		struct mvumi_event_req *er = buffer;
1723		count = er->count;
1724		if (count > MAX_EVENTS_RETURNED) {
1725			dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1726					" than max event count[0x%x].\n",
1727					count, MAX_EVENTS_RETURNED);
1728			return;
1729		}
1730		for (i = 0; i < count; i++) {
1731			param = &er->events[i];
1732			mvumi_show_event(mhba, param);
1733		}
1734	} else if (msg == APICDB1_HOST_GETEVENT) {
1735		mvumi_proc_msg(mhba, buffer);
1736	}
1737}
1738
1739static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1740{
1741	struct mvumi_cmd *cmd;
1742	struct mvumi_msg_frame *frame;
1743
1744	cmd = mvumi_create_internal_cmd(mhba, 512);
1745	if (!cmd)
1746		return -1;
1747	cmd->scmd = NULL;
1748	cmd->cmd_status = REQ_STATUS_PENDING;
1749	atomic_set(&cmd->sync_cmd, 0);
1750	frame = cmd->frame;
1751	frame->device_id = 0;
1752	frame->cmd_flag = CMD_FLAG_DATA_IN;
1753	frame->req_function = CL_FUN_SCSI_CMD;
1754	frame->cdb_length = MAX_COMMAND_SIZE;
1755	frame->data_transfer_length = sizeof(struct mvumi_event_req);
1756	memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1757	frame->cdb[0] = APICDB0_EVENT;
1758	frame->cdb[1] = msg;
1759	mvumi_issue_blocked_cmd(mhba, cmd);
1760
1761	if (cmd->cmd_status != SAM_STAT_GOOD)
1762		dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1763							cmd->cmd_status);
1764	else
1765		mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1766
1767	mvumi_delete_internal_cmd(mhba, cmd);
1768	return 0;
1769}
1770
1771static void mvumi_scan_events(struct work_struct *work)
1772{
1773	struct mvumi_events_wq *mu_ev =
1774		container_of(work, struct mvumi_events_wq, work_q);
1775
1776	mvumi_get_event(mu_ev->mhba, mu_ev->event);
1777	kfree(mu_ev);
1778}
1779
1780static void mvumi_launch_events(struct mvumi_hba *mhba, u32 isr_status)
1781{
1782	struct mvumi_events_wq *mu_ev;
1783
1784	while (isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY)) {
1785		if (isr_status & DRBL_BUS_CHANGE) {
1786			atomic_inc(&mhba->pnp_count);
1787			wake_up_process(mhba->dm_thread);
1788			isr_status &= ~(DRBL_BUS_CHANGE);
1789			continue;
1790		}
1791
1792		mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1793		if (mu_ev) {
1794			INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1795			mu_ev->mhba = mhba;
1796			mu_ev->event = APICDB1_EVENT_GETEVENT;
1797			isr_status &= ~(DRBL_EVENT_NOTIFY);
1798			mu_ev->param = NULL;
1799			schedule_work(&mu_ev->work_q);
1800		}
1801	}
1802}
1803
1804static void mvumi_handle_clob(struct mvumi_hba *mhba)
1805{
1806	struct mvumi_rsp_frame *ob_frame;
1807	struct mvumi_cmd *cmd;
1808	struct mvumi_ob_data *pool;
1809
1810	while (!list_empty(&mhba->free_ob_list)) {
1811		pool = list_first_entry(&mhba->free_ob_list,
1812						struct mvumi_ob_data, list);
1813		list_del_init(&pool->list);
1814		list_add_tail(&pool->list, &mhba->ob_data_list);
1815
1816		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1817		cmd = mhba->tag_cmd[ob_frame->tag];
1818
1819		atomic_dec(&mhba->fw_outstanding);
1820		mhba->tag_cmd[ob_frame->tag] = 0;
1821		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1822		if (cmd->scmd)
1823			mvumi_complete_cmd(mhba, cmd, ob_frame);
1824		else
1825			mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1826	}
1827	mhba->instancet->fire_cmd(mhba, NULL);
1828}
1829
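/*
 * Interrupt handler. The per-chip clear_intr() op reads and clears the
 * hardware status, leaving the latched bits in mhba->global_isr and
 * mhba->isr_status; everything runs under the host lock. Doorbell bits kick
 * off event handling (bus change / async events) or another handshake step;
 * int_comaout means the firmware posted responses, so the outbound list is
 * drained and, once the firmware is started, completed commands are finished
 * via mvumi_handle_clob().
 */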
1830static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1831{
1832	struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1833	unsigned long flags;
1834
1835	spin_lock_irqsave(mhba->shost->host_lock, flags);
1836	if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1837		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1838		return IRQ_NONE;
1839	}
1840
1841	if (mhba->global_isr & mhba->regs->int_dl_cpu2pciea) {
1842		if (mhba->isr_status & (DRBL_BUS_CHANGE | DRBL_EVENT_NOTIFY))
1843			mvumi_launch_events(mhba, mhba->isr_status);
1844		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1845			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1846			mvumi_handshake(mhba);
1847		}
1848
1849	}
1850
1851	if (mhba->global_isr & mhba->regs->int_comaout)
1852		mvumi_receive_ob_list_entry(mhba);
1853
1854	mhba->global_isr = 0;
1855	mhba->isr_status = 0;
1856	if (mhba->fw_state == FW_STATE_STARTED)
1857		mvumi_handle_clob(mhba);
1858	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1859	return IRQ_HANDLED;
1860}
1861
1862static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1863						struct mvumi_cmd *cmd)
1864{
1865	void *ib_entry;
1866	struct mvumi_msg_frame *ib_frame;
1867	unsigned int frame_len;
1868
1869	ib_frame = cmd->frame;
1870	if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1871		dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1872		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1873	}
1874	if (tag_is_empty(&mhba->tag_pool)) {
1875		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1876		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1877	}
1878	mvumi_get_ib_list_entry(mhba, &ib_entry);
1879
1880	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1881	cmd->frame->request_id = mhba->io_seq++;
1882	cmd->request_id = cmd->frame->request_id;
1883	mhba->tag_cmd[cmd->frame->tag] = cmd;
1884	frame_len = sizeof(*ib_frame) - 4 +
1885				ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1886	if (mhba->hba_capability & HS_CAPABILITY_SUPPORT_DYN_SRC) {
1887		struct mvumi_dyn_list_entry *dle;
1888		dle = ib_entry;
1889		dle->src_low_addr =
1890			cpu_to_le32(lower_32_bits(cmd->frame_phys));
1891		dle->src_high_addr =
1892			cpu_to_le32(upper_32_bits(cmd->frame_phys));
1893		dle->if_length = (frame_len >> 2) & 0xFFF;
1894	} else {
1895		memcpy(ib_entry, ib_frame, frame_len);
1896	}
1897	return MV_QUEUE_COMMAND_RESULT_SENT;
1898}
1899
1900static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1901{
1902	unsigned short num_of_cl_sent = 0;
1903	unsigned int count;
1904	enum mvumi_qc_result result;
1905
1906	if (cmd)
1907		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1908	count = mhba->instancet->check_ib_list(mhba);
1909	if (list_empty(&mhba->waiting_req_list) || !count)
1910		return;
1911
1912	do {
1913		cmd = list_first_entry(&mhba->waiting_req_list,
1914				       struct mvumi_cmd, queue_pointer);
1915		list_del_init(&cmd->queue_pointer);
1916		result = mvumi_send_command(mhba, cmd);
1917		switch (result) {
1918		case MV_QUEUE_COMMAND_RESULT_SENT:
1919			num_of_cl_sent++;
1920			break;
1921		case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1922			list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1923			if (num_of_cl_sent > 0)
1924				mvumi_send_ib_list_entry(mhba);
1925
1926			return;
1927		}
1928	} while (!list_empty(&mhba->waiting_req_list) && count--);
1929
1930	if (num_of_cl_sent > 0)
1931		mvumi_send_ib_list_entry(mhba);
1932}
1933
1934/**
1935 * mvumi_enable_intr -	Enables interrupts
1936 * @mhba:		Adapter soft state
1937 */
1938static void mvumi_enable_intr(struct mvumi_hba *mhba)
1939{
1940	unsigned int mask;
1941	struct mvumi_hw_regs *regs = mhba->regs;
1942
1943	iowrite32(regs->int_drbl_int_mask, regs->arm_to_pciea_mask_reg);
1944	mask = ioread32(regs->enpointa_mask_reg);
1945	mask |= regs->int_dl_cpu2pciea | regs->int_comaout | regs->int_comaerr;
1946	iowrite32(mask, regs->enpointa_mask_reg);
1947}
1948
1949/**
1950 * mvumi_disable_intr -	Disables interrupts
1951 * @mhba:		Adapter soft state
1952 */
1953static void mvumi_disable_intr(struct mvumi_hba *mhba)
1954{
1955	unsigned int mask;
1956	struct mvumi_hw_regs *regs = mhba->regs;
1957
1958	iowrite32(0, regs->arm_to_pciea_mask_reg);
1959	mask = ioread32(regs->enpointa_mask_reg);
1960	mask &= ~(regs->int_dl_cpu2pciea | regs->int_comaout |
1961							regs->int_comaerr);
1962	iowrite32(mask, regs->enpointa_mask_reg);
1963}
1964
1965static int mvumi_clear_intr(void *extend)
1966{
1967	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1968	unsigned int status, isr_status = 0, tmp = 0;
1969	struct mvumi_hw_regs *regs = mhba->regs;
1970
1971	status = ioread32(regs->main_int_cause_reg);
1972	if (!(status & regs->int_mu) || status == 0xFFFFFFFF)
1973		return 1;
1974	if (unlikely(status & regs->int_comaerr)) {
1975		tmp = ioread32(regs->outb_isr_cause);
1976		if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580) {
1977			if (tmp & regs->clic_out_err) {
1978				iowrite32(tmp & regs->clic_out_err,
1979							regs->outb_isr_cause);
1980			}
1981		} else {
1982			if (tmp & (regs->clic_in_err | regs->clic_out_err))
1983				iowrite32(tmp & (regs->clic_in_err |
1984						regs->clic_out_err),
1985						regs->outb_isr_cause);
1986		}
1987		status ^= mhba->regs->int_comaerr;
1988		/* inbound or outbound parity error, command will time out */
1989	}
1990	if (status & regs->int_comaout) {
1991		tmp = ioread32(regs->outb_isr_cause);
1992		if (tmp & regs->clic_irq)
1993			iowrite32(tmp & regs->clic_irq, regs->outb_isr_cause);
1994	}
1995	if (status & regs->int_dl_cpu2pciea) {
1996		isr_status = ioread32(regs->arm_to_pciea_drbl_reg);
1997		if (isr_status)
1998			iowrite32(isr_status, regs->arm_to_pciea_drbl_reg);
1999	}
2000
2001	mhba->global_isr = status;
2002	mhba->isr_status = isr_status;
2003
2004	return 0;
2005}
2006
2007/**
2008 * mvumi_read_fw_status_reg - returns the current FW status value
2009 * @mhba:		Adapter soft state
2010 */
2011static unsigned int mvumi_read_fw_status_reg(struct mvumi_hba *mhba)
2012{
2013	unsigned int status;
2014
2015	status = ioread32(mhba->regs->arm_to_pciea_drbl_reg);
2016	if (status)
2017		iowrite32(status, mhba->regs->arm_to_pciea_drbl_reg);
2018	return status;
2019}
2020
2021static struct mvumi_instance_template mvumi_instance_9143 = {
2022	.fire_cmd = mvumi_fire_cmd,
2023	.enable_intr = mvumi_enable_intr,
2024	.disable_intr = mvumi_disable_intr,
2025	.clear_intr = mvumi_clear_intr,
2026	.read_fw_status_reg = mvumi_read_fw_status_reg,
2027	.check_ib_list = mvumi_check_ib_list_9143,
2028	.check_ob_list = mvumi_check_ob_list_9143,
2029	.reset_host = mvumi_reset_host_9143,
2030};
2031
2032static struct mvumi_instance_template mvumi_instance_9580 = {
2033	.fire_cmd = mvumi_fire_cmd,
2034	.enable_intr = mvumi_enable_intr,
2035	.disable_intr = mvumi_disable_intr,
2036	.clear_intr = mvumi_clear_intr,
2037	.read_fw_status_reg = mvumi_read_fw_status_reg,
2038	.check_ib_list = mvumi_check_ib_list_9580,
2039	.check_ob_list = mvumi_check_ob_list_9580,
2040	.reset_host = mvumi_reset_host_9580,
2041};
2042
2043static int mvumi_slave_configure(struct scsi_device *sdev)
2044{
2045	struct mvumi_hba *mhba;
2046	unsigned char bitcount = sizeof(unsigned char) * 8;
2047
2048	mhba = (struct mvumi_hba *) sdev->host->hostdata;
2049	if (sdev->id >= mhba->max_target_id)
2050		return -EINVAL;
2051
2052	mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
2053	return 0;
2054}
2055
2056/**
2057 * mvumi_build_frame -	Prepares a direct cdb (DCDB) command
2058 * @mhba:		Adapter soft state
2059 * @scmd:		SCSI command
2060 * @cmd:		Command frame to be prepared
2061 *
2062 * This function prepares CDB commands. These are typically pass-through
2063 * commands to the devices.
2064 */
2065static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
2066				struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
2067{
2068	struct mvumi_msg_frame *pframe;
2069
2070	cmd->scmd = scmd;
2071	cmd->cmd_status = REQ_STATUS_PENDING;
2072	pframe = cmd->frame;
2073	pframe->device_id = ((unsigned short) scmd->device->id) |
2074				(((unsigned short) scmd->device->lun) << 8);
2075	pframe->cmd_flag = 0;
2076
2077	switch (scmd->sc_data_direction) {
2078	case DMA_NONE:
2079		pframe->cmd_flag |= CMD_FLAG_NON_DATA;
2080		break;
2081	case DMA_FROM_DEVICE:
2082		pframe->cmd_flag |= CMD_FLAG_DATA_IN;
2083		break;
2084	case DMA_TO_DEVICE:
2085		pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
2086		break;
2087	case DMA_BIDIRECTIONAL:
2088	default:
2089		dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
2090			"cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
2091		goto error;
2092	}
2093
2094	pframe->cdb_length = scmd->cmd_len;
2095	memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
2096	pframe->req_function = CL_FUN_SCSI_CMD;
2097	if (scsi_bufflen(scmd)) {
2098		if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
2099			&pframe->sg_counts))
2100			goto error;
2101
2102		pframe->data_transfer_length = scsi_bufflen(scmd);
2103	} else {
2104		pframe->sg_counts = 0;
2105		pframe->data_transfer_length = 0;
2106	}
2107	return 0;
2108
2109error:
2110	scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
2111		SAM_STAT_CHECK_CONDITION;
2112	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
2113									0);
2114	return -1;
2115}
2116
2117/**
2118 * mvumi_queue_command -	Queue entry point
2119 * @shost:			SCSI host the command is queued on
2120 * @scmd:			SCSI command to be queued
2121 */
2122static int mvumi_queue_command(struct Scsi_Host *shost,
2123					struct scsi_cmnd *scmd)
2124{
2125	struct mvumi_cmd *cmd;
2126	struct mvumi_hba *mhba;
2127	unsigned long irq_flags;
2128
2129	spin_lock_irqsave(shost->host_lock, irq_flags);
2130	scsi_cmd_get_serial(shost, scmd);
2131
2132	mhba = (struct mvumi_hba *) shost->hostdata;
2133	scmd->result = 0;
2134	cmd = mvumi_get_cmd(mhba);
2135	if (unlikely(!cmd)) {
2136		spin_unlock_irqrestore(shost->host_lock, irq_flags);
2137		return SCSI_MLQUEUE_HOST_BUSY;
2138	}
2139
2140	if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
2141		goto out_return_cmd;
2142
2143	cmd->scmd = scmd;
2144	scmd->SCp.ptr = (char *) cmd;
2145	mhba->instancet->fire_cmd(mhba, cmd);
2146	spin_unlock_irqrestore(shost->host_lock, irq_flags);
2147	return 0;
2148
2149out_return_cmd:
2150	mvumi_return_cmd(mhba, cmd);
2151	scmd->scsi_done(scmd);
2152	spin_unlock_irqrestore(shost->host_lock, irq_flags);
2153	return 0;
2154}
2155
2156static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
2157{
2158	struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
2159	struct Scsi_Host *host = scmd->device->host;
2160	struct mvumi_hba *mhba = shost_priv(host);
2161	unsigned long flags;
2162
2163	spin_lock_irqsave(mhba->shost->host_lock, flags);
2164
2165	if (mhba->tag_cmd[cmd->frame->tag]) {
2166		mhba->tag_cmd[cmd->frame->tag] = 0;
2167		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
2168	}
2169	if (!list_empty(&cmd->queue_pointer))
2170		list_del_init(&cmd->queue_pointer);
2171	else
2172		atomic_dec(&mhba->fw_outstanding);
2173
2174	scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
2175	scmd->SCp.ptr = NULL;
2176	if (scsi_bufflen(scmd)) {
2177		if (scsi_sg_count(scmd)) {
2178			pci_unmap_sg(mhba->pdev,
2179				scsi_sglist(scmd),
2180				scsi_sg_count(scmd),
2181				(int)scmd->sc_data_direction);
2182		} else {
2183			pci_unmap_single(mhba->pdev,
2184				scmd->SCp.dma_handle,
2185				scsi_bufflen(scmd),
2186				(int)scmd->sc_data_direction);
2187
2188			scmd->SCp.dma_handle = 0;
2189		}
2190	}
2191	mvumi_return_cmd(mhba, cmd);
2192	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
2193
2194	return BLK_EH_NOT_HANDLED;
2195}
2196
2197static int
2198mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
2199			sector_t capacity, int geom[])
2200{
2201	int heads, sectors;
2202	sector_t cylinders;
2203	unsigned long tmp;
2204
2205	heads = 64;
2206	sectors = 32;
2207	tmp = heads * sectors;
2208	cylinders = capacity;
2209	sector_div(cylinders, tmp);
2210
2211	if (capacity >= 0x200000) {
2212		heads = 255;
2213		sectors = 63;
2214		tmp = heads * sectors;
2215		cylinders = capacity;
2216		sector_div(cylinders, tmp);
2217	}
2218	geom[0] = heads;
2219	geom[1] = sectors;
2220	geom[2] = cylinders;
2221
2222	return 0;
2223}
2224
2225static struct scsi_host_template mvumi_template = {
2226
2227	.module = THIS_MODULE,
2228	.name = "Marvell Storage Controller",
2229	.slave_configure = mvumi_slave_configure,
2230	.queuecommand = mvumi_queue_command,
2231	.eh_host_reset_handler = mvumi_host_reset,
2232	.bios_param = mvumi_bios_param,
2233	.this_id = -1,
2234};
2235
2236static struct scsi_transport_template mvumi_transport_template = {
2237	.eh_timed_out = mvumi_timed_out,
2238};
2239
2240static int mvumi_cfg_hw_reg(struct mvumi_hba *mhba)
2241{
2242	void *base = NULL;
2243	struct mvumi_hw_regs *regs;
2244
2245	switch (mhba->pdev->device) {
2246	case PCI_DEVICE_ID_MARVELL_MV9143:
2247		mhba->mmio = mhba->base_addr[0];
2248		base = mhba->mmio;
2249		if (!mhba->regs) {
2250			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2251			if (mhba->regs == NULL)
2252				return -ENOMEM;
2253		}
2254		regs = mhba->regs;
2255
2256		/* For Arm */
2257		regs->ctrl_sts_reg          = base + 0x20104;
2258		regs->rstoutn_mask_reg      = base + 0x20108;
2259		regs->sys_soft_rst_reg      = base + 0x2010C;
2260		regs->main_int_cause_reg    = base + 0x20200;
2261		regs->enpointa_mask_reg     = base + 0x2020C;
2262		regs->rstoutn_en_reg        = base + 0xF1400;
2263		/* For Doorbell */
2264		regs->pciea_to_arm_drbl_reg = base + 0x20400;
2265		regs->arm_to_pciea_drbl_reg = base + 0x20408;
2266		regs->arm_to_pciea_mask_reg = base + 0x2040C;
2267		regs->pciea_to_arm_msg0     = base + 0x20430;
2268		regs->pciea_to_arm_msg1     = base + 0x20434;
2269		regs->arm_to_pciea_msg0     = base + 0x20438;
2270		regs->arm_to_pciea_msg1     = base + 0x2043C;
2271
2272		/* For Message Unit */
2273
2274		regs->inb_aval_count_basel  = base + 0x508;
2275		regs->inb_aval_count_baseh  = base + 0x50C;
2276		regs->inb_write_pointer     = base + 0x518;
2277		regs->inb_read_pointer      = base + 0x51C;
2278		regs->outb_coal_cfg         = base + 0x568;
2279		regs->outb_copy_basel       = base + 0x5B0;
2280		regs->outb_copy_baseh       = base + 0x5B4;
2281		regs->outb_copy_pointer     = base + 0x544;
2282		regs->outb_read_pointer     = base + 0x548;
2283		regs->outb_isr_cause        = base + 0x560;
2284		regs->outb_coal_cfg         = base + 0x568;
2285		/* Bit setting for HW */
2286		regs->int_comaout           = 1 << 8;
2287		regs->int_comaerr           = 1 << 6;
2288		regs->int_dl_cpu2pciea      = 1 << 1;
2289		regs->cl_pointer_toggle     = 1 << 12;
2290		regs->clic_irq              = 1 << 1;
2291		regs->clic_in_err           = 1 << 8;
2292		regs->clic_out_err          = 1 << 12;
2293		regs->cl_slot_num_mask      = 0xFFF;
2294		regs->int_drbl_int_mask     = 0x3FFFFFFF;
2295		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout |
2296							regs->int_comaerr;
2297		break;
2298	case PCI_DEVICE_ID_MARVELL_MV9580:
2299		mhba->mmio = mhba->base_addr[2];
2300		base = mhba->mmio;
2301		if (!mhba->regs) {
2302			mhba->regs = kzalloc(sizeof(*regs), GFP_KERNEL);
2303			if (mhba->regs == NULL)
2304				return -ENOMEM;
2305		}
2306		regs = mhba->regs;
2307		/* For Arm */
2308		regs->ctrl_sts_reg          = base + 0x20104;
2309		regs->rstoutn_mask_reg      = base + 0x1010C;
2310		regs->sys_soft_rst_reg      = base + 0x10108;
2311		regs->main_int_cause_reg    = base + 0x10200;
2312		regs->enpointa_mask_reg     = base + 0x1020C;
2313		regs->rstoutn_en_reg        = base + 0xF1400;
2314
2315		/* For Doorbell */
2316		regs->pciea_to_arm_drbl_reg = base + 0x10460;
2317		regs->arm_to_pciea_drbl_reg = base + 0x10480;
2318		regs->arm_to_pciea_mask_reg = base + 0x10484;
2319		regs->pciea_to_arm_msg0     = base + 0x10400;
2320		regs->pciea_to_arm_msg1     = base + 0x10404;
2321		regs->arm_to_pciea_msg0     = base + 0x10420;
2322		regs->arm_to_pciea_msg1     = base + 0x10424;
2323
2324		/* For reset*/
2325		regs->reset_request         = base + 0x10108;
2326		regs->reset_enable          = base + 0x1010c;
2327
2328		/* For Message Unit */
2329		regs->inb_aval_count_basel  = base + 0x4008;
2330		regs->inb_aval_count_baseh  = base + 0x400C;
2331		regs->inb_write_pointer     = base + 0x4018;
2332		regs->inb_read_pointer      = base + 0x401C;
2333		regs->outb_copy_basel       = base + 0x4058;
2334		regs->outb_copy_baseh       = base + 0x405C;
2335		regs->outb_copy_pointer     = base + 0x406C;
2336		regs->outb_read_pointer     = base + 0x4070;
2337		regs->outb_coal_cfg         = base + 0x4080;
2338		regs->outb_isr_cause        = base + 0x4088;
2339		/* Bit setting for HW */
2340		regs->int_comaout           = 1 << 4;
2341		regs->int_dl_cpu2pciea      = 1 << 12;
2342		regs->int_comaerr           = 1 << 29;
2343		regs->cl_pointer_toggle     = 1 << 14;
2344		regs->cl_slot_num_mask      = 0x3FFF;
2345		regs->clic_irq              = 1 << 0;
2346		regs->clic_out_err          = 1 << 1;
2347		regs->int_drbl_int_mask     = 0x3FFFFFFF;
2348		regs->int_mu = regs->int_dl_cpu2pciea | regs->int_comaout;
2349		break;
2350	default:
2351		return -1;
2352		break;
2353	}
2354
2355	return 0;
2356}
2357
2358/**
2359 * mvumi_init_fw -	Initializes the FW
2360 * @mhba:		Adapter soft state
2361 *
2362 * This is the main function for initializing firmware.
2363 */
2364static int mvumi_init_fw(struct mvumi_hba *mhba)
2365{
2366	int ret = 0;
2367
2368	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
2369		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
2370		return -EBUSY;
2371	}
2372	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2373	if (ret)
2374		goto fail_ioremap;
2375		goto fail_ioremap;
2376	switch (mhba->pdev->device) {
2377	case PCI_DEVICE_ID_MARVELL_MV9143:
2378		mhba->instancet = &mvumi_instance_9143;
2379		mhba->io_seq = 0;
2380		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2381		mhba->request_id_enabled = 1;
2382		break;
2383	case PCI_DEVICE_ID_MARVELL_MV9580:
2384		mhba->instancet = &mvumi_instance_9580;
2385		mhba->io_seq = 0;
2386		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
2387		break;
2388	default:
2389		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
2390							mhba->pdev->device);
2391		mhba->instancet = NULL;
2392		ret = -EINVAL;
2393		goto fail_alloc_mem;
2394	}
2395	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
2396							mhba->pdev->device);
2397	ret = mvumi_cfg_hw_reg(mhba);
2398	if (ret) {
2399		dev_err(&mhba->pdev->dev,
2400			"failed to allocate memory for reg\n");
2401		ret = -ENOMEM;
2402		goto fail_alloc_mem;
2403	}
2404	mhba->handshake_page = pci_alloc_consistent(mhba->pdev, HSP_MAX_SIZE,
2405						&mhba->handshake_page_phys);
2406	if (!mhba->handshake_page) {
2407		dev_err(&mhba->pdev->dev,
2408			"failed to allocate memory for handshake\n");
2409		ret = -ENOMEM;
2410		goto fail_alloc_page;
2411	}
2412
2413	if (mvumi_start(mhba)) {
2414		ret = -EINVAL;
2415		goto fail_ready_state;
2416	}
2417	ret = mvumi_alloc_cmds(mhba);
2418	if (ret)
2419		goto fail_ready_state;
2420
2421	return 0;
2422
2423fail_ready_state:
2424	mvumi_release_mem_resource(mhba);
2425	pci_free_consistent(mhba->pdev, HSP_MAX_SIZE,
2426		mhba->handshake_page, mhba->handshake_page_phys);
2427fail_alloc_page:
2428	kfree(mhba->regs);
2429fail_alloc_mem:
2430	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
2431fail_ioremap:
2432	pci_release_regions(mhba->pdev);
2433
2434	return ret;
2435}
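/*
 * Initialization order: reserve the PCI regions, map the BARs, pick the
 * per-chip instance template, build the register map (mvumi_cfg_hw_reg),
 * allocate the handshake page, run the firmware handshake via
 * mvumi_start(), and finally allocate the command pool.  The failure
 * labels unwind exactly the steps that completed before them.
 */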
2436
2437/**
2438 * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
2439 * @mhba:		Adapter soft state
2440 */
2441static int mvumi_io_attach(struct mvumi_hba *mhba)
2442{
2443	struct Scsi_Host *host = mhba->shost;
2444	struct scsi_device *sdev = NULL;
2445	int ret;
2446	unsigned int max_sg = (mhba->ib_max_size + 4 -
2447		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
2448
2449	host->irq = mhba->pdev->irq;
2450	host->unique_id = mhba->unique_id;
2451	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2452	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
2453	host->max_sectors = mhba->max_transfer_size / 512;
2454	host->cmd_per_lun = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
2455	host->max_id = mhba->max_target_id;
2456	host->max_cmd_len = MAX_COMMAND_SIZE;
2457	host->transportt = &mvumi_transport_template;
2458
2459	ret = scsi_add_host(host, &mhba->pdev->dev);
2460	if (ret) {
2461		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
2462		return ret;
2463	}
2464	mhba->fw_flag |= MVUMI_FW_ATTACH;
2465
2466	mutex_lock(&mhba->sas_discovery_mutex);
2467	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2468		ret = scsi_add_device(host, 0, mhba->max_target_id - 1, 0);
2469	else
2470		ret = 0;
2471	if (ret) {
2472		dev_err(&mhba->pdev->dev, "add virtual device failed\n");
2473		mutex_unlock(&mhba->sas_discovery_mutex);
2474		goto fail_add_device;
2475	}
2476
2477	mhba->dm_thread = kthread_create(mvumi_rescan_bus,
2478						mhba, "mvumi_scanthread");
2479	if (IS_ERR(mhba->dm_thread)) {
2480		dev_err(&mhba->pdev->dev,
2481			"failed to create device scan thread\n");
2482		mutex_unlock(&mhba->sas_discovery_mutex);
2483		goto fail_create_thread;
2484	}
2485	atomic_set(&mhba->pnp_count, 1);
2486	wake_up_process(mhba->dm_thread);
2487
2488	mutex_unlock(&mhba->sas_discovery_mutex);
2489	return 0;
2490
2491fail_create_thread:
2492	if (mhba->pdev->device == PCI_DEVICE_ID_MARVELL_MV9580)
2493		sdev = scsi_device_lookup(mhba->shost, 0,
2494						mhba->max_target_id - 1, 0);
2495	if (sdev) {
2496		scsi_remove_device(sdev);
2497		scsi_device_put(sdev);
2498	}
2499fail_add_device:
2500	scsi_remove_host(mhba->shost);
2501	return ret;
2502}
2503
2504/**
2505 * mvumi_probe_one -	PCI hotplug entry point
2506 * @pdev:		PCI device structure
2507 * @id:			PCI ids of supported hotplugged adapter
2508 */
2509static int mvumi_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2510{
2511	struct Scsi_Host *host;
2512	struct mvumi_hba *mhba;
2513	int ret;
2514
2515	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
2516			pdev->vendor, pdev->device, pdev->subsystem_vendor,
2517			pdev->subsystem_device);
2518
2519	ret = pci_enable_device(pdev);
2520	if (ret)
2521		return ret;
2522
2523	pci_set_master(pdev);
2524
2525	if (IS_DMA64) {
2526		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2527		if (ret) {
2528			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2529			if (ret)
2530				goto fail_set_dma_mask;
2531		}
2532	} else {
2533		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2534		if (ret)
2535			goto fail_set_dma_mask;
2536	}
2537
2538	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
2539	if (!host) {
2540		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
2541		ret = -ENOMEM;
2542		goto fail_alloc_instance;
2543	}
2544	mhba = shost_priv(host);
2545
2546	INIT_LIST_HEAD(&mhba->cmd_pool);
2547	INIT_LIST_HEAD(&mhba->ob_data_list);
2548	INIT_LIST_HEAD(&mhba->free_ob_list);
2549	INIT_LIST_HEAD(&mhba->res_list);
2550	INIT_LIST_HEAD(&mhba->waiting_req_list);
2551	mutex_init(&mhba->device_lock);
2552	INIT_LIST_HEAD(&mhba->mhba_dev_list);
2553	INIT_LIST_HEAD(&mhba->shost_dev_list);
2554	atomic_set(&mhba->fw_outstanding, 0);
2555	init_waitqueue_head(&mhba->int_cmd_wait_q);
2556	mutex_init(&mhba->sas_discovery_mutex);
2557
2558	mhba->pdev = pdev;
2559	mhba->shost = host;
2560	mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
2561
2562	ret = mvumi_init_fw(mhba);
2563	if (ret)
2564		goto fail_init_fw;
2565
2566	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2567				"mvumi", mhba);
2568	if (ret) {
2569		dev_err(&pdev->dev, "failed to register IRQ\n");
2570		goto fail_init_irq;
2571	}
2572
2573	mhba->instancet->enable_intr(mhba);
2574	pci_set_drvdata(pdev, mhba);
2575
2576	ret = mvumi_io_attach(mhba);
2577	if (ret)
2578		goto fail_io_attach;
2579
2580	mvumi_backup_bar_addr(mhba);
2581	dev_dbg(&pdev->dev, "probe mvumi driver successfully.\n");
2582
2583	return 0;
2584
2585fail_io_attach:
2586	mhba->instancet->disable_intr(mhba);
2587	free_irq(mhba->pdev->irq, mhba);
2588fail_init_irq:
2589	mvumi_release_fw(mhba);
2590fail_init_fw:
2591	scsi_host_put(host);
2592
2593fail_alloc_instance:
2594fail_set_dma_mask:
2595	pci_disable_device(pdev);
2596
2597	return ret;
2598}
2599
2600static void mvumi_detach_one(struct pci_dev *pdev)
2601{
2602	struct Scsi_Host *host;
2603	struct mvumi_hba *mhba;
2604
2605	mhba = pci_get_drvdata(pdev);
2606	if (mhba->dm_thread) {
2607		kthread_stop(mhba->dm_thread);
2608		mhba->dm_thread = NULL;
2609	}
2610
2611	mvumi_detach_devices(mhba);
2612	host = mhba->shost;
2613	scsi_remove_host(mhba->shost);
2614	mvumi_flush_cache(mhba);
2615
2616	mhba->instancet->disable_intr(mhba);
2617	free_irq(mhba->pdev->irq, mhba);
2618	mvumi_release_fw(mhba);
2619	scsi_host_put(host);
2620	pci_disable_device(pdev);
2621	dev_dbg(&pdev->dev, "driver is removed!\n");
2622}
2623
2624/**
2625 * mvumi_shutdown -	Shutdown entry point
2626 * @pdev:		PCI device structure
2627 */
2628static void mvumi_shutdown(struct pci_dev *pdev)
2629{
2630	struct mvumi_hba *mhba = pci_get_drvdata(pdev);
2631
2632	mvumi_flush_cache(mhba);
2633}
2634
2635static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
2636{
2637	struct mvumi_hba *mhba = NULL;
2638
2639	mhba = pci_get_drvdata(pdev);
2640	mvumi_flush_cache(mhba);
2641
2642	pci_set_drvdata(pdev, mhba);
2643	mhba->instancet->disable_intr(mhba);
2644	free_irq(mhba->pdev->irq, mhba);
2645	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2646	pci_release_regions(pdev);
2647	pci_save_state(pdev);
2648	pci_disable_device(pdev);
2649	pci_set_power_state(pdev, pci_choose_state(pdev, state));
2650
2651	return 0;
2652}
2653
2654static int mvumi_resume(struct pci_dev *pdev)
2655{
2656	int ret;
2657	struct mvumi_hba *mhba = NULL;
2658
2659	mhba = pci_get_drvdata(pdev);
2660
2661	pci_set_power_state(pdev, PCI_D0);
2662	pci_enable_wake(pdev, PCI_D0, 0);
2663	pci_restore_state(pdev);
2664
2665	ret = pci_enable_device(pdev);
2666	if (ret) {
2667		dev_err(&pdev->dev, "enable device failed\n");
2668		return ret;
2669	}
2670	pci_set_master(pdev);
2671	if (IS_DMA64) {
2672		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
2673		if (ret) {
2674			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2675			if (ret)
2676				goto fail;
2677		}
2678	} else {
2679		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2680		if (ret)
2681			goto fail;
2682	}
2683	ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
2684	if (ret)
2685		goto fail;
2686	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
2687	if (ret)
2688		goto release_regions;
2689
2690	if (mvumi_cfg_hw_reg(mhba)) {
2691		ret = -EINVAL;
2692		goto unmap_pci_addr;
2693	}
2694
2695	mhba->mmio = mhba->base_addr[0];
2696	mvumi_reset(mhba);
2697
2698	if (mvumi_start(mhba)) {
2699		ret = -EINVAL;
2700		goto unmap_pci_addr;
2701	}
2702
2703	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
2704				"mvumi", mhba);
2705	if (ret) {
2706		dev_err(&pdev->dev, "failed to register IRQ\n");
2707		goto unmap_pci_addr;
2708	}
2709	mhba->instancet->enable_intr(mhba);
2710
2711	return 0;
2712
2713unmap_pci_addr:
2714	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
2715release_regions:
2716	pci_release_regions(pdev);
2717fail:
2718	pci_disable_device(pdev);
2719
2720	return ret;
2721}
2722
2723static struct pci_driver mvumi_pci_driver = {
2724
2725	.name = MV_DRIVER_NAME,
2726	.id_table = mvumi_pci_table,
2727	.probe = mvumi_probe_one,
2728	.remove = mvumi_detach_one,
2729	.shutdown = mvumi_shutdown,
2730#ifdef CONFIG_PM
2731	.suspend = mvumi_suspend,
2732	.resume = mvumi_resume,
2733#endif
2734};
2735
2736/**
2737 * mvumi_init - Driver load entry point
2738 */
2739static int __init mvumi_init(void)
2740{
2741	return pci_register_driver(&mvumi_pci_driver);
2742}
2743
2744/**
2745 * mvumi_exit - Driver unload entry point
2746 */
2747static void __exit mvumi_exit(void)
2748{
2749
2750	pci_unregister_driver(&mvumi_pci_driver);
2751}
2752
2753module_init(mvumi_init);
2754module_exit(mvumi_exit);
v3.5.6
 117
 118static struct mvumi_res *mvumi_alloc_mem_resource(struct mvumi_hba *mhba,
 119				enum resource_type type, unsigned int size)
 120{
 121	struct mvumi_res *res = kzalloc(sizeof(*res), GFP_KERNEL);
 122
 123	if (!res) {
 124		dev_err(&mhba->pdev->dev,
 125			"Failed to allocate memory for resource manager.\n");
 126		return NULL;
 127	}
 128
 129	switch (type) {
 130	case RESOURCE_CACHED_MEMORY:
 131		res->virt_addr = kzalloc(size, GFP_KERNEL);
 132		if (!res->virt_addr) {
 133			dev_err(&mhba->pdev->dev,
 134				"unable to allocate memory,size = %d.\n", size);
 135			kfree(res);
 136			return NULL;
 137		}
 138		break;
 139
 140	case RESOURCE_UNCACHED_MEMORY:
 141		size = round_up(size, 8);
 142		res->virt_addr = pci_alloc_consistent(mhba->pdev, size,
 143							&res->bus_addr);
 144		if (!res->virt_addr) {
 145			dev_err(&mhba->pdev->dev,
 146					"unable to allocate consistent mem,"
 147							"size = %d.\n", size);
 148			kfree(res);
 149			return NULL;
 150		}
 151		memset(res->virt_addr, 0, size);
 152		break;
 153
 154	default:
 155		dev_err(&mhba->pdev->dev, "unknown resource type %d.\n", type);
 156		kfree(res);
 157		return NULL;
 158	}
 159
 160	res->type = type;
 161	res->size = size;
 162	INIT_LIST_HEAD(&res->entry);
 163	list_add_tail(&res->entry, &mhba->res_list);
 164
 165	return res;
 166}
 167
 168static void mvumi_release_mem_resource(struct mvumi_hba *mhba)
 169{
 170	struct mvumi_res *res, *tmp;
 171
 172	list_for_each_entry_safe(res, tmp, &mhba->res_list, entry) {
 173		switch (res->type) {
 174		case RESOURCE_UNCACHED_MEMORY:
 175			pci_free_consistent(mhba->pdev, res->size,
 176						res->virt_addr, res->bus_addr);
 177			break;
 178		case RESOURCE_CACHED_MEMORY:
 179			kfree(res->virt_addr);
 180			break;
 181		default:
 182			dev_err(&mhba->pdev->dev,
 183				"unknown resource type %d\n", res->type);
 184			break;
 185		}
 186		list_del(&res->entry);
 187		kfree(res);
 188	}
 189	mhba->fw_flag &= ~MVUMI_FW_ALLOC;
 190}
 191
 192/**
 193 * mvumi_make_sgl -	Prepares SGL
 194 * @mhba:		Adapter soft state
 195 * @scmd:		SCSI command from the mid-layer
 196 * @sgl_p:		SGL to be filled in
 197 * @sg_count:		returns the number of SG elements
 198 *
 199 * If successful, this function returns 0. Otherwise, it returns -1.
 200 */
 201static int mvumi_make_sgl(struct mvumi_hba *mhba, struct scsi_cmnd *scmd,
 202					void *sgl_p, unsigned char *sg_count)
 203{
 204	struct scatterlist *sg;
 205	struct mvumi_sgl *m_sg = (struct mvumi_sgl *) sgl_p;
 206	unsigned int i;
 207	unsigned int sgnum = scsi_sg_count(scmd);
 208	dma_addr_t busaddr;
 209
 210	if (sgnum) {
 211		sg = scsi_sglist(scmd);
 212		*sg_count = pci_map_sg(mhba->pdev, sg, sgnum,
 213				(int) scmd->sc_data_direction);
 214		if (*sg_count > mhba->max_sge) {
 215			dev_err(&mhba->pdev->dev, "sg count[0x%x] is bigger "
 216						"than max sg[0x%x].\n",
 217						*sg_count, mhba->max_sge);
 218			return -1;
 219		}
 220		for (i = 0; i < *sg_count; i++) {
 221			busaddr = sg_dma_address(&sg[i]);
 222			m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 223			m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 224			m_sg->flags = 0;
 225			m_sg->size = cpu_to_le32(sg_dma_len(&sg[i]));
 226			if ((i + 1) == *sg_count)
 227				m_sg->flags |= SGD_EOT;
 228
 229			m_sg++;
 230		}
 231	} else {
 232		scmd->SCp.dma_handle = scsi_bufflen(scmd) ?
 233			pci_map_single(mhba->pdev, scsi_sglist(scmd),
 234				scsi_bufflen(scmd),
 235				(int) scmd->sc_data_direction)
 236			: 0;
 237		busaddr = scmd->SCp.dma_handle;
 238		m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(busaddr));
 239		m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(busaddr));
 240		m_sg->flags = SGD_EOT;
 241		m_sg->size = cpu_to_le32(scsi_bufflen(scmd));
 242		*sg_count = 1;
 243	}
 244
 245	return 0;
 246}
 247
 248static int mvumi_internal_cmd_sgl(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
 249							unsigned int size)
 250{
 251	struct mvumi_sgl *m_sg;
 252	void *virt_addr;
 253	dma_addr_t phy_addr;
 254
 255	if (size == 0)
 256		return 0;
 257
 258	virt_addr = pci_alloc_consistent(mhba->pdev, size, &phy_addr);
 259	if (!virt_addr)
 260		return -1;
 261
 262	memset(virt_addr, 0, size);
 263
 264	m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 265	cmd->frame->sg_counts = 1;
 266	cmd->data_buf = virt_addr;
 267
 268	m_sg->baseaddr_l = cpu_to_le32(lower_32_bits(phy_addr));
 269	m_sg->baseaddr_h = cpu_to_le32(upper_32_bits(phy_addr));
 270	m_sg->flags = SGD_EOT;
 271	m_sg->size = cpu_to_le32(size);
 272
 273	return 0;
 274}
 275
 276static struct mvumi_cmd *mvumi_create_internal_cmd(struct mvumi_hba *mhba,
 277				unsigned int buf_size)
 278{
 279	struct mvumi_cmd *cmd;
 280
 281	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 282	if (!cmd) {
 283		dev_err(&mhba->pdev->dev, "failed to create an internal cmd\n");
 284		return NULL;
 285	}
 286	INIT_LIST_HEAD(&cmd->queue_pointer);
 287
 288	cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
 289	if (!cmd->frame) {
 290		dev_err(&mhba->pdev->dev, "failed to allocate memory for FW"
 291			" frame,size = %d.\n", mhba->ib_max_size);
 292		kfree(cmd);
 293		return NULL;
 294	}
 295
 296	if (buf_size) {
 297		if (mvumi_internal_cmd_sgl(mhba, cmd, buf_size)) {
 298			dev_err(&mhba->pdev->dev, "failed to allocate memory"
 299						" for internal frame\n");
 300			kfree(cmd->frame);
 301			kfree(cmd);
 302			return NULL;
 303		}
 304	} else
 305		cmd->frame->sg_counts = 0;
 306
 307	return cmd;
 308}
 309
 310static void mvumi_delete_internal_cmd(struct mvumi_hba *mhba,
 311						struct mvumi_cmd *cmd)
 312{
 313	struct mvumi_sgl *m_sg;
 314	unsigned int size;
 315	dma_addr_t phy_addr;
 316
 317	if (cmd && cmd->frame) {
 318		if (cmd->frame->sg_counts) {
 319			m_sg = (struct mvumi_sgl *) &cmd->frame->payload[0];
 320			size = m_sg->size;
 321
 322			phy_addr = (dma_addr_t) m_sg->baseaddr_l |
 323				(dma_addr_t) ((m_sg->baseaddr_h << 16) << 16);
 324
 325			pci_free_consistent(mhba->pdev, size, cmd->data_buf,
 326								phy_addr);
 327		}
 328		kfree(cmd->frame);
 329		kfree(cmd);
 330	}
 331}
 332
 333/**
 334 * mvumi_get_cmd -	Get a command from the free pool
 335 * @mhba:		Adapter soft state
 336 *
 337 * Returns a free command from the pool
 338 */
 339static struct mvumi_cmd *mvumi_get_cmd(struct mvumi_hba *mhba)
 340{
 341	struct mvumi_cmd *cmd = NULL;
 342
 343	if (likely(!list_empty(&mhba->cmd_pool))) {
 344		cmd = list_entry((&mhba->cmd_pool)->next,
 345				struct mvumi_cmd, queue_pointer);
 346		list_del_init(&cmd->queue_pointer);
 347	} else
 348		dev_warn(&mhba->pdev->dev, "command pool is empty!\n");
 349
 350	return cmd;
 351}
 352
 353/**
 354 * mvumi_return_cmd -	Return a cmd to free command pool
 355 * @mhba:		Adapter soft state
 356 * @cmd:		Command packet to be returned to free command pool
 357 */
 358static inline void mvumi_return_cmd(struct mvumi_hba *mhba,
 359						struct mvumi_cmd *cmd)
 360{
 361	cmd->scmd = NULL;
 362	list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 363}
 364
 365/**
 366 * mvumi_free_cmds -	Free all the cmds in the free cmd pool
 367 * @mhba:		Adapter soft state
 368 */
 369static void mvumi_free_cmds(struct mvumi_hba *mhba)
 370{
 371	struct mvumi_cmd *cmd;
 372
 373	while (!list_empty(&mhba->cmd_pool)) {
 374		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 375							queue_pointer);
 376		list_del(&cmd->queue_pointer);
 377		kfree(cmd->frame);
 378		kfree(cmd);
 379	}
 380}
 381
 382/**
 383 * mvumi_alloc_cmds -	Allocates the command packets
 384 * @mhba:		Adapter soft state
 385 *
 386 */
 387static int mvumi_alloc_cmds(struct mvumi_hba *mhba)
 388{
 389	int i;
 390	struct mvumi_cmd *cmd;
 391
 392	for (i = 0; i < mhba->max_io; i++) {
 393		cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
 394		if (!cmd)
 395			goto err_exit;
 396
 397		INIT_LIST_HEAD(&cmd->queue_pointer);
 398		list_add_tail(&cmd->queue_pointer, &mhba->cmd_pool);
 399		cmd->frame = kzalloc(mhba->ib_max_size, GFP_KERNEL);
 400		if (!cmd->frame)
 401			goto err_exit;
 402	}
 403	return 0;
 404
 405err_exit:
 406	dev_err(&mhba->pdev->dev,
 407			"failed to allocate memory for cmd[0x%x].\n", i);
 408	while (!list_empty(&mhba->cmd_pool)) {
 409		cmd = list_first_entry(&mhba->cmd_pool, struct mvumi_cmd,
 410						queue_pointer);
 411		list_del(&cmd->queue_pointer);
 412		kfree(cmd->frame);
 413		kfree(cmd);
 414	}
 415	return -ENOMEM;
 416}
 417
 418static int mvumi_get_ib_list_entry(struct mvumi_hba *mhba, void **ib_entry)
 419{
 420	unsigned int ib_rp_reg, cur_ib_entry;
 421
 422	if (atomic_read(&mhba->fw_outstanding) >= mhba->max_io) {
 423		dev_warn(&mhba->pdev->dev, "firmware io overflow.\n");
 424		return -1;
 425	}
 426	ib_rp_reg = ioread32(mhba->mmio + CLA_INB_READ_POINTER);
 427
 428	if (unlikely(((ib_rp_reg & CL_SLOT_NUM_MASK) ==
 429			(mhba->ib_cur_slot & CL_SLOT_NUM_MASK)) &&
 430			((ib_rp_reg & CL_POINTER_TOGGLE) !=
 431			(mhba->ib_cur_slot & CL_POINTER_TOGGLE)))) {
 432		dev_warn(&mhba->pdev->dev, "no free slot to use.\n");
 433		return -1;
 434	}
 435
 436	cur_ib_entry = mhba->ib_cur_slot & CL_SLOT_NUM_MASK;
 437	cur_ib_entry++;
 438	if (cur_ib_entry >= mhba->list_num_io) {
 439		cur_ib_entry -= mhba->list_num_io;
 440		mhba->ib_cur_slot ^= CL_POINTER_TOGGLE;
 441	}
 442	mhba->ib_cur_slot &= ~CL_SLOT_NUM_MASK;
 443	mhba->ib_cur_slot |= (cur_ib_entry & CL_SLOT_NUM_MASK);
 444	*ib_entry = mhba->ib_list + cur_ib_entry * mhba->ib_max_size;
 445	atomic_inc(&mhba->fw_outstanding);
 446
 447	return 0;
 448}
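/*
 * Inbound ring bookkeeping: ib_cur_slot keeps the slot index in
 * CL_SLOT_NUM_MASK plus a toggle bit (CL_POINTER_TOGGLE) that flips each
 * time the index wraps past list_num_io.  Comparing both fields against
 * the hardware read pointer is what lets the "no free slot" test above
 * tell a completely full ring (same index, different toggle) from an
 * empty one (same index, same toggle).
 */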
 449
 450static void mvumi_send_ib_list_entry(struct mvumi_hba *mhba)
 451{
 452	iowrite32(0xfff, mhba->ib_shadow);
 453	iowrite32(mhba->ib_cur_slot, mhba->mmio + CLA_INB_WRITE_POINTER);
 454}
 455
 456static char mvumi_check_ob_frame(struct mvumi_hba *mhba,
 457		unsigned int cur_obf, struct mvumi_rsp_frame *p_outb_frame)
 458{
 459	unsigned short tag, request_id;
 460
 461	udelay(1);
 462	p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 463	request_id = p_outb_frame->request_id;
 464	tag = p_outb_frame->tag;
 465	if (tag > mhba->tag_pool.size) {
 466		dev_err(&mhba->pdev->dev, "ob frame data error\n");
 467		return -1;
 468	}
 469	if (mhba->tag_cmd[tag] == NULL) {
 470		dev_err(&mhba->pdev->dev, "tag[0x%x] with NO command\n", tag);
 471		return -1;
 472	} else if (mhba->tag_cmd[tag]->request_id != request_id &&
 473						mhba->request_id_enabled) {
 474			dev_err(&mhba->pdev->dev, "request ID from FW:0x%x,"
 475					"cmd request ID:0x%x\n", request_id,
 476					mhba->tag_cmd[tag]->request_id);
 477			return -1;
 478	}
 479
 480	return 0;
 481}
 482
 483static void mvumi_receive_ob_list_entry(struct mvumi_hba *mhba)
 484{
 485	unsigned int ob_write_reg, ob_write_shadow_reg;
 486	unsigned int cur_obf, assign_obf_end, i;
 487	struct mvumi_ob_data *ob_data;
 488	struct mvumi_rsp_frame *p_outb_frame;
 489
 490	do {
 491		ob_write_reg = ioread32(mhba->mmio + CLA_OUTB_COPY_POINTER);
 492		ob_write_shadow_reg = ioread32(mhba->ob_shadow);
 493	} while ((ob_write_reg & CL_SLOT_NUM_MASK) != ob_write_shadow_reg);
 494
 495	cur_obf = mhba->ob_cur_slot & CL_SLOT_NUM_MASK;
 496	assign_obf_end = ob_write_reg & CL_SLOT_NUM_MASK;
 497
 498	if ((ob_write_reg & CL_POINTER_TOGGLE) !=
 499				(mhba->ob_cur_slot & CL_POINTER_TOGGLE)) {
 500		assign_obf_end += mhba->list_num_io;
 501	}
 502
 503	for (i = (assign_obf_end - cur_obf); i != 0; i--) {
 504		cur_obf++;
 505		if (cur_obf >= mhba->list_num_io) {
 506			cur_obf -= mhba->list_num_io;
 507			mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
 508		}
 509
 510		p_outb_frame = mhba->ob_list + cur_obf * mhba->ob_max_size;
 511
 512		/* Copy pointer may point to entry in outbound list
 513		*  before entry has valid data
 514		*/
 515		if (unlikely(p_outb_frame->tag > mhba->tag_pool.size ||
 516			mhba->tag_cmd[p_outb_frame->tag] == NULL ||
 517			p_outb_frame->request_id !=
 518				mhba->tag_cmd[p_outb_frame->tag]->request_id))
 519			if (mvumi_check_ob_frame(mhba, cur_obf, p_outb_frame))
 520				continue;
 521
 522		if (!list_empty(&mhba->ob_data_list)) {
 523			ob_data = (struct mvumi_ob_data *)
 524				list_first_entry(&mhba->ob_data_list,
 525					struct mvumi_ob_data, list);
 526			list_del_init(&ob_data->list);
 527		} else {
 528			ob_data = NULL;
 529			if (cur_obf == 0) {
 530				cur_obf = mhba->list_num_io - 1;
 531				mhba->ob_cur_slot ^= CL_POINTER_TOGGLE;
 532			} else
 533				cur_obf -= 1;
 534			break;
 535		}
 536
 537		memcpy(ob_data->data, p_outb_frame, mhba->ob_max_size);
 538		p_outb_frame->tag = 0xff;
 539
 540		list_add_tail(&ob_data->list, &mhba->free_ob_list);
 541	}
 542	mhba->ob_cur_slot &= ~CL_SLOT_NUM_MASK;
 543	mhba->ob_cur_slot |= (cur_obf & CL_SLOT_NUM_MASK);
 544	iowrite32(mhba->ob_cur_slot, mhba->mmio + CLA_OUTB_READ_POINTER);
 545}
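/*
 * Outbound drain: the loop at the top waits until the copy pointer and
 * its memory shadow agree, so an index the hardware is still updating is
 * never trusted.  Each new entry is copied into a buffer taken from
 * ob_data_list and queued on free_ob_list for mvumi_handle_clob(); if
 * the buffer pool runs dry the walk stops early, and the read pointer is
 * only advanced over what was actually consumed.
 */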
 546
 547static void mvumi_reset(void *regs)
 548{
 549	iowrite32(0, regs + CPU_ENPOINTA_MASK_REG);
 550	if (ioread32(regs + CPU_ARM_TO_PCIEA_MSG1) != HANDSHAKE_DONESTATE)
 551		return;
 552
 553	iowrite32(DRBL_SOFT_RESET, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
 554}
 555
 556static unsigned char mvumi_start(struct mvumi_hba *mhba);
 557
 558static int mvumi_wait_for_outstanding(struct mvumi_hba *mhba)
 559{
 560	mhba->fw_state = FW_STATE_ABORT;
 561	mvumi_reset(mhba->mmio);
 562
 563	if (mvumi_start(mhba))
 564		return FAILED;
 565	else
 566		return SUCCESS;
 567}
 568
 569static int mvumi_host_reset(struct scsi_cmnd *scmd)
 570{
 571	struct mvumi_hba *mhba;
 572
 573	mhba = (struct mvumi_hba *) scmd->device->host->hostdata;
 574
 575	scmd_printk(KERN_NOTICE, scmd, "RESET -%ld cmd=%x retries=%x\n",
 576			scmd->serial_number, scmd->cmnd[0], scmd->retries);
 577
 578	return mvumi_wait_for_outstanding(mhba);
 579}
 580
 581static int mvumi_issue_blocked_cmd(struct mvumi_hba *mhba,
 582						struct mvumi_cmd *cmd)
 583{
 584	unsigned long flags;
 585
 586	cmd->cmd_status = REQ_STATUS_PENDING;
 587
 588	if (atomic_read(&cmd->sync_cmd)) {
 589		dev_err(&mhba->pdev->dev,
 590			"last blocked cmd not finished, sync_cmd = %d\n",
 591						atomic_read(&cmd->sync_cmd));
 592		BUG_ON(1);
 593		return -1;
 594	}
 595	atomic_inc(&cmd->sync_cmd);
 596	spin_lock_irqsave(mhba->shost->host_lock, flags);
 597	mhba->instancet->fire_cmd(mhba, cmd);
 598	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 599
 600	wait_event_timeout(mhba->int_cmd_wait_q,
 601		(cmd->cmd_status != REQ_STATUS_PENDING),
 602		MVUMI_INTERNAL_CMD_WAIT_TIME * HZ);
 603
 604	/* command timeout */
 605	if (atomic_read(&cmd->sync_cmd)) {
 606		spin_lock_irqsave(mhba->shost->host_lock, flags);
 607		atomic_dec(&cmd->sync_cmd);
 608		if (mhba->tag_cmd[cmd->frame->tag]) {
 609			mhba->tag_cmd[cmd->frame->tag] = 0;
 610			dev_warn(&mhba->pdev->dev, "TIMEOUT:release tag [%d]\n",
 611							cmd->frame->tag);
 612			tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
 613		}
 614		if (!list_empty(&cmd->queue_pointer)) {
 615			dev_warn(&mhba->pdev->dev,
 616				"TIMEOUT: an internal command was not sent!\n");
 617			list_del_init(&cmd->queue_pointer);
 618		} else
 619			atomic_dec(&mhba->fw_outstanding);
 620
 621		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
 622	}
 623	return 0;
 624}
 625
 626static void mvumi_release_fw(struct mvumi_hba *mhba)
 627{
 628	mvumi_free_cmds(mhba);
 629	mvumi_release_mem_resource(mhba);
 630	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
 631	kfree(mhba->handshake_page);
 632	pci_release_regions(mhba->pdev);
 633}
 634
 635static unsigned char mvumi_flush_cache(struct mvumi_hba *mhba)
 636{
 637	struct mvumi_cmd *cmd;
 638	struct mvumi_msg_frame *frame;
 639	unsigned char device_id, retry = 0;
 640	unsigned char bitcount = sizeof(unsigned char) * 8;
 641
 642	for (device_id = 0; device_id < mhba->max_target_id; device_id++) {
 643		if (!(mhba->target_map[device_id / bitcount] &
 644				(1 << (device_id % bitcount))))
 645			continue;
 646get_cmd:	cmd = mvumi_create_internal_cmd(mhba, 0);
 647		if (!cmd) {
 648			if (retry++ >= 5) {
 649				dev_err(&mhba->pdev->dev, "failed to get memory"
 650					" for internal flush cache cmd for "
 651					"device %d", device_id);
 652				retry = 0;
 653				continue;
 654			} else
 655				goto get_cmd;
 656		}
 657		cmd->scmd = NULL;
 658		cmd->cmd_status = REQ_STATUS_PENDING;
 659		atomic_set(&cmd->sync_cmd, 0);
 660		frame = cmd->frame;
 661		frame->req_function = CL_FUN_SCSI_CMD;
 662		frame->device_id = device_id;
 663		frame->cmd_flag = CMD_FLAG_NON_DATA;
 664		frame->data_transfer_length = 0;
 665		frame->cdb_length = MAX_COMMAND_SIZE;
 666		memset(frame->cdb, 0, MAX_COMMAND_SIZE);
 667		frame->cdb[0] = SCSI_CMD_MARVELL_SPECIFIC;
 668		frame->cdb[2] = CDB_CORE_SHUTDOWN;
 669
 670		mvumi_issue_blocked_cmd(mhba, cmd);
 671		if (cmd->cmd_status != SAM_STAT_GOOD) {
 672			dev_err(&mhba->pdev->dev,
 673				"device %d flush cache failed, status=0x%x.\n",
 674				device_id, cmd->cmd_status);
 675		}
 676
 677		mvumi_delete_internal_cmd(mhba, cmd);
 678	}
 679	return 0;
 680}
 681
 682static unsigned char
 683mvumi_calculate_checksum(struct mvumi_hs_header *p_header,
 684							unsigned short len)
 685{
 686	unsigned char *ptr;
 687	unsigned char ret = 0, i;
 688
 689	ptr = (unsigned char *) p_header->frame_content;
 690	for (i = 0; i < len; i++) {
 691		ret ^= *ptr;
 692		ptr++;
 693	}
 694
 695	return ret;
 696}
 697
 698void mvumi_hs_build_page(struct mvumi_hba *mhba,
 699				struct mvumi_hs_header *hs_header)
 700{
 701	struct mvumi_hs_page2 *hs_page2;
 702	struct mvumi_hs_page4 *hs_page4;
 703	struct mvumi_hs_page3 *hs_page3;
 704	struct timeval time;
 705	unsigned int local_time;
 706
 707	switch (hs_header->page_code) {
 708	case HS_PAGE_HOST_INFO:
 709		hs_page2 = (struct mvumi_hs_page2 *) hs_header;
 710		hs_header->frame_length = sizeof(*hs_page2) - 4;
 711		memset(hs_header->frame_content, 0, hs_header->frame_length);
 712		hs_page2->host_type = 3; /* 3 means Linux */
 713		hs_page2->host_ver.ver_major = VER_MAJOR;
 714		hs_page2->host_ver.ver_minor = VER_MINOR;
 715		hs_page2->host_ver.ver_oem = VER_OEM;
 716		hs_page2->host_ver.ver_build = VER_BUILD;
 717		hs_page2->system_io_bus = 0;
 718		hs_page2->slot_number = 0;
 719		hs_page2->intr_level = 0;
 720		hs_page2->intr_vector = 0;
 721		do_gettimeofday(&time);
 722		local_time = (unsigned int) (time.tv_sec -
 723						(sys_tz.tz_minuteswest * 60));
 724		hs_page2->seconds_since1970 = local_time;
 725		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 726						hs_header->frame_length);
 727		break;
 728
 729	case HS_PAGE_FIRM_CTL:
 730		hs_page3 = (struct mvumi_hs_page3 *) hs_header;
 731		hs_header->frame_length = sizeof(*hs_page3) - 4;
 732		memset(hs_header->frame_content, 0, hs_header->frame_length);
 733		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 734						hs_header->frame_length);
 735		break;
 736
 737	case HS_PAGE_CL_INFO:
 738		hs_page4 = (struct mvumi_hs_page4 *) hs_header;
 739		hs_header->frame_length = sizeof(*hs_page4) - 4;
 740		memset(hs_header->frame_content, 0, hs_header->frame_length);
 741		hs_page4->ib_baseaddr_l = lower_32_bits(mhba->ib_list_phys);
 742		hs_page4->ib_baseaddr_h = upper_32_bits(mhba->ib_list_phys);
 743
 744		hs_page4->ob_baseaddr_l = lower_32_bits(mhba->ob_list_phys);
 745		hs_page4->ob_baseaddr_h = upper_32_bits(mhba->ob_list_phys);
 746		hs_page4->ib_entry_size = mhba->ib_max_size_setting;
 747		hs_page4->ob_entry_size = mhba->ob_max_size_setting;
 748		hs_page4->ob_depth = mhba->list_num_io;
 749		hs_page4->ib_depth = mhba->list_num_io;
 750		hs_header->checksum = mvumi_calculate_checksum(hs_header,
 751						hs_header->frame_length);
 752		break;
 753
 754	default:
 755		dev_err(&mhba->pdev->dev, "cannot build page, code[0x%x]\n",
 756			hs_header->page_code);
 757		break;
 758	}
 759}
 760
 761/**
 762 * mvumi_init_data -	Initialize requested data for FW
 763 * @mhba:			Adapter soft state
 764 */
 765static int mvumi_init_data(struct mvumi_hba *mhba)
 766{
 767	struct mvumi_ob_data *ob_pool;
 768	struct mvumi_res *res_mgnt;
 769	unsigned int tmp_size, offset, i;
 770	void *virmem, *v;
 771	dma_addr_t p;
 772
 773	if (mhba->fw_flag & MVUMI_FW_ALLOC)
 774		return 0;
 775
 776	tmp_size = mhba->ib_max_size * mhba->max_io;
 777	tmp_size += 128 + mhba->ob_max_size * mhba->max_io;
 778	tmp_size += 8 + sizeof(u32) + 16;
 779
 780	res_mgnt = mvumi_alloc_mem_resource(mhba,
 781					RESOURCE_UNCACHED_MEMORY, tmp_size);
 782	if (!res_mgnt) {
 783		dev_err(&mhba->pdev->dev,
 784			"failed to allocate memory for inbound list\n");
 785		goto fail_alloc_dma_buf;
 786	}
 787
 788	p = res_mgnt->bus_addr;
 789	v = res_mgnt->virt_addr;
 790	/* ib_list */
 791	offset = round_up(p, 128) - p;
 792	p += offset;
 793	v += offset;
 794	mhba->ib_list = v;
 795	mhba->ib_list_phys = p;
 796	v += mhba->ib_max_size * mhba->max_io;
 797	p += mhba->ib_max_size * mhba->max_io;
 798	/* ib shadow */
 799	offset = round_up(p, 8) - p;
 800	p += offset;
 801	v += offset;
 802	mhba->ib_shadow = v;
 803	mhba->ib_shadow_phys = p;
 804	p += sizeof(u32);
 805	v += sizeof(u32);
 806	/* ob shadow */
 807	offset = round_up(p, 8) - p;
 808	p += offset;
 809	v += offset;
 810	mhba->ob_shadow = v;
 811	mhba->ob_shadow_phys = p;
 812	p += 8;
 813	v += 8;
 814
 815	/* ob list */
 816	offset = round_up(p, 128) - p;
 817	p += offset;
 818	v += offset;
 819
 820	mhba->ob_list = v;
 821	mhba->ob_list_phys = p;
 822
 823	/* ob data pool */
 824	tmp_size = mhba->max_io * (mhba->ob_max_size + sizeof(*ob_pool));
 825	tmp_size = round_up(tmp_size, 8);
 826
 827	res_mgnt = mvumi_alloc_mem_resource(mhba,
 828				RESOURCE_CACHED_MEMORY, tmp_size);
 829	if (!res_mgnt) {
 830		dev_err(&mhba->pdev->dev,
 831			"failed to allocate memory for outbound data buffer\n");
 832		goto fail_alloc_dma_buf;
 833	}
 834	virmem = res_mgnt->virt_addr;
 835
 836	for (i = mhba->max_io; i != 0; i--) {
 837		ob_pool = (struct mvumi_ob_data *) virmem;
 838		list_add_tail(&ob_pool->list, &mhba->ob_data_list);
 839		virmem += mhba->ob_max_size + sizeof(*ob_pool);
 840	}
 841
 842	tmp_size = sizeof(unsigned short) * mhba->max_io +
 843				sizeof(struct mvumi_cmd *) * mhba->max_io;
 844	tmp_size += round_up(mhba->max_target_id, sizeof(unsigned char) * 8) /
 845						(sizeof(unsigned char) * 8);
 846
 847	res_mgnt = mvumi_alloc_mem_resource(mhba,
 848				RESOURCE_CACHED_MEMORY, tmp_size);
 849	if (!res_mgnt) {
 850		dev_err(&mhba->pdev->dev,
 851			"failed to allocate memory for tag and target map\n");
 852		goto fail_alloc_dma_buf;
 853	}
 854
 855	virmem = res_mgnt->virt_addr;
 856	mhba->tag_pool.stack = virmem;
 857	mhba->tag_pool.size = mhba->max_io;
 858	tag_init(&mhba->tag_pool, mhba->max_io);
 859	virmem += sizeof(unsigned short) * mhba->max_io;
 860
 861	mhba->tag_cmd = virmem;
 862	virmem += sizeof(struct mvumi_cmd *) * mhba->max_io;
 863
 864	mhba->target_map = virmem;
 865
 866	mhba->fw_flag |= MVUMI_FW_ALLOC;
 867	return 0;
 868
 869fail_alloc_dma_buf:
 870	mvumi_release_mem_resource(mhba);
 871	return -1;
 872}
 873
 874static int mvumi_hs_process_page(struct mvumi_hba *mhba,
 875				struct mvumi_hs_header *hs_header)
 876{
 877	struct mvumi_hs_page1 *hs_page1;
 878	unsigned char page_checksum;
 879
 880	page_checksum = mvumi_calculate_checksum(hs_header,
 881						hs_header->frame_length);
 882	if (page_checksum != hs_header->checksum) {
 883		dev_err(&mhba->pdev->dev, "checksum error\n");
 884		return -1;
 885	}
 886
 887	switch (hs_header->page_code) {
 888	case HS_PAGE_FIRM_CAP:
 889		hs_page1 = (struct mvumi_hs_page1 *) hs_header;
 890
 891		mhba->max_io = hs_page1->max_io_support;
 892		mhba->list_num_io = hs_page1->cl_inout_list_depth;
 893		mhba->max_transfer_size = hs_page1->max_transfer_size;
 894		mhba->max_target_id = hs_page1->max_devices_support;
 895		mhba->hba_capability = hs_page1->capability;
 896		mhba->ib_max_size_setting = hs_page1->cl_in_max_entry_size;
 897		mhba->ib_max_size = (1 << hs_page1->cl_in_max_entry_size) << 2;
 898
 899		mhba->ob_max_size_setting = hs_page1->cl_out_max_entry_size;
 900		mhba->ob_max_size = (1 << hs_page1->cl_out_max_entry_size) << 2;
 901
 902		dev_dbg(&mhba->pdev->dev, "FW version:%d\n",
 903						hs_page1->fw_ver.ver_build);
 904
 905		break;
 906	default:
 907		dev_err(&mhba->pdev->dev, "handshake: page code error\n");
 908		return -1;
 909	}
 910	return 0;
 911}
 912
 913/**
 914 * mvumi_handshake -	Move the FW to READY state
 915 * @mhba:				Adapter soft state
 916 *
 917 * During initialization, the FW can be in any one of several possible
 918 * states. If the FW is in an operational or waiting-for-handshake state,
 919 * the driver must take steps to bring it to the ready state. Otherwise,
 920 * it has to wait for the ready state.
 921 */
 922static int mvumi_handshake(struct mvumi_hba *mhba)
 923{
 924	unsigned int hs_state, tmp, hs_fun;
 925	struct mvumi_hs_header *hs_header;
 926	void *regs = mhba->mmio;
 927
 928	if (mhba->fw_state == FW_STATE_STARTING)
 929		hs_state = HS_S_START;
 930	else {
 931		tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG0);
 932		hs_state = HS_GET_STATE(tmp);
 933		dev_dbg(&mhba->pdev->dev, "handshake state[0x%x].\n", hs_state);
 934		if (HS_GET_STATUS(tmp) != HS_STATUS_OK) {
 935			mhba->fw_state = FW_STATE_STARTING;
 936			return -1;
 937		}
 938	}
 939
 940	hs_fun = 0;
 941	switch (hs_state) {
 942	case HS_S_START:
 943		mhba->fw_state = FW_STATE_HANDSHAKING;
 944		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
 945		HS_SET_STATE(hs_fun, HS_S_RESET);
 946		iowrite32(HANDSHAKE_SIGNATURE, regs + CPU_PCIEA_TO_ARM_MSG1);
 947		iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
 948		iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
 949		break;
 950
 951	case HS_S_RESET:
 952		iowrite32(lower_32_bits(mhba->handshake_page_phys),
 953					regs + CPU_PCIEA_TO_ARM_MSG1);
 954		iowrite32(upper_32_bits(mhba->handshake_page_phys),
 955					regs + CPU_ARM_TO_PCIEA_MSG1);
 956		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
 957		HS_SET_STATE(hs_fun, HS_S_PAGE_ADDR);
 958		iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
 959		iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
 960
 961		break;
 962
 963	case HS_S_PAGE_ADDR:
 964	case HS_S_QUERY_PAGE:
 965	case HS_S_SEND_PAGE:
 966		hs_header = (struct mvumi_hs_header *) mhba->handshake_page;
 967		if (hs_header->page_code == HS_PAGE_FIRM_CAP) {
 968			mhba->hba_total_pages =
 969			((struct mvumi_hs_page1 *) hs_header)->total_pages;
 970
 971			if (mhba->hba_total_pages == 0)
 972				mhba->hba_total_pages = HS_PAGE_TOTAL-1;
 973		}
 974
 975		if (hs_state == HS_S_QUERY_PAGE) {
 976			if (mvumi_hs_process_page(mhba, hs_header)) {
 977				HS_SET_STATE(hs_fun, HS_S_ABORT);
 978				return -1;
 979			}
 980			if (mvumi_init_data(mhba)) {
 981				HS_SET_STATE(hs_fun, HS_S_ABORT);
 982				return -1;
 983			}
 984		} else if (hs_state == HS_S_PAGE_ADDR) {
 985			hs_header->page_code = 0;
 986			mhba->hba_total_pages = HS_PAGE_TOTAL-1;
 987		}
 988
 989		if ((hs_header->page_code + 1) <= mhba->hba_total_pages) {
 990			hs_header->page_code++;
 991			if (hs_header->page_code != HS_PAGE_FIRM_CAP) {
 992				mvumi_hs_build_page(mhba, hs_header);
 993				HS_SET_STATE(hs_fun, HS_S_SEND_PAGE);
 994			} else
 995				HS_SET_STATE(hs_fun, HS_S_QUERY_PAGE);
 996		} else
 997			HS_SET_STATE(hs_fun, HS_S_END);
 998
 999		HS_SET_STATUS(hs_fun, HS_STATUS_OK);
1000		iowrite32(hs_fun, regs + CPU_PCIEA_TO_ARM_MSG0);
1001		iowrite32(DRBL_HANDSHAKE, regs + CPU_PCIEA_TO_ARM_DRBL_REG);
1002		break;
1003
1004	case HS_S_END:
1005		/* Set communication list ISR */
1006		tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG);
1007		tmp |= INT_MAP_COMAOUT | INT_MAP_COMAERR;
1008		iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
1009		iowrite32(mhba->list_num_io, mhba->ib_shadow);
1010		/* Set InBound List Available count shadow */
1011		iowrite32(lower_32_bits(mhba->ib_shadow_phys),
1012					regs + CLA_INB_AVAL_COUNT_BASEL);
1013		iowrite32(upper_32_bits(mhba->ib_shadow_phys),
1014					regs + CLA_INB_AVAL_COUNT_BASEH);
1015
1016		/* Set OutBound List Available count shadow */
1017		iowrite32((mhba->list_num_io-1) | CL_POINTER_TOGGLE,
1018						mhba->ob_shadow);
1019		iowrite32(lower_32_bits(mhba->ob_shadow_phys), regs + 0x5B0);
1020		iowrite32(upper_32_bits(mhba->ob_shadow_phys), regs + 0x5B4);
1021
1022		mhba->ib_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
1023		mhba->ob_cur_slot = (mhba->list_num_io - 1) | CL_POINTER_TOGGLE;
1024		mhba->fw_state = FW_STATE_STARTED;
1025
1026		break;
1027	default:
1028		dev_err(&mhba->pdev->dev, "unknown handshake state [0x%x].\n",
1029								hs_state);
1030		return -1;
1031	}
1032	return 0;
1033}
1034
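/*
 * Run one handshake step, then poll the FW status doorbell until the
 * FW reaches FW_STATE_STARTED or raises the next handshake doorbell.
 * Gives up after FW_MAX_DELAY seconds without a response.
 */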
1035static unsigned char mvumi_handshake_event(struct mvumi_hba *mhba)
1036{
1037	unsigned int isr_status;
1038	unsigned long before;
1039
1040	before = jiffies;
1041	mvumi_handshake(mhba);
1042	do {
1043		isr_status = mhba->instancet->read_fw_status_reg(mhba->mmio);
1044
1045		if (mhba->fw_state == FW_STATE_STARTED)
1046			return 0;
1047		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1048			dev_err(&mhba->pdev->dev,
1049				"no handshake response at state 0x%x.\n",
1050				  mhba->fw_state);
1051			dev_err(&mhba->pdev->dev,
1052				"isr : global=0x%x,status=0x%x.\n",
1053					mhba->global_isr, isr_status);
1054			return -1;
1055		}
1056		rmb();
1057		usleep_range(1000, 2000);
1058	} while (!(isr_status & DRBL_HANDSHAKE_ISR));
1059
1060	return 0;
1061}
1062
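/*
 * Wait for the FW signature register to report READY or DONE (issuing
 * DRBL_MU_RESET while it does not), then step the handshake state
 * machine via mvumi_handshake_event() until fw_state reaches
 * FW_STATE_STARTED.
 */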
1063static unsigned char mvumi_check_handshake(struct mvumi_hba *mhba)
1064{
1065	void *regs = mhba->mmio;
1066	unsigned int tmp;
1067	unsigned long before;
1068
1069	before = jiffies;
1070	tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
1071	while ((tmp != HANDSHAKE_READYSTATE) && (tmp != HANDSHAKE_DONESTATE)) {
1072		if (tmp != HANDSHAKE_READYSTATE)
1073			iowrite32(DRBL_MU_RESET,
1074					regs + CPU_PCIEA_TO_ARM_DRBL_REG);
1075		if (time_after(jiffies, before + FW_MAX_DELAY * HZ)) {
1076			dev_err(&mhba->pdev->dev,
1077				"invalid signature [0x%x].\n", tmp);
1078			return -1;
1079		}
1080		usleep_range(1000, 2000);
1081		rmb();
1082		tmp = ioread32(regs + CPU_ARM_TO_PCIEA_MSG1);
1083	}
1084
1085	mhba->fw_state = FW_STATE_STARTING;
1086	dev_dbg(&mhba->pdev->dev, "start firmware handshake...\n");
1087	do {
1088		if (mvumi_handshake_event(mhba)) {
1089			dev_err(&mhba->pdev->dev,
1090					"handshake failed at state 0x%x.\n",
1091						mhba->fw_state);
1092			return -1;
1093		}
1094	} while (mhba->fw_state != FW_STATE_STARTED);
1095
1096	dev_dbg(&mhba->pdev->dev, "firmware handshake done\n");
1097
1098	return 0;
1099}
1100
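/**
 * mvumi_start -	Bring the FW to the started state
 * @mhba:		Adapter soft state
 *
 * Clears the FW doorbell, unmasks the doorbell interrupt towards the
 * host and runs the full handshake sequence.
 */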
1101static unsigned char mvumi_start(struct mvumi_hba *mhba)
1102{
1103	void *regs = mhba->mmio;
1104	unsigned int tmp;
1105	/* clear Door bell */
1106	tmp = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1107	iowrite32(tmp, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1108
1109	iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
1110	tmp = ioread32(regs + CPU_ENPOINTA_MASK_REG) | INT_MAP_DL_CPU2PCIEA;
1111	iowrite32(tmp, regs + CPU_ENPOINTA_MASK_REG);
1112	if (mvumi_check_handshake(mhba))
1113		return -1;
1114
1115	return 0;
1116}
1117
1118/**
1119 * mvumi_complete_cmd -	Completes a command
1120 * @mhba:			Adapter soft state
1121 * @cmd:			Command to be completed
1122 */
1123static void mvumi_complete_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd,
1124					struct mvumi_rsp_frame *ob_frame)
1125{
1126	struct scsi_cmnd *scmd = cmd->scmd;
1127
1128	cmd->scmd->SCp.ptr = NULL;
1129	scmd->result = ob_frame->req_status;
1130
1131	switch (ob_frame->req_status) {
1132	case SAM_STAT_GOOD:
1133		scmd->result |= DID_OK << 16;
1134		break;
1135	case SAM_STAT_BUSY:
1136		scmd->result |= DID_BUS_BUSY << 16;
1137		break;
1138	case SAM_STAT_CHECK_CONDITION:
1139		scmd->result |= (DID_OK << 16);
1140		if (ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) {
1141			memcpy(cmd->scmd->sense_buffer, ob_frame->payload,
1142				sizeof(struct mvumi_sense_data));
1143			scmd->result |=  (DRIVER_SENSE << 24);
1144		}
1145		break;
1146	default:
1147		scmd->result |= (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1148		break;
1149	}
1150
1151	if (scsi_bufflen(scmd)) {
1152		if (scsi_sg_count(scmd)) {
1153			pci_unmap_sg(mhba->pdev,
1154				scsi_sglist(scmd),
1155				scsi_sg_count(scmd),
1156				(int) scmd->sc_data_direction);
1157		} else {
1158			pci_unmap_single(mhba->pdev,
1159				scmd->SCp.dma_handle,
1160				scsi_bufflen(scmd),
1161				(int) scmd->sc_data_direction);
1162
1163			scmd->SCp.dma_handle = 0;
1164		}
1165	}
1166	cmd->scmd->scsi_done(scmd);
1167	mvumi_return_cmd(mhba, cmd);
1168}
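
/*
 * Complete an internal (driver generated) command: record its status,
 * copy back any returned sense data and wake up the waiter on the
 * adapter's internal command wait queue.
 */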
1169static void mvumi_complete_internal_cmd(struct mvumi_hba *mhba,
1170						struct mvumi_cmd *cmd,
1171					struct mvumi_rsp_frame *ob_frame)
1172{
1173	if (atomic_read(&cmd->sync_cmd)) {
1174		cmd->cmd_status = ob_frame->req_status;
1175
1176		if ((ob_frame->req_status == SAM_STAT_CHECK_CONDITION) &&
1177				(ob_frame->rsp_flag & CL_RSP_FLAG_SENSEDATA) &&
1178				cmd->data_buf) {
1179			memcpy(cmd->data_buf, ob_frame->payload,
1180					sizeof(struct mvumi_sense_data));
1181		}
1182		atomic_dec(&cmd->sync_cmd);
1183		wake_up(&mhba->int_cmd_wait_q);
1184	}
1185}
1186
1187static void mvumi_show_event(struct mvumi_hba *mhba,
1188			struct mvumi_driver_event *ptr)
1189{
1190	unsigned int i;
1191
1192	dev_warn(&mhba->pdev->dev,
1193		"Event[0x%x] id[0x%x] severity[0x%x] device id[0x%x]\n",
1194		ptr->sequence_no, ptr->event_id, ptr->severity, ptr->device_id);
1195	if (ptr->param_count) {
1196		printk(KERN_WARNING "Event param(len 0x%x): ",
1197						ptr->param_count);
1198		for (i = 0; i < ptr->param_count; i++)
1199			printk(KERN_WARNING "0x%x ", ptr->params[i]);
1200
1201		printk(KERN_WARNING "\n");
1202	}
1203
1204	if (ptr->sense_data_length) {
1205		printk(KERN_WARNING "Event sense data(len 0x%x): ",
1206						ptr->sense_data_length);
1207		for (i = 0; i < ptr->sense_data_length; i++)
1208			printk(KERN_WARNING "0x%x ", ptr->sense_data[i]);
1209		printk(KERN_WARNING "\n");
1210	}
1211}
1212
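/*
 * Decode the buffer returned for an APICDB1_EVENT_GETEVENT request and
 * log every reported event through mvumi_show_event().
 */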
1213static void mvumi_notification(struct mvumi_hba *mhba, u8 msg, void *buffer)
1214{
1215	if (msg == APICDB1_EVENT_GETEVENT) {
1216		int i, count;
1217		struct mvumi_driver_event *param = NULL;
1218		struct mvumi_event_req *er = buffer;
1219		count = er->count;
1220		if (count > MAX_EVENTS_RETURNED) {
1221			dev_err(&mhba->pdev->dev, "event count[0x%x] is bigger"
1222					" than max event count[0x%x].\n",
1223					count, MAX_EVENTS_RETURNED);
1224			return;
1225		}
1226		for (i = 0; i < count; i++) {
1227			param = &er->events[i];
1228			mvumi_show_event(mhba, param);
1229		}
1230	}
1231}
1232
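/*
 * Issue a blocked internal APICDB0_EVENT command so the FW returns its
 * pending events, then hand the returned buffer to mvumi_notification()
 * for logging.
 */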
1233static int mvumi_get_event(struct mvumi_hba *mhba, unsigned char msg)
1234{
1235	struct mvumi_cmd *cmd;
1236	struct mvumi_msg_frame *frame;
1237
1238	cmd = mvumi_create_internal_cmd(mhba, 512);
1239	if (!cmd)
1240		return -1;
1241	cmd->scmd = NULL;
1242	cmd->cmd_status = REQ_STATUS_PENDING;
1243	atomic_set(&cmd->sync_cmd, 0);
1244	frame = cmd->frame;
1245	frame->device_id = 0;
1246	frame->cmd_flag = CMD_FLAG_DATA_IN;
1247	frame->req_function = CL_FUN_SCSI_CMD;
1248	frame->cdb_length = MAX_COMMAND_SIZE;
1249	frame->data_transfer_length = sizeof(struct mvumi_event_req);
1250	memset(frame->cdb, 0, MAX_COMMAND_SIZE);
1251	frame->cdb[0] = APICDB0_EVENT;
1252	frame->cdb[1] = msg;
1253	mvumi_issue_blocked_cmd(mhba, cmd);
1254
1255	if (cmd->cmd_status != SAM_STAT_GOOD)
1256		dev_err(&mhba->pdev->dev, "get event failed, status=0x%x.\n",
1257							cmd->cmd_status);
1258	else
1259		mvumi_notification(mhba, cmd->frame->cdb[1], cmd->data_buf);
1260
1261	mvumi_delete_internal_cmd(mhba, cmd);
1262	return 0;
1263}
1264
1265static void mvumi_scan_events(struct work_struct *work)
1266{
1267	struct mvumi_events_wq *mu_ev =
1268		container_of(work, struct mvumi_events_wq, work_q);
1269
1270	mvumi_get_event(mu_ev->mhba, mu_ev->event);
1271	kfree(mu_ev);
1272}
1273
1274static void mvumi_launch_events(struct mvumi_hba *mhba, u8 msg)
1275{
1276	struct mvumi_events_wq *mu_ev;
1277
1278	mu_ev = kzalloc(sizeof(*mu_ev), GFP_ATOMIC);
1279	if (mu_ev) {
1280		INIT_WORK(&mu_ev->work_q, mvumi_scan_events);
1281		mu_ev->mhba = mhba;
1282		mu_ev->event = msg;
1283		mu_ev->param = NULL;
1284		schedule_work(&mu_ev->work_q);
1285	}
1286}
1287
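/*
 * Drain the outbound completion list: for every finished frame, release
 * its tag, complete the matching SCSI or internal command, and finally
 * call fire_cmd() to push any requests still waiting for resources.
 */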
1288static void mvumi_handle_clob(struct mvumi_hba *mhba)
1289{
1290	struct mvumi_rsp_frame *ob_frame;
1291	struct mvumi_cmd *cmd;
1292	struct mvumi_ob_data *pool;
1293
1294	while (!list_empty(&mhba->free_ob_list)) {
1295		pool = list_first_entry(&mhba->free_ob_list,
1296						struct mvumi_ob_data, list);
1297		list_del_init(&pool->list);
1298		list_add_tail(&pool->list, &mhba->ob_data_list);
1299
1300		ob_frame = (struct mvumi_rsp_frame *) &pool->data[0];
1301		cmd = mhba->tag_cmd[ob_frame->tag];
1302
1303		atomic_dec(&mhba->fw_outstanding);
1304		mhba->tag_cmd[ob_frame->tag] = 0;
1305		tag_release_one(mhba, &mhba->tag_pool, ob_frame->tag);
1306		if (cmd->scmd)
1307			mvumi_complete_cmd(mhba, cmd, ob_frame);
1308		else
1309			mvumi_complete_internal_cmd(mhba, cmd, ob_frame);
1310	}
1311	mhba->instancet->fire_cmd(mhba, NULL);
1312}
1313
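/*
 * Interrupt handler.  Under the host lock, clear_intr() acknowledges
 * and latches the interrupt causes (IRQ_NONE if nothing was pending).
 * Doorbell interrupts may re-enter the handshake or schedule event
 * retrieval, outbound list interrupts harvest completed entries, and
 * once the FW is started the completions are processed via
 * mvumi_handle_clob().
 */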
1314static irqreturn_t mvumi_isr_handler(int irq, void *devp)
1315{
1316	struct mvumi_hba *mhba = (struct mvumi_hba *) devp;
1317	unsigned long flags;
1318
1319	spin_lock_irqsave(mhba->shost->host_lock, flags);
1320	if (unlikely(mhba->instancet->clear_intr(mhba) || !mhba->global_isr)) {
1321		spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1322		return IRQ_NONE;
1323	}
1324
1325	if (mhba->global_isr & INT_MAP_DL_CPU2PCIEA) {
1326		if (mhba->isr_status & DRBL_HANDSHAKE_ISR) {
1327			dev_warn(&mhba->pdev->dev, "enter handshake again!\n");
1328			mvumi_handshake(mhba);
1329		}
1330		if (mhba->isr_status & DRBL_EVENT_NOTIFY)
1331			mvumi_launch_events(mhba, APICDB1_EVENT_GETEVENT);
1332	}
1333
1334	if (mhba->global_isr & INT_MAP_COMAOUT)
1335		mvumi_receive_ob_list_entry(mhba);
1336
1337	mhba->global_isr = 0;
1338	mhba->isr_status = 0;
1339	if (mhba->fw_state == FW_STATE_STARTED)
1340		mvumi_handle_clob(mhba);
1341	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1342	return IRQ_HANDLED;
1343}
1344
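/*
 * Copy one command frame into the next free inbound list entry: take a
 * tag from the tag pool, stamp a sequential request id, remember the
 * command in tag_cmd[] and copy the frame header plus its SG entries.
 * Returns NO_RESOURCE when the FW is not started or no tag/inbound
 * entry is available.
 */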
1345static enum mvumi_qc_result mvumi_send_command(struct mvumi_hba *mhba,
1346						struct mvumi_cmd *cmd)
1347{
1348	void *ib_entry;
1349	struct mvumi_msg_frame *ib_frame;
1350	unsigned int frame_len;
1351
1352	ib_frame = cmd->frame;
1353	if (unlikely(mhba->fw_state != FW_STATE_STARTED)) {
1354		dev_dbg(&mhba->pdev->dev, "firmware not ready.\n");
1355		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1356	}
1357	if (tag_is_empty(&mhba->tag_pool)) {
1358		dev_dbg(&mhba->pdev->dev, "no free tag.\n");
1359		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1360	}
1361	if (mvumi_get_ib_list_entry(mhba, &ib_entry))
1362		return MV_QUEUE_COMMAND_RESULT_NO_RESOURCE;
1363
1364	cmd->frame->tag = tag_get_one(mhba, &mhba->tag_pool);
1365	cmd->frame->request_id = mhba->io_seq++;
1366	cmd->request_id = cmd->frame->request_id;
1367	mhba->tag_cmd[cmd->frame->tag] = cmd;
1368	frame_len = sizeof(*ib_frame) - 4 +
1369				ib_frame->sg_counts * sizeof(struct mvumi_sgl);
1370	memcpy(ib_entry, ib_frame, frame_len);
1371	return MV_QUEUE_COMMAND_RESULT_SENT;
1372}
1373
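/*
 * Queue the new command (if any) on the waiting list, then submit as
 * many waiting commands as the tag pool and inbound list allow, and
 * notify the FW once for the whole batch via mvumi_send_ib_list_entry().
 */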
1374static void mvumi_fire_cmd(struct mvumi_hba *mhba, struct mvumi_cmd *cmd)
1375{
1376	unsigned short num_of_cl_sent = 0;
1377	enum mvumi_qc_result result;
1378
1379	if (cmd)
1380		list_add_tail(&cmd->queue_pointer, &mhba->waiting_req_list);
1381
1382	while (!list_empty(&mhba->waiting_req_list)) {
1383		cmd = list_first_entry(&mhba->waiting_req_list,
1384					 struct mvumi_cmd, queue_pointer);
1385		list_del_init(&cmd->queue_pointer);
1386		result = mvumi_send_command(mhba, cmd);
1387		switch (result) {
1388		case MV_QUEUE_COMMAND_RESULT_SENT:
1389			num_of_cl_sent++;
1390			break;
1391		case MV_QUEUE_COMMAND_RESULT_NO_RESOURCE:
1392			list_add(&cmd->queue_pointer, &mhba->waiting_req_list);
1393			if (num_of_cl_sent > 0)
1394				mvumi_send_ib_list_entry(mhba);
1395
1396			return;
1397		}
1398	}
1399	if (num_of_cl_sent > 0)
1400		mvumi_send_ib_list_entry(mhba);
1401}
1402
1403/**
1404 * mvumi_enable_intr -	Enables interrupts
1405 * @regs:			FW register set
1406 */
1407static void mvumi_enable_intr(void *regs)
1408{
1409	unsigned int mask;
1410
1411	iowrite32(0x3FFFFFFF, regs + CPU_ARM_TO_PCIEA_MASK_REG);
1412	mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
1413	mask |= INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR;
1414	iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
1415}
1416
1417/**
1418 * mvumi_disable_intr -	Disables interrupts
1419 * @regs:			FW register set
1420 */
1421static void mvumi_disable_intr(void *regs)
1422{
1423	unsigned int mask;
1424
1425	iowrite32(0, regs + CPU_ARM_TO_PCIEA_MASK_REG);
1426	mask = ioread32(regs + CPU_ENPOINTA_MASK_REG);
1427	mask &= ~(INT_MAP_DL_CPU2PCIEA | INT_MAP_COMAOUT | INT_MAP_COMAERR);
1428	iowrite32(mask, regs + CPU_ENPOINTA_MASK_REG);
1429}
1430
1431static int mvumi_clear_intr(void *extend)
1432{
1433	struct mvumi_hba *mhba = (struct mvumi_hba *) extend;
1434	unsigned int status, isr_status = 0, tmp = 0;
1435	void *regs = mhba->mmio;
1436
1437	status = ioread32(regs + CPU_MAIN_INT_CAUSE_REG);
1438	if (!(status & INT_MAP_MU) || status == 0xFFFFFFFF)
1439		return 1;
1440	if (unlikely(status & INT_MAP_COMAERR)) {
1441		tmp = ioread32(regs + CLA_ISR_CAUSE);
1442		if (tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ))
1443			iowrite32(tmp & (CLIC_IN_ERR_IRQ | CLIC_OUT_ERR_IRQ),
1444					regs + CLA_ISR_CAUSE);
1445		status ^= INT_MAP_COMAERR;
1446		/* inbound or outbound parity error, command will timeout */
1447	}
1448	if (status & INT_MAP_COMAOUT) {
1449		tmp = ioread32(regs + CLA_ISR_CAUSE);
1450		if (tmp & CLIC_OUT_IRQ)
1451			iowrite32(tmp & CLIC_OUT_IRQ, regs + CLA_ISR_CAUSE);
1452	}
1453	if (status & INT_MAP_DL_CPU2PCIEA) {
1454		isr_status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1455		if (isr_status)
1456			iowrite32(isr_status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1457	}
1458
1459	mhba->global_isr = status;
1460	mhba->isr_status = isr_status;
1461
1462	return 0;
1463}
1464
1465/**
1466 * mvumi_read_fw_status_reg - returns the current FW status value
1467 * @regs:			FW register set
1468 */
1469static unsigned int mvumi_read_fw_status_reg(void *regs)
1470{
1471	unsigned int status;
1472
1473	status = ioread32(regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1474	if (status)
1475		iowrite32(status, regs + CPU_ARM_TO_PCIEA_DRBL_REG);
1476	return status;
1477}
1478
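/*
 * Per-controller operations.  Only PCI_DEVICE_ID_MARVELL_MV9143 selects
 * this template in mvumi_init_fw(); any other device id fails the probe
 * with -EINVAL.
 */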
1479static struct mvumi_instance_template mvumi_instance_template = {
1480	.fire_cmd = mvumi_fire_cmd,
1481	.enable_intr = mvumi_enable_intr,
1482	.disable_intr = mvumi_disable_intr,
1483	.clear_intr = mvumi_clear_intr,
1484	.read_fw_status_reg = mvumi_read_fw_status_reg,
1485};
1486
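/*
 * Record a discovered target in the per-adapter bitmap.  target_map is
 * an array of unsigned char, so for example SCSI id 10 sets bit
 * (10 % 8) == 2 in byte (10 / 8) == 1.
 */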
1487static int mvumi_slave_configure(struct scsi_device *sdev)
1488{
1489	struct mvumi_hba *mhba;
1490	unsigned char bitcount = sizeof(unsigned char) * 8;
1491
1492	mhba = (struct mvumi_hba *) sdev->host->hostdata;
1493	if (sdev->id >= mhba->max_target_id)
1494		return -EINVAL;
1495
1496	mhba->target_map[sdev->id / bitcount] |= (1 << (sdev->id % bitcount));
1497	return 0;
1498}
1499
1500/**
1501 * mvumi_build_frame -	Prepares a direct cdb (DCDB) command
1502 * @mhba:		Adapter soft state
1503 * @scmd:		SCSI command
1504 * @cmd:		Command in which the frame will be prepared
1505 *
1506 * This function prepares CDB commands. These are typically pass-through
1507 * commands to the devices.
1508 */
1509static unsigned char mvumi_build_frame(struct mvumi_hba *mhba,
1510				struct scsi_cmnd *scmd, struct mvumi_cmd *cmd)
1511{
1512	struct mvumi_msg_frame *pframe;
1513
1514	cmd->scmd = scmd;
1515	cmd->cmd_status = REQ_STATUS_PENDING;
1516	pframe = cmd->frame;
1517	pframe->device_id = ((unsigned short) scmd->device->id) |
1518				(((unsigned short) scmd->device->lun) << 8);
1519	pframe->cmd_flag = 0;
1520
1521	switch (scmd->sc_data_direction) {
1522	case DMA_NONE:
1523		pframe->cmd_flag |= CMD_FLAG_NON_DATA;
1524		break;
1525	case DMA_FROM_DEVICE:
1526		pframe->cmd_flag |= CMD_FLAG_DATA_IN;
1527		break;
1528	case DMA_TO_DEVICE:
1529		pframe->cmd_flag |= CMD_FLAG_DATA_OUT;
1530		break;
1531	case DMA_BIDIRECTIONAL:
1532	default:
1533		dev_warn(&mhba->pdev->dev, "unexpected data direction[%d] "
1534			"cmd[0x%x]\n", scmd->sc_data_direction, scmd->cmnd[0]);
1535		goto error;
1536	}
1537
1538	pframe->cdb_length = scmd->cmd_len;
1539	memcpy(pframe->cdb, scmd->cmnd, pframe->cdb_length);
1540	pframe->req_function = CL_FUN_SCSI_CMD;
1541	if (scsi_bufflen(scmd)) {
1542		if (mvumi_make_sgl(mhba, scmd, &pframe->payload[0],
1543			&pframe->sg_counts))
1544			goto error;
1545
1546		pframe->data_transfer_length = scsi_bufflen(scmd);
1547	} else {
1548		pframe->sg_counts = 0;
1549		pframe->data_transfer_length = 0;
1550	}
1551	return 0;
1552
1553error:
1554	scmd->result = (DID_OK << 16) | (DRIVER_SENSE << 24) |
1555		SAM_STAT_CHECK_CONDITION;
1556	scsi_build_sense_buffer(0, scmd->sense_buffer, ILLEGAL_REQUEST, 0x24,
1557									0);
1558	return -1;
1559}
1560
1561/**
1562 * mvumi_queue_command -	Queue entry point
1563 * @shost:			SCSI host to which the command is queued
1564 * @scmd:			SCSI command to be queued
1565 */
1566static int mvumi_queue_command(struct Scsi_Host *shost,
1567					struct scsi_cmnd *scmd)
1568{
1569	struct mvumi_cmd *cmd;
1570	struct mvumi_hba *mhba;
1571	unsigned long irq_flags;
1572
1573	spin_lock_irqsave(shost->host_lock, irq_flags);
1574	scsi_cmd_get_serial(shost, scmd);
1575
1576	mhba = (struct mvumi_hba *) shost->hostdata;
1577	scmd->result = 0;
1578	cmd = mvumi_get_cmd(mhba);
1579	if (unlikely(!cmd)) {
1580		spin_unlock_irqrestore(shost->host_lock, irq_flags);
1581		return SCSI_MLQUEUE_HOST_BUSY;
1582	}
1583
1584	if (unlikely(mvumi_build_frame(mhba, scmd, cmd)))
1585		goto out_return_cmd;
1586
1587	cmd->scmd = scmd;
1588	scmd->SCp.ptr = (char *) cmd;
1589	mhba->instancet->fire_cmd(mhba, cmd);
1590	spin_unlock_irqrestore(shost->host_lock, irq_flags);
1591	return 0;
1592
1593out_return_cmd:
1594	mvumi_return_cmd(mhba, cmd);
1595	scmd->scsi_done(scmd);
1596	spin_unlock_irqrestore(shost->host_lock, irq_flags);
1597	return 0;
1598}
1599
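/*
 * eh_timed_out handler: the timed-out command is torn down in place
 * (tag released, DMA unmapped, mvumi_cmd returned to the pool) and
 * BLK_EH_NOT_HANDLED is returned so normal SCSI error handling can
 * proceed.
 */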
1600static enum blk_eh_timer_return mvumi_timed_out(struct scsi_cmnd *scmd)
1601{
1602	struct mvumi_cmd *cmd = (struct mvumi_cmd *) scmd->SCp.ptr;
1603	struct Scsi_Host *host = scmd->device->host;
1604	struct mvumi_hba *mhba = shost_priv(host);
1605	unsigned long flags;
1606
1607	spin_lock_irqsave(mhba->shost->host_lock, flags);
1608
1609	if (mhba->tag_cmd[cmd->frame->tag]) {
1610		mhba->tag_cmd[cmd->frame->tag] = 0;
1611		tag_release_one(mhba, &mhba->tag_pool, cmd->frame->tag);
1612	}
1613	if (!list_empty(&cmd->queue_pointer))
1614		list_del_init(&cmd->queue_pointer);
1615	else
1616		atomic_dec(&mhba->fw_outstanding);
1617
1618	scmd->result = (DRIVER_INVALID << 24) | (DID_ABORT << 16);
1619	scmd->SCp.ptr = NULL;
1620	if (scsi_bufflen(scmd)) {
1621		if (scsi_sg_count(scmd)) {
1622			pci_unmap_sg(mhba->pdev,
1623				scsi_sglist(scmd),
1624				scsi_sg_count(scmd),
1625				(int)scmd->sc_data_direction);
1626		} else {
1627			pci_unmap_single(mhba->pdev,
1628				scmd->SCp.dma_handle,
1629				scsi_bufflen(scmd),
1630				(int)scmd->sc_data_direction);
1631
1632			scmd->SCp.dma_handle = 0;
1633		}
1634	}
1635	mvumi_return_cmd(mhba, cmd);
1636	spin_unlock_irqrestore(mhba->shost->host_lock, flags);
1637
1638	return BLK_EH_NOT_HANDLED;
1639}
1640
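/*
 * Fake a disk geometry for the BIOS: 64 heads / 32 sectors per track,
 * switched to 255/63 for capacities of 0x200000 sectors (1 GiB with
 * 512-byte sectors) or more, with the cylinder count derived from the
 * capacity.
 */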
1641static int
1642mvumi_bios_param(struct scsi_device *sdev, struct block_device *bdev,
1643			sector_t capacity, int geom[])
1644{
1645	int heads, sectors;
1646	sector_t cylinders;
1647	unsigned long tmp;
1648
1649	heads = 64;
1650	sectors = 32;
1651	tmp = heads * sectors;
1652	cylinders = capacity;
1653	sector_div(cylinders, tmp);
1654
1655	if (capacity >= 0x200000) {
1656		heads = 255;
1657		sectors = 63;
1658		tmp = heads * sectors;
1659		cylinders = capacity;
1660		sector_div(cylinders, tmp);
1661	}
1662	geom[0] = heads;
1663	geom[1] = sectors;
1664	geom[2] = cylinders;
1665
1666	return 0;
1667}
1668
1669static struct scsi_host_template mvumi_template = {
1670
1671	.module = THIS_MODULE,
1672	.name = "Marvell Storage Controller",
1673	.slave_configure = mvumi_slave_configure,
1674	.queuecommand = mvumi_queue_command,
1675	.eh_host_reset_handler = mvumi_host_reset,
1676	.bios_param = mvumi_bios_param,
1677	.this_id = -1,
1678};
1679
1680static struct scsi_transport_template mvumi_transport_template = {
1681	.eh_timed_out = mvumi_timed_out,
1682};
1683
1684/**
1685 * mvumi_init_fw -	Initializes the FW
1686 * @mhba:		Adapter soft state
1687 *
1688 * This is the main function for initializing firmware.
1689 */
1690static int mvumi_init_fw(struct mvumi_hba *mhba)
1691{
1692	int ret = 0;
1693
1694	if (pci_request_regions(mhba->pdev, MV_DRIVER_NAME)) {
1695		dev_err(&mhba->pdev->dev, "IO memory region busy!\n");
1696		return -EBUSY;
1697	}
1698	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
1699	if (ret)
1700		goto fail_ioremap;
1701
1702	mhba->mmio = mhba->base_addr[0];
1703
1704	switch (mhba->pdev->device) {
1705	case PCI_DEVICE_ID_MARVELL_MV9143:
1706		mhba->instancet = &mvumi_instance_template;
1707		mhba->io_seq = 0;
1708		mhba->max_sge = MVUMI_MAX_SG_ENTRY;
1709		mhba->request_id_enabled = 1;
1710		break;
1711	default:
1712		dev_err(&mhba->pdev->dev, "device 0x%x not supported!\n",
1713							mhba->pdev->device);
1714		mhba->instancet = NULL;
1715		ret = -EINVAL;
1716		goto fail_alloc_mem;
1717	}
1718	dev_dbg(&mhba->pdev->dev, "device id : %04X is found.\n",
1719							mhba->pdev->device);
1720
1721	mhba->handshake_page = kzalloc(HSP_MAX_SIZE, GFP_KERNEL);
1722	if (!mhba->handshake_page) {
1723		dev_err(&mhba->pdev->dev,
1724			"failed to allocate memory for handshake\n");
1725		ret = -ENOMEM;
1726		goto fail_alloc_mem;
1727	}
1728	mhba->handshake_page_phys = virt_to_phys(mhba->handshake_page);
1729
1730	if (mvumi_start(mhba)) {
1731		ret = -EINVAL;
1732		goto fail_ready_state;
1733	}
1734	ret = mvumi_alloc_cmds(mhba);
1735	if (ret)
1736		goto fail_ready_state;
1737
1738	return 0;
1739
1740fail_ready_state:
1741	mvumi_release_mem_resource(mhba);
1742	kfree(mhba->handshake_page);
1743fail_alloc_mem:
1744	mvumi_unmap_pci_addr(mhba->pdev, mhba->base_addr);
1745fail_ioremap:
1746	pci_release_regions(mhba->pdev);
1747
1748	return ret;
1749}
1750
1751/**
1752 * mvumi_io_attach -	Attaches this driver to SCSI mid-layer
1753 * @mhba:		Adapter soft state
1754 */
1755static int mvumi_io_attach(struct mvumi_hba *mhba)
1756{
1757	struct Scsi_Host *host = mhba->shost;
1758	int ret;
1759	unsigned int max_sg = (mhba->ib_max_size + 4 -
1760		sizeof(struct mvumi_msg_frame)) / sizeof(struct mvumi_sgl);
1761
1762	host->irq = mhba->pdev->irq;
1763	host->unique_id = mhba->unique_id;
1764	host->can_queue = (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
1765	host->sg_tablesize = mhba->max_sge > max_sg ? max_sg : mhba->max_sge;
1766	host->max_sectors = mhba->max_transfer_size / 512;
1767	host->cmd_per_lun =  (mhba->max_io - 1) ? (mhba->max_io - 1) : 1;
1768	host->max_id = mhba->max_target_id;
1769	host->max_cmd_len = MAX_COMMAND_SIZE;
1770	host->transportt = &mvumi_transport_template;
1771
1772	ret = scsi_add_host(host, &mhba->pdev->dev);
1773	if (ret) {
1774		dev_err(&mhba->pdev->dev, "scsi_add_host failed\n");
1775		return ret;
1776	}
1777	mhba->fw_flag |= MVUMI_FW_ATTACH;
1778	scsi_scan_host(host);
1779
1780	return 0;
1781}
1782
1783/**
1784 * mvumi_probe_one -	PCI hotplug entry point
1785 * @pdev:		PCI device structure
1786 * @id:			PCI ids of supported hotplugged adapter
1787 */
1788static int __devinit mvumi_probe_one(struct pci_dev *pdev,
1789					const struct pci_device_id *id)
1790{
1791	struct Scsi_Host *host;
1792	struct mvumi_hba *mhba;
1793	int ret;
1794
1795	dev_dbg(&pdev->dev, " %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
1796			pdev->vendor, pdev->device, pdev->subsystem_vendor,
1797			pdev->subsystem_device);
1798
1799	ret = pci_enable_device(pdev);
1800	if (ret)
1801		return ret;
1802
1803	pci_set_master(pdev);
1804
1805	if (IS_DMA64) {
1806		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1807		if (ret) {
1808			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1809			if (ret)
1810				goto fail_set_dma_mask;
1811		}
1812	} else {
1813		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1814		if (ret)
1815			goto fail_set_dma_mask;
1816	}
1817
1818	host = scsi_host_alloc(&mvumi_template, sizeof(*mhba));
1819	if (!host) {
1820		dev_err(&pdev->dev, "scsi_host_alloc failed\n");
1821		ret = -ENOMEM;
1822		goto fail_alloc_instance;
1823	}
1824	mhba = shost_priv(host);
1825
1826	INIT_LIST_HEAD(&mhba->cmd_pool);
1827	INIT_LIST_HEAD(&mhba->ob_data_list);
1828	INIT_LIST_HEAD(&mhba->free_ob_list);
1829	INIT_LIST_HEAD(&mhba->res_list);
1830	INIT_LIST_HEAD(&mhba->waiting_req_list);
1831	atomic_set(&mhba->fw_outstanding, 0);
1832	init_waitqueue_head(&mhba->int_cmd_wait_q);
1833
1834	mhba->pdev = pdev;
1835	mhba->shost = host;
1836	mhba->unique_id = pdev->bus->number << 8 | pdev->devfn;
1837
1838	ret = mvumi_init_fw(mhba);
1839	if (ret)
1840		goto fail_init_fw;
1841
1842	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
1843				"mvumi", mhba);
1844	if (ret) {
1845		dev_err(&pdev->dev, "failed to register IRQ\n");
1846		goto fail_init_irq;
1847	}
1848	mhba->instancet->enable_intr(mhba->mmio);
1849	pci_set_drvdata(pdev, mhba);
1850
1851	ret = mvumi_io_attach(mhba);
1852	if (ret)
1853		goto fail_io_attach;
1854	dev_dbg(&pdev->dev, "mvumi driver probed successfully.\n");
1855
1856	return 0;
1857
1858fail_io_attach:
1859	pci_set_drvdata(pdev, NULL);
1860	mhba->instancet->disable_intr(mhba->mmio);
1861	free_irq(mhba->pdev->irq, mhba);
1862fail_init_irq:
1863	mvumi_release_fw(mhba);
1864fail_init_fw:
1865	scsi_host_put(host);
1866
1867fail_alloc_instance:
1868fail_set_dma_mask:
1869	pci_disable_device(pdev);
1870
1871	return ret;
1872}
1873
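/*
 * PCI remove callback: unregister the SCSI host, flush the controller
 * cache, then release the IRQ, FW resources and the PCI device.
 */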
1874static void mvumi_detach_one(struct pci_dev *pdev)
1875{
1876	struct Scsi_Host *host;
1877	struct mvumi_hba *mhba;
1878
1879	mhba = pci_get_drvdata(pdev);
1880	host = mhba->shost;
1881	scsi_remove_host(mhba->shost);
1882	mvumi_flush_cache(mhba);
1883
1884	mhba->instancet->disable_intr(mhba->mmio);
1885	free_irq(mhba->pdev->irq, mhba);
1886	mvumi_release_fw(mhba);
1887	scsi_host_put(host);
1888	pci_set_drvdata(pdev, NULL);
1889	pci_disable_device(pdev);
1890	dev_dbg(&pdev->dev, "driver is removed!\n");
1891}
1892
1893/**
1894 * mvumi_shutdown -	Shutdown entry point
1895 * @device:		Generic device structure
1896 */
1897static void mvumi_shutdown(struct pci_dev *pdev)
1898{
1899	struct mvumi_hba *mhba = pci_get_drvdata(pdev);
1900
1901	mvumi_flush_cache(mhba);
1902}
1903
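/*
 * Power-management suspend: flush the controller cache, quiesce and
 * free the IRQ, unmap the BARs and put the device into the requested
 * low-power state.
 */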
1904static int mvumi_suspend(struct pci_dev *pdev, pm_message_t state)
1905{
1906	struct mvumi_hba *mhba = NULL;
1907
1908	mhba = pci_get_drvdata(pdev);
1909	mvumi_flush_cache(mhba);
1910
1911	pci_set_drvdata(pdev, mhba);
1912	mhba->instancet->disable_intr(mhba->mmio);
1913	free_irq(mhba->pdev->irq, mhba);
1914	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
1915	pci_release_regions(pdev);
1916	pci_save_state(pdev);
1917	pci_disable_device(pdev);
1918	pci_set_power_state(pdev, pci_choose_state(pdev, state));
1919
1920	return 0;
1921}
1922
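/*
 * Power-management resume: re-enable the device, restore the DMA masks
 * and BAR mappings, reset the message unit, rerun the FW handshake and
 * re-register the interrupt handler.
 */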
1923static int mvumi_resume(struct pci_dev *pdev)
1924{
1925	int ret;
1926	struct mvumi_hba *mhba = NULL;
1927
1928	mhba = pci_get_drvdata(pdev);
1929
1930	pci_set_power_state(pdev, PCI_D0);
1931	pci_enable_wake(pdev, PCI_D0, 0);
1932	pci_restore_state(pdev);
1933
1934	ret = pci_enable_device(pdev);
1935	if (ret) {
1936		dev_err(&pdev->dev, "enable device failed\n");
1937		return ret;
1938	}
1939	pci_set_master(pdev);
1940	if (IS_DMA64) {
1941		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
1942		if (ret) {
1943			ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1944			if (ret)
1945				goto fail;
1946		}
1947	} else {
1948		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1949		if (ret)
1950			goto fail;
1951	}
1952	ret = pci_request_regions(mhba->pdev, MV_DRIVER_NAME);
1953	if (ret)
1954		goto fail;
1955	ret = mvumi_map_pci_addr(mhba->pdev, mhba->base_addr);
1956	if (ret)
1957		goto release_regions;
1958
1959	mhba->mmio = mhba->base_addr[0];
1960	mvumi_reset(mhba->mmio);
1961
1962	if (mvumi_start(mhba)) {
1963		ret = -EINVAL;
1964		goto unmap_pci_addr;
1965	}
1966
1967	ret = request_irq(mhba->pdev->irq, mvumi_isr_handler, IRQF_SHARED,
1968				"mvumi", mhba);
1969	if (ret) {
1970		dev_err(&pdev->dev, "failed to register IRQ\n");
1971		goto unmap_pci_addr;
1972	}
1973	mhba->instancet->enable_intr(mhba->mmio);
1974
1975	return 0;
1976
1977unmap_pci_addr:
1978	mvumi_unmap_pci_addr(pdev, mhba->base_addr);
1979release_regions:
1980	pci_release_regions(pdev);
1981fail:
1982	pci_disable_device(pdev);
1983
1984	return ret;
1985}
1986
1987static struct pci_driver mvumi_pci_driver = {
1988
1989	.name = MV_DRIVER_NAME,
1990	.id_table = mvumi_pci_table,
1991	.probe = mvumi_probe_one,
1992	.remove = __devexit_p(mvumi_detach_one),
1993	.shutdown = mvumi_shutdown,
1994#ifdef CONFIG_PM
1995	.suspend = mvumi_suspend,
1996	.resume = mvumi_resume,
1997#endif
1998};
1999
2000/**
2001 * mvumi_init - Driver load entry point
2002 */
2003static int __init mvumi_init(void)
2004{
2005	return pci_register_driver(&mvumi_pci_driver);
2006}
2007
2008/**
2009 * mvumi_exit - Driver unload entry point
2010 */
2011static void __exit mvumi_exit(void)
2012{
2013
2014	pci_unregister_driver(&mvumi_pci_driver);
2015}
2016
2017module_init(mvumi_init);
2018module_exit(mvumi_exit);