   1/*
   2 *
   3 *			Linux MegaRAID device driver
   4 *
   5 * Copyright (c) 2003-2004  LSI Logic Corporation.
   6 *
   7 *	   This program is free software; you can redistribute it and/or
   8 *	   modify it under the terms of the GNU General Public License
   9 *	   as published by the Free Software Foundation; either version
  10 *	   2 of the License, or (at your option) any later version.
  11 *
  12 * FILE		: megaraid_mbox.c
  13 * Version	: v2.20.5.1 (Nov 16 2006)
  14 *
  15 * Authors:
  16 * 	Atul Mukker		<Atul.Mukker@lsi.com>
  17 * 	Sreenivas Bagalkote	<Sreenivas.Bagalkote@lsi.com>
  18 * 	Manoj Jose		<Manoj.Jose@lsi.com>
  19 * 	Seokmann Ju
  20 *
  21 * List of supported controllers
  22 *
  23 * OEM	Product Name			VID	DID	SSVID	SSID
  24 * ---	------------			---	---	----	----
  25 * Dell PERC3/QC			101E	1960	1028	0471
  26 * Dell PERC3/DC			101E	1960	1028	0493
  27 * Dell PERC3/SC			101E	1960	1028	0475
  28 * Dell PERC3/Di			1028	1960	1028	0123
  29 * Dell PERC4/SC			1000	1960	1028	0520
  30 * Dell PERC4/DC			1000	1960	1028	0518
  31 * Dell PERC4/QC			1000	0407	1028	0531
  32 * Dell PERC4/Di			1028	000F	1028	014A
  33 * Dell PERC 4e/Si			1028	0013	1028	016c
  34 * Dell PERC 4e/Di			1028	0013	1028	016d
  35 * Dell PERC 4e/Di			1028	0013	1028	016e
  36 * Dell PERC 4e/Di			1028	0013	1028	016f
  37 * Dell PERC 4e/Di			1028	0013	1028	0170
  38 * Dell PERC 4e/DC			1000	0408	1028	0002
  39 * Dell PERC 4e/SC			1000	0408	1028	0001
  40 *
  41 *
  42 * LSI MegaRAID SCSI 320-0		1000	1960	1000	A520
  43 * LSI MegaRAID SCSI 320-1		1000	1960	1000	0520
  44 * LSI MegaRAID SCSI 320-2		1000	1960	1000	0518
  45 * LSI MegaRAID SCSI 320-0X		1000	0407	1000	0530
  46 * LSI MegaRAID SCSI 320-2X		1000	0407	1000	0532
  47 * LSI MegaRAID SCSI 320-4X		1000	0407	1000	0531
  48 * LSI MegaRAID SCSI 320-1E		1000	0408	1000	0001
  49 * LSI MegaRAID SCSI 320-2E		1000	0408	1000	0002
  50 * LSI MegaRAID SATA 150-4		1000	1960	1000	4523
  51 * LSI MegaRAID SATA 150-6		1000	1960	1000	0523
  52 * LSI MegaRAID SATA 300-4X		1000	0409	1000	3004
  53 * LSI MegaRAID SATA 300-8X		1000	0409	1000	3008
  54 *
  55 * INTEL RAID Controller SRCU42X	1000	0407	8086	0532
  56 * INTEL RAID Controller SRCS16		1000	1960	8086	0523
  57 * INTEL RAID Controller SRCU42E	1000	0408	8086	0002
  58 * INTEL RAID Controller SRCZCRX	1000	0407	8086	0530
  59 * INTEL RAID Controller SRCS28X	1000	0409	8086	3008
  60 * INTEL RAID Controller SROMBU42E	1000	0408	8086	3431
  61 * INTEL RAID Controller SROMBU42E	1000	0408	8086	3499
  62 * INTEL RAID Controller SRCU51L	1000	1960	8086	0520
  63 *
  64 * FSC	MegaRAID PCI Express ROMB	1000	0408	1734	1065
  65 *
  66 * ACER	MegaRAID ROMB-2E		1000	0408	1025	004D
  67 *
  68 * NEC	MegaRAID PCI Express ROMB	1000	0408	1033	8287
  69 *
  70 * For history of changes, see Documentation/ChangeLog.megaraid
  71 */
  72
   73#include <linux/slab.h>
   74#include "megaraid_mbox.h"
  75
  76static int megaraid_init(void);
  77static void megaraid_exit(void);
  78
  79static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *);
  80static void megaraid_detach_one(struct pci_dev *);
  81static void megaraid_mbox_shutdown(struct pci_dev *);
  82
  83static int megaraid_io_attach(adapter_t *);
  84static void megaraid_io_detach(adapter_t *);
  85
  86static int megaraid_init_mbox(adapter_t *);
  87static void megaraid_fini_mbox(adapter_t *);
  88
  89static int megaraid_alloc_cmd_packets(adapter_t *);
  90static void megaraid_free_cmd_packets(adapter_t *);
  91
  92static int megaraid_mbox_setup_dma_pools(adapter_t *);
  93static void megaraid_mbox_teardown_dma_pools(adapter_t *);
  94
  95static int megaraid_sysfs_alloc_resources(adapter_t *);
  96static void megaraid_sysfs_free_resources(adapter_t *);
  97
  98static int megaraid_abort_handler(struct scsi_cmnd *);
  99static int megaraid_reset_handler(struct scsi_cmnd *);
 100
 101static int mbox_post_sync_cmd(adapter_t *, uint8_t []);
 102static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []);
 103static int megaraid_busywait_mbox(mraid_device_t *);
 104static int megaraid_mbox_product_info(adapter_t *);
 105static int megaraid_mbox_extended_cdb(adapter_t *);
 106static int megaraid_mbox_support_ha(adapter_t *, uint16_t *);
 107static int megaraid_mbox_support_random_del(adapter_t *);
 108static int megaraid_mbox_get_max_sg(adapter_t *);
 109static void megaraid_mbox_enum_raid_scsi(adapter_t *);
 110static void megaraid_mbox_flush_cache(adapter_t *);
 111static int megaraid_mbox_fire_sync_cmd(adapter_t *);
 112
 113static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
 114static void megaraid_mbox_setup_device_map(adapter_t *);
 115
 116static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
 117static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
 118static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
 119static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
 120		struct scsi_cmnd *);
 121static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *,
 122		struct scsi_cmnd *);
 123
 124static irqreturn_t megaraid_isr(int, void *);
 125
 126static void megaraid_mbox_dpc(unsigned long);
 127
 128static ssize_t megaraid_sysfs_show_app_hndl(struct device *, struct device_attribute *attr, char *);
 129static ssize_t megaraid_sysfs_show_ldnum(struct device *, struct device_attribute *attr, char *);
 130
 131static int megaraid_cmm_register(adapter_t *);
 132static int megaraid_cmm_unregister(adapter_t *);
 133static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t);
 134static int megaraid_mbox_mm_command(adapter_t *, uioc_t *);
 135static void megaraid_mbox_mm_done(adapter_t *, scb_t *);
 136static int gather_hbainfo(adapter_t *, mraid_hba_info_t *);
 137static int wait_till_fw_empty(adapter_t *);
 138
 139
 140
 141MODULE_AUTHOR("megaraidlinux@lsi.com");
 142MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
 143MODULE_LICENSE("GPL");
 144MODULE_VERSION(MEGARAID_VERSION);
 145
 146/*
  147 * ### module parameters for driver ###
 148 */
 149
 150/*
  151 * Set to enable the driver to expose unconfigured disks to the kernel
 152 */
 153static int megaraid_expose_unconf_disks = 0;
 154module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
 155MODULE_PARM_DESC(unconf_disks,
 156	"Set to expose unconfigured disks to kernel (default=0)");
 157
 158/*
 159 * driver wait time if the adapter's mailbox is busy
 160 */
 161static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
 162module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
 163MODULE_PARM_DESC(busy_wait,
 164	"Max wait for mailbox in microseconds if busy (default=10)");
 165
 166/*
 167 * number of sectors per IO command
 168 */
 169static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
 170module_param_named(max_sectors, megaraid_max_sectors, int, 0);
 171MODULE_PARM_DESC(max_sectors,
 172	"Maximum number of sectors per IO command (default=128)");
 173
 174/*
 175 * number of commands per logical unit
 176 */
 177static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
 178module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0);
 179MODULE_PARM_DESC(cmd_per_lun,
 180	"Maximum number of commands per logical unit (default=64)");
 181
 182
 183/*
 184 * Fast driver load option, skip scanning for physical devices during load.
  185 * This results in non-disk devices being skipped during driver load
  186 * time. They can be added later, though, using /proc/scsi/scsi
 187 */
 188static unsigned int megaraid_fast_load = 0;
 189module_param_named(fast_load, megaraid_fast_load, int, 0);
 190MODULE_PARM_DESC(fast_load,
 191	"Faster loading of the driver, skips physical devices! (default=0)");
 192
 193
 194/*
 195 * mraid_debug level - threshold for amount of information to be displayed by
  196 * the driver. This level can be changed through module parameters, ioctl or
 197 * sysfs/proc interface. By default, print the announcement messages only.
 198 */
 199int mraid_debug_level = CL_ANN;
 200module_param_named(debug_level, mraid_debug_level, int, 0);
 201MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
 202
 203/*
 204 * ### global data ###
 205 */
 206static uint8_t megaraid_mbox_version[8] =
 207	{ 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
 208
 209
 210/*
 211 * PCI table for all supported controllers.
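      * Each entry lists the vendor ID, device ID, subsystem vendor ID and
      * subsystem device ID, in that order.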
 212 */
 213static struct pci_device_id pci_id_table_g[] =  {
 214	{
 215		PCI_VENDOR_ID_DELL,
 216		PCI_DEVICE_ID_PERC4_DI_DISCOVERY,
 217		PCI_VENDOR_ID_DELL,
 218		PCI_SUBSYS_ID_PERC4_DI_DISCOVERY,
 219	},
 220	{
 221		PCI_VENDOR_ID_LSI_LOGIC,
 222		PCI_DEVICE_ID_PERC4_SC,
 223		PCI_VENDOR_ID_DELL,
 224		PCI_SUBSYS_ID_PERC4_SC,
 225	},
 226	{
 227		PCI_VENDOR_ID_LSI_LOGIC,
 228		PCI_DEVICE_ID_PERC4_DC,
 229		PCI_VENDOR_ID_DELL,
 230		PCI_SUBSYS_ID_PERC4_DC,
 231	},
 232	{
 233		PCI_VENDOR_ID_LSI_LOGIC,
 234		PCI_DEVICE_ID_VERDE,
 235		PCI_ANY_ID,
 236		PCI_ANY_ID,
 237	},
 238	{
 239		PCI_VENDOR_ID_DELL,
 240		PCI_DEVICE_ID_PERC4_DI_EVERGLADES,
 241		PCI_VENDOR_ID_DELL,
 242		PCI_SUBSYS_ID_PERC4_DI_EVERGLADES,
 243	},
 244	{
 245		PCI_VENDOR_ID_DELL,
 246		PCI_DEVICE_ID_PERC4E_SI_BIGBEND,
 247		PCI_VENDOR_ID_DELL,
 248		PCI_SUBSYS_ID_PERC4E_SI_BIGBEND,
 249	},
 250	{
 251		PCI_VENDOR_ID_DELL,
 252		PCI_DEVICE_ID_PERC4E_DI_KOBUK,
 253		PCI_VENDOR_ID_DELL,
 254		PCI_SUBSYS_ID_PERC4E_DI_KOBUK,
 255	},
 256	{
 257		PCI_VENDOR_ID_DELL,
 258		PCI_DEVICE_ID_PERC4E_DI_CORVETTE,
 259		PCI_VENDOR_ID_DELL,
 260		PCI_SUBSYS_ID_PERC4E_DI_CORVETTE,
 261	},
 262	{
 263		PCI_VENDOR_ID_DELL,
 264		PCI_DEVICE_ID_PERC4E_DI_EXPEDITION,
 265		PCI_VENDOR_ID_DELL,
 266		PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION,
 267	},
 268	{
 269		PCI_VENDOR_ID_DELL,
 270		PCI_DEVICE_ID_PERC4E_DI_GUADALUPE,
 271		PCI_VENDOR_ID_DELL,
 272		PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE,
 273	},
 274	{
 275		PCI_VENDOR_ID_LSI_LOGIC,
 276		PCI_DEVICE_ID_DOBSON,
 277		PCI_ANY_ID,
 278		PCI_ANY_ID,
 279	},
 280	{
 281		PCI_VENDOR_ID_AMI,
 282		PCI_DEVICE_ID_AMI_MEGARAID3,
 283		PCI_ANY_ID,
 284		PCI_ANY_ID,
 285	},
 286	{
 287		PCI_VENDOR_ID_LSI_LOGIC,
 288		PCI_DEVICE_ID_AMI_MEGARAID3,
 289		PCI_ANY_ID,
 290		PCI_ANY_ID,
 291	},
 292	{
 293		PCI_VENDOR_ID_LSI_LOGIC,
 294		PCI_DEVICE_ID_LINDSAY,
 295		PCI_ANY_ID,
 296		PCI_ANY_ID,
 297	},
 298	{0}	/* Terminating entry */
 299};
 300MODULE_DEVICE_TABLE(pci, pci_id_table_g);
 301
 302
 303static struct pci_driver megaraid_pci_driver = {
 304	.name		= "megaraid",
 305	.id_table	= pci_id_table_g,
 306	.probe		= megaraid_probe_one,
 307	.remove		= __devexit_p(megaraid_detach_one),
 308	.shutdown	= megaraid_mbox_shutdown,
 309};
 310
 311
 312
 313// definitions for the device attributes for exporting logical drive number
 314// for a scsi address (Host, Channel, Id, Lun)
 315
 316DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
 317		NULL);
 318
 319// Host template initializer for megaraid mbox sysfs device attributes
 320static struct device_attribute *megaraid_shost_attrs[] = {
 321	&dev_attr_megaraid_mbox_app_hndl,
 322	NULL,
 323};
 324
 325
 326DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
 327
  328 // Scsi device attribute initializer for megaraid mbox sysfs device attributes
 329static struct device_attribute *megaraid_sdev_attrs[] = {
 330	&dev_attr_megaraid_mbox_ld,
 331	NULL,
 332};
 333
 334/**
 335 * megaraid_change_queue_depth - Change the device's queue depth
 336 * @sdev:	scsi device struct
 337 * @qdepth:	depth to set
 338 * @reason:	calling context
 339 *
 340 * Return value:
 341 * 	actual depth set
 342 */
 343static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth,
 344				       int reason)
 345{
 346	if (reason != SCSI_QDEPTH_DEFAULT)
 347		return -EOPNOTSUPP;
 348
 349	if (qdepth > MBOX_MAX_SCSI_CMDS)
 350		qdepth = MBOX_MAX_SCSI_CMDS;
 351	scsi_adjust_queue_depth(sdev, 0, qdepth);
 352	return sdev->queue_depth;
 353}
 354
 355/*
 356 * Scsi host template for megaraid unified driver
 357 */
 358static struct scsi_host_template megaraid_template_g = {
 359	.module				= THIS_MODULE,
 360	.name				= "LSI Logic MegaRAID driver",
 361	.proc_name			= "megaraid",
 362	.queuecommand			= megaraid_queue_command,
 363	.eh_abort_handler		= megaraid_abort_handler,
 364	.eh_device_reset_handler	= megaraid_reset_handler,
 365	.eh_bus_reset_handler		= megaraid_reset_handler,
 366	.eh_host_reset_handler		= megaraid_reset_handler,
 367	.change_queue_depth		= megaraid_change_queue_depth,
 368	.use_clustering			= ENABLE_CLUSTERING,
 369	.sdev_attrs			= megaraid_sdev_attrs,
 370	.shost_attrs			= megaraid_shost_attrs,
 371};
 372
 373
 374/**
 375 * megaraid_init - module load hook
 376 *
  377 * We register ourselves as a hotplug-enabled module and let the PCI
  378 * subsystem discover our adapters.
 379 */
 380static int __init
 381megaraid_init(void)
 382{
 383	int	rval;
 384
 385	// Announce the driver version
 386	con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION,
 387		MEGARAID_EXT_VERSION));
 388
 389	// check validity of module parameters
 390	if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) {
 391
 392		con_log(CL_ANN, (KERN_WARNING
 393			"megaraid mailbox: max commands per lun reset to %d\n",
 394			MBOX_MAX_SCSI_CMDS));
 395
 396		megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS;
 397	}
 398
 399
 400	// register as a PCI hot-plug driver module
 401	rval = pci_register_driver(&megaraid_pci_driver);
 402	if (rval < 0) {
 403		con_log(CL_ANN, (KERN_WARNING
 404			"megaraid: could not register hotplug support.\n"));
 405	}
 406
 407	return rval;
 408}
 409
 410
 411/**
 412 * megaraid_exit - driver unload entry point
 413 *
 414 * We simply unwrap the megaraid_init routine here.
 415 */
 416static void __exit
 417megaraid_exit(void)
 418{
 419	con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));
 420
 421	// unregister as PCI hotplug driver
 422	pci_unregister_driver(&megaraid_pci_driver);
 423
 424	return;
 425}
 426
 427
 428/**
 429 * megaraid_probe_one - PCI hotplug entry point
 430 * @pdev	: handle to this controller's PCI configuration space
 431 * @id		: pci device id of the class of controllers
 432 *
 433 * This routine should be called whenever a new adapter is detected by the
  434 * PCI hotplug subsystem.
 435 */
 436static int __devinit
 437megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 438{
 439	adapter_t	*adapter;
 440
 441
 442	// detected a new controller
 443	con_log(CL_ANN, (KERN_INFO
 444		"megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
 445		pdev->vendor, pdev->device, pdev->subsystem_vendor,
 446		pdev->subsystem_device));
 447
 448	con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number,
 449		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)));
 450
 451	if (pci_enable_device(pdev)) {
 452		con_log(CL_ANN, (KERN_WARNING
 453				"megaraid: pci_enable_device failed\n"));
 454
 455		return -ENODEV;
 456	}
 457
 458	// Enable bus-mastering on this controller
 459	pci_set_master(pdev);
 460
 461	// Allocate the per driver initialization structure
 462	adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL);
 463
 464	if (adapter == NULL) {
 465		con_log(CL_ANN, (KERN_WARNING
 466		"megaraid: out of memory, %s %d.\n", __func__, __LINE__));
 467
 468		goto out_probe_one;
 469	}
 470
 471
 472	// set up PCI related soft state and other pre-known parameters
 473	adapter->unique_id	= pdev->bus->number << 8 | pdev->devfn;
 474	adapter->irq		= pdev->irq;
 475	adapter->pdev		= pdev;
 476
 477	atomic_set(&adapter->being_detached, 0);
 478
 479	// Setup the default DMA mask. This would be changed later on
 480	// depending on hardware capabilities
 481	if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
 482
 483		con_log(CL_ANN, (KERN_WARNING
 484			"megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
 485
 486		goto out_free_adapter;
 487	}
 488
 489
 490	// Initialize the synchronization lock for kernel and LLD
 491	spin_lock_init(&adapter->lock);
 492
 493	// Initialize the command queues: the list of free SCBs and the list
 494	// of pending SCBs.
 495	INIT_LIST_HEAD(&adapter->kscb_pool);
 496	spin_lock_init(SCSI_FREE_LIST_LOCK(adapter));
 497
 498	INIT_LIST_HEAD(&adapter->pend_list);
 499	spin_lock_init(PENDING_LIST_LOCK(adapter));
 500
 501	INIT_LIST_HEAD(&adapter->completed_list);
 502	spin_lock_init(COMPLETED_LIST_LOCK(adapter));
 503
 504
 505	// Start the mailbox based controller
 506	if (megaraid_init_mbox(adapter) != 0) {
 507		con_log(CL_ANN, (KERN_WARNING
  508			"megaraid: mailbox adapter did not initialize\n"));
 509
 510		goto out_free_adapter;
 511	}
 512
 513	// Register with LSI Common Management Module
 514	if (megaraid_cmm_register(adapter) != 0) {
 515
 516		con_log(CL_ANN, (KERN_WARNING
 517		"megaraid: could not register with management module\n"));
 518
 519		goto out_fini_mbox;
 520	}
 521
 522	// setup adapter handle in PCI soft state
 523	pci_set_drvdata(pdev, adapter);
 524
 525	// attach with scsi mid-layer
 526	if (megaraid_io_attach(adapter) != 0) {
 527
 528		con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n"));
 529
 530		goto out_cmm_unreg;
 531	}
 532
 533	return 0;
 534
 535out_cmm_unreg:
 536	pci_set_drvdata(pdev, NULL);
 537	megaraid_cmm_unregister(adapter);
 538out_fini_mbox:
 539	megaraid_fini_mbox(adapter);
 540out_free_adapter:
 541	kfree(adapter);
 542out_probe_one:
 543	pci_disable_device(pdev);
 544
 545	return -ENODEV;
 546}
 547
 548
 549/**
 550 * megaraid_detach_one - release framework resources and call LLD release routine
  551 * @pdev	: handle for our PCI configuration space
 552 *
 553 * This routine is called during driver unload. We free all the allocated
 554 * resources and call the corresponding LLD so that it can also release all
 555 * its resources.
 556 *
 557 * This routine is also called from the PCI hotplug system.
 558 */
 559static void
 560megaraid_detach_one(struct pci_dev *pdev)
 561{
 562	adapter_t		*adapter;
 563	struct Scsi_Host	*host;
 564
 565
 566	// Start a rollback on this adapter
 567	adapter = pci_get_drvdata(pdev);
 568
 569	if (!adapter) {
 570		con_log(CL_ANN, (KERN_CRIT
 571		"megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
 572			pdev->vendor, pdev->device, pdev->subsystem_vendor,
 573			pdev->subsystem_device));
 574
 575		return;
 576	}
 577	else {
 578		con_log(CL_ANN, (KERN_NOTICE
 579		"megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
 580			pdev->vendor, pdev->device, pdev->subsystem_vendor,
 581			pdev->subsystem_device));
 582	}
 583
 584
 585	host = adapter->host;
 586
 587	// do not allow any more requests from the management module for this
 588	// adapter.
 589	// FIXME: How do we account for the request which might still be
 590	// pending with us?
 591	atomic_set(&adapter->being_detached, 1);
 592
 593	// detach from the IO sub-system
 594	megaraid_io_detach(adapter);
 595
 596	// reset the device state in the PCI structure. We check this
 597	// condition when we enter here. If the device state is NULL,
 598	// that would mean the device has already been removed
 599	pci_set_drvdata(pdev, NULL);
 600
 601	// Unregister from common management module
 602	//
  603	// FIXME: this must return success or failure depending on whether a
  604	// command is still pending with the LLD or not.
 605	megaraid_cmm_unregister(adapter);
 606
 607	// finalize the mailbox based controller and release all resources
 608	megaraid_fini_mbox(adapter);
 609
 610	kfree(adapter);
 611
 612	scsi_host_put(host);
 613
 614	pci_disable_device(pdev);
 615
 616	return;
 617}
 618
 619
 620/**
 621 * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA
 622 * @pdev		: generic driver model device
 623 *
 624 * Shutdown notification, perform flush cache.
 625 */
 626static void
 627megaraid_mbox_shutdown(struct pci_dev *pdev)
 628{
 629	adapter_t		*adapter = pci_get_drvdata(pdev);
 630	static int		counter;
 631
 632	if (!adapter) {
 633		con_log(CL_ANN, (KERN_WARNING
 634			"megaraid: null device in shutdown\n"));
 635		return;
 636	}
 637
 638	// flush caches now
 639	con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...",
 640		counter++));
 641
 642	megaraid_mbox_flush_cache(adapter);
 643
 644	con_log(CL_ANN, ("done\n"));
 645}
 646
 647
 648/**
 649 * megaraid_io_attach - attach a device with the IO subsystem
 650 * @adapter		: controller's soft state
 651 *
  652 * Attach this device to the IO subsystem.
 653 */
 654static int
 655megaraid_io_attach(adapter_t *adapter)
 656{
 657	struct Scsi_Host	*host;
 658
 659	// Initialize SCSI Host structure
 660	host = scsi_host_alloc(&megaraid_template_g, 8);
 661	if (!host) {
 662		con_log(CL_ANN, (KERN_WARNING
 663			"megaraid mbox: scsi_register failed\n"));
 664
 665		return -1;
 666	}
 667
 668	SCSIHOST2ADAP(host)	= (caddr_t)adapter;
 669	adapter->host		= host;
 670
 671	host->irq		= adapter->irq;
 672	host->unique_id		= adapter->unique_id;
 673	host->can_queue		= adapter->max_cmds;
 674	host->this_id		= adapter->init_id;
 675	host->sg_tablesize	= adapter->sglen;
 676	host->max_sectors	= adapter->max_sectors;
 677	host->cmd_per_lun	= adapter->cmd_per_lun;
 678	host->max_channel	= adapter->max_channel;
 679	host->max_id		= adapter->max_target;
 680	host->max_lun		= adapter->max_lun;
 681
 682
 683	// notify mid-layer about the new controller
 684	if (scsi_add_host(host, &adapter->pdev->dev)) {
 685
 686		con_log(CL_ANN, (KERN_WARNING
 687			"megaraid mbox: scsi_add_host failed\n"));
 688
 689		scsi_host_put(host);
 690
 691		return -1;
 692	}
 693
 694	scsi_scan_host(host);
 695
 696	return 0;
 697}
 698
 699
 700/**
 701 * megaraid_io_detach - detach a device from the IO subsystem
 702 * @adapter		: controller's soft state
 703 *
 704 * Detach this device from the IO subsystem.
 705 */
 706static void
 707megaraid_io_detach(adapter_t *adapter)
 708{
 709	struct Scsi_Host	*host;
 710
 711	con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n"));
 712
 713	host = adapter->host;
 714
 715	scsi_remove_host(host);
 716
 717	return;
 718}
 719
 720
 721/*
 722 * START: Mailbox Low Level Driver
 723 *
  724 * This section is specific to the single mailbox based controllers
 725 */
 726
 727/**
 728 * megaraid_init_mbox - initialize controller
 729 * @adapter		: our soft state
 730 *
 731 * - Allocate 16-byte aligned mailbox memory for firmware handshake
 732 * - Allocate controller's memory resources
 733 * - Find out all initialization data
 734 * - Allocate memory required for all the commands
 735 * - Use internal library of FW routines, build up complete soft state
 736 */
 737static int __devinit
 738megaraid_init_mbox(adapter_t *adapter)
 739{
 740	struct pci_dev		*pdev;
 741	mraid_device_t		*raid_dev;
 742	int			i;
 743	uint32_t		magic64;
 744
 745
 746	adapter->ito	= MBOX_TIMEOUT;
 747	pdev		= adapter->pdev;
 748
 749	/*
 750	 * Allocate and initialize the init data structure for mailbox
 751	 * controllers
 752	 */
 753	raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL);
 754	if (raid_dev == NULL) return -1;
 755
 756
 757	/*
 758	 * Attach the adapter soft state to raid device soft state
 759	 */
 760	adapter->raid_device	= (caddr_t)raid_dev;
 761	raid_dev->fast_load	= megaraid_fast_load;
 762
 763
 764	// our baseport
 765	raid_dev->baseport = pci_resource_start(pdev, 0);
 766
 767	if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) {
 768
 769		con_log(CL_ANN, (KERN_WARNING
 770				"megaraid: mem region busy\n"));
 771
 772		goto out_free_raid_dev;
 773	}
 774
 775	raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);
 776
 777	if (!raid_dev->baseaddr) {
 778
 779		con_log(CL_ANN, (KERN_WARNING
 780			"megaraid: could not map hba memory\n") );
 781
 782		goto out_release_regions;
 783	}
 784
 785	/* initialize the mutual exclusion lock for the mailbox */
 786	spin_lock_init(&raid_dev->mailbox_lock);
 787
 788	/* allocate memory required for commands */
 789	if (megaraid_alloc_cmd_packets(adapter) != 0)
 790		goto out_iounmap;
 791
 792	/*
 793	 * Issue SYNC cmd to flush the pending cmds in the adapter
 794	 * and initialize its internal state
 795	 */
 796
 797	if (megaraid_mbox_fire_sync_cmd(adapter))
 798		con_log(CL_ANN, ("megaraid: sync cmd failed\n"));
 799
 800	/*
 801	 * Setup the rest of the soft state using the library of
 802	 * FW routines
 803	 */
 804
 805	/* request IRQ and register the interrupt service routine */
 806	if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid",
 807		adapter)) {
 808
 809		con_log(CL_ANN, (KERN_WARNING
 810			"megaraid: Couldn't register IRQ %d!\n", adapter->irq));
 811		goto out_alloc_cmds;
 812
 813	}
 814
 815	// Product info
 816	if (megaraid_mbox_product_info(adapter) != 0)
 817		goto out_free_irq;
 818
 819	// Do we support extended CDBs
 820	adapter->max_cdb_sz = 10;
 821	if (megaraid_mbox_extended_cdb(adapter) == 0) {
 822		adapter->max_cdb_sz = 16;
 823	}
 824
 825	/*
  826	 * Do we support a cluster environment? If we do, what is the initiator
  827	 * id?
 828	 * NOTE: In a non-cluster aware firmware environment, the LLD should
 829	 * return 7 as initiator id.
 830	 */
 831	adapter->ha		= 0;
 832	adapter->init_id	= -1;
 833	if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) {
 834		adapter->ha = 1;
 835	}
 836
 837	/*
 838	 * Prepare the device ids array to have the mapping between the kernel
 839	 * device address and megaraid device address.
 840	 * We export the physical devices on their actual addresses. The
 841	 * logical drives are exported on a virtual SCSI channel
 842	 */
 843	megaraid_mbox_setup_device_map(adapter);
 844
 845	// If the firmware supports random deletion, update the device id map
 846	if (megaraid_mbox_support_random_del(adapter)) {
 847
  848		// Change the logical drive numbers in the device_ids array; one
  849		// slot in device_ids is reserved for the target id, which is why
  850		// "<=" is used below
 851		for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) {
 852			adapter->device_ids[adapter->max_channel][i] += 0x80;
 853		}
 854		adapter->device_ids[adapter->max_channel][adapter->init_id] =
 855			0xFF;
 856
 857		raid_dev->random_del_supported = 1;
 858	}
 859
 860	/*
 861	 * find out the maximum number of scatter-gather elements supported by
 862	 * this firmware
 863	 */
 864	adapter->sglen = megaraid_mbox_get_max_sg(adapter);
 865
 866	// enumerate RAID and SCSI channels so that all devices on SCSI
 867	// channels can later be exported, including disk devices
 868	megaraid_mbox_enum_raid_scsi(adapter);
 869
 870	/*
 871	 * Other parameters required by upper layer
 872	 *
 873	 * maximum number of sectors per IO command
 874	 */
 875	adapter->max_sectors = megaraid_max_sectors;
 876
 877	/*
 878	 * number of queued commands per LUN.
 879	 */
 880	adapter->cmd_per_lun = megaraid_cmd_per_lun;
 881
 882	/*
 883	 * Allocate resources required to issue FW calls, when sysfs is
 884	 * accessed
 885	 */
 886	if (megaraid_sysfs_alloc_resources(adapter) != 0)
 887		goto out_free_irq;
 888
  889	// Set the DMA mask to 64-bit. All supported controllers are capable of
  890	// DMA in this range
 891	pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
 892
 893	if (((magic64 == HBA_SIGNATURE_64_BIT) &&
 894		((adapter->pdev->subsystem_device !=
 895		PCI_SUBSYS_ID_MEGARAID_SATA_150_6) &&
 896		(adapter->pdev->subsystem_device !=
 897		PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
 898		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
 899		adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
 900		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
 901		adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
 902		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
 903		adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
 904		(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
 905		adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
 906		(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
 907		adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
 908		if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
 909			con_log(CL_ANN, (KERN_WARNING
 910				"megaraid: DMA mask for 64-bit failed\n"));
 911
 912			if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
 913				con_log(CL_ANN, (KERN_WARNING
 914					"megaraid: 32-bit DMA mask failed\n"));
 915				goto out_free_sysfs_res;
 916			}
 917		}
 918	}
 919
 920	// setup tasklet for DPC
 921	tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc,
 922			(unsigned long)adapter);
 923
 924	con_log(CL_DLEVEL1, (KERN_INFO
 925		"megaraid mbox hba successfully initialized\n"));
 926
 927	return 0;
 928
 929out_free_sysfs_res:
 930	megaraid_sysfs_free_resources(adapter);
 931out_free_irq:
 932	free_irq(adapter->irq, adapter);
 933out_alloc_cmds:
 934	megaraid_free_cmd_packets(adapter);
 935out_iounmap:
 936	iounmap(raid_dev->baseaddr);
 937out_release_regions:
 938	pci_release_regions(pdev);
 939out_free_raid_dev:
 940	kfree(raid_dev);
 941
 942	return -1;
 943}
 944
 945
 946/**
 947 * megaraid_fini_mbox - undo controller initialization
 948 * @adapter		: our soft state
 949 */
 950static void
 951megaraid_fini_mbox(adapter_t *adapter)
 952{
 953	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
 954
 955	// flush all caches
 956	megaraid_mbox_flush_cache(adapter);
 957
 958	tasklet_kill(&adapter->dpc_h);
 959
 960	megaraid_sysfs_free_resources(adapter);
 961
 962	megaraid_free_cmd_packets(adapter);
 963
 964	free_irq(adapter->irq, adapter);
 965
 966	iounmap(raid_dev->baseaddr);
 967
 968	pci_release_regions(adapter->pdev);
 969
 970	kfree(raid_dev);
 971
 972	return;
 973}
 974
 975
 976/**
 977 * megaraid_alloc_cmd_packets - allocate shared mailbox
 978 * @adapter		: soft state of the raid controller
 979 *
  980 * Allocate and align the shared mailbox. This mailbox is used to issue
  981 * all the commands. For IO based controllers, the mailbox is also registered
 982 * with the FW. Allocate memory for all commands as well.
 983 * This is our big allocator.
 984 */
 985static int
 986megaraid_alloc_cmd_packets(adapter_t *adapter)
 987{
 988	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
 989	struct pci_dev		*pdev;
 990	unsigned long		align;
 991	scb_t			*scb;
 992	mbox_ccb_t		*ccb;
 993	struct mraid_pci_blk	*epthru_pci_blk;
 994	struct mraid_pci_blk	*sg_pci_blk;
 995	struct mraid_pci_blk	*mbox_pci_blk;
 996	int			i;
 997
 998	pdev = adapter->pdev;
 999
1000	/*
1001	 * Setup the mailbox
1002	 * Allocate the common 16-byte aligned memory for the handshake
1003	 * mailbox.
1004	 */
1005	raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev,
1006			sizeof(mbox64_t), &raid_dev->una_mbox64_dma);
1007
1008	if (!raid_dev->una_mbox64) {
1009		con_log(CL_ANN, (KERN_WARNING
1010			"megaraid: out of memory, %s %d\n", __func__,
1011			__LINE__));
1012		return -1;
1013	}
1014	memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t));
1015
1016	/*
1017	 * Align the mailbox at 16-byte boundary
1018	 */
1019	raid_dev->mbox	= &raid_dev->una_mbox64->mbox32;
1020
1021	raid_dev->mbox	= (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) &
1022				(~0UL ^ 0xFUL));
1023
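     	/*
     	 * The 64-bit wrapper is placed 8 bytes below the aligned 32-bit
     	 * mailbox so that its xferaddr_lo/xferaddr_hi words sit right in
     	 * front of it; mbox_dma below is the bus address of the aligned
     	 * mailbox, assuming mbox32 starts 8 bytes into mbox64_t.
     	 */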
1024	raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8);
1025
1026	align = ((void *)raid_dev->mbox -
1027			((void *)&raid_dev->una_mbox64->mbox32));
1028
1029	raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 +
1030			align;
1031
1032	// Allocate memory for commands issued internally
1033	adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE,
1034				&adapter->ibuf_dma_h);
1035	if (!adapter->ibuf) {
1036
1037		con_log(CL_ANN, (KERN_WARNING
1038			"megaraid: out of memory, %s %d\n", __func__,
1039			__LINE__));
1040
1041		goto out_free_common_mbox;
1042	}
1043	memset(adapter->ibuf, 0, MBOX_IBUF_SIZE);
1044
1045	// Allocate memory for our SCSI Command Blocks and their associated
1046	// memory
1047
1048	/*
1049	 * Allocate memory for the base list of scb. Later allocate memory for
1050	 * CCBs and embedded components of each CCB and point the pointers in
1051	 * scb to the allocated components
1052	 * NOTE: The code to allocate SCB will be duplicated in all the LLD
1053	 * since the calling routine does not yet know the number of available
1054	 * commands.
1055	 */
1056	adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL);
1057
1058	if (adapter->kscb_list == NULL) {
1059		con_log(CL_ANN, (KERN_WARNING
1060			"megaraid: out of memory, %s %d\n", __func__,
1061			__LINE__));
1062		goto out_free_ibuf;
1063	}
1064
1065	// memory allocation for our command packets
1066	if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
1067		con_log(CL_ANN, (KERN_WARNING
1068			"megaraid: out of memory, %s %d\n", __func__,
1069			__LINE__));
1070		goto out_free_scb_list;
1071	}
1072
1073	// Adjust the scb pointers and link in the free pool
1074	epthru_pci_blk	= raid_dev->epthru_pool;
1075	sg_pci_blk	= raid_dev->sg_pool;
1076	mbox_pci_blk	= raid_dev->mbox_pool;
1077
1078	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1079		scb			= adapter->kscb_list + i;
1080		ccb			= raid_dev->ccb_list + i;
1081
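     		/*
     		 * Each pool block is sizeof(mbox64_t) + 16 bytes with 16-byte
     		 * alignment, so the 32-bit mailbox is carved out at offset 16
     		 * (a 16-byte boundary) and the 64-bit address words of the
     		 * mbox64 wrapper start at offset 8, just in front of it.
     		 */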
1082		ccb->mbox	= (mbox_t *)(mbox_pci_blk[i].vaddr + 16);
1083		ccb->raw_mbox	= (uint8_t *)ccb->mbox;
1084		ccb->mbox64	= (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);
1085		ccb->mbox_dma_h	= (unsigned long)mbox_pci_blk[i].dma_addr + 16;
1086
1087		// make sure the mailbox is aligned properly
1088		if (ccb->mbox_dma_h & 0x0F) {
1089			con_log(CL_ANN, (KERN_CRIT
1090				"megaraid mbox: not aligned on 16-bytes\n"));
1091
1092			goto out_teardown_dma_pools;
1093		}
1094
1095		ccb->epthru		= (mraid_epassthru_t *)
1096						epthru_pci_blk[i].vaddr;
1097		ccb->epthru_dma_h	= epthru_pci_blk[i].dma_addr;
1098		ccb->pthru		= (mraid_passthru_t *)ccb->epthru;
1099		ccb->pthru_dma_h	= ccb->epthru_dma_h;
1100
1101
1102		ccb->sgl64		= (mbox_sgl64 *)sg_pci_blk[i].vaddr;
1103		ccb->sgl_dma_h		= sg_pci_blk[i].dma_addr;
1104		ccb->sgl32		= (mbox_sgl32 *)ccb->sgl64;
1105
1106		scb->ccb		= (caddr_t)ccb;
1107		scb->gp			= 0;
1108
1109		scb->sno		= i;	// command index
1110
1111		scb->scp		= NULL;
1112		scb->state		= SCB_FREE;
1113		scb->dma_direction	= PCI_DMA_NONE;
1114		scb->dma_type		= MRAID_DMA_NONE;
1115		scb->dev_channel	= -1;
1116		scb->dev_target		= -1;
1117
1118		// put scb in the free pool
1119		list_add_tail(&scb->list, &adapter->kscb_pool);
1120	}
1121
1122	return 0;
1123
1124out_teardown_dma_pools:
1125	megaraid_mbox_teardown_dma_pools(adapter);
1126out_free_scb_list:
1127	kfree(adapter->kscb_list);
1128out_free_ibuf:
1129	pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
1130		adapter->ibuf_dma_h);
1131out_free_common_mbox:
1132	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
1133		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1134
1135	return -1;
1136}
1137
1138
1139/**
1140 * megaraid_free_cmd_packets - free memory
1141 * @adapter		: soft state of the raid controller
1142 *
1143 * Release memory resources allocated for commands.
1144 */
1145static void
1146megaraid_free_cmd_packets(adapter_t *adapter)
1147{
1148	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1149
1150	megaraid_mbox_teardown_dma_pools(adapter);
1151
1152	kfree(adapter->kscb_list);
1153
1154	pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
1155		(void *)adapter->ibuf, adapter->ibuf_dma_h);
1156
1157	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
1158		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1159	return;
1160}
1161
1162
1163/**
1164 * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
1165 * @adapter		: HBA soft state
1166 *
1167 * Setup the dma pools for mailbox, passthru and extended passthru structures,
1168 * and scatter-gather lists.
1169 */
1170static int
1171megaraid_mbox_setup_dma_pools(adapter_t *adapter)
1172{
1173	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
1174	struct mraid_pci_blk	*epthru_pci_blk;
1175	struct mraid_pci_blk	*sg_pci_blk;
1176	struct mraid_pci_blk	*mbox_pci_blk;
1177	int			i;
1178
1179
1180
 1181	// Allocate memory for 16-byte aligned mailboxes
1182	raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
1183						adapter->pdev,
1184						sizeof(mbox64_t) + 16,
1185						16, 0);
1186
1187	if (raid_dev->mbox_pool_handle == NULL) {
1188		goto fail_setup_dma_pool;
1189	}
1190
1191	mbox_pci_blk = raid_dev->mbox_pool;
1192	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1193		mbox_pci_blk[i].vaddr = pci_pool_alloc(
1194						raid_dev->mbox_pool_handle,
1195						GFP_KERNEL,
1196						&mbox_pci_blk[i].dma_addr);
1197		if (!mbox_pci_blk[i].vaddr) {
1198			goto fail_setup_dma_pool;
1199		}
1200	}
1201
1202	/*
 1203	 * Allocate memory for each embedded passthru structure pointer.
 1204	 * Request a 128-byte aligned structure for each passthru command
 1205	 * structure.
 1206	 * Since passthru and extended passthru commands are exclusive, they
 1207	 * share a common memory pool. Passthru structures piggyback on the memory
 1208	 * allocated for extended passthru, since passthru is the smaller of the two.
1209	 */
1210	raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
1211			adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);
1212
1213	if (raid_dev->epthru_pool_handle == NULL) {
1214		goto fail_setup_dma_pool;
1215	}
1216
1217	epthru_pci_blk = raid_dev->epthru_pool;
1218	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1219		epthru_pci_blk[i].vaddr = pci_pool_alloc(
1220						raid_dev->epthru_pool_handle,
1221						GFP_KERNEL,
1222						&epthru_pci_blk[i].dma_addr);
1223		if (!epthru_pci_blk[i].vaddr) {
1224			goto fail_setup_dma_pool;
1225		}
1226	}
1227
1228
 1229	// Allocate memory for each scatter-gather list. Request 512-byte
 1230	// alignment for each sg list
1231	raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
1232					adapter->pdev,
1233					sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
1234					512, 0);
1235
1236	if (raid_dev->sg_pool_handle == NULL) {
1237		goto fail_setup_dma_pool;
1238	}
1239
1240	sg_pci_blk = raid_dev->sg_pool;
1241	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1242		sg_pci_blk[i].vaddr = pci_pool_alloc(
1243						raid_dev->sg_pool_handle,
1244						GFP_KERNEL,
1245						&sg_pci_blk[i].dma_addr);
1246		if (!sg_pci_blk[i].vaddr) {
1247			goto fail_setup_dma_pool;
1248		}
1249	}
1250
1251	return 0;
1252
1253fail_setup_dma_pool:
1254	megaraid_mbox_teardown_dma_pools(adapter);
1255	return -1;
1256}
1257
1258
1259/**
1260 * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
1261 * @adapter		: HBA soft state
1262 *
1263 * Teardown the dma pool for mailbox, passthru and extended passthru
1264 * structures, and scatter-gather lists.
1265 */
1266static void
1267megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
1268{
1269	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
1270	struct mraid_pci_blk	*epthru_pci_blk;
1271	struct mraid_pci_blk	*sg_pci_blk;
1272	struct mraid_pci_blk	*mbox_pci_blk;
1273	int			i;
1274
1275
1276	sg_pci_blk = raid_dev->sg_pool;
1277	for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
1278		pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
1279			sg_pci_blk[i].dma_addr);
1280	}
1281	if (raid_dev->sg_pool_handle)
1282		pci_pool_destroy(raid_dev->sg_pool_handle);
1283
1284
1285	epthru_pci_blk = raid_dev->epthru_pool;
1286	for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
1287		pci_pool_free(raid_dev->epthru_pool_handle,
1288			epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
1289	}
1290	if (raid_dev->epthru_pool_handle)
1291		pci_pool_destroy(raid_dev->epthru_pool_handle);
1292
1293
1294	mbox_pci_blk = raid_dev->mbox_pool;
1295	for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
1296		pci_pool_free(raid_dev->mbox_pool_handle,
1297			mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
1298	}
1299	if (raid_dev->mbox_pool_handle)
1300		pci_pool_destroy(raid_dev->mbox_pool_handle);
1301
1302	return;
1303}
1304
1305
1306/**
1307 * megaraid_alloc_scb - detach and return a scb from the free list
1308 * @adapter	: controller's soft state
1309 * @scp		: pointer to the scsi command to be executed
1310 *
1311 * Return the scb from the head of the free list. %NULL if there are none
1312 * available.
1313 */
1314static scb_t *
1315megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
1316{
1317	struct list_head	*head = &adapter->kscb_pool;
1318	scb_t			*scb = NULL;
1319	unsigned long		flags;
1320
1321	// detach scb from free pool
1322	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
1323
1324	if (list_empty(head)) {
1325		spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1326		return NULL;
1327	}
1328
1329	scb = list_entry(head->next, scb_t, list);
1330	list_del_init(&scb->list);
1331
1332	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1333
1334	scb->state	= SCB_ACTIVE;
1335	scb->scp	= scp;
1336	scb->dma_type	= MRAID_DMA_NONE;
1337
1338	return scb;
1339}
1340
1341
1342/**
1343 * megaraid_dealloc_scb - return the scb to the free pool
1344 * @adapter	: controller's soft state
1345 * @scb		: scb to be freed
1346 *
1347 * Return the scb back to the free list of scbs. The caller must 'flush' the
 1348 * SCB before calling us. E.g., performing pci_unmap and/or pci_sync etc.
1349 * NOTE NOTE: Make sure the scb is not on any list before calling this
1350 * routine.
1351 */
1352static inline void
1353megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
1354{
1355	unsigned long		flags;
1356
1357	// put scb in the free pool
1358	scb->state	= SCB_FREE;
1359	scb->scp	= NULL;
1360	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
1361
1362	list_add(&scb->list, &adapter->kscb_pool);
1363
1364	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1365
1366	return;
1367}
1368
1369
1370/**
1371 * megaraid_mbox_mksgl - make the scatter-gather list
1372 * @adapter	: controller's soft state
1373 * @scb		: scsi control block
1374 *
1375 * Prepare the scatter-gather list.
1376 */
1377static int
1378megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
1379{
1380	struct scatterlist	*sgl;
1381	mbox_ccb_t		*ccb;
1382	struct scsi_cmnd	*scp;
1383	int			sgcnt;
1384	int			i;
1385
1386
1387	scp	= scb->scp;
1388	ccb	= (mbox_ccb_t *)scb->ccb;
1389
1390	sgcnt = scsi_dma_map(scp);
1391	BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);
1392
1393	// no mapping required if no data to be transferred
1394	if (!sgcnt)
1395		return 0;
1396
1397	scb->dma_type = MRAID_DMA_WSG;
1398
1399	scsi_for_each_sg(scp, sgl, sgcnt, i) {
1400		ccb->sgl64[i].address	= sg_dma_address(sgl);
1401		ccb->sgl64[i].length	= sg_dma_len(sgl);
1402	}
1403
1404	// Return count of SG nodes
1405	return sgcnt;
1406}
1407
1408
1409/**
1410 * mbox_post_cmd - issue a mailbox command
1411 * @adapter	: controller's soft state
1412 * @scb		: command to be issued
1413 *
1414 * Post the command to the controller if mailbox is available.
1415 */
1416static int
1417mbox_post_cmd(adapter_t *adapter, scb_t *scb)
1418{
1419	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
1420	mbox64_t	*mbox64;
1421	mbox_t		*mbox;
1422	mbox_ccb_t	*ccb;
1423	unsigned long	flags;
1424	unsigned int	i = 0;
1425
1426
1427	ccb	= (mbox_ccb_t *)scb->ccb;
1428	mbox	= raid_dev->mbox;
1429	mbox64	= raid_dev->mbox64;
1430
1431	/*
1432	 * Check for busy mailbox. If it is, return failure - the caller
1433	 * should retry later.
1434	 */
1435	spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
1436
1437	if (unlikely(mbox->busy)) {
1438		do {
1439			udelay(1);
1440			i++;
1441			rmb();
1442		} while(mbox->busy && (i < max_mbox_busy_wait));
1443
1444		if (mbox->busy) {
1445
1446			spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
1447
1448			return -1;
1449		}
1450	}
1451
1452
1453	// Copy this command's mailbox data into "adapter's" mailbox
1454	memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
1455	mbox->cmdid = scb->sno;
1456
1457	adapter->outstanding_cmds++;
1458
1459	if (scb->dma_direction == PCI_DMA_TODEVICE)
1460		pci_dma_sync_sg_for_device(adapter->pdev,
1461					   scsi_sglist(scb->scp),
1462					   scsi_sg_count(scb->scp),
1463					   PCI_DMA_TODEVICE);
1464
1465	mbox->busy	= 1;	// Set busy
1466	mbox->poll	= 0;
1467	mbox->ack	= 0;
1468	wmb();
1469
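     	// ring the inbound doorbell with the mailbox bus address; bit 0 is
     	// set to flag a new command for the firmware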
1470	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
1471
1472	spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
1473
1474	return 0;
1475}
1476
1477
1478/**
1479 * megaraid_queue_command - generic queue entry point for all LLDs
1480 * @scp		: pointer to the scsi command to be executed
 1481 * @done	: callback routine to be called after the cmd has been completed
1482 *
1483 * Queue entry point for mailbox based controllers.
1484 */
1485static int
1486megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
1487{
1488	adapter_t	*adapter;
1489	scb_t		*scb;
1490	int		if_busy;
1491
1492	adapter		= SCP2ADAPTER(scp);
1493	scp->scsi_done	= done;
1494	scp->result	= 0;
1495
1496	/*
1497	 * Allocate and build a SCB request
1498	 * if_busy flag will be set if megaraid_mbox_build_cmd() command could
1499	 * not allocate scb. We will return non-zero status in that case.
 1500	 * NOTE: scb can be NULL even though certain commands completed
 1501	 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY; we would
 1502	 * return 0 in that case and do the callback right away.
1503	 */
1504	if_busy	= 0;
1505	scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy);
1506	if (!scb) {	// command already completed
1507		done(scp);
1508		return 0;
1509	}
1510
1511	megaraid_mbox_runpendq(adapter, scb);
1512	return if_busy;
1513}
1514
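     /*
      * DEF_SCSI_QCMD() generates megaraid_queue_command(), a wrapper which
      * acquires the Scsi_Host lock and then calls megaraid_queue_command_lck()
      * above.
      */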
1515static DEF_SCSI_QCMD(megaraid_queue_command)
1516
1517/**
1518 * megaraid_mbox_build_cmd - transform the mid-layer scsi commands
1519 * @adapter	: controller's soft state
1520 * @scp		: mid-layer scsi command pointer
1521 * @busy	: set if request could not be completed because of lack of
1522 *		resources
1523 *
1524 * Transform the mid-layer scsi command to megaraid firmware lingua.
1525 * Convert the command issued by mid-layer to format understood by megaraid
1526 * firmware. We also complete certain commands without sending them to firmware.
1527 */
1528static scb_t *
1529megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
1530{
1531	mraid_device_t		*rdev = ADAP2RAIDDEV(adapter);
1532	int			channel;
1533	int			target;
1534	int			islogical;
1535	mbox_ccb_t		*ccb;
1536	mraid_passthru_t	*pthru;
1537	mbox64_t		*mbox64;
1538	mbox_t			*mbox;
1539	scb_t			*scb;
1540	char			skip[] = "skipping";
1541	char			scan[] = "scanning";
1542	char			*ss;
1543
1544
1545	/*
1546	 * Get the appropriate device map for the device this command is
1547	 * intended for
1548	 */
1549	MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);
1550
1551	/*
1552	 * Logical drive commands
1553	 */
1554	if (islogical) {
1555		switch (scp->cmnd[0]) {
1556		case TEST_UNIT_READY:
1557			/*
 1558			 * Do we support clustering and is the support enabled?
 1559			 * If not, return success always
1560			 */
1561			if (!adapter->ha) {
1562				scp->result = (DID_OK << 16);
1563				return NULL;
1564			}
1565
1566			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1567				scp->result = (DID_ERROR << 16);
1568				*busy = 1;
1569				return NULL;
1570			}
1571
1572			scb->dma_direction	= scp->sc_data_direction;
1573			scb->dev_channel	= 0xFF;
1574			scb->dev_target		= target;
1575			ccb			= (mbox_ccb_t *)scb->ccb;
1576
1577			/*
1578			 * The command id will be provided by the command
1579			 * issuance routine
1580			 */
1581			ccb->raw_mbox[0]	= CLUSTER_CMD;
1582			ccb->raw_mbox[2]	= RESERVATION_STATUS;
1583			ccb->raw_mbox[3]	= target;
1584
1585			return scb;
1586
1587		case MODE_SENSE:
1588		{
1589			struct scatterlist	*sgl;
1590			caddr_t			vaddr;
1591
1592			sgl = scsi_sglist(scp);
1593			if (sg_page(sgl)) {
1594				vaddr = (caddr_t) sg_virt(&sgl[0]);
1595
1596				memset(vaddr, 0, scp->cmnd[4]);
1597			}
1598			else {
1599				con_log(CL_ANN, (KERN_WARNING
1600						 "megaraid mailbox: invalid sg:%d\n",
1601						 __LINE__));
1602			}
1603		}
1604		scp->result = (DID_OK << 16);
1605		return NULL;
1606
1607		case INQUIRY:
1608			/*
1609			 * Display the channel scan for logical drives
1610			 * Do not display scan for a channel if already done.
1611			 */
1612			if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
1613
1614				con_log(CL_ANN, (KERN_INFO
1615					"scsi[%d]: scanning scsi channel %d",
1616					adapter->host->host_no,
1617					SCP2CHANNEL(scp)));
1618
1619				con_log(CL_ANN, (
1620					" [virtual] for logical drives\n"));
1621
1622				rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1623			}
1624
1625			if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
1626				scp->sense_buffer[0] = 0x70;
1627				scp->sense_buffer[2] = ILLEGAL_REQUEST;
1628				scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
1629				scp->result = CHECK_CONDITION << 1;
1630				return NULL;
1631			}
1632
1633			/* Fall through */
1634
1635		case READ_CAPACITY:
1636			/*
1637			 * Do not allow LUN > 0 for logical drives and
1638			 * requests for more than 40 logical drives
1639			 */
1640			if (SCP2LUN(scp)) {
1641				scp->result = (DID_BAD_TARGET << 16);
1642				return NULL;
1643			}
1644			if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) {
1645				scp->result = (DID_BAD_TARGET << 16);
1646				return NULL;
1647			}
1648
1649
1650			/* Allocate a SCB and initialize passthru */
1651			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1652				scp->result = (DID_ERROR << 16);
1653				*busy = 1;
1654				return NULL;
1655			}
1656
1657			ccb			= (mbox_ccb_t *)scb->ccb;
1658			scb->dev_channel	= 0xFF;
1659			scb->dev_target		= target;
1660			pthru			= ccb->pthru;
1661			mbox			= ccb->mbox;
1662			mbox64			= ccb->mbox64;
1663
1664			pthru->timeout		= 0;
1665			pthru->ars		= 1;
1666			pthru->reqsenselen	= 14;
1667			pthru->islogical	= 1;
1668			pthru->logdrv		= target;
1669			pthru->cdblen		= scp->cmd_len;
1670			memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1671
1672			mbox->cmd		= MBOXCMD_PASSTHRU64;
1673			scb->dma_direction	= scp->sc_data_direction;
1674
1675			pthru->dataxferlen	= scsi_bufflen(scp);
1676			pthru->dataxferaddr	= ccb->sgl_dma_h;
1677			pthru->numsge		= megaraid_mbox_mksgl(adapter,
1678							scb);
1679
1680			mbox->xferaddr		= 0xFFFFFFFF;
1681			mbox64->xferaddr_lo	= (uint32_t )ccb->pthru_dma_h;
1682			mbox64->xferaddr_hi	= 0;
1683
1684			return scb;
1685
1686		case READ_6:
1687		case WRITE_6:
1688		case READ_10:
1689		case WRITE_10:
1690		case READ_12:
1691		case WRITE_12:
1692
1693			/*
1694			 * Allocate a SCB and initialize mailbox
1695			 */
1696			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1697				scp->result = (DID_ERROR << 16);
1698				*busy = 1;
1699				return NULL;
1700			}
1701			ccb			= (mbox_ccb_t *)scb->ccb;
1702			scb->dev_channel	= 0xFF;
1703			scb->dev_target		= target;
1704			mbox			= ccb->mbox;
1705			mbox64			= ccb->mbox64;
1706			mbox->logdrv		= target;
1707
1708			/*
1709			 * A little HACK: 2nd bit is zero for all scsi read
1710			 * commands and is set for all scsi write commands
1711			 */
1712			mbox->cmd = (scp->cmnd[0] & 0x02) ?  MBOXCMD_LWRITE64:
1713					MBOXCMD_LREAD64 ;
1714
1715			/*
1716			 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1717			 */
1718			if (scp->cmd_len == 6) {
1719				mbox->numsectors = (uint32_t)scp->cmnd[4];
1720				mbox->lba =
1721					((uint32_t)scp->cmnd[1] << 16)	|
1722					((uint32_t)scp->cmnd[2] << 8)	|
1723					(uint32_t)scp->cmnd[3];
1724
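     				// a 6-byte CDB carries only a 21-bit LBA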
1725				mbox->lba &= 0x1FFFFF;
1726			}
1727
1728			/*
1729			 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1730			 */
1731			else if (scp->cmd_len == 10) {
1732				mbox->numsectors =
1733					(uint32_t)scp->cmnd[8] |
1734					((uint32_t)scp->cmnd[7] << 8);
1735				mbox->lba =
1736					((uint32_t)scp->cmnd[2] << 24) |
1737					((uint32_t)scp->cmnd[3] << 16) |
1738					((uint32_t)scp->cmnd[4] << 8) |
1739					(uint32_t)scp->cmnd[5];
1740			}
1741
1742			/*
1743			 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1744			 */
1745			else if (scp->cmd_len == 12) {
1746				mbox->lba =
1747					((uint32_t)scp->cmnd[2] << 24) |
1748					((uint32_t)scp->cmnd[3] << 16) |
1749					((uint32_t)scp->cmnd[4] << 8) |
1750					(uint32_t)scp->cmnd[5];
1751
1752				mbox->numsectors =
1753					((uint32_t)scp->cmnd[6] << 24) |
1754					((uint32_t)scp->cmnd[7] << 16) |
1755					((uint32_t)scp->cmnd[8] << 8) |
1756					(uint32_t)scp->cmnd[9];
1757			}
1758			else {
1759				con_log(CL_ANN, (KERN_WARNING
1760					"megaraid: unsupported CDB length\n"));
1761
1762				megaraid_dealloc_scb(adapter, scb);
1763
1764				scp->result = (DID_ERROR << 16);
1765				return NULL;
1766			}
1767
1768			scb->dma_direction = scp->sc_data_direction;
1769
1770			// Calculate Scatter-Gather info
1771			mbox64->xferaddr_lo	= (uint32_t )ccb->sgl_dma_h;
1772			mbox->numsge		= megaraid_mbox_mksgl(adapter,
1773							scb);
1774			mbox->xferaddr		= 0xFFFFFFFF;
1775			mbox64->xferaddr_hi	= 0;
1776
1777			return scb;
1778
1779		case RESERVE:
1780		case RELEASE:
1781			/*
 1782			 * Do we support clustering and is the support enabled?
1783			 */
1784			if (!adapter->ha) {
1785				scp->result = (DID_BAD_TARGET << 16);
1786				return NULL;
1787			}
1788
1789			/*
1790			 * Allocate a SCB and initialize mailbox
1791			 */
1792			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1793				scp->result = (DID_ERROR << 16);
1794				*busy = 1;
1795				return NULL;
1796			}
1797
1798			ccb			= (mbox_ccb_t *)scb->ccb;
1799			scb->dev_channel	= 0xFF;
1800			scb->dev_target		= target;
1801			ccb->raw_mbox[0]	= CLUSTER_CMD;
1802			ccb->raw_mbox[2]	=  (scp->cmnd[0] == RESERVE) ?
1803						RESERVE_LD : RELEASE_LD;
1804
1805			ccb->raw_mbox[3]	= target;
1806			scb->dma_direction	= scp->sc_data_direction;
1807
1808			return scb;
1809
1810		default:
1811			scp->result = (DID_BAD_TARGET << 16);
1812			return NULL;
1813		}
1814	}
1815	else { // Passthru device commands
1816
1817		// Do not allow access to target id > 15 or LUN > 7
1818		if (target > 15 || SCP2LUN(scp) > 7) {
1819			scp->result = (DID_BAD_TARGET << 16);
1820			return NULL;
1821		}
1822
1823		// if fast load option was set and scan for last device is
1824		// over, reset the fast_load flag so that during a possible
1825		// next scan, devices can be made available
1826		if (rdev->fast_load && (target == 15) &&
1827			(SCP2CHANNEL(scp) == adapter->max_channel -1)) {
1828
1829			con_log(CL_ANN, (KERN_INFO
1830			"megaraid[%d]: physical device scan re-enabled\n",
1831				adapter->host->host_no));
1832			rdev->fast_load = 0;
1833		}
1834
1835		/*
1836		 * Display the channel scan for physical devices
1837		 */
1838		if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
1839
1840			ss = rdev->fast_load ? skip : scan;
1841
1842			con_log(CL_ANN, (KERN_INFO
1843				"scsi[%d]: %s scsi channel %d [Phy %d]",
1844				adapter->host->host_no, ss, SCP2CHANNEL(scp),
1845				channel));
1846
1847			con_log(CL_ANN, (
1848				" for non-raid devices\n"));
1849
1850			rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1851		}
1852
1853		// disable channel sweep if fast load option given
1854		if (rdev->fast_load) {
1855			scp->result = (DID_BAD_TARGET << 16);
1856			return NULL;
1857		}
1858
1859		// Allocate a SCB and initialize passthru
1860		if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1861			scp->result = (DID_ERROR << 16);
1862			*busy = 1;
1863			return NULL;
1864		}
1865
1866		ccb			= (mbox_ccb_t *)scb->ccb;
1867		scb->dev_channel	= channel;
1868		scb->dev_target		= target;
1869		scb->dma_direction	= scp->sc_data_direction;
1870		mbox			= ccb->mbox;
1871		mbox64			= ccb->mbox64;
1872
1873		// Does this firmware support extended CDBs
1874		if (adapter->max_cdb_sz == 16) {
1875			mbox->cmd		= MBOXCMD_EXTPTHRU;
1876
1877			megaraid_mbox_prepare_epthru(adapter, scb, scp);
1878
1879			mbox64->xferaddr_lo	= (uint32_t)ccb->epthru_dma_h;
1880			mbox64->xferaddr_hi	= 0;
1881			mbox->xferaddr		= 0xFFFFFFFF;
1882		}
1883		else {
1884			mbox->cmd = MBOXCMD_PASSTHRU64;
1885
1886			megaraid_mbox_prepare_pthru(adapter, scb, scp);
1887
1888			mbox64->xferaddr_lo	= (uint32_t)ccb->pthru_dma_h;
1889			mbox64->xferaddr_hi	= 0;
1890			mbox->xferaddr		= 0xFFFFFFFF;
1891		}
1892		return scb;
1893	}
1894
1895	// NOT REACHED
1896}
1897
1898
1899/**
1900 * megaraid_mbox_runpendq - execute commands queued in the pending queue
1901 * @adapter	: controller's soft state
1902 * @scb_q	: SCB to be queued in the pending list
1903 *
1904 * Scan the pending list for commands which are not yet issued and try to
1905 * post to the controller. The SCB can be a null pointer, which would indicate
 1906 * no SCB to be queued; just try to execute the ones in the pending list.
1907 *
1908 * NOTE: We do not actually traverse the pending list. The SCBs are plucked
1909 * out from the head of the pending list. If it is successfully issued, the
1910 * next SCB is at the head now.
1911 */
1912static void
1913megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
1914{
1915	scb_t			*scb;
1916	unsigned long		flags;
1917
1918	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
1919
1920	if (scb_q) {
1921		scb_q->state = SCB_PENDQ;
1922		list_add_tail(&scb_q->list, &adapter->pend_list);
1923	}
1924
1925	// if the adapter is not in quiescent mode, post the commands to FW
1926	if (adapter->quiescent) {
1927		spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
1928		return;
1929	}
1930
1931	while (!list_empty(&adapter->pend_list)) {
1932
1933		assert_spin_locked(PENDING_LIST_LOCK(adapter));
1934
1935		scb = list_entry(adapter->pend_list.next, scb_t, list);
1936
1937		// remove the scb from the pending list and try to
1938		// issue it. If we are unable to issue it, put it back
1939		// in the pending list and return
1940
1941		list_del_init(&scb->list);
1942
1943		spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
1944
1945		// if mailbox was busy, return SCB back to pending
1946		// list. Make sure to add at the head, since that's
1947		// where it would have been removed from
1948
1949		scb->state = SCB_ISSUED;
1950
1951		if (mbox_post_cmd(adapter, scb) != 0) {
1952
1953			spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
1954
1955			scb->state = SCB_PENDQ;
1956
1957			list_add(&scb->list, &adapter->pend_list);
1958
1959			spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
1960				flags);
1961
1962			return;
1963		}
1964
1965		spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
1966	}
1967
1968	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
1969
1970
1971	return;
1972}
1973
1974
1975/**
1976 * megaraid_mbox_prepare_pthru - prepare a command for physical devices
1977 * @adapter	: pointer to controller's soft state
1978 * @scb		: scsi control block
1979 * @scp		: scsi command from the mid-layer
1980 *
1981 * Prepare a command for the scsi physical devices.
1982 */
1983static void
1984megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
1985		struct scsi_cmnd *scp)
1986{
1987	mbox_ccb_t		*ccb;
1988	mraid_passthru_t	*pthru;
1989	uint8_t			channel;
1990	uint8_t			target;
1991
1992	ccb	= (mbox_ccb_t *)scb->ccb;
1993	pthru	= ccb->pthru;
1994	channel	= scb->dev_channel;
1995	target	= scb->dev_target;
1996
1997	// 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
1998	pthru->timeout		= 4;	
1999	pthru->ars		= 1;
2000	pthru->islogical	= 0;
2001	pthru->channel		= 0;
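	// encode the channel in the upper nibble and the target id in the lower nibble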
2002	pthru->target		= (channel << 4) | target;
2003	pthru->logdrv		= SCP2LUN(scp);
2004	pthru->reqsenselen	= 14;
2005	pthru->cdblen		= scp->cmd_len;
2006
2007	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
2008
2009	if (scsi_bufflen(scp)) {
2010		pthru->dataxferlen	= scsi_bufflen(scp);
2011		pthru->dataxferaddr	= ccb->sgl_dma_h;
2012		pthru->numsge		= megaraid_mbox_mksgl(adapter, scb);
2013	}
2014	else {
2015		pthru->dataxferaddr	= 0;
2016		pthru->dataxferlen	= 0;
2017		pthru->numsge		= 0;
2018	}
2019	return;
2020}
2021
2022
2023/**
2024 * megaraid_mbox_prepare_epthru - prepare a command for physical devices
2025 * @adapter	: pointer to controller's soft state
2026 * @scb		: scsi control block
2027 * @scp		: scsi command from the mid-layer
2028 *
2029 * Prepare a command for the scsi physical devices. This routine prepares
2030 * commands for devices which can take extended CDBs (>10 bytes).
2031 */
2032static void
2033megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
2034		struct scsi_cmnd *scp)
2035{
2036	mbox_ccb_t		*ccb;
2037	mraid_epassthru_t	*epthru;
2038	uint8_t			channel;
2039	uint8_t			target;
2040
2041	ccb	= (mbox_ccb_t *)scb->ccb;
2042	epthru	= ccb->epthru;
2043	channel	= scb->dev_channel;
2044	target	= scb->dev_target;
2045
2046	// 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
2047	epthru->timeout		= 4;	
2048	epthru->ars		= 1;
2049	epthru->islogical	= 0;
2050	epthru->channel		= 0;
2051	epthru->target		= (channel << 4) | target;
2052	epthru->logdrv		= SCP2LUN(scp);
2053	epthru->reqsenselen	= 14;
2054	epthru->cdblen		= scp->cmd_len;
2055
2056	memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
2057
2058	if (scsi_bufflen(scp)) {
2059		epthru->dataxferlen	= scsi_bufflen(scp);
2060		epthru->dataxferaddr	= ccb->sgl_dma_h;
2061		epthru->numsge		= megaraid_mbox_mksgl(adapter, scb);
2062	}
2063	else {
2064		epthru->dataxferaddr	= 0;
2065		epthru->dataxferlen	= 0;
2066		epthru->numsge		= 0;
2067	}
2068	return;
2069}
2070
2071
2072/**
2073 * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs
2074 * @adapter	: controller's soft state
2075 *
2076 * Interrupt acknowledgement sequence for memory mapped HBAs. Find the
2077 * completed commands and put them on the completed list for later processing.
2078 *
2079 * Returns:	1 if the interrupt is valid, 0 otherwise
2080 */
2081static int
2082megaraid_ack_sequence(adapter_t *adapter)
2083{
2084	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
2085	mbox_t			*mbox;
2086	scb_t			*scb;
2087	uint8_t			nstatus;
2088	uint8_t			completed[MBOX_MAX_FIRMWARE_STATUS];
2089	struct list_head	clist;
2090	int			handled;
2091	uint32_t		dword;
2092	unsigned long		flags;
2093	int			i, j;
2094
2095
2096	mbox	= raid_dev->mbox;
2097
2098	// move the SCBs from the firmware completed array to our local list
2099	INIT_LIST_HEAD(&clist);
2100
2101	// loop while the F/W has more commands for us to complete
2102	handled = 0;
2103	spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
2104	do {
2105		/*
2106		 * Check if a valid interrupt is pending. If found, force the
2107		 * interrupt line low.
2108		 */
2109		dword = RDOUTDOOR(raid_dev);
2110		if (dword != 0x10001234) break;
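		// 0x10001234 is the firmware's valid-interrupt signature; writing
		// it back (below) forces the interrupt line low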
2111
2112		handled = 1;
2113
2114		WROUTDOOR(raid_dev, 0x10001234);
2115
2116		nstatus = 0;
2117		// wait for valid numstatus to post
2118		for (i = 0; i < 0xFFFFF; i++) {
2119			if (mbox->numstatus != 0xFF) {
2120				nstatus = mbox->numstatus;
2121				break;
2122			}
2123			rmb();
2124		}
2125		mbox->numstatus = 0xFF;
2126
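		// the firmware reported 'nstatus' completions in this cycle;
		// account for them before harvesting their command ids below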
2127		adapter->outstanding_cmds -= nstatus;
2128
2129		for (i = 0; i < nstatus; i++) {
2130
2131			// wait for valid command index to post
2132			for (j = 0; j < 0xFFFFF; j++) {
2133				if (mbox->completed[i] != 0xFF) break;
2134				rmb();
2135			}
2136			completed[i]		= mbox->completed[i];
2137			mbox->completed[i]	= 0xFF;
2138
2139			if (completed[i] == 0xFF) {
2140				con_log(CL_ANN, (KERN_CRIT
2141				"megaraid: command posting timed out\n"));
2142
2143				BUG();
2144				continue;
2145			}
2146
2147			// Get SCB associated with this command id
2148			if (completed[i] >= MBOX_MAX_SCSI_CMDS) {
2149				// a cmm command
2150				scb = adapter->uscb_list + (completed[i] -
2151						MBOX_MAX_SCSI_CMDS);
2152			}
2153			else {
2154				// an os command
2155				scb = adapter->kscb_list + completed[i];
2156			}
2157
2158			scb->status = mbox->status;
2159			list_add_tail(&scb->list, &clist);
2160		}
2161
2162		// Acknowledge interrupt
2163		WRINDOOR(raid_dev, 0x02);
2164
2165	} while(1);
2166
2167	spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
2168
2169
2170	// put the completed commands in the completed list. DPC would
2171	// complete these commands later
2172	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2173
2174	list_splice(&clist, &adapter->completed_list);
2175
2176	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2177
2178
2179	// schedule the DPC if there is some work for it
2180	if (handled)
2181		tasklet_schedule(&adapter->dpc_h);
2182
2183	return handled;
2184}
2185
2186
2187/**
2188 * megaraid_isr - isr for memory-mapped mailbox based controllers
2189 * @irq		: irq
2190 * @devp	: pointer to our soft state
2191 *
2192 * Interrupt service routine for memory-mapped mailbox controllers.
2193 */
2194static irqreturn_t
2195megaraid_isr(int irq, void *devp)
2196{
2197	adapter_t	*adapter = devp;
2198	int		handled;
2199
2200	handled = megaraid_ack_sequence(adapter);
2201
2202	/* Loop through any pending requests */
2203	if (!adapter->quiescent) {
2204		megaraid_mbox_runpendq(adapter, NULL);
2205	}
2206
2207	return IRQ_RETVAL(handled);
2208}
2209
2210
2211/**
2212 * megaraid_mbox_sync_scb - sync kernel buffers
2213 * @adapter	: controller's soft state
2214 * @scb		: pointer to the resource packet
2215 *
2216 * DMA sync if required.
2217 */
2218static void
2219megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
2220{
2221	mbox_ccb_t	*ccb;
2222
2223	ccb	= (mbox_ccb_t *)scb->ccb;
2224
2225	if (scb->dma_direction == PCI_DMA_FROMDEVICE)
2226		pci_dma_sync_sg_for_cpu(adapter->pdev,
2227					scsi_sglist(scb->scp),
2228					scsi_sg_count(scb->scp),
2229					PCI_DMA_FROMDEVICE);
2230
2231	scsi_dma_unmap(scb->scp);
2232	return;
2233}
2234
2235
2236/**
2237 * megaraid_mbox_dpc - the tasklet to complete the commands from completed list
2238 * @devp	: pointer to HBA soft state
2239 *
2240 * Pick up the commands from the completed list and send back to the owners.
2241 * This is a reentrant function and does not assume any locks are held while
2242 * it is being called.
2243 */
2244static void
2245megaraid_mbox_dpc(unsigned long devp)
2246{
2247	adapter_t		*adapter = (adapter_t *)devp;
2248	mraid_device_t		*raid_dev;
2249	struct list_head	clist;
2250	struct scatterlist	*sgl;
2251	scb_t			*scb;
2252	scb_t			*tmp;
2253	struct scsi_cmnd	*scp;
2254	mraid_passthru_t	*pthru;
2255	mraid_epassthru_t	*epthru;
2256	mbox_ccb_t		*ccb;
2257	int			islogical;
2258	int			pdev_index;
2259	int			pdev_state;
2260	mbox_t			*mbox;
2261	unsigned long		flags;
2262	uint8_t			c;
2263	int			status;
2264	uioc_t			*kioc;
2265
2266
2267	if (!adapter) return;
2268
2269	raid_dev = ADAP2RAIDDEV(adapter);
2270
2271	// move the SCBs from the completed list to our local list
2272	INIT_LIST_HEAD(&clist);
2273
2274	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2275
2276	list_splice_init(&adapter->completed_list, &clist);
2277
2278	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2279
2280
2281	list_for_each_entry_safe(scb, tmp, &clist, list) {
2282
2283		status		= scb->status;
2284		scp		= scb->scp;
2285		ccb		= (mbox_ccb_t *)scb->ccb;
2286		pthru		= ccb->pthru;
2287		epthru		= ccb->epthru;
2288		mbox		= ccb->mbox;
2289
2290		// Make sure f/w has completed a valid command
2291		if (scb->state != SCB_ISSUED) {
2292			con_log(CL_ANN, (KERN_CRIT
2293			"megaraid critical err: invalid command %d:%d:%p\n",
2294				scb->sno, scb->state, scp));
2295			BUG();
2296			continue;	// Must never happen!
2297		}
2298
2299		// check for the management command and complete it right away
2300		if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
2301			scb->state	= SCB_FREE;
2302			scb->status	= status;
2303
2304			// remove from local clist
2305			list_del_init(&scb->list);
2306
2307			kioc			= (uioc_t *)scb->gp;
2308			kioc->status		= 0;
2309
2310			megaraid_mbox_mm_done(adapter, scb);
2311
2312			continue;
2313		}
2314
2315		// Was an abort issued for this command earlier
2316		if (scb->state & SCB_ABORT) {
2317			con_log(CL_ANN, (KERN_NOTICE
2318			"megaraid: aborted cmd [%x] completed\n",
2319				scb->sno));
2320		}
2321
2322		/*
2323		 * If the inquiry came from a disk drive which is not part of
2324		 * any RAID array, expose it to the kernel. For this to be
2325		 * enabled, the user must set the "megaraid_expose_unconf_disks"
2326		 * flag to 1 by specifying it on the module parameter list.
2327		 * This would enable data migration off drives from other
2328		 * configurations.
2329		 */
2330		islogical = MRAID_IS_LOGICAL(adapter, scp);
2331		if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
2332				&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
2333
2334			sgl = scsi_sglist(scp);
2335			if (sg_page(sgl)) {
2336				c = *(unsigned char *) sg_virt(&sgl[0]);
2337			} else {
2338				con_log(CL_ANN, (KERN_WARNING
2339						 "megaraid mailbox: invalid sg:%d\n",
2340						 __LINE__));
2341				c = 0;
2342			}
2343
2344			if ((c & 0x1F ) == TYPE_DISK) {
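				// pdrv_state[] is laid out as 16 targets per channel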
2345				pdev_index = (scb->dev_channel * 16) +
2346					scb->dev_target;
2347				pdev_state =
2348					raid_dev->pdrv_state[pdev_index] & 0x0F;
2349
2350				if (pdev_state == PDRV_ONLINE		||
2351					pdev_state == PDRV_FAILED	||
2352					pdev_state == PDRV_RBLD		||
2353					pdev_state == PDRV_HOTSPARE	||
2354					megaraid_expose_unconf_disks == 0) {
2355
2356					status = 0xF0;
2357				}
2358			}
2359		}
2360
2361		// Convert MegaRAID status to Linux error code
2362		switch (status) {
2363
2364		case 0x00:
2365
2366			scp->result = (DID_OK << 16);
2367			break;
2368
2369		case 0x02:
2370
2371			/* set sense_buffer and result fields */
2372			if (mbox->cmd == MBOXCMD_PASSTHRU ||
2373				mbox->cmd == MBOXCMD_PASSTHRU64) {
2374
2375				memcpy(scp->sense_buffer, pthru->reqsensearea,
2376						14);
2377
2378				scp->result = DRIVER_SENSE << 24 |
2379					DID_OK << 16 | CHECK_CONDITION << 1;
2380			}
2381			else {
2382				if (mbox->cmd == MBOXCMD_EXTPTHRU) {
2383
2384					memcpy(scp->sense_buffer,
2385						epthru->reqsensearea, 14);
2386
2387					scp->result = DRIVER_SENSE << 24 |
2388						DID_OK << 16 |
2389						CHECK_CONDITION << 1;
2390				} else {
2391					scp->sense_buffer[0] = 0x70;
2392					scp->sense_buffer[2] = ABORTED_COMMAND;
2393					scp->result = CHECK_CONDITION << 1;
2394				}
2395			}
2396			break;
2397
2398		case 0x08:
2399
2400			scp->result = DID_BUS_BUSY << 16 | status;
2401			break;
2402
2403		default:
2404
2405			/*
2406			 * If TEST_UNIT_READY fails, we know RESERVATION_STATUS
2407			 * failed
2408			 */
2409			if (scp->cmnd[0] == TEST_UNIT_READY) {
2410				scp->result = DID_ERROR << 16 |
2411					RESERVATION_CONFLICT << 1;
2412			}
2413			else
2414			/*
2415			 * Error code returned is 1 if Reserve or Release
2416			 * failed or the input parameter is invalid
2417			 */
2418			if (status == 1 && (scp->cmnd[0] == RESERVE ||
2419					 scp->cmnd[0] == RELEASE)) {
2420
2421				scp->result = DID_ERROR << 16 |
2422					RESERVATION_CONFLICT << 1;
2423			}
2424			else {
2425				scp->result = DID_BAD_TARGET << 16 | status;
2426			}
2427		}
2428
2429		// print a debug message for all failed commands
2430		if (status) {
2431			megaraid_mbox_display_scb(adapter, scb);
2432		}
2433
2434		// Free our internal resources and call the mid-layer callback
2435		// routine
2436		megaraid_mbox_sync_scb(adapter, scb);
2437
2438		// remove from local clist
2439		list_del_init(&scb->list);
2440
2441		// put back in free list
2442		megaraid_dealloc_scb(adapter, scb);
2443
2444		// send the scsi packet back to kernel
2445		scp->scsi_done(scp);
2446	}
2447
2448	return;
2449}
2450
2451
2452/**
2453 * megaraid_abort_handler - abort the scsi command
2454 * @scp		: command to be aborted
2455 *
2456 * Abort a previous SCSI request. Only commands on the pending list can be
2457 * aborted. All the commands issued to the F/W must complete.
2458 **/
2459static int
2460megaraid_abort_handler(struct scsi_cmnd *scp)
2461{
2462	adapter_t		*adapter;
2463	mraid_device_t		*raid_dev;
2464	scb_t			*scb;
2465	scb_t			*tmp;
2466	int			found;
2467	unsigned long		flags;
2468	int			i;
2469
2470
2471	adapter		= SCP2ADAPTER(scp);
2472	raid_dev	= ADAP2RAIDDEV(adapter);
2473
2474	con_log(CL_ANN, (KERN_WARNING
2475		"megaraid: aborting cmd=%x <c=%d t=%d l=%d>\n",
2476		scp->cmnd[0], SCP2CHANNEL(scp),
2477		SCP2TARGET(scp), SCP2LUN(scp)));
2478
2479	// If FW has stopped responding, simply return failure
2480	if (raid_dev->hw_error) {
2481		con_log(CL_ANN, (KERN_NOTICE
2482			"megaraid: hw error, not aborting\n"));
2483		return FAILED;
2484	}
2485
2486	// There might be a race here, where the command was completed by the
2487	// firmware and is now on the completed list. Before we could
2488	// complete the command to the kernel in the dpc, the abort came.
2489	// Find out if this is the case to avoid the race.
2490	scb = NULL;
2491	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2492	list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) {
2493
2494		if (scb->scp == scp) {	// Found command
2495
2496			list_del_init(&scb->list);	// from completed list
2497
2498			con_log(CL_ANN, (KERN_WARNING
2499			"megaraid: %d[%d:%d], abort from completed list\n",
2500				scb->sno, scb->dev_channel, scb->dev_target));
2501
2502			scp->result = (DID_ABORT << 16);
2503			scp->scsi_done(scp);
2504
2505			megaraid_dealloc_scb(adapter, scb);
2506
2507			spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter),
2508				flags);
2509
2510			return SUCCESS;
2511		}
2512	}
2513	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2514
2515
2516	// Find out if this command is still on the pending list. If it is and
2517	// was never issued, abort and return success. If the command is owned
2518	// by the firmware, we must wait for the firmware to complete it.
2519	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2520	list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2521
2522		if (scb->scp == scp) {	// Found command
2523
2524			list_del_init(&scb->list);	// from pending list
2525
2526			ASSERT(!(scb->state & SCB_ISSUED));
2527
2528			con_log(CL_ANN, (KERN_WARNING
2529				"megaraid abort: [%d:%d], driver owner\n",
2530				scb->dev_channel, scb->dev_target));
2531
2532			scp->result = (DID_ABORT << 16);
2533			scp->scsi_done(scp);
2534
2535			megaraid_dealloc_scb(adapter, scb);
2536
2537			spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
2538				flags);
2539
2540			return SUCCESS;
2541		}
2542	}
2543	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2544
2545
2546	// Check whether this command belongs to us at all; if it does, it
2547	// must currently be owned by the firmware. The only way to locate
2548	// such an scb is to walk the entire scb array, since the driver
2549	// does not keep firmware-owned SCBs on any list
2550	found = 0;
2551	spin_lock_irq(&adapter->lock);
2552	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
2553		scb = adapter->kscb_list + i;
2554
2555		if (scb->scp == scp) {
2556
2557			found = 1;
2558
2559			if (!(scb->state & SCB_ISSUED)) {
2560				con_log(CL_ANN, (KERN_WARNING
2561				"megaraid abort: %d[%d:%d], invalid state\n",
2562				scb->sno, scb->dev_channel, scb->dev_target));
2563				BUG();
2564			}
2565			else {
2566				con_log(CL_ANN, (KERN_WARNING
2567				"megaraid abort: %d[%d:%d], fw owner\n",
2568				scb->sno, scb->dev_channel, scb->dev_target));
2569			}
2570		}
2571	}
2572	spin_unlock_irq(&adapter->lock);
2573
2574	if (!found) {
2575		con_log(CL_ANN, (KERN_WARNING "megaraid abort: do not own\n"));
2576
2577		// FIXME: Should there be a callback for this command?
2578		return SUCCESS;
2579	}
2580
2581	// We cannot actually abort a command owned by firmware, return
2582	// failure and wait for reset. In host reset handler, we will find out
2583	// if the HBA is still live
2584	return FAILED;
2585}
2586
2587/**
2588 * megaraid_reset_handler - device reset handler for mailbox based driver
2589 * @scp		: reference command
2590 *
2591 * Reset handler for the mailbox based controller. First try to find out if
2592 * the FW is still live, in which case the outstanding commands counter must go
2593 * down to 0. If that happens, also issue the reservation reset command to
2594 * relinquish (possible) reservations on the logical drives connected to this
2595 * host.
2596 **/
2597static int
2598megaraid_reset_handler(struct scsi_cmnd *scp)
2599{
2600	adapter_t	*adapter;
2601	scb_t		*scb;
2602	scb_t		*tmp;
2603	mraid_device_t	*raid_dev;
2604	unsigned long	flags;
2605	uint8_t		raw_mbox[sizeof(mbox_t)];
2606	int		rval;
2607	int		recovery_window;
2608	int		recovering;
2609	int		i;
2610	uioc_t		*kioc;
2611
2612	adapter		= SCP2ADAPTER(scp);
2613	raid_dev	= ADAP2RAIDDEV(adapter);
2614
2615	// return failure if adapter is not responding
2616	if (raid_dev->hw_error) {
2617		con_log(CL_ANN, (KERN_NOTICE
2618			"megaraid: hw error, cannot reset\n"));
2619		return FAILED;
2620	}
2621
2622
2623	// Under exceptional conditions, FW can take up to 3 minutes to
2624	// complete command processing. Wait for an additional 2 minutes for the
2625	// pending commands counter to go down to 0. If it doesn't, let the
2626	// controller be marked offline
2627	// Also, reset all the commands currently owned by the driver
2628	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2629	list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2630		list_del_init(&scb->list);	// from pending list
2631
2632		if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
2633			con_log(CL_ANN, (KERN_WARNING
2634			"megaraid: IOCTL packet with %d[%d:%d] being reset\n",
2635			scb->sno, scb->dev_channel, scb->dev_target));
2636
2637			scb->status = -1;
2638
2639			kioc			= (uioc_t *)scb->gp;
2640			kioc->status		= -EFAULT;
2641
2642			megaraid_mbox_mm_done(adapter, scb);
2643		} else {
2644			if (scb->scp == scp) {	// Found command
2645				con_log(CL_ANN, (KERN_WARNING
2646					"megaraid: %d[%d:%d], reset from pending list\n",
2647					scb->sno, scb->dev_channel, scb->dev_target));
2648			} else {
2649				con_log(CL_ANN, (KERN_WARNING
2650				"megaraid: IO packet with %d[%d:%d] being reset\n",
2651				scb->sno, scb->dev_channel, scb->dev_target));
2652			}
2653
2654			scb->scp->result = (DID_RESET << 16);
2655			scb->scp->scsi_done(scb->scp);
2656
2657			megaraid_dealloc_scb(adapter, scb);
2658		}
2659	}
2660	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2661
2662	if (adapter->outstanding_cmds) {
2663		con_log(CL_ANN, (KERN_NOTICE
2664			"megaraid: %d outstanding commands. Max wait %d sec\n",
2665			adapter->outstanding_cmds,
2666			(MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
2667	}
2668
2669	recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
2670
2671	recovering = adapter->outstanding_cmds;
2672
2673	for (i = 0; i < recovery_window; i++) {
2674
2675		megaraid_ack_sequence(adapter);
2676
2677		// print a message once every 5 seconds only
2678		if (!(i % 5)) {
2679			con_log(CL_ANN, (
2680			"megaraid mbox: Wait for %d commands to complete:%d\n",
2681				adapter->outstanding_cmds,
2682				(MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
2683		}
2684
2685		// bail out early once all outstanding commands have completed
2686		if (adapter->outstanding_cmds == 0) {
2687			break;
2688		}
2689
2690		msleep(1000);
2691	}
2692
2693	spin_lock(&adapter->lock);
2694
2695	// If still outstanding commands, bail out
2696	if (adapter->outstanding_cmds) {
2697		con_log(CL_ANN, (KERN_WARNING
2698			"megaraid mbox: critical hardware error!\n"));
2699
2700		raid_dev->hw_error = 1;
2701
2702		rval = FAILED;
2703		goto out;
2704	}
2705	else {
2706		con_log(CL_ANN, (KERN_NOTICE
2707		"megaraid mbox: reset sequence completed successfully\n"));
2708	}
2709
2710
2711	// Reservations need to be reset only if the controller supports clustering
2712	if (!adapter->ha) {
2713		rval = SUCCESS;
2714		goto out;
2715	}
2716
2717	// clear reservations if any
2718	raw_mbox[0] = CLUSTER_CMD;
2719	raw_mbox[2] = RESET_RESERVATIONS;
2720
2721	rval = SUCCESS;
2722	if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) {
2723		con_log(CL_ANN,
2724			(KERN_INFO "megaraid: reservation reset\n"));
2725	}
2726	else {
2727		rval = FAILED;
2728		con_log(CL_ANN, (KERN_WARNING
2729				"megaraid: reservation reset failed\n"));
2730	}
2731
2732 out:
2733	spin_unlock(&adapter->lock);
2734	return rval;
2735}
2736
2737/*
2738 * START: internal commands library
2739 *
2740 * This section of the driver contains the common routines used by the
2741 * driver as well as all the firmware interface routines.
2742 */
2743
2744/**
2745 * mbox_post_sync_cmd() - blocking command to the mailbox based controllers
2746 * @adapter	: controller's soft state
2747 * @raw_mbox	: the mailbox
2748 *
2749 * Issue a scb in synchronous and non-interrupt mode for mailbox based
2750 * controllers.
2751 */
2752static int
2753mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
2754{
2755	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
2756	mbox64_t	*mbox64;
2757	mbox_t		*mbox;
2758	uint8_t		status;
2759	int		i;
2760
2761
2762	mbox64	= raid_dev->mbox64;
2763	mbox	= raid_dev->mbox;
2764
2765	/*
2766	 * Wait until mailbox is free
2767	 */
2768	if (megaraid_busywait_mbox(raid_dev) != 0)
2769		goto blocked_mailbox;
2770
2771	/*
2772	 * Copy mailbox data into host structure
2773	 */
2774	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
2775	mbox->cmdid		= 0xFE;
2776	mbox->busy		= 1;
2777	mbox->poll		= 0;
2778	mbox->ack		= 0;
2779	mbox->numstatus		= 0xFF;
2780	mbox->status		= 0xFF;
2781
2782	wmb();
2783	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
2784
2785	// wait for maximum 1 second for status to post. If the status is not
2786	// available within 1 second, assume FW is initializing and wait
2787	// for an extended amount of time
2788	if (mbox->numstatus == 0xFF) {	// status not yet available
2789		udelay(25);
2790
2791		for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) {
2792			rmb();
2793			msleep(1);
2794		}
2795
2796
2797		if (i == 1000) {
2798			con_log(CL_ANN, (KERN_NOTICE
2799				"megaraid mailbox: wait for FW to boot      "));
2800
2801			for (i = 0; (mbox->numstatus == 0xFF) &&
2802					(i < MBOX_RESET_WAIT); i++) {
2803				rmb();
2804				con_log(CL_ANN, ("\b\b\b\b\b[%03d]",
2805							MBOX_RESET_WAIT - i));
2806				msleep(1000);
2807			}
2808
2809			if (i == MBOX_RESET_WAIT) {
2810
2811				con_log(CL_ANN, (
2812				"\nmegaraid mailbox: status not available\n"));
2813
2814				return -1;
2815			}
2816			con_log(CL_ANN, ("\b\b\b\b\b[ok] \n"));
2817		}
2818	}
2819
2820	// wait for maximum 1 second for poll semaphore
2821	if (mbox->poll != 0x77) {
2822		udelay(25);
2823
2824		for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) {
2825			rmb();
2826			msleep(1);
2827		}
2828
2829		if (i == 1000) {
2830			con_log(CL_ANN, (KERN_WARNING
2831			"megaraid mailbox: could not get poll semaphore\n"));
2832			return -1;
2833		}
2834	}
2835
2836	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
2837	wmb();
2838
2839	// wait for maximum 1 second for acknowledgement
2840	if (RDINDOOR(raid_dev) & 0x2) {
2841		udelay(25);
2842
2843		for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) {
2844			rmb();
2845			msleep(1);
2846		}
2847
2848		if (i == 1000) {
2849			con_log(CL_ANN, (KERN_WARNING
2850				"megaraid mailbox: could not acknowledge\n"));
2851			return -1;
2852		}
2853	}
2854	mbox->poll	= 0;
2855	mbox->ack	= 0x77;
2856
2857	status = mbox->status;
2858
2859	// invalidate the completed command id array. After command
2860	// completion, firmware would write the valid id.
2861	mbox->numstatus	= 0xFF;
2862	mbox->status	= 0xFF;
2863	for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) {
2864		mbox->completed[i] = 0xFF;
2865	}
2866
2867	return status;
2868
2869blocked_mailbox:
2870
2871	con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") );
2872	return -1;
2873}
2874
2875
2876/**
2877 * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers
2878 * @adapter	: controller's soft state
2879 * @raw_mbox	: the mailbox
2880 *
2881 * Issue a scb in synchronous and non-interrupt mode for mailbox based
2882 * controllers. This is a faster version of the synchronous command and
2883 * therefore can be called in interrupt-context as well.
2884 */
2885static int
2886mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
2887{
2888	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
2889	mbox_t		*mbox;
2890	long		i;
2891
2892
2893	mbox	= raid_dev->mbox;
2894
2895	// return immediately if the mailbox is busy
2896	if (mbox->busy) return -1;
2897
2898	// Copy mailbox data into host structure
2899	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14);
2900	mbox->cmdid		= 0xFE;
2901	mbox->busy		= 1;
2902	mbox->poll		= 0;
2903	mbox->ack		= 0;
2904	mbox->numstatus		= 0xFF;
2905	mbox->status		= 0xFF;
2906
2907	wmb();
2908	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
2909
2910	for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
2911		if (mbox->numstatus != 0xFF) break;
2912		rmb();
2913		udelay(MBOX_SYNC_DELAY_200);
2914	}
2915
2916	if (i == MBOX_SYNC_WAIT_CNT) {
2917		// We may need to re-calibrate the counter
2918		con_log(CL_ANN, (KERN_CRIT
2919			"megaraid: fast sync command timed out\n"));
2920	}
2921
2922	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
2923	wmb();
2924
2925	return mbox->status;
2926}
2927
2928
2929/**
2930 * megaraid_busywait_mbox() - Wait until the controller's mailbox is available
2931 * @raid_dev	: RAID device (HBA) soft state
2932 *
2933 * Wait until the controller's mailbox is available to accept more commands.
2934 * Wait for at most 1 second.
2935 */
2936static int
2937megaraid_busywait_mbox(mraid_device_t *raid_dev)
2938{
2939	mbox_t	*mbox = raid_dev->mbox;
2940	int	i = 0;
2941
2942	if (mbox->busy) {
2943		udelay(25);
2944		for (i = 0; mbox->busy && i < 1000; i++)
2945			msleep(1);
2946	}
2947
2948	if (i < 1000) return 0;
2949	else return -1;
2950}
2951
2952
2953/**
2954 * megaraid_mbox_product_info - some static information about the controller
2955 * @adapter	: our soft state
2956 *
2957 * Issue commands to the controller to grab some parameters required by our
2958 * caller.
2959 */
2960static int
2961megaraid_mbox_product_info(adapter_t *adapter)
2962{
2963	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
2964	mbox_t			*mbox;
2965	uint8_t			raw_mbox[sizeof(mbox_t)];
2966	mraid_pinfo_t		*pinfo;
2967	dma_addr_t		pinfo_dma_h;
2968	mraid_inquiry3_t	*mraid_inq3;
2969	int			i;
2970
2971
2972	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
2973	mbox = (mbox_t *)raw_mbox;
2974
2975	/*
2976	 * Issue an ENQUIRY3 command to find out certain adapter parameters,
2977	 * e.g., max channels, max commands etc.
2978	 */
2979	pinfo = pci_alloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
2980			&pinfo_dma_h);
2981
2982	if (pinfo == NULL) {
2983		con_log(CL_ANN, (KERN_WARNING
2984			"megaraid: out of memory, %s %d\n", __func__,
2985			__LINE__));
2986
2987		return -1;
2988	}
2989	memset(pinfo, 0, sizeof(mraid_pinfo_t));
2990
2991	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
2992	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
2993
2994	raw_mbox[0] = FC_NEW_CONFIG;
2995	raw_mbox[2] = NC_SUBOP_ENQUIRY3;
2996	raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;
2997
2998	// Issue the command
2999	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3000
3001		con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
3002
3003		pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3004			pinfo, pinfo_dma_h);
3005
3006		return -1;
3007	}
3008
3009	/*
3010	 * Collect information about state of each physical drive
3011	 * attached to the controller. We will expose all the disks
3012	 * which are not part of RAID
3013	 */
3014	mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf;
3015	for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) {
3016		raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i];
3017	}
3018
3019	/*
3020	 * Get product info for information like number of channels,
3021	 * maximum commands supported.
3022	 */
3023	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3024	mbox->xferaddr = (uint32_t)pinfo_dma_h;
3025
3026	raw_mbox[0] = FC_NEW_CONFIG;
3027	raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;
3028
3029	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3030
3031		con_log(CL_ANN, (KERN_WARNING
3032			"megaraid: product info failed\n"));
3033
3034		pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3035			pinfo, pinfo_dma_h);
3036
3037		return -1;
3038	}
3039
3040	/*
3041	 * Setup some parameters for host, as required by our caller
3042	 */
3043	adapter->max_channel = pinfo->nchannels;
3044
3045	/*
3046	 * We will export all the logical drives on a single channel.
3047	 * Add 1 since inquiries do not come for the initiator ID
3048	 */
3049	adapter->max_target	= MAX_LOGICAL_DRIVES_40LD + 1;
3050	adapter->max_lun	= 8;	// up to 8 LUNs for non-disk devices
3051
3052	/*
3053	 * These are the maximum outstanding commands for the scsi-layer
3054	 */
3055	adapter->max_cmds	= MBOX_MAX_SCSI_CMDS;
3056
3057	memset(adapter->fw_version, 0, VERSION_SIZE);
3058	memset(adapter->bios_version, 0, VERSION_SIZE);
3059
3060	memcpy(adapter->fw_version, pinfo->fw_version, 4);
3061	adapter->fw_version[4] = 0;
3062
3063	memcpy(adapter->bios_version, pinfo->bios_version, 4);
3064	adapter->bios_version[4] = 0;
3065
3066	con_log(CL_ANN, (KERN_NOTICE
3067		"megaraid: fw version:[%s] bios version:[%s]\n",
3068		adapter->fw_version, adapter->bios_version));
3069
3070	pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
3071			pinfo_dma_h);
3072
3073	return 0;
3074}
3075
3076
3077
3078/**
3079 * megaraid_mbox_extended_cdb - check for support for extended CDBs
3080 * @adapter	: soft state for the controller
3081 *
3082 * This routine checks whether the controller in question supports extended
3083 * ( > 10 bytes ) CDBs.
3084 */
3085static int
3086megaraid_mbox_extended_cdb(adapter_t *adapter)
3087{
3088	mbox_t		*mbox;
3089	uint8_t		raw_mbox[sizeof(mbox_t)];
3090	int		rval;
3091
3092	mbox = (mbox_t *)raw_mbox;
3093
3094	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3095	mbox->xferaddr	= (uint32_t)adapter->ibuf_dma_h;
3096
3097	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3098
3099	raw_mbox[0] = MAIN_MISC_OPCODE;
3100	raw_mbox[2] = SUPPORT_EXT_CDB;
3101
3102	/*
3103	 * Issue the command
3104	 */
3105	rval = 0;
3106	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3107		rval = -1;
3108	}
3109
3110	return rval;
3111}
3112
3113
3114/**
3115 * megaraid_mbox_support_ha - Do we support clustering
3116 * @adapter	: soft state for the controller
3117 * @init_id	: ID of the initiator
3118 *
3119 * Determine if the firmware supports clustering and the ID of the initiator.
3120 */
3121static int
3122megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
3123{
3124	mbox_t		*mbox;
3125	uint8_t		raw_mbox[sizeof(mbox_t)];
3126	int		rval;
3127
3128
3129	mbox = (mbox_t *)raw_mbox;
3130
3131	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3132
3133	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3134
3135	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3136
3137	raw_mbox[0] = GET_TARGET_ID;
3138
3139	// Issue the command
3140	*init_id = 7;
3141	rval =  -1;
3142	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3143
3144		*init_id = *(uint8_t *)adapter->ibuf;
3145
3146		con_log(CL_ANN, (KERN_INFO
3147			"megaraid: cluster firmware, initiator ID: %d\n",
3148			*init_id));
3149
3150		rval =  0;
3151	}
3152
3153	return rval;
3154}
3155
3156
3157/**
3158 * megaraid_mbox_support_random_del - Do we support random deletion
3159 * @adapter	: soft state for the controller
3160 *
3161 * Determine if the firmware supports random deletion.
3162 * Return:	1 if the operation is supported, 0 otherwise
3163 */
3164static int
3165megaraid_mbox_support_random_del(adapter_t *adapter)
3166{
3167	mbox_t		*mbox;
3168	uint8_t		raw_mbox[sizeof(mbox_t)];
3169	int		rval;
3170
3171	/*
3172	 * Newer firmware on Dell CERC expects a different
3173	 * random deletion handling, so disable it.
3174	 */
3175	if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI &&
3176	    adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 &&
3177	    adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
3178	    adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH &&
3179	    (adapter->fw_version[0] > '6' ||
3180	     (adapter->fw_version[0] == '6' &&
3181	      adapter->fw_version[2] > '6') ||
3182	     (adapter->fw_version[0] == '6'
3183	      && adapter->fw_version[2] == '6'
3184	      && adapter->fw_version[3] > '1'))) {
3185		con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n"));
3186		return 0;
3187	}
3188
3189	mbox = (mbox_t *)raw_mbox;
3190
3191	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3192
3193	raw_mbox[0] = FC_DEL_LOGDRV;
3194	raw_mbox[2] = OP_SUP_DEL_LOGDRV;
3195
3196	// Issue the command
3197	rval = 0;
3198	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3199
3200		con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n"));
3201
3202		rval =  1;
3203	}
3204
3205	return rval;
3206}
3207
3208
3209/**
3210 * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware
3211 * @adapter	: soft state for the controller
3212 *
3213 * Find out the maximum number of scatter-gather elements supported by the
3214 * firmware.
3215 */
3216static int
3217megaraid_mbox_get_max_sg(adapter_t *adapter)
3218{
3219	mbox_t		*mbox;
3220	uint8_t		raw_mbox[sizeof(mbox_t)];
3221	int		nsg;
3222
3223
3224	mbox = (mbox_t *)raw_mbox;
3225
3226	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3227
3228	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3229
3230	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3231
3232	raw_mbox[0] = MAIN_MISC_OPCODE;
3233	raw_mbox[2] = GET_MAX_SG_SUPPORT;
3234
3235	// Issue the command
3236	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3237		nsg =  *(uint8_t *)adapter->ibuf;
3238	}
3239	else {
3240		nsg =  MBOX_DEFAULT_SG_SIZE;
3241	}
3242
3243	if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE;
3244
3245	return nsg;
3246}
3247
3248
3249/**
3250 * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels
3251 * @adapter	: soft state for the controller
3252 *
3253 * Enumerate the RAID and SCSI channels for ROMB platforms so that channels
3254 * can be exported as regular SCSI channels.
3255 */
3256static void
3257megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
3258{
3259	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3260	mbox_t		*mbox;
3261	uint8_t		raw_mbox[sizeof(mbox_t)];
3262
3263
3264	mbox = (mbox_t *)raw_mbox;
3265
3266	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3267
3268	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3269
3270	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3271
3272	raw_mbox[0] = CHNL_CLASS;
3273	raw_mbox[2] = GET_CHNL_CLASS;
3274
3275	// Issue the command. If the command fails, all channels are RAID
3276	// channels
3277	raid_dev->channel_class = 0xFF;
3278	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3279		raid_dev->channel_class =  *(uint8_t *)adapter->ibuf;
3280	}
3281
3282	return;
3283}
3284
3285
3286/**
3287 * megaraid_mbox_flush_cache - flush adapter and disks cache
3288 * @adapter		: soft state for the controller
3289 *
3290 * Flush adapter cache followed by disks cache.
3291 */
3292static void
3293megaraid_mbox_flush_cache(adapter_t *adapter)
3294{
3295	mbox_t	*mbox;
3296	uint8_t	raw_mbox[sizeof(mbox_t)];
3297
3298
3299	mbox = (mbox_t *)raw_mbox;
3300
3301	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3302
3303	raw_mbox[0] = FLUSH_ADAPTER;
3304
3305	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3306		con_log(CL_ANN, ("megaraid: flush adapter failed\n"));
3307	}
3308
3309	raw_mbox[0] = FLUSH_SYSTEM;
3310
3311	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3312		con_log(CL_ANN, ("megaraid: flush disks cache failed\n"));
3313	}
3314
3315	return;
3316}
3317
3318
3319/**
3320 * megaraid_mbox_fire_sync_cmd - fire the sync cmd
3321 * @adapter		: soft state for the controller
3322 *
3323 * Clears the pending cmds in FW and reinits its RAID structs.
3324 */
3325static int
3326megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
3327{
3328	mbox_t	*mbox;
3329	uint8_t	raw_mbox[sizeof(mbox_t)];
3330	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3331	mbox64_t *mbox64;
3332	int	status = 0;
3333	int i;
3334	uint32_t dword;
3335
3336	mbox = (mbox_t *)raw_mbox;
3337
3338	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3339
3340	raw_mbox[0] = 0xFF;
3341
3342	mbox64	= raid_dev->mbox64;
3343	mbox	= raid_dev->mbox;
3344
3345	/* Wait until mailbox is free */
3346	if (megaraid_busywait_mbox(raid_dev) != 0) {
3347		status = 1;
3348		goto blocked_mailbox;
3349	}
3350
3351	/* Copy mailbox data into host structure */
3352	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
3353	mbox->cmdid		= 0xFE;
3354	mbox->busy		= 1;
3355	mbox->poll		= 0;
3356	mbox->ack		= 0;
3357	mbox->numstatus		= 0;
3358	mbox->status		= 0;
3359
3360	wmb();
3361	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
3362
3363	/* Wait for a maximum of 1 minute for the status to post.
3364	 * If the firmware supports the above command,
3365	 * mbox->cmd will be set to 0;
3366	 * otherwise,
3367	 * the firmware will reject the command with
3368	 * mbox->numstatus set to 1.
3369	 */
3370
3371	i = 0;
3372	status = 0;
3373	while (!mbox->numstatus && mbox->cmd == 0xFF) {
3374		rmb();
3375		msleep(1);
3376		i++;
3377		if (i > 1000 * 60) {
3378			status = 1;
3379			break;
3380		}
3381	}
3382	if (mbox->numstatus == 1)
3383		status = 1; /*cmd not supported*/
3384
3385	/* Check for interrupt line */
3386	dword = RDOUTDOOR(raid_dev);
3387	WROUTDOOR(raid_dev, dword);
3388	WRINDOOR(raid_dev,2);
3389
3390	return status;
3391
3392blocked_mailbox:
3393	con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n"));
3394	return status;
3395}
3396
3397/**
3398 * megaraid_mbox_display_scb - display SCB information, mostly for debug purposes
3399 * @adapter		: controller's soft state
3400 * @scb			: SCB to be displayed
3401 * @level		: debug level for console print
3402 *
3403 * Display information about the given SCB if the current debug level is
3404 * verbose.
3405 */
3406static void
3407megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
3408{
3409	mbox_ccb_t		*ccb;
3410	struct scsi_cmnd	*scp;
3411	mbox_t			*mbox;
3412	int			level;
3413	int			i;
3414
3415
3416	ccb	= (mbox_ccb_t *)scb->ccb;
3417	scp	= scb->scp;
3418	mbox	= ccb->mbox;
3419
3420	level = CL_DLEVEL3;
3421
3422	con_log(level, (KERN_NOTICE
3423		"megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status,
3424		mbox->cmd, scb->sno));
3425
3426	con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n",
3427		mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv,
3428		mbox->numsge));
3429
3430	if (!scp) return;
3431
3432	con_log(level, (KERN_NOTICE "scsi cmnd: "));
3433
3434	for (i = 0; i < scp->cmd_len; i++) {
3435		con_log(level, ("%#2.02x ", scp->cmnd[i]));
3436	}
3437
3438	con_log(level, ("\n"));
3439
3440	return;
3441}
3442
3443
3444/**
3445 * megaraid_mbox_setup_device_map - manage device ids
3446 * @adapter	: Driver's soft state
3447 *
3448 * Manage the device ids to maintain an appropriate mapping between the kernel
3449 * scsi addresses and the megaraid scsi and logical drive addresses. We export
3450 * scsi devices on their actual addresses, whereas the logical drives are
3451 * exported on a virtual scsi channel.
3452 */
3453static void
3454megaraid_mbox_setup_device_map(adapter_t *adapter)
3455{
3456	uint8_t		c;
3457	uint8_t		t;
3458
3459	/*
3460	 * First fill the values on the logical drive channel
3461	 */
3462	for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
3463		adapter->device_ids[adapter->max_channel][t] =
3464			(t < adapter->init_id) ?  t : t - 1;
3465
3466	adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF;
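	/*
	 * For example, with the default initiator ID of 7, targets 0-6 map
	 * to logical drives 0-6, target 7 (the initiator itself) maps to
	 * 0xFF, and targets 8 onwards map to logical drives 7 onwards.
	 */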
3467
3468	/*
3469	 * Fill the values on the physical devices channels
3470	 */
3471	for (c = 0; c < adapter->max_channel; c++)
3472		for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
3473			adapter->device_ids[c][t] = (c << 8) | t;
3474}
3475
3476
3477/*
3478 * END: internal commands library
3479 */
3480
3481/*
3482 * START: Interface for the common management module
3483 *
3484 * This is the module, which interfaces with the common management module to
3485 * provide support for ioctl and sysfs
3486 */
3487
3488/**
3489 * megaraid_cmm_register - register with the management module
3490 * @adapter		: HBA soft state
3491 *
3492 * Register with the management module, which allows applications to issue
3493 * ioctl calls to the drivers. This interface is used by the management module
3494 * to setup sysfs support as well.
3495 */
3496static int
3497megaraid_cmm_register(adapter_t *adapter)
3498{
3499	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3500	mraid_mmadp_t	adp;
3501	scb_t		*scb;
3502	mbox_ccb_t	*ccb;
3503	int		rval;
3504	int		i;
3505
3506	// Allocate memory for the base list of scb for management module.
3507	adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
3508
3509	if (adapter->uscb_list == NULL) {
3510		con_log(CL_ANN, (KERN_WARNING
3511			"megaraid: out of memory, %s %d\n", __func__,
3512			__LINE__));
3513		return -1;
3514	}
3515
3516
3517	// Initialize the synchronization parameters for the resources used
3518	// by the management module's commands
3519	INIT_LIST_HEAD(&adapter->uscb_pool);
3520
3521	spin_lock_init(USER_FREE_LIST_LOCK(adapter));
3522
3523
3524
3525	// link all the packets. Note that for CCBs of commands coming from
3526	// the common management module, the mailbox physical addresses are
3527	// already set up by it. We just need placeholders for them in our
3528	// local command control blocks
3529	for (i = 0; i < MBOX_MAX_USER_CMDS; i++) {
3530
3531		scb			= adapter->uscb_list + i;
3532		ccb			= raid_dev->uccb_list + i;
3533
3534		scb->ccb		= (caddr_t)ccb;
3535		ccb->mbox64		= raid_dev->umbox64 + i;
3536		ccb->mbox		= &ccb->mbox64->mbox32;
3537		ccb->raw_mbox		= (uint8_t *)ccb->mbox;
3538
3539		scb->gp			= 0;
3540
3541		// COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR
3542		// COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER)
3543		scb->sno		= i + MBOX_MAX_SCSI_CMDS;
3544
3545		scb->scp		= NULL;
3546		scb->state		= SCB_FREE;
3547		scb->dma_direction	= PCI_DMA_NONE;
3548		scb->dma_type		= MRAID_DMA_NONE;
3549		scb->dev_channel	= -1;
3550		scb->dev_target		= -1;
3551
3552		// put scb in the free pool
3553		list_add_tail(&scb->list, &adapter->uscb_pool);
3554	}
3555
3556	adp.unique_id		= adapter->unique_id;
3557	adp.drvr_type		= DRVRTYPE_MBOX;
3558	adp.drvr_data		= (unsigned long)adapter;
3559	adp.pdev		= adapter->pdev;
3560	adp.issue_uioc		= megaraid_mbox_mm_handler;
3561	adp.timeout		= MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
3562	adp.max_kioc		= MBOX_MAX_USER_CMDS;
3563
3564	if ((rval = mraid_mm_register_adp(&adp)) != 0) {
3565
3566		con_log(CL_ANN, (KERN_WARNING
3567			"megaraid mbox: did not register with CMM\n"));
3568
3569		kfree(adapter->uscb_list);
3570	}
3571
3572	return rval;
3573}
3574
3575
3576/**
3577 * megaraid_cmm_unregister - un-register with the management module
3578 * @adapter		: HBA soft state
3579 *
3580 * Un-register with the management module.
3581 * FIXME: mgmt module must return failure for unregister if it has pending
3582 * commands in LLD.
3583 */
3584static int
3585megaraid_cmm_unregister(adapter_t *adapter)
3586{
3587	kfree(adapter->uscb_list);
3588	mraid_mm_unregister_adp(adapter->unique_id);
3589	return 0;
3590}
3591
3592
3593/**
3594 * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD
3595 * @drvr_data		: LLD specific data
3596 * @kioc		: CMM interface packet
3597 * @action		: command action
3598 *
3599 * This routine is invoked whenever the Common Management Module (CMM) has a
3600 * command for us. The 'action' parameter specifies if this is a new command
3601 * or otherwise.
3602 */
3603static int
3604megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
3605{
3606	adapter_t *adapter;
3607
3608	if (action != IOCTL_ISSUE) {
3609		con_log(CL_ANN, (KERN_WARNING
3610			"megaraid: unsupported management action:%#2x\n",
3611			action));
3612		return (-ENOTSUPP);
3613	}
3614
3615	adapter = (adapter_t *)drvr_data;
3616
3617	// make sure this adapter is not being detached right now.
3618	if (atomic_read(&adapter->being_detached)) {
3619		con_log(CL_ANN, (KERN_WARNING
3620			"megaraid: reject management request, detaching\n"));
3621		return (-ENODEV);
3622	}
3623
3624	switch (kioc->opcode) {
3625
3626	case GET_ADAP_INFO:
3627
3628		kioc->status =  gather_hbainfo(adapter, (mraid_hba_info_t *)
3629					(unsigned long)kioc->buf_vaddr);
3630
3631		kioc->done(kioc);
3632
3633		return kioc->status;
3634
3635	case MBOX_CMD:
3636
3637		return megaraid_mbox_mm_command(adapter, kioc);
3638
3639	default:
3640		kioc->status = (-EINVAL);
3641		kioc->done(kioc);
3642		return (-EINVAL);
3643	}
3644
3645	return 0;	// not reached
3646}
3647
3648/**
3649 * megaraid_mbox_mm_command - issues commands routed through CMM
3650 * @adapter		: HBA soft state
3651 * @kioc		: management command packet
3652 *
3653 * Issues commands, which are routed through the management module.
3654 */
3655static int
3656megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
3657{
3658	struct list_head	*head = &adapter->uscb_pool;
3659	mbox64_t		*mbox64;
3660	uint8_t			*raw_mbox;
3661	scb_t			*scb;
3662	mbox_ccb_t		*ccb;
3663	unsigned long		flags;
3664
3665	// detach one scb from free pool
3666	spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
3667
3668	if (list_empty(head)) {	// should never happen because of CMM
3669
3670		con_log(CL_ANN, (KERN_WARNING
3671			"megaraid mbox: bug in cmm handler, lost resources\n"));
3672
3673		spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3674
3675		return (-EINVAL);
3676	}
3677
3678	scb = list_entry(head->next, scb_t, list);
3679	list_del_init(&scb->list);
3680
3681	spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3682
3683	scb->state		= SCB_ACTIVE;
3684	scb->dma_type		= MRAID_DMA_NONE;
3685	scb->dma_direction	= PCI_DMA_NONE;
3686
3687	ccb		= (mbox_ccb_t *)scb->ccb;
3688	mbox64		= (mbox64_t *)(unsigned long)kioc->cmdbuf;
3689	raw_mbox	= (uint8_t *)&mbox64->mbox32;
3690
3691	memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t));
3692
3693	scb->gp		= (unsigned long)kioc;
3694
3695	/*
3696	 * If it is a logdrv random delete operation, we have to wait till
3697	 * there are no outstanding cmds at the fw and then issue it directly
3698	 */
3699	if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
3700
3701		if (wait_till_fw_empty(adapter)) {
3702			con_log(CL_ANN, (KERN_NOTICE
3703				"megaraid mbox: LD delete, timed out\n"));
3704
3705			kioc->status = -ETIME;
3706
3707			scb->status = -1;
3708
3709			megaraid_mbox_mm_done(adapter, scb);
3710
3711			return (-ETIME);
3712		}
3713
3714		INIT_LIST_HEAD(&scb->list);
3715
3716		scb->state = SCB_ISSUED;
3717		if (mbox_post_cmd(adapter, scb) != 0) {
3718
3719			con_log(CL_ANN, (KERN_NOTICE
3720				"megaraid mbox: LD delete, mailbox busy\n"));
3721
3722			kioc->status = -EBUSY;
3723
3724			scb->status = -1;
3725
3726			megaraid_mbox_mm_done(adapter, scb);
3727
3728			return (-EBUSY);
3729		}
3730
3731		return 0;
3732	}
3733
3734	// put the command on the pending list and execute
3735	megaraid_mbox_runpendq(adapter, scb);
3736
3737	return 0;
3738}
3739
3740
3741static int
3742wait_till_fw_empty(adapter_t *adapter)
3743{
3744	unsigned long	flags = 0;
3745	int		i;
3746
3747
3748	/*
3749	 * Set the quiescent flag to stop issuing cmds to FW.
3750	 */
3751	spin_lock_irqsave(&adapter->lock, flags);
3752	adapter->quiescent++;
3753	spin_unlock_irqrestore(&adapter->lock, flags);
3754
3755	/*
3756	 * Wait till there are no more cmds outstanding at FW. Try for at most
3757	 * 60 seconds
3758	 */
3759	for (i = 0; i < 60 && adapter->outstanding_cmds; i++) {
3760		con_log(CL_DLEVEL1, (KERN_INFO
3761			"megaraid: FW has %d pending commands\n",
3762			adapter->outstanding_cmds));
3763
3764		msleep(1000);
3765	}
3766
3767	return adapter->outstanding_cmds;
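	// a non-zero return means the firmware still owns commands after the wait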
3768}
3769
3770
3771/**
3772 * megaraid_mbox_mm_done - callback for CMM commands
3773 * @adapter	: HBA soft state
3774 * @scb		: completed command
3775 *
3776 * Callback routine for internal commands originated from the management
3777 * module.
3778 */
3779static void
3780megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
3781{
3782	uioc_t			*kioc;
3783	mbox64_t		*mbox64;
3784	uint8_t			*raw_mbox;
3785	unsigned long		flags;
3786
3787	kioc			= (uioc_t *)scb->gp;
3788	mbox64			= (mbox64_t *)(unsigned long)kioc->cmdbuf;
3789	mbox64->mbox32.status	= scb->status;
3790	raw_mbox		= (uint8_t *)&mbox64->mbox32;
3791
3792
3793	// put scb in the free pool
3794	scb->state	= SCB_FREE;
3795	scb->scp	= NULL;
3796
3797	spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
3798
3799	list_add(&scb->list, &adapter->uscb_pool);
3800
3801	spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3802
3803	// if this was a delete logical drive operation, clear the quiescent
3804	// flag and resume posting commands to the firmware
3805	if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
3806
3807		adapter->quiescent--;
3808
3809		megaraid_mbox_runpendq(adapter, NULL);
3810	}
3811
3812	kioc->done(kioc);
3813
3814	return;
3815}
3816
3817
3818/**
3819 * gather_hbainfo - HBA characteristics for the applications
3820 * @adapter		: HBA soft state
3821 * @hinfo		: pointer to the caller's host info structure
3822 */
3823static int
3824gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
3825{
3826	uint8_t	dmajor;
3827
3828	dmajor			= megaraid_mbox_version[0];
3829
3830	hinfo->pci_vendor_id	= adapter->pdev->vendor;
3831	hinfo->pci_device_id	= adapter->pdev->device;
3832	hinfo->subsys_vendor_id	= adapter->pdev->subsystem_vendor;
3833	hinfo->subsys_device_id	= adapter->pdev->subsystem_device;
3834
3835	hinfo->pci_bus		= adapter->pdev->bus->number;
3836	hinfo->pci_dev_fn	= adapter->pdev->devfn;
3837	hinfo->pci_slot		= PCI_SLOT(adapter->pdev->devfn);
3838	hinfo->irq		= adapter->host->irq;
3839	hinfo->baseport		= ADAP2RAIDDEV(adapter)->baseport;
3840
3841	hinfo->unique_id	= (hinfo->pci_bus << 8) | adapter->pdev->devfn;
3842	hinfo->host_no		= adapter->host->host_no;
3843
3844	return 0;
3845}
3846
3847/*
3848 * END: Interface for the common management module
3849 */
3850
3851
3852
3853/**
3854 * megaraid_sysfs_alloc_resources - allocate sysfs related resources
3855 * @adapter	: controller's soft state
3856 *
3857 * Allocate packets required to issue FW calls whenever the sysfs attributes
3858 * are read. These attributes would require up-to-date information from the
3859 * FW. Also set up the mutex used to serialize access to these resources, and
3860 * the wait queue.
3861 *
3862 * Return 0 on success.
3863 * Return -ERROR_CODE on failure.
3864 */
3865static int
3866megaraid_sysfs_alloc_resources(adapter_t *adapter)
3867{
3868	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3869	int		rval = 0;
3870
3871	raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL);
3872
3873	raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
3874
3875	raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
3876			PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
3877
3878	if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
3879		!raid_dev->sysfs_buffer) {
3880
3881		con_log(CL_ANN, (KERN_WARNING
3882			"megaraid: out of memory, %s %d\n", __func__,
3883			__LINE__));
3884
3885		rval = -ENOMEM;
3886
3887		megaraid_sysfs_free_resources(adapter);
3888	}
3889
3890	mutex_init(&raid_dev->sysfs_mtx);
3891
3892	init_waitqueue_head(&raid_dev->sysfs_wait_q);
3893
3894	return rval;
3895}
3896
3897
3898/**
3899 * megaraid_sysfs_free_resources - free sysfs related resources
3900 * @adapter	: controller's soft state
3901 *
3902 * Free packets allocated for sysfs FW commands
3903 */
3904static void
3905megaraid_sysfs_free_resources(adapter_t *adapter)
3906{
3907	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3908
3909	kfree(raid_dev->sysfs_uioc);
3910	kfree(raid_dev->sysfs_mbox64);
3911
3912	if (raid_dev->sysfs_buffer) {
3913		pci_free_consistent(adapter->pdev, PAGE_SIZE,
3914			raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
3915	}
3916}
3917
3918
3919/**
3920 * megaraid_sysfs_get_ldmap_done - callback for get ldmap
3921 * @uioc	: completed packet
3922 *
3923 * Callback routine called in the ISR/tasklet context for get ldmap call
3924 */
3925static void
3926megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
3927{
3928	adapter_t	*adapter = (adapter_t *)uioc->buf_vaddr;
3929	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3930
3931	uioc->status = 0;
3932
3933	wake_up(&raid_dev->sysfs_wait_q);
3934}
3935
3936
3937/**
3938 * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap
3939 * @data	: timed out packet
3940 *
3941 * Timeout routine to recover and return to the application, in case the adapter
3942 * has stopped responding. A timeout of 60 seconds for this command seems like
3943 * a good value.
3944 */
3945static void
3946megaraid_sysfs_get_ldmap_timeout(unsigned long data)
3947{
3948	uioc_t		*uioc = (uioc_t *)data;
3949	adapter_t	*adapter = (adapter_t *)uioc->buf_vaddr;
3950	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3951
3952	uioc->status = -ETIME;
3953
3954	wake_up(&raid_dev->sysfs_wait_q);
3955}
3956
3957
3958/**
3959 * megaraid_sysfs_get_ldmap - get updated logical drive map
3960 * @adapter	: controller's soft state
3961 *
3962 * This routine is called whenever the user reads the logical drive
3963 * attributes, to fetch the current logical drive mapping table from the
3964 * firmware. We use the management APIs to issue commands to the controller.
3965 *
3966 * NOTE: The command issuance functionality is not generalized and is
3967 * implemented only in the context of the "get ld map" command. If required, the
3968 * command issuance logic can be trivially pulled out and implemented as a
3969 * standalone library. For now, this should suffice since there is no other
3970 * user of this interface.
3971 *
3972 * Return 0 on success.
3973 * Return -1 on failure.
3974 */
3975static int
3976megaraid_sysfs_get_ldmap(adapter_t *adapter)
3977{
3978	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
3979	uioc_t			*uioc;
3980	mbox64_t		*mbox64;
3981	mbox_t			*mbox;
3982	char			*raw_mbox;
3983	struct timer_list	sysfs_timer;
3984	struct timer_list	*timerp;
3985	caddr_t			ldmap;
3986	int			rval = 0;
3987
3988	/*
3989	 * Allow only one read at a time to go through the sysfs attributes
3990	 */
3991	mutex_lock(&raid_dev->sysfs_mtx);
3992
3993	uioc	= raid_dev->sysfs_uioc;
3994	mbox64	= raid_dev->sysfs_mbox64;
3995	ldmap	= raid_dev->sysfs_buffer;
3996
3997	memset(uioc, 0, sizeof(uioc_t));
3998	memset(mbox64, 0, sizeof(mbox64_t));
3999	memset(ldmap, 0, sizeof(raid_dev->curr_ldmap));
4000
4001	mbox		= &mbox64->mbox32;
4002	raw_mbox	= (char *)mbox;
4003	uioc->cmdbuf    = (uint64_t)(unsigned long)mbox64;
4004	uioc->buf_vaddr	= (caddr_t)adapter;
4005	uioc->status	= -ENODATA;
4006	uioc->done	= megaraid_sysfs_get_ldmap_done;
4007
4008	/*
4009	 * Prepare the mailbox packet to get the current logical drive mapping
4010	 * table
4011	 */
4012	mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma;
4013
4014	raw_mbox[0] = FC_DEL_LOGDRV;
4015	raw_mbox[2] = OP_GET_LDID_MAP;
4016
4017	/*
4018	 * Setup a timer to recover from a non-responding controller
4019	 */
4020	timerp	= &sysfs_timer;
4021	init_timer(timerp);
4022
4023	timerp->function	= megaraid_sysfs_get_ldmap_timeout;
4024	timerp->data		= (unsigned long)uioc;
4025	timerp->expires		= jiffies + 60 * HZ;
4026
4027	add_timer(timerp);
4028
4029	/*
4030	 * Send the command to the firmware
4031	 */
4032	rval = megaraid_mbox_mm_command(adapter, uioc);
4033
4034	if (rval == 0) {	// command successfully issued
4035		wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA));
4036
4037		/*
4038		 * Check if the command timed out
4039		 */
4040		if (uioc->status == -ETIME) {
4041			con_log(CL_ANN, (KERN_NOTICE
4042				"megaraid: sysfs get ld map timed out\n"));
4043
4044			rval = -ETIME;
4045		}
4046		else {
4047			rval = mbox->status;
4048		}
4049
4050		if (rval == 0) {
4051			memcpy(raid_dev->curr_ldmap, ldmap,
4052				sizeof(raid_dev->curr_ldmap));
4053		}
4054		else {
4055			con_log(CL_ANN, (KERN_NOTICE
4056				"megaraid: get ld map failed with %x\n", rval));
4057		}
4058	}
4059	else {
4060		con_log(CL_ANN, (KERN_NOTICE
4061			"megaraid: could not issue ldmap command:%x\n", rval));
4062	}
4063
4064
4065	del_timer_sync(timerp);
4066
4067	mutex_unlock(&raid_dev->sysfs_mtx);
4068
4069	return rval;
4070}
4071
4072
4073/**
4074 * megaraid_sysfs_show_app_hndl - display application handle for this adapter
4075 * @dev		: device object representation for the host
4076 * @buf		: buffer to send data to
4077 *
4078 * Display the handle used by the applications while executing management
4079 * tasks on the adapter. We invoke a management module API to get the adapter
4080 * handle, since we do not interface with applications directly.
4081 */
4082static ssize_t
4083megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
4084			     char *buf)
4085{
4086	struct Scsi_Host *shost = class_to_shost(dev);
4087	adapter_t	*adapter = (adapter_t *)SCSIHOST2ADAP(shost);
4088	uint32_t	app_hndl;
4089
4090	app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
4091
4092	return snprintf(buf, 8, "%u\n", app_hndl);
4093}
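/*
 * Typical usage (assuming the standard sysfs layout for SCSI host class
 * attributes; the host number below is only an example):
 *
 *	# cat /sys/class/scsi_host/host0/megaraid_mbox_app_hndl
 */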
4094
4095
4096/**
4097 * megaraid_sysfs_show_ldnum - display the logical drive number for this device
4098 * @dev		: device object representation for the scsi device
4099 * @attr	: device attribute to show
4100 * @buf		: buffer to send data to
4101 *
4102 * Display the logical drive number for the device in question, if it is a valid
4103 * logical drive. For physical devices, "-1" is returned.
4104 *
4105 * The logical drive number is displayed in the following format:
4106 *
4107 * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE>
4108 *
4109 *   <int>     <int>       <int>            <int>
4110 */
4111static ssize_t
4112megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)
4113{
4114	struct scsi_device *sdev = to_scsi_device(dev);
4115	adapter_t	*adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host);
4116	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
4117	int		scsi_id = -1;
4118	int		logical_drv = -1;
4119	int		ldid_map = -1;
4120	uint32_t	app_hndl = 0;
4121	int		mapped_sdev_id;
4122	int		rval;
4123	int		i;
4124
4125	if (raid_dev->random_del_supported &&
4126			MRAID_IS_LOGICAL_SDEV(adapter, sdev)) {
4127
4128		rval = megaraid_sysfs_get_ldmap(adapter);
4129		if (rval == 0) {
4130
4131			for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) {
4132
4133				mapped_sdev_id = sdev->id;
4134
4135				if (sdev->id > adapter->init_id) {
4136					mapped_sdev_id -= 1;
4137				}
4138
4139				if (raid_dev->curr_ldmap[i] == mapped_sdev_id) {
4140
4141					scsi_id = sdev->id;
4142
4143					logical_drv = i;
4144
4145					ldid_map = raid_dev->curr_ldmap[i];
4146
4147					app_hndl = mraid_mm_adapter_app_handle(
4148							adapter->unique_id);
4149
4150					break;
4151				}
4152			}
4153		}
4154		else {
4155			con_log(CL_ANN, (KERN_NOTICE
4156				"megaraid: sysfs get ld map failed: %x\n",
4157				rval));
4158		}
4159	}
4160
4161	return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
4162			ldid_map, app_hndl);
4163}
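/*
 * Example output (values are illustrative only): a logical drive might read
 * "2 1 1 4660" -- SCSI id 2, logical drive 1, sticky id 1, app handle 4660 --
 * while physical devices and configurations without random-deletion support
 * read "-1 -1 -1 0", i.e. the defaults initialized above.
 */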
4164
4165
4166/*
4167 * END: Mailbox Low Level Driver
4168 */
4169module_init(megaraid_init);
4170module_exit(megaraid_exit);
4171
4172/* vim: set ts=8 sw=8 tw=78 ai si: */
v3.5.6
   1/*
   2 *
   3 *			Linux MegaRAID device driver
   4 *
   5 * Copyright (c) 2003-2004  LSI Logic Corporation.
   6 *
   7 *	   This program is free software; you can redistribute it and/or
   8 *	   modify it under the terms of the GNU General Public License
   9 *	   as published by the Free Software Foundation; either version
  10 *	   2 of the License, or (at your option) any later version.
  11 *
  12 * FILE		: megaraid_mbox.c
  13 * Version	: v2.20.5.1 (Nov 16 2006)
  14 *
  15 * Authors:
  16 * 	Atul Mukker		<Atul.Mukker@lsi.com>
  17 * 	Sreenivas Bagalkote	<Sreenivas.Bagalkote@lsi.com>
  18 * 	Manoj Jose		<Manoj.Jose@lsi.com>
  19 * 	Seokmann Ju
  20 *
  21 * List of supported controllers
  22 *
  23 * OEM	Product Name			VID	DID	SSVID	SSID
  24 * ---	------------			---	---	----	----
  25 * Dell PERC3/QC			101E	1960	1028	0471
  26 * Dell PERC3/DC			101E	1960	1028	0493
  27 * Dell PERC3/SC			101E	1960	1028	0475
  28 * Dell PERC3/Di			1028	1960	1028	0123
  29 * Dell PERC4/SC			1000	1960	1028	0520
  30 * Dell PERC4/DC			1000	1960	1028	0518
  31 * Dell PERC4/QC			1000	0407	1028	0531
  32 * Dell PERC4/Di			1028	000F	1028	014A
  33 * Dell PERC 4e/Si			1028	0013	1028	016c
  34 * Dell PERC 4e/Di			1028	0013	1028	016d
  35 * Dell PERC 4e/Di			1028	0013	1028	016e
  36 * Dell PERC 4e/Di			1028	0013	1028	016f
  37 * Dell PERC 4e/Di			1028	0013	1028	0170
  38 * Dell PERC 4e/DC			1000	0408	1028	0002
  39 * Dell PERC 4e/SC			1000	0408	1028	0001
  40 *
  41 *
  42 * LSI MegaRAID SCSI 320-0		1000	1960	1000	A520
  43 * LSI MegaRAID SCSI 320-1		1000	1960	1000	0520
  44 * LSI MegaRAID SCSI 320-2		1000	1960	1000	0518
  45 * LSI MegaRAID SCSI 320-0X		1000	0407	1000	0530
  46 * LSI MegaRAID SCSI 320-2X		1000	0407	1000	0532
  47 * LSI MegaRAID SCSI 320-4X		1000	0407	1000	0531
  48 * LSI MegaRAID SCSI 320-1E		1000	0408	1000	0001
  49 * LSI MegaRAID SCSI 320-2E		1000	0408	1000	0002
  50 * LSI MegaRAID SATA 150-4		1000	1960	1000	4523
  51 * LSI MegaRAID SATA 150-6		1000	1960	1000	0523
  52 * LSI MegaRAID SATA 300-4X		1000	0409	1000	3004
  53 * LSI MegaRAID SATA 300-8X		1000	0409	1000	3008
  54 *
  55 * INTEL RAID Controller SRCU42X	1000	0407	8086	0532
  56 * INTEL RAID Controller SRCS16		1000	1960	8086	0523
  57 * INTEL RAID Controller SRCU42E	1000	0408	8086	0002
  58 * INTEL RAID Controller SRCZCRX	1000	0407	8086	0530
  59 * INTEL RAID Controller SRCS28X	1000	0409	8086	3008
  60 * INTEL RAID Controller SROMBU42E	1000	0408	8086	3431
  61 * INTEL RAID Controller SROMBU42E	1000	0408	8086	3499
  62 * INTEL RAID Controller SRCU51L	1000	1960	8086	0520
  63 *
  64 * FSC	MegaRAID PCI Express ROMB	1000	0408	1734	1065
  65 *
  66 * ACER	MegaRAID ROMB-2E		1000	0408	1025	004D
  67 *
  68 * NEC	MegaRAID PCI Express ROMB	1000	0408	1033	8287
  69 *
  70 * For history of changes, see Documentation/scsi/ChangeLog.megaraid
  71 */
  72
  73#include <linux/slab.h>
  74#include <linux/module.h>
  75#include "megaraid_mbox.h"
  76
  77static int megaraid_init(void);
  78static void megaraid_exit(void);
  79
  80static int megaraid_probe_one(struct pci_dev*, const struct pci_device_id *);
  81static void megaraid_detach_one(struct pci_dev *);
  82static void megaraid_mbox_shutdown(struct pci_dev *);
  83
  84static int megaraid_io_attach(adapter_t *);
  85static void megaraid_io_detach(adapter_t *);
  86
  87static int megaraid_init_mbox(adapter_t *);
  88static void megaraid_fini_mbox(adapter_t *);
  89
  90static int megaraid_alloc_cmd_packets(adapter_t *);
  91static void megaraid_free_cmd_packets(adapter_t *);
  92
  93static int megaraid_mbox_setup_dma_pools(adapter_t *);
  94static void megaraid_mbox_teardown_dma_pools(adapter_t *);
  95
  96static int megaraid_sysfs_alloc_resources(adapter_t *);
  97static void megaraid_sysfs_free_resources(adapter_t *);
  98
  99static int megaraid_abort_handler(struct scsi_cmnd *);
 100static int megaraid_reset_handler(struct scsi_cmnd *);
 101
 102static int mbox_post_sync_cmd(adapter_t *, uint8_t []);
 103static int mbox_post_sync_cmd_fast(adapter_t *, uint8_t []);
 104static int megaraid_busywait_mbox(mraid_device_t *);
 105static int megaraid_mbox_product_info(adapter_t *);
 106static int megaraid_mbox_extended_cdb(adapter_t *);
 107static int megaraid_mbox_support_ha(adapter_t *, uint16_t *);
 108static int megaraid_mbox_support_random_del(adapter_t *);
 109static int megaraid_mbox_get_max_sg(adapter_t *);
 110static void megaraid_mbox_enum_raid_scsi(adapter_t *);
 111static void megaraid_mbox_flush_cache(adapter_t *);
 112static int megaraid_mbox_fire_sync_cmd(adapter_t *);
 113
 114static void megaraid_mbox_display_scb(adapter_t *, scb_t *);
 115static void megaraid_mbox_setup_device_map(adapter_t *);
 116
 117static int megaraid_queue_command(struct Scsi_Host *, struct scsi_cmnd *);
 118static scb_t *megaraid_mbox_build_cmd(adapter_t *, struct scsi_cmnd *, int *);
 119static void megaraid_mbox_runpendq(adapter_t *, scb_t *);
 120static void megaraid_mbox_prepare_pthru(adapter_t *, scb_t *,
 121		struct scsi_cmnd *);
 122static void megaraid_mbox_prepare_epthru(adapter_t *, scb_t *,
 123		struct scsi_cmnd *);
 124
 125static irqreturn_t megaraid_isr(int, void *);
 126
 127static void megaraid_mbox_dpc(unsigned long);
 128
 129static ssize_t megaraid_sysfs_show_app_hndl(struct device *, struct device_attribute *attr, char *);
 130static ssize_t megaraid_sysfs_show_ldnum(struct device *, struct device_attribute *attr, char *);
 131
 132static int megaraid_cmm_register(adapter_t *);
 133static int megaraid_cmm_unregister(adapter_t *);
 134static int megaraid_mbox_mm_handler(unsigned long, uioc_t *, uint32_t);
 135static int megaraid_mbox_mm_command(adapter_t *, uioc_t *);
 136static void megaraid_mbox_mm_done(adapter_t *, scb_t *);
 137static int gather_hbainfo(adapter_t *, mraid_hba_info_t *);
 138static int wait_till_fw_empty(adapter_t *);
 139
 140
 141
 142MODULE_AUTHOR("megaraidlinux@lsi.com");
 143MODULE_DESCRIPTION("LSI Logic MegaRAID Mailbox Driver");
 144MODULE_LICENSE("GPL");
 145MODULE_VERSION(MEGARAID_VERSION);
 146
 147/*
148 * ### module parameters for the driver ###
 149 */
 150
 151/*
152 * Set to enable the driver to expose unconfigured disks to the kernel
 153 */
 154static int megaraid_expose_unconf_disks = 0;
 155module_param_named(unconf_disks, megaraid_expose_unconf_disks, int, 0);
 156MODULE_PARM_DESC(unconf_disks,
 157	"Set to expose unconfigured disks to kernel (default=0)");
 158
 159/*
 160 * driver wait time if the adapter's mailbox is busy
 161 */
 162static unsigned int max_mbox_busy_wait = MBOX_BUSY_WAIT;
 163module_param_named(busy_wait, max_mbox_busy_wait, int, 0);
 164MODULE_PARM_DESC(busy_wait,
 165	"Max wait for mailbox in microseconds if busy (default=10)");
 166
 167/*
 168 * number of sectors per IO command
 169 */
 170static unsigned int megaraid_max_sectors = MBOX_MAX_SECTORS;
 171module_param_named(max_sectors, megaraid_max_sectors, int, 0);
 172MODULE_PARM_DESC(max_sectors,
 173	"Maximum number of sectors per IO command (default=128)");
 174
 175/*
 176 * number of commands per logical unit
 177 */
 178static unsigned int megaraid_cmd_per_lun = MBOX_DEF_CMD_PER_LUN;
 179module_param_named(cmd_per_lun, megaraid_cmd_per_lun, int, 0);
 180MODULE_PARM_DESC(cmd_per_lun,
 181	"Maximum number of commands per logical unit (default=64)");
 182
 183
 184/*
185 * Fast driver load option: skip scanning for physical devices during load.
186 * This results in non-disk devices being skipped during driver load
187 * time. They can be added later, though, using /proc/scsi/scsi
 188 */
 189static unsigned int megaraid_fast_load = 0;
 190module_param_named(fast_load, megaraid_fast_load, int, 0);
 191MODULE_PARM_DESC(fast_load,
 192	"Faster loading of the driver, skips physical devices! (default=0)");
 193
 194
 195/*
 196 * mraid_debug level - threshold for amount of information to be displayed by
197 * the driver. This level can be changed through module parameters, ioctl, or the
 198 * sysfs/proc interface. By default, print the announcement messages only.
 199 */
 200int mraid_debug_level = CL_ANN;
 201module_param_named(debug_level, mraid_debug_level, int, 0);
 202MODULE_PARM_DESC(debug_level, "Debug level for driver (default=0)");
 203
 204/*
 205 * ### global data ###
 206 */
 207static uint8_t megaraid_mbox_version[8] =
 208	{ 0x02, 0x20, 0x04, 0x06, 3, 7, 20, 5 };
 209
 210
 211/*
 212 * PCI table for all supported controllers.
 213 */
 214static struct pci_device_id pci_id_table_g[] =  {
 215	{
 216		PCI_VENDOR_ID_DELL,
 217		PCI_DEVICE_ID_PERC4_DI_DISCOVERY,
 218		PCI_VENDOR_ID_DELL,
 219		PCI_SUBSYS_ID_PERC4_DI_DISCOVERY,
 220	},
 221	{
 222		PCI_VENDOR_ID_LSI_LOGIC,
 223		PCI_DEVICE_ID_PERC4_SC,
 224		PCI_VENDOR_ID_DELL,
 225		PCI_SUBSYS_ID_PERC4_SC,
 226	},
 227	{
 228		PCI_VENDOR_ID_LSI_LOGIC,
 229		PCI_DEVICE_ID_PERC4_DC,
 230		PCI_VENDOR_ID_DELL,
 231		PCI_SUBSYS_ID_PERC4_DC,
 232	},
 233	{
 234		PCI_VENDOR_ID_LSI_LOGIC,
 235		PCI_DEVICE_ID_VERDE,
 236		PCI_ANY_ID,
 237		PCI_ANY_ID,
 238	},
 239	{
 240		PCI_VENDOR_ID_DELL,
 241		PCI_DEVICE_ID_PERC4_DI_EVERGLADES,
 242		PCI_VENDOR_ID_DELL,
 243		PCI_SUBSYS_ID_PERC4_DI_EVERGLADES,
 244	},
 245	{
 246		PCI_VENDOR_ID_DELL,
 247		PCI_DEVICE_ID_PERC4E_SI_BIGBEND,
 248		PCI_VENDOR_ID_DELL,
 249		PCI_SUBSYS_ID_PERC4E_SI_BIGBEND,
 250	},
 251	{
 252		PCI_VENDOR_ID_DELL,
 253		PCI_DEVICE_ID_PERC4E_DI_KOBUK,
 254		PCI_VENDOR_ID_DELL,
 255		PCI_SUBSYS_ID_PERC4E_DI_KOBUK,
 256	},
 257	{
 258		PCI_VENDOR_ID_DELL,
 259		PCI_DEVICE_ID_PERC4E_DI_CORVETTE,
 260		PCI_VENDOR_ID_DELL,
 261		PCI_SUBSYS_ID_PERC4E_DI_CORVETTE,
 262	},
 263	{
 264		PCI_VENDOR_ID_DELL,
 265		PCI_DEVICE_ID_PERC4E_DI_EXPEDITION,
 266		PCI_VENDOR_ID_DELL,
 267		PCI_SUBSYS_ID_PERC4E_DI_EXPEDITION,
 268	},
 269	{
 270		PCI_VENDOR_ID_DELL,
 271		PCI_DEVICE_ID_PERC4E_DI_GUADALUPE,
 272		PCI_VENDOR_ID_DELL,
 273		PCI_SUBSYS_ID_PERC4E_DI_GUADALUPE,
 274	},
 275	{
 276		PCI_VENDOR_ID_LSI_LOGIC,
 277		PCI_DEVICE_ID_DOBSON,
 278		PCI_ANY_ID,
 279		PCI_ANY_ID,
 280	},
 281	{
 282		PCI_VENDOR_ID_AMI,
 283		PCI_DEVICE_ID_AMI_MEGARAID3,
 284		PCI_ANY_ID,
 285		PCI_ANY_ID,
 286	},
 287	{
 288		PCI_VENDOR_ID_LSI_LOGIC,
 289		PCI_DEVICE_ID_AMI_MEGARAID3,
 290		PCI_ANY_ID,
 291		PCI_ANY_ID,
 292	},
 293	{
 294		PCI_VENDOR_ID_LSI_LOGIC,
 295		PCI_DEVICE_ID_LINDSAY,
 296		PCI_ANY_ID,
 297		PCI_ANY_ID,
 298	},
 299	{0}	/* Terminating entry */
 300};
 301MODULE_DEVICE_TABLE(pci, pci_id_table_g);
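/*
 * Note: entries above that use PCI_ANY_ID for the subsystem vendor/device
 * match every OEM flavor of that controller chip, while the Dell PERC
 * entries are pinned to their exact subsystem IDs.
 */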
 302
 303
 304static struct pci_driver megaraid_pci_driver = {
 305	.name		= "megaraid",
 306	.id_table	= pci_id_table_g,
 307	.probe		= megaraid_probe_one,
 308	.remove		= __devexit_p(megaraid_detach_one),
 309	.shutdown	= megaraid_mbox_shutdown,
 310};
 311
 312
 313
 314// definitions for the device attributes for exporting logical drive number
 315// for a scsi address (Host, Channel, Id, Lun)
 316
 317DEVICE_ATTR(megaraid_mbox_app_hndl, S_IRUSR, megaraid_sysfs_show_app_hndl,
 318		NULL);
 319
 320// Host template initializer for megaraid mbox sysfs device attributes
 321static struct device_attribute *megaraid_shost_attrs[] = {
 322	&dev_attr_megaraid_mbox_app_hndl,
 323	NULL,
 324};
 325
 326
 327DEVICE_ATTR(megaraid_mbox_ld, S_IRUSR, megaraid_sysfs_show_ldnum, NULL);
 328
 329// Host template initializer for megaraid mbox sysfs device attributes
 330static struct device_attribute *megaraid_sdev_attrs[] = {
 331	&dev_attr_megaraid_mbox_ld,
 332	NULL,
 333};
 334
 335/**
 336 * megaraid_change_queue_depth - Change the device's queue depth
 337 * @sdev:	scsi device struct
 338 * @qdepth:	depth to set
 339 * @reason:	calling context
 340 *
 341 * Return value:
 342 * 	actual depth set
 343 */
 344static int megaraid_change_queue_depth(struct scsi_device *sdev, int qdepth,
 345				       int reason)
 346{
 347	if (reason != SCSI_QDEPTH_DEFAULT)
 348		return -EOPNOTSUPP;
 349
 350	if (qdepth > MBOX_MAX_SCSI_CMDS)
 351		qdepth = MBOX_MAX_SCSI_CMDS;
 352	scsi_adjust_queue_depth(sdev, 0, qdepth);
 353	return sdev->queue_depth;
 354}
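/*
 * The queue depth is capped at MBOX_MAX_SCSI_CMDS because every device on
 * the adapter draws its SCBs from the single adapter-wide pool of that size
 * (see megaraid_alloc_cmd_packets()).
 */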
 355
 356/*
 357 * Scsi host template for megaraid unified driver
 358 */
 359static struct scsi_host_template megaraid_template_g = {
 360	.module				= THIS_MODULE,
 361	.name				= "LSI Logic MegaRAID driver",
 362	.proc_name			= "megaraid",
 363	.queuecommand			= megaraid_queue_command,
 364	.eh_abort_handler		= megaraid_abort_handler,
 365	.eh_device_reset_handler	= megaraid_reset_handler,
 366	.eh_bus_reset_handler		= megaraid_reset_handler,
 367	.eh_host_reset_handler		= megaraid_reset_handler,
 368	.change_queue_depth		= megaraid_change_queue_depth,
 369	.use_clustering			= ENABLE_CLUSTERING,
 370	.sdev_attrs			= megaraid_sdev_attrs,
 371	.shost_attrs			= megaraid_shost_attrs,
 372};
 373
 374
 375/**
 376 * megaraid_init - module load hook
 377 *
378 * We register ourselves as a hotplug-enabled module and let the PCI subsystem
 379 * discover our adapters.
 380 */
 381static int __init
 382megaraid_init(void)
 383{
 384	int	rval;
 385
 386	// Announce the driver version
 387	con_log(CL_ANN, (KERN_INFO "megaraid: %s %s\n", MEGARAID_VERSION,
 388		MEGARAID_EXT_VERSION));
 389
 390	// check validity of module parameters
 391	if (megaraid_cmd_per_lun > MBOX_MAX_SCSI_CMDS) {
 392
 393		con_log(CL_ANN, (KERN_WARNING
 394			"megaraid mailbox: max commands per lun reset to %d\n",
 395			MBOX_MAX_SCSI_CMDS));
 396
 397		megaraid_cmd_per_lun = MBOX_MAX_SCSI_CMDS;
 398	}
 399
 400
 401	// register as a PCI hot-plug driver module
 402	rval = pci_register_driver(&megaraid_pci_driver);
 403	if (rval < 0) {
 404		con_log(CL_ANN, (KERN_WARNING
 405			"megaraid: could not register hotplug support.\n"));
 406	}
 407
 408	return rval;
 409}
 410
 411
 412/**
 413 * megaraid_exit - driver unload entry point
 414 *
415 * We simply undo everything done by megaraid_init here.
 416 */
 417static void __exit
 418megaraid_exit(void)
 419{
 420	con_log(CL_DLEVEL1, (KERN_NOTICE "megaraid: unloading framework\n"));
 421
 422	// unregister as PCI hotplug driver
 423	pci_unregister_driver(&megaraid_pci_driver);
 424
 425	return;
 426}
 427
 428
 429/**
 430 * megaraid_probe_one - PCI hotplug entry point
 431 * @pdev	: handle to this controller's PCI configuration space
 432 * @id		: pci device id of the class of controllers
 433 *
 434 * This routine should be called whenever a new adapter is detected by the
435 * PCI hotplug subsystem.
 436 */
 437static int __devinit
 438megaraid_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
 439{
 440	adapter_t	*adapter;
 441
 442
 443	// detected a new controller
 444	con_log(CL_ANN, (KERN_INFO
 445		"megaraid: probe new device %#4.04x:%#4.04x:%#4.04x:%#4.04x: ",
 446		pdev->vendor, pdev->device, pdev->subsystem_vendor,
 447		pdev->subsystem_device));
 448
 449	con_log(CL_ANN, ("bus %d:slot %d:func %d\n", pdev->bus->number,
 450		PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn)));
 451
 452	if (pci_enable_device(pdev)) {
 453		con_log(CL_ANN, (KERN_WARNING
 454				"megaraid: pci_enable_device failed\n"));
 455
 456		return -ENODEV;
 457	}
 458
 459	// Enable bus-mastering on this controller
 460	pci_set_master(pdev);
 461
 462	// Allocate the per driver initialization structure
 463	adapter = kzalloc(sizeof(adapter_t), GFP_KERNEL);
 464
 465	if (adapter == NULL) {
 466		con_log(CL_ANN, (KERN_WARNING
 467		"megaraid: out of memory, %s %d.\n", __func__, __LINE__));
 468
 469		goto out_probe_one;
 470	}
 471
 472
 473	// set up PCI related soft state and other pre-known parameters
 474	adapter->unique_id	= pdev->bus->number << 8 | pdev->devfn;
 475	adapter->irq		= pdev->irq;
 476	adapter->pdev		= pdev;
 477
 478	atomic_set(&adapter->being_detached, 0);
 479
 480	// Setup the default DMA mask. This would be changed later on
 481	// depending on hardware capabilities
 482	if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(32)) != 0) {
 483
 484		con_log(CL_ANN, (KERN_WARNING
 485			"megaraid: pci_set_dma_mask failed:%d\n", __LINE__));
 486
 487		goto out_free_adapter;
 488	}
 489
 490
 491	// Initialize the synchronization lock for kernel and LLD
 492	spin_lock_init(&adapter->lock);
 493
 494	// Initialize the command queues: the list of free SCBs and the list
 495	// of pending SCBs.
 496	INIT_LIST_HEAD(&adapter->kscb_pool);
 497	spin_lock_init(SCSI_FREE_LIST_LOCK(adapter));
 498
 499	INIT_LIST_HEAD(&adapter->pend_list);
 500	spin_lock_init(PENDING_LIST_LOCK(adapter));
 501
 502	INIT_LIST_HEAD(&adapter->completed_list);
 503	spin_lock_init(COMPLETED_LIST_LOCK(adapter));
 504
 505
 506	// Start the mailbox based controller
 507	if (megaraid_init_mbox(adapter) != 0) {
 508		con_log(CL_ANN, (KERN_WARNING
509			"megaraid: mailbox adapter did not initialize\n"));
 510
 511		goto out_free_adapter;
 512	}
 513
 514	// Register with LSI Common Management Module
 515	if (megaraid_cmm_register(adapter) != 0) {
 516
 517		con_log(CL_ANN, (KERN_WARNING
 518		"megaraid: could not register with management module\n"));
 519
 520		goto out_fini_mbox;
 521	}
 522
 523	// setup adapter handle in PCI soft state
 524	pci_set_drvdata(pdev, adapter);
 525
 526	// attach with scsi mid-layer
 527	if (megaraid_io_attach(adapter) != 0) {
 528
 529		con_log(CL_ANN, (KERN_WARNING "megaraid: io attach failed\n"));
 530
 531		goto out_cmm_unreg;
 532	}
 533
 534	return 0;
 535
 536out_cmm_unreg:
 537	pci_set_drvdata(pdev, NULL);
 538	megaraid_cmm_unregister(adapter);
 539out_fini_mbox:
 540	megaraid_fini_mbox(adapter);
 541out_free_adapter:
 542	kfree(adapter);
 543out_probe_one:
 544	pci_disable_device(pdev);
 545
 546	return -ENODEV;
 547}
 548
 549
 550/**
 551 * megaraid_detach_one - release framework resources and call LLD release routine
552 * @pdev	: handle for our PCI configuration space
 553 *
 554 * This routine is called during driver unload. We free all the allocated
 555 * resources and call the corresponding LLD so that it can also release all
 556 * its resources.
 557 *
 558 * This routine is also called from the PCI hotplug system.
 559 */
 560static void
 561megaraid_detach_one(struct pci_dev *pdev)
 562{
 563	adapter_t		*adapter;
 564	struct Scsi_Host	*host;
 565
 566
 567	// Start a rollback on this adapter
 568	adapter = pci_get_drvdata(pdev);
 569
 570	if (!adapter) {
 571		con_log(CL_ANN, (KERN_CRIT
 572		"megaraid: Invalid detach on %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
 573			pdev->vendor, pdev->device, pdev->subsystem_vendor,
 574			pdev->subsystem_device));
 575
 576		return;
 577	}
 578	else {
 579		con_log(CL_ANN, (KERN_NOTICE
 580		"megaraid: detaching device %#4.04x:%#4.04x:%#4.04x:%#4.04x\n",
 581			pdev->vendor, pdev->device, pdev->subsystem_vendor,
 582			pdev->subsystem_device));
 583	}
 584
 585
 586	host = adapter->host;
 587
 588	// do not allow any more requests from the management module for this
 589	// adapter.
 590	// FIXME: How do we account for the request which might still be
 591	// pending with us?
 592	atomic_set(&adapter->being_detached, 1);
 593
 594	// detach from the IO sub-system
 595	megaraid_io_detach(adapter);
 596
 597	// reset the device state in the PCI structure. We check this
 598	// condition when we enter here. If the device state is NULL,
 599	// that would mean the device has already been removed
 600	pci_set_drvdata(pdev, NULL);
 601
 602	// Unregister from common management module
 603	//
 604	// FIXME: this must return success or failure for conditions if there
 605	// is a command pending with LLD or not.
 606	megaraid_cmm_unregister(adapter);
 607
 608	// finalize the mailbox based controller and release all resources
 609	megaraid_fini_mbox(adapter);
 610
 611	kfree(adapter);
 612
 613	scsi_host_put(host);
 614
 615	pci_disable_device(pdev);
 616
 617	return;
 618}
 619
 620
 621/**
 622 * megaraid_mbox_shutdown - PCI shutdown for megaraid HBA
 623 * @pdev		: generic driver model device
 624 *
 625 * Shutdown notification, perform flush cache.
 626 */
 627static void
 628megaraid_mbox_shutdown(struct pci_dev *pdev)
 629{
 630	adapter_t		*adapter = pci_get_drvdata(pdev);
 631	static int		counter;
 632
 633	if (!adapter) {
 634		con_log(CL_ANN, (KERN_WARNING
 635			"megaraid: null device in shutdown\n"));
 636		return;
 637	}
 638
 639	// flush caches now
 640	con_log(CL_ANN, (KERN_INFO "megaraid: flushing adapter %d...",
 641		counter++));
 642
 643	megaraid_mbox_flush_cache(adapter);
 644
 645	con_log(CL_ANN, ("done\n"));
 646}
 647
 648
 649/**
 650 * megaraid_io_attach - attach a device with the IO subsystem
 651 * @adapter		: controller's soft state
 652 *
 653 * Attach this device with the IO subsystem.
 654 */
 655static int
 656megaraid_io_attach(adapter_t *adapter)
 657{
 658	struct Scsi_Host	*host;
 659
 660	// Initialize SCSI Host structure
 661	host = scsi_host_alloc(&megaraid_template_g, 8);
 662	if (!host) {
 663		con_log(CL_ANN, (KERN_WARNING
 664			"megaraid mbox: scsi_register failed\n"));
 665
 666		return -1;
 667	}
 668
 669	SCSIHOST2ADAP(host)	= (caddr_t)adapter;
 670	adapter->host		= host;
 671
 672	host->irq		= adapter->irq;
 673	host->unique_id		= adapter->unique_id;
 674	host->can_queue		= adapter->max_cmds;
 675	host->this_id		= adapter->init_id;
 676	host->sg_tablesize	= adapter->sglen;
 677	host->max_sectors	= adapter->max_sectors;
 678	host->cmd_per_lun	= adapter->cmd_per_lun;
 679	host->max_channel	= adapter->max_channel;
 680	host->max_id		= adapter->max_target;
 681	host->max_lun		= adapter->max_lun;
 682
 683
 684	// notify mid-layer about the new controller
 685	if (scsi_add_host(host, &adapter->pdev->dev)) {
 686
 687		con_log(CL_ANN, (KERN_WARNING
 688			"megaraid mbox: scsi_add_host failed\n"));
 689
 690		scsi_host_put(host);
 691
 692		return -1;
 693	}
 694
 695	scsi_scan_host(host);
 696
 697	return 0;
 698}
 699
 700
 701/**
 702 * megaraid_io_detach - detach a device from the IO subsystem
 703 * @adapter		: controller's soft state
 704 *
 705 * Detach this device from the IO subsystem.
 706 */
 707static void
 708megaraid_io_detach(adapter_t *adapter)
 709{
 710	struct Scsi_Host	*host;
 711
 712	con_log(CL_DLEVEL1, (KERN_INFO "megaraid: io detach\n"));
 713
 714	host = adapter->host;
 715
 716	scsi_remove_host(host);
 717
 718	return;
 719}
 720
 721
 722/*
 723 * START: Mailbox Low Level Driver
 724 *
 725 * This is section specific to the single mailbox based controllers
 726 */
 727
 728/**
 729 * megaraid_init_mbox - initialize controller
 730 * @adapter		: our soft state
 731 *
 732 * - Allocate 16-byte aligned mailbox memory for firmware handshake
 733 * - Allocate controller's memory resources
 734 * - Find out all initialization data
 735 * - Allocate memory required for all the commands
 736 * - Use internal library of FW routines, build up complete soft state
 737 */
 738static int __devinit
 739megaraid_init_mbox(adapter_t *adapter)
 740{
 741	struct pci_dev		*pdev;
 742	mraid_device_t		*raid_dev;
 743	int			i;
 744	uint32_t		magic64;
 745
 746
 747	adapter->ito	= MBOX_TIMEOUT;
 748	pdev		= adapter->pdev;
 749
 750	/*
 751	 * Allocate and initialize the init data structure for mailbox
 752	 * controllers
 753	 */
 754	raid_dev = kzalloc(sizeof(mraid_device_t), GFP_KERNEL);
 755	if (raid_dev == NULL) return -1;
 756
 757
 758	/*
 759	 * Attach the adapter soft state to raid device soft state
 760	 */
 761	adapter->raid_device	= (caddr_t)raid_dev;
 762	raid_dev->fast_load	= megaraid_fast_load;
 763
 764
 765	// our baseport
 766	raid_dev->baseport = pci_resource_start(pdev, 0);
 767
 768	if (pci_request_regions(pdev, "MegaRAID: LSI Logic Corporation") != 0) {
 769
 770		con_log(CL_ANN, (KERN_WARNING
 771				"megaraid: mem region busy\n"));
 772
 773		goto out_free_raid_dev;
 774	}
 775
 776	raid_dev->baseaddr = ioremap_nocache(raid_dev->baseport, 128);
 777
 778	if (!raid_dev->baseaddr) {
 779
 780		con_log(CL_ANN, (KERN_WARNING
 781			"megaraid: could not map hba memory\n") );
 782
 783		goto out_release_regions;
 784	}
 785
 786	/* initialize the mutual exclusion lock for the mailbox */
 787	spin_lock_init(&raid_dev->mailbox_lock);
 788
 789	/* allocate memory required for commands */
 790	if (megaraid_alloc_cmd_packets(adapter) != 0)
 791		goto out_iounmap;
 792
 793	/*
 794	 * Issue SYNC cmd to flush the pending cmds in the adapter
 795	 * and initialize its internal state
 796	 */
 797
 798	if (megaraid_mbox_fire_sync_cmd(adapter))
 799		con_log(CL_ANN, ("megaraid: sync cmd failed\n"));
 800
 801	/*
 802	 * Setup the rest of the soft state using the library of
 803	 * FW routines
 804	 */
 805
 806	/* request IRQ and register the interrupt service routine */
 807	if (request_irq(adapter->irq, megaraid_isr, IRQF_SHARED, "megaraid",
 808		adapter)) {
 809
 810		con_log(CL_ANN, (KERN_WARNING
 811			"megaraid: Couldn't register IRQ %d!\n", adapter->irq));
 812		goto out_alloc_cmds;
 813
 814	}
 815
 816	// Product info
 817	if (megaraid_mbox_product_info(adapter) != 0)
 818		goto out_free_irq;
 819
 820	// Do we support extended CDBs
 821	adapter->max_cdb_sz = 10;
 822	if (megaraid_mbox_extended_cdb(adapter) == 0) {
 823		adapter->max_cdb_sz = 16;
 824	}
 825
 826	/*
 827	 * Do we support cluster environment, if we do, what is the initiator
 828	 * id.
 829	 * NOTE: In a non-cluster aware firmware environment, the LLD should
 830	 * return 7 as initiator id.
 831	 */
 832	adapter->ha		= 0;
 833	adapter->init_id	= -1;
 834	if (megaraid_mbox_support_ha(adapter, &adapter->init_id) == 0) {
 835		adapter->ha = 1;
 836	}
 837
 838	/*
 839	 * Prepare the device ids array to have the mapping between the kernel
 840	 * device address and megaraid device address.
 841	 * We export the physical devices on their actual addresses. The
 842	 * logical drives are exported on a virtual SCSI channel
 843	 */
 844	megaraid_mbox_setup_device_map(adapter);
 845
 846	// If the firmware supports random deletion, update the device id map
 847	if (megaraid_mbox_support_random_del(adapter)) {
 848
849		// Change the logical drive numbers in the device_ids array; one
850		// slot in device_ids is reserved for the target id, which is why
851		// "<=" is used below
 852		for (i = 0; i <= MAX_LOGICAL_DRIVES_40LD; i++) {
 853			adapter->device_ids[adapter->max_channel][i] += 0x80;
 854		}
 855		adapter->device_ids[adapter->max_channel][adapter->init_id] =
 856			0xFF;
 857
 858		raid_dev->random_del_supported = 1;
 859	}
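	/*
	 * With random deletion enabled, logical drives are addressed through
	 * targets biased by 0x80 in device_ids; later code recovers the LD
	 * number with "target % 0x80" (see the READ_CAPACITY handling in
	 * megaraid_mbox_build_cmd()), and the initiator's own slot is set to
	 * 0xFF, an invalid target.
	 */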
 860
 861	/*
 862	 * find out the maximum number of scatter-gather elements supported by
 863	 * this firmware
 864	 */
 865	adapter->sglen = megaraid_mbox_get_max_sg(adapter);
 866
 867	// enumerate RAID and SCSI channels so that all devices on SCSI
 868	// channels can later be exported, including disk devices
 869	megaraid_mbox_enum_raid_scsi(adapter);
 870
 871	/*
 872	 * Other parameters required by upper layer
 873	 *
 874	 * maximum number of sectors per IO command
 875	 */
 876	adapter->max_sectors = megaraid_max_sectors;
 877
 878	/*
 879	 * number of queued commands per LUN.
 880	 */
 881	adapter->cmd_per_lun = megaraid_cmd_per_lun;
 882
 883	/*
 884	 * Allocate resources required to issue FW calls, when sysfs is
 885	 * accessed
 886	 */
 887	if (megaraid_sysfs_alloc_resources(adapter) != 0)
 888		goto out_free_irq;
 889
890	// Set the DMA mask to 64-bit. All supported controllers are capable of
 891	// DMA in this range
 892	pci_read_config_dword(adapter->pdev, PCI_CONF_AMISIG64, &magic64);
 893
 894	if (((magic64 == HBA_SIGNATURE_64_BIT) &&
 895		((adapter->pdev->subsystem_device !=
 896		PCI_SUBSYS_ID_MEGARAID_SATA_150_6) &&
 897		(adapter->pdev->subsystem_device !=
 898		PCI_SUBSYS_ID_MEGARAID_SATA_150_4))) ||
 899		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
 900		adapter->pdev->device == PCI_DEVICE_ID_VERDE) ||
 901		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
 902		adapter->pdev->device == PCI_DEVICE_ID_DOBSON) ||
 903		(adapter->pdev->vendor == PCI_VENDOR_ID_LSI_LOGIC &&
 904		adapter->pdev->device == PCI_DEVICE_ID_LINDSAY) ||
 905		(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
 906		adapter->pdev->device == PCI_DEVICE_ID_PERC4_DI_EVERGLADES) ||
 907		(adapter->pdev->vendor == PCI_VENDOR_ID_DELL &&
 908		adapter->pdev->device == PCI_DEVICE_ID_PERC4E_DI_KOBUK)) {
 909		if (pci_set_dma_mask(adapter->pdev, DMA_BIT_MASK(64))) {
 910			con_log(CL_ANN, (KERN_WARNING
 911				"megaraid: DMA mask for 64-bit failed\n"));
 912
 913			if (pci_set_dma_mask (adapter->pdev, DMA_BIT_MASK(32))) {
 914				con_log(CL_ANN, (KERN_WARNING
 915					"megaraid: 32-bit DMA mask failed\n"));
 916				goto out_free_sysfs_res;
 917			}
 918		}
 919	}
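	/*
	 * Controllers that do not match above simply keep the 32-bit DMA mask
	 * set in megaraid_probe_one(); the 64-bit mask is only attempted for
	 * devices that advertise the 64-bit signature or are on the explicit
	 * list above, with a fallback to 32-bit if the 64-bit mask fails.
	 */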
 920
 921	// setup tasklet for DPC
 922	tasklet_init(&adapter->dpc_h, megaraid_mbox_dpc,
 923			(unsigned long)adapter);
 924
 925	con_log(CL_DLEVEL1, (KERN_INFO
 926		"megaraid mbox hba successfully initialized\n"));
 927
 928	return 0;
 929
 930out_free_sysfs_res:
 931	megaraid_sysfs_free_resources(adapter);
 932out_free_irq:
 933	free_irq(adapter->irq, adapter);
 934out_alloc_cmds:
 935	megaraid_free_cmd_packets(adapter);
 936out_iounmap:
 937	iounmap(raid_dev->baseaddr);
 938out_release_regions:
 939	pci_release_regions(pdev);
 940out_free_raid_dev:
 941	kfree(raid_dev);
 942
 943	return -1;
 944}
 945
 946
 947/**
 948 * megaraid_fini_mbox - undo controller initialization
 949 * @adapter		: our soft state
 950 */
 951static void
 952megaraid_fini_mbox(adapter_t *adapter)
 953{
 954	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
 955
 956	// flush all caches
 957	megaraid_mbox_flush_cache(adapter);
 958
 959	tasklet_kill(&adapter->dpc_h);
 960
 961	megaraid_sysfs_free_resources(adapter);
 962
 963	megaraid_free_cmd_packets(adapter);
 964
 965	free_irq(adapter->irq, adapter);
 966
 967	iounmap(raid_dev->baseaddr);
 968
 969	pci_release_regions(adapter->pdev);
 970
 971	kfree(raid_dev);
 972
 973	return;
 974}
 975
 976
 977/**
 978 * megaraid_alloc_cmd_packets - allocate shared mailbox
 979 * @adapter		: soft state of the raid controller
 980 *
981 * Allocate and align the shared mailbox. This mailbox is used to issue
982 * all the commands. For IO based controllers, the mailbox is also registered
 983 * with the FW. Allocate memory for all commands as well.
 984 * This is our big allocator.
 985 */
 986static int
 987megaraid_alloc_cmd_packets(adapter_t *adapter)
 988{
 989	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
 990	struct pci_dev		*pdev;
 991	unsigned long		align;
 992	scb_t			*scb;
 993	mbox_ccb_t		*ccb;
 994	struct mraid_pci_blk	*epthru_pci_blk;
 995	struct mraid_pci_blk	*sg_pci_blk;
 996	struct mraid_pci_blk	*mbox_pci_blk;
 997	int			i;
 998
 999	pdev = adapter->pdev;
1000
1001	/*
1002	 * Setup the mailbox
1003	 * Allocate the common 16-byte aligned memory for the handshake
1004	 * mailbox.
1005	 */
1006	raid_dev->una_mbox64 = pci_alloc_consistent(adapter->pdev,
1007			sizeof(mbox64_t), &raid_dev->una_mbox64_dma);
1008
1009	if (!raid_dev->una_mbox64) {
1010		con_log(CL_ANN, (KERN_WARNING
1011			"megaraid: out of memory, %s %d\n", __func__,
1012			__LINE__));
1013		return -1;
1014	}
1015	memset(raid_dev->una_mbox64, 0, sizeof(mbox64_t));
1016
1017	/*
1018	 * Align the mailbox at 16-byte boundary
1019	 */
1020	raid_dev->mbox	= &raid_dev->una_mbox64->mbox32;
1021
1022	raid_dev->mbox	= (mbox_t *)((((unsigned long)raid_dev->mbox) + 15) &
1023				(~0UL ^ 0xFUL));
1024
1025	raid_dev->mbox64 = (mbox64_t *)(((unsigned long)raid_dev->mbox) - 8);
1026
1027	align = ((void *)raid_dev->mbox -
1028			((void *)&raid_dev->una_mbox64->mbox32));
1029
1030	raid_dev->mbox_dma = (unsigned long)raid_dev->una_mbox64_dma + 8 +
1031			align;
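	/*
	 * At this point: "mbox" is the 32-bit mailbox rounded up to the next
	 * 16-byte boundary inside the over-allocated una_mbox64 buffer,
	 * "mbox64" sits 8 bytes before it so that its 64-bit transfer-address
	 * words immediately precede the 32-bit mailbox, and "mbox_dma" is the
	 * bus address of the aligned mailbox (una_mbox64_dma plus the same
	 * byte offset as in the virtual mapping).
	 */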
1032
1033	// Allocate memory for commands issued internally
1034	adapter->ibuf = pci_alloc_consistent(pdev, MBOX_IBUF_SIZE,
1035				&adapter->ibuf_dma_h);
1036	if (!adapter->ibuf) {
1037
1038		con_log(CL_ANN, (KERN_WARNING
1039			"megaraid: out of memory, %s %d\n", __func__,
1040			__LINE__));
1041
1042		goto out_free_common_mbox;
1043	}
1044	memset(adapter->ibuf, 0, MBOX_IBUF_SIZE);
1045
1046	// Allocate memory for our SCSI Command Blocks and their associated
1047	// memory
1048
1049	/*
1050	 * Allocate memory for the base list of scb. Later allocate memory for
1051	 * CCBs and embedded components of each CCB and point the pointers in
1052	 * scb to the allocated components
1053	 * NOTE: The code to allocate SCBs will be duplicated in all the LLDs,
1054	 * since the calling routine does not yet know the number of available
1055	 * commands.
1056	 */
1057	adapter->kscb_list = kcalloc(MBOX_MAX_SCSI_CMDS, sizeof(scb_t), GFP_KERNEL);
1058
1059	if (adapter->kscb_list == NULL) {
1060		con_log(CL_ANN, (KERN_WARNING
1061			"megaraid: out of memory, %s %d\n", __func__,
1062			__LINE__));
1063		goto out_free_ibuf;
1064	}
1065
1066	// memory allocation for our command packets
1067	if (megaraid_mbox_setup_dma_pools(adapter) != 0) {
1068		con_log(CL_ANN, (KERN_WARNING
1069			"megaraid: out of memory, %s %d\n", __func__,
1070			__LINE__));
1071		goto out_free_scb_list;
1072	}
1073
1074	// Adjust the scb pointers and link in the free pool
1075	epthru_pci_blk	= raid_dev->epthru_pool;
1076	sg_pci_blk	= raid_dev->sg_pool;
1077	mbox_pci_blk	= raid_dev->mbox_pool;
1078
1079	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1080		scb			= adapter->kscb_list + i;
1081		ccb			= raid_dev->ccb_list + i;
1082
1083		ccb->mbox	= (mbox_t *)(mbox_pci_blk[i].vaddr + 16);
1084		ccb->raw_mbox	= (uint8_t *)ccb->mbox;
1085		ccb->mbox64	= (mbox64_t *)(mbox_pci_blk[i].vaddr + 8);
1086		ccb->mbox_dma_h	= (unsigned long)mbox_pci_blk[i].dma_addr + 16;
1087
1088		// make sure the mailbox is aligned properly
1089		if (ccb->mbox_dma_h & 0x0F) {
1090			con_log(CL_ANN, (KERN_CRIT
1091				"megaraid mbox: not aligned on 16-bytes\n"));
1092
1093			goto out_teardown_dma_pools;
1094		}
1095
1096		ccb->epthru		= (mraid_epassthru_t *)
1097						epthru_pci_blk[i].vaddr;
1098		ccb->epthru_dma_h	= epthru_pci_blk[i].dma_addr;
1099		ccb->pthru		= (mraid_passthru_t *)ccb->epthru;
1100		ccb->pthru_dma_h	= ccb->epthru_dma_h;
1101
1102
1103		ccb->sgl64		= (mbox_sgl64 *)sg_pci_blk[i].vaddr;
1104		ccb->sgl_dma_h		= sg_pci_blk[i].dma_addr;
1105		ccb->sgl32		= (mbox_sgl32 *)ccb->sgl64;
1106
1107		scb->ccb		= (caddr_t)ccb;
1108		scb->gp			= 0;
1109
1110		scb->sno		= i;	// command index
1111
1112		scb->scp		= NULL;
1113		scb->state		= SCB_FREE;
1114		scb->dma_direction	= PCI_DMA_NONE;
1115		scb->dma_type		= MRAID_DMA_NONE;
1116		scb->dev_channel	= -1;
1117		scb->dev_target		= -1;
1118
1119		// put scb in the free pool
1120		list_add_tail(&scb->list, &adapter->kscb_pool);
1121	}
1122
1123	return 0;
1124
1125out_teardown_dma_pools:
1126	megaraid_mbox_teardown_dma_pools(adapter);
1127out_free_scb_list:
1128	kfree(adapter->kscb_list);
1129out_free_ibuf:
1130	pci_free_consistent(pdev, MBOX_IBUF_SIZE, (void *)adapter->ibuf,
1131		adapter->ibuf_dma_h);
1132out_free_common_mbox:
1133	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
1134		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1135
1136	return -1;
1137}
1138
1139
1140/**
1141 * megaraid_free_cmd_packets - free memory
1142 * @adapter		: soft state of the raid controller
1143 *
1144 * Release memory resources allocated for commands.
1145 */
1146static void
1147megaraid_free_cmd_packets(adapter_t *adapter)
1148{
1149	mraid_device_t *raid_dev = ADAP2RAIDDEV(adapter);
1150
1151	megaraid_mbox_teardown_dma_pools(adapter);
1152
1153	kfree(adapter->kscb_list);
1154
1155	pci_free_consistent(adapter->pdev, MBOX_IBUF_SIZE,
1156		(void *)adapter->ibuf, adapter->ibuf_dma_h);
1157
1158	pci_free_consistent(adapter->pdev, sizeof(mbox64_t),
1159		(caddr_t)raid_dev->una_mbox64, raid_dev->una_mbox64_dma);
1160	return;
1161}
1162
1163
1164/**
1165 * megaraid_mbox_setup_dma_pools - setup dma pool for command packets
1166 * @adapter		: HBA soft state
1167 *
1168 * Setup the dma pools for mailbox, passthru and extended passthru structures,
1169 * and scatter-gather lists.
1170 */
1171static int
1172megaraid_mbox_setup_dma_pools(adapter_t *adapter)
1173{
1174	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
1175	struct mraid_pci_blk	*epthru_pci_blk;
1176	struct mraid_pci_blk	*sg_pci_blk;
1177	struct mraid_pci_blk	*mbox_pci_blk;
1178	int			i;
1179
1180
1181
1182	// Allocate memory for 16-byte aligned mailboxes
1183	raid_dev->mbox_pool_handle = pci_pool_create("megaraid mbox pool",
1184						adapter->pdev,
1185						sizeof(mbox64_t) + 16,
1186						16, 0);
1187
1188	if (raid_dev->mbox_pool_handle == NULL) {
1189		goto fail_setup_dma_pool;
1190	}
1191
1192	mbox_pci_blk = raid_dev->mbox_pool;
1193	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1194		mbox_pci_blk[i].vaddr = pci_pool_alloc(
1195						raid_dev->mbox_pool_handle,
1196						GFP_KERNEL,
1197						&mbox_pci_blk[i].dma_addr);
1198		if (!mbox_pci_blk[i].vaddr) {
1199			goto fail_setup_dma_pool;
1200		}
1201	}
1202
1203	/*
1204	 * Allocate memory for each embedded passthru structure pointer.
1205	 * Request a 128-byte aligned structure for each passthru command
1206	 * structure.
1207	 * Since passthru and extended passthru commands are exclusive, they
1208	 * share a common memory pool. Passthru structures piggyback on memory
1209	 * allocated to extended passthru since passthru is the smaller of the two.
1210	 */
1211	raid_dev->epthru_pool_handle = pci_pool_create("megaraid mbox pthru",
1212			adapter->pdev, sizeof(mraid_epassthru_t), 128, 0);
1213
1214	if (raid_dev->epthru_pool_handle == NULL) {
1215		goto fail_setup_dma_pool;
1216	}
1217
1218	epthru_pci_blk = raid_dev->epthru_pool;
1219	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1220		epthru_pci_blk[i].vaddr = pci_pool_alloc(
1221						raid_dev->epthru_pool_handle,
1222						GFP_KERNEL,
1223						&epthru_pci_blk[i].dma_addr);
1224		if (!epthru_pci_blk[i].vaddr) {
1225			goto fail_setup_dma_pool;
1226		}
1227	}
1228
1229
1230	// Allocate memory for each scatter-gather list. Request 512-byte
1231	// alignment for each sg list
1232	raid_dev->sg_pool_handle = pci_pool_create("megaraid mbox sg",
1233					adapter->pdev,
1234					sizeof(mbox_sgl64) * MBOX_MAX_SG_SIZE,
1235					512, 0);
1236
1237	if (raid_dev->sg_pool_handle == NULL) {
1238		goto fail_setup_dma_pool;
1239	}
1240
1241	sg_pci_blk = raid_dev->sg_pool;
1242	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
1243		sg_pci_blk[i].vaddr = pci_pool_alloc(
1244						raid_dev->sg_pool_handle,
1245						GFP_KERNEL,
1246						&sg_pci_blk[i].dma_addr);
1247		if (!sg_pci_blk[i].vaddr) {
1248			goto fail_setup_dma_pool;
1249		}
1250	}
1251
1252	return 0;
1253
1254fail_setup_dma_pool:
1255	megaraid_mbox_teardown_dma_pools(adapter);
1256	return -1;
1257}
1258
1259
1260/**
1261 * megaraid_mbox_teardown_dma_pools - teardown dma pools for command packets
1262 * @adapter		: HBA soft state
1263 *
1264 * Teardown the dma pool for mailbox, passthru and extended passthru
1265 * structures, and scatter-gather lists.
1266 */
1267static void
1268megaraid_mbox_teardown_dma_pools(adapter_t *adapter)
1269{
1270	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
1271	struct mraid_pci_blk	*epthru_pci_blk;
1272	struct mraid_pci_blk	*sg_pci_blk;
1273	struct mraid_pci_blk	*mbox_pci_blk;
1274	int			i;
1275
1276
1277	sg_pci_blk = raid_dev->sg_pool;
1278	for (i = 0; i < MBOX_MAX_SCSI_CMDS && sg_pci_blk[i].vaddr; i++) {
1279		pci_pool_free(raid_dev->sg_pool_handle, sg_pci_blk[i].vaddr,
1280			sg_pci_blk[i].dma_addr);
1281	}
1282	if (raid_dev->sg_pool_handle)
1283		pci_pool_destroy(raid_dev->sg_pool_handle);
1284
1285
1286	epthru_pci_blk = raid_dev->epthru_pool;
1287	for (i = 0; i < MBOX_MAX_SCSI_CMDS && epthru_pci_blk[i].vaddr; i++) {
1288		pci_pool_free(raid_dev->epthru_pool_handle,
1289			epthru_pci_blk[i].vaddr, epthru_pci_blk[i].dma_addr);
1290	}
1291	if (raid_dev->epthru_pool_handle)
1292		pci_pool_destroy(raid_dev->epthru_pool_handle);
1293
1294
1295	mbox_pci_blk = raid_dev->mbox_pool;
1296	for (i = 0; i < MBOX_MAX_SCSI_CMDS && mbox_pci_blk[i].vaddr; i++) {
1297		pci_pool_free(raid_dev->mbox_pool_handle,
1298			mbox_pci_blk[i].vaddr, mbox_pci_blk[i].dma_addr);
1299	}
1300	if (raid_dev->mbox_pool_handle)
1301		pci_pool_destroy(raid_dev->mbox_pool_handle);
1302
1303	return;
1304}
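/*
 * Note: the teardown above is safe to call on a partially set up state --
 * each loop stops at the first block without a virtual address and the pool
 * handles are checked before being destroyed -- which is why the allocation
 * path simply jumps to fail_setup_dma_pool on any error.
 */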
1305
1306
1307/**
1308 * megaraid_alloc_scb - detach and return a scb from the free list
1309 * @adapter	: controller's soft state
1310 * @scp		: pointer to the scsi command to be executed
1311 *
1312 * Return the scb from the head of the free list. %NULL if there are none
1313 * available.
1314 */
1315static scb_t *
1316megaraid_alloc_scb(adapter_t *adapter, struct scsi_cmnd *scp)
1317{
1318	struct list_head	*head = &adapter->kscb_pool;
1319	scb_t			*scb = NULL;
1320	unsigned long		flags;
1321
1322	// detach scb from free pool
1323	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
1324
1325	if (list_empty(head)) {
1326		spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1327		return NULL;
1328	}
1329
1330	scb = list_entry(head->next, scb_t, list);
1331	list_del_init(&scb->list);
1332
1333	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1334
1335	scb->state	= SCB_ACTIVE;
1336	scb->scp	= scp;
1337	scb->dma_type	= MRAID_DMA_NONE;
1338
1339	return scb;
1340}
1341
1342
1343/**
1344 * megaraid_dealloc_scb - return the scb to the free pool
1345 * @adapter	: controller's soft state
1346 * @scb		: scb to be freed
1347 *
1348 * Return the scb back to the free list of scbs. The caller must 'flush' the
1349 * SCB before calling us. E.g., performing pci_unmap and/or pci_sync etc.
1350 * NOTE NOTE: Make sure the scb is not on any list before calling this
1351 * routine.
1352 */
1353static inline void
1354megaraid_dealloc_scb(adapter_t *adapter, scb_t *scb)
1355{
1356	unsigned long		flags;
1357
1358	// put scb in the free pool
1359	scb->state	= SCB_FREE;
1360	scb->scp	= NULL;
1361	spin_lock_irqsave(SCSI_FREE_LIST_LOCK(adapter), flags);
1362
1363	list_add(&scb->list, &adapter->kscb_pool);
1364
1365	spin_unlock_irqrestore(SCSI_FREE_LIST_LOCK(adapter), flags);
1366
1367	return;
1368}
1369
1370
1371/**
1372 * megaraid_mbox_mksgl - make the scatter-gather list
1373 * @adapter	: controller's soft state
1374 * @scb		: scsi control block
1375 *
1376 * Prepare the scatter-gather list.
1377 */
1378static int
1379megaraid_mbox_mksgl(adapter_t *adapter, scb_t *scb)
1380{
1381	struct scatterlist	*sgl;
1382	mbox_ccb_t		*ccb;
1383	struct scsi_cmnd	*scp;
1384	int			sgcnt;
1385	int			i;
1386
1387
1388	scp	= scb->scp;
1389	ccb	= (mbox_ccb_t *)scb->ccb;
1390
1391	sgcnt = scsi_dma_map(scp);
1392	BUG_ON(sgcnt < 0 || sgcnt > adapter->sglen);
1393
1394	// no mapping required if no data to be transferred
1395	if (!sgcnt)
1396		return 0;
1397
1398	scb->dma_type = MRAID_DMA_WSG;
1399
1400	scsi_for_each_sg(scp, sgl, sgcnt, i) {
1401		ccb->sgl64[i].address	= sg_dma_address(sgl);
1402		ccb->sgl64[i].length	= sg_dma_len(sgl);
1403	}
1404
1405	// Return count of SG nodes
1406	return sgcnt;
1407}
1408
1409
1410/**
1411 * mbox_post_cmd - issue a mailbox command
1412 * @adapter	: controller's soft state
1413 * @scb		: command to be issued
1414 *
1415 * Post the command to the controller if mailbox is available.
1416 */
1417static int
1418mbox_post_cmd(adapter_t *adapter, scb_t *scb)
1419{
1420	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
1421	mbox64_t	*mbox64;
1422	mbox_t		*mbox;
1423	mbox_ccb_t	*ccb;
1424	unsigned long	flags;
1425	unsigned int	i = 0;
1426
1427
1428	ccb	= (mbox_ccb_t *)scb->ccb;
1429	mbox	= raid_dev->mbox;
1430	mbox64	= raid_dev->mbox64;
1431
1432	/*
1433	 * Check for busy mailbox. If it is, return failure - the caller
1434	 * should retry later.
1435	 */
1436	spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
1437
1438	if (unlikely(mbox->busy)) {
1439		do {
1440			udelay(1);
1441			i++;
1442			rmb();
1443		} while(mbox->busy && (i < max_mbox_busy_wait));
1444
1445		if (mbox->busy) {
1446
1447			spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
1448
1449			return -1;
1450		}
1451	}
1452
1453
1454	// Copy this command's mailbox data into "adapter's" mailbox
1455	memcpy((caddr_t)mbox64, (caddr_t)ccb->mbox64, 22);
1456	mbox->cmdid = scb->sno;
1457
1458	adapter->outstanding_cmds++;
1459
1460	if (scb->dma_direction == PCI_DMA_TODEVICE)
1461		pci_dma_sync_sg_for_device(adapter->pdev,
1462					   scsi_sglist(scb->scp),
1463					   scsi_sg_count(scb->scp),
1464					   PCI_DMA_TODEVICE);
1465
1466	mbox->busy	= 1;	// Set busy
1467	mbox->poll	= 0;
1468	mbox->ack	= 0;
1469	wmb();
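	/*
	 * Ring the inbound doorbell: the mailbox bus address with its low bit
	 * set tells the firmware that a new command is ready (the wmb() above
	 * ensures the mailbox contents are visible first).
	 */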
1470
1471	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
1472
1473	spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
1474
1475	return 0;
1476}
1477
1478
1479/**
1480 * megaraid_queue_command - generic queue entry point for all LLDs
1481 * @scp		: pointer to the scsi command to be executed
1482 * @done	: callback routine to be called after the cmd has been completed
1483 *
1484 * Queue entry point for mailbox based controllers.
1485 */
1486static int
1487megaraid_queue_command_lck(struct scsi_cmnd *scp, void (*done)(struct scsi_cmnd *))
1488{
1489	adapter_t	*adapter;
1490	scb_t		*scb;
1491	int		if_busy;
1492
1493	adapter		= SCP2ADAPTER(scp);
1494	scp->scsi_done	= done;
1495	scp->result	= 0;
1496
1497	/*
1498	 * Allocate and build a SCB request
1499	 * if_busy flag will be set if megaraid_mbox_build_cmd() command could
1500	 * not allocate scb. We will return non-zero status in that case.
1501	 * NOTE: scb can be NULL even though certain commands completed
1502	 * successfully, e.g., MODE_SENSE and TEST_UNIT_READY; we
1503	 * return 0 in that case and do the callback right away.
1504	 */
1505	if_busy	= 0;
1506	scb = megaraid_mbox_build_cmd(adapter, scp, &if_busy);
1507	if (!scb) {	// command already completed
1508		done(scp);
1509		return 0;
1510	}
1511
1512	megaraid_mbox_runpendq(adapter, scb);
1513	return if_busy;
1514}
1515
1516static DEF_SCSI_QCMD(megaraid_queue_command)
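/*
 * DEF_SCSI_QCMD() generates the megaraid_queue_command() wrapper with the
 * host-lock handling expected by the midlayer's .queuecommand interface,
 * delegating the real work to megaraid_queue_command_lck() above.
 */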
1517
1518/**
1519 * megaraid_mbox_build_cmd - transform the mid-layer scsi commands
1520 * @adapter	: controller's soft state
1521 * @scp		: mid-layer scsi command pointer
1522 * @busy	: set if request could not be completed because of lack of
1523 *		resources
1524 *
1525 * Transform the mid-layer scsi command into megaraid firmware lingua:
1526 * convert the command issued by the mid-layer to a format understood by the
1527 * megaraid firmware. We also complete certain commands without sending them to the firmware.
1528 */
1529static scb_t *
1530megaraid_mbox_build_cmd(adapter_t *adapter, struct scsi_cmnd *scp, int *busy)
1531{
1532	mraid_device_t		*rdev = ADAP2RAIDDEV(adapter);
1533	int			channel;
1534	int			target;
1535	int			islogical;
1536	mbox_ccb_t		*ccb;
1537	mraid_passthru_t	*pthru;
1538	mbox64_t		*mbox64;
1539	mbox_t			*mbox;
1540	scb_t			*scb;
1541	char			skip[] = "skipping";
1542	char			scan[] = "scanning";
1543	char			*ss;
1544
1545
1546	/*
1547	 * Get the appropriate device map for the device this command is
1548	 * intended for
1549	 */
1550	MRAID_GET_DEVICE_MAP(adapter, scp, channel, target, islogical);
1551
1552	/*
1553	 * Logical drive commands
1554	 */
1555	if (islogical) {
1556		switch (scp->cmnd[0]) {
1557		case TEST_UNIT_READY:
1558			/*
1559			 * Do we support clustering and is the support enabled
1560			 * If no, return success always
1561			 */
1562			if (!adapter->ha) {
1563				scp->result = (DID_OK << 16);
1564				return NULL;
1565			}
1566
1567			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1568				scp->result = (DID_ERROR << 16);
1569				*busy = 1;
1570				return NULL;
1571			}
1572
1573			scb->dma_direction	= scp->sc_data_direction;
1574			scb->dev_channel	= 0xFF;
1575			scb->dev_target		= target;
1576			ccb			= (mbox_ccb_t *)scb->ccb;
1577
1578			/*
1579			 * The command id will be provided by the command
1580			 * issuance routine
1581			 */
1582			ccb->raw_mbox[0]	= CLUSTER_CMD;
1583			ccb->raw_mbox[2]	= RESERVATION_STATUS;
1584			ccb->raw_mbox[3]	= target;
1585
1586			return scb;
1587
1588		case MODE_SENSE:
1589		{
1590			struct scatterlist	*sgl;
1591			caddr_t			vaddr;
1592
1593			sgl = scsi_sglist(scp);
1594			if (sg_page(sgl)) {
1595				vaddr = (caddr_t) sg_virt(&sgl[0]);
1596
1597				memset(vaddr, 0, scp->cmnd[4]);
1598			}
1599			else {
1600				con_log(CL_ANN, (KERN_WARNING
1601						 "megaraid mailbox: invalid sg:%d\n",
1602						 __LINE__));
1603			}
1604		}
1605		scp->result = (DID_OK << 16);
1606		return NULL;
1607
1608		case INQUIRY:
1609			/*
1610			 * Display the channel scan for logical drives
1611			 * Do not display scan for a channel if already done.
1612			 */
1613			if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
1614
1615				con_log(CL_ANN, (KERN_INFO
1616					"scsi[%d]: scanning scsi channel %d",
1617					adapter->host->host_no,
1618					SCP2CHANNEL(scp)));
1619
1620				con_log(CL_ANN, (
1621					" [virtual] for logical drives\n"));
1622
1623				rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1624			}
1625
1626			if (scp->cmnd[1] & MEGA_SCSI_INQ_EVPD) {
1627				scp->sense_buffer[0] = 0x70;
1628				scp->sense_buffer[2] = ILLEGAL_REQUEST;
1629				scp->sense_buffer[12] = MEGA_INVALID_FIELD_IN_CDB;
1630				scp->result = CHECK_CONDITION << 1;
1631				return NULL;
1632			}
1633
1634			/* Fall through */
1635
1636		case READ_CAPACITY:
1637			/*
1638			 * Do not allow LUN > 0 for logical drives and
1639			 * requests for more than 40 logical drives
1640			 */
1641			if (SCP2LUN(scp)) {
1642				scp->result = (DID_BAD_TARGET << 16);
1643				return NULL;
1644			}
1645			if ((target % 0x80) >= MAX_LOGICAL_DRIVES_40LD) {
1646				scp->result = (DID_BAD_TARGET << 16);
1647				return NULL;
1648			}
1649
1650
1651			/* Allocate a SCB and initialize passthru */
1652			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1653				scp->result = (DID_ERROR << 16);
1654				*busy = 1;
1655				return NULL;
1656			}
1657
1658			ccb			= (mbox_ccb_t *)scb->ccb;
1659			scb->dev_channel	= 0xFF;
1660			scb->dev_target		= target;
1661			pthru			= ccb->pthru;
1662			mbox			= ccb->mbox;
1663			mbox64			= ccb->mbox64;
1664
1665			pthru->timeout		= 0;
1666			pthru->ars		= 1;
1667			pthru->reqsenselen	= 14;
1668			pthru->islogical	= 1;
1669			pthru->logdrv		= target;
1670			pthru->cdblen		= scp->cmd_len;
1671			memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
1672
1673			mbox->cmd		= MBOXCMD_PASSTHRU64;
1674			scb->dma_direction	= scp->sc_data_direction;
1675
1676			pthru->dataxferlen	= scsi_bufflen(scp);
1677			pthru->dataxferaddr	= ccb->sgl_dma_h;
1678			pthru->numsge		= megaraid_mbox_mksgl(adapter,
1679							scb);
1680
1681			mbox->xferaddr		= 0xFFFFFFFF;
1682			mbox64->xferaddr_lo	= (uint32_t )ccb->pthru_dma_h;
1683			mbox64->xferaddr_hi	= 0;
1684
1685			return scb;
1686
1687		case READ_6:
1688		case WRITE_6:
1689		case READ_10:
1690		case WRITE_10:
1691		case READ_12:
1692		case WRITE_12:
1693
1694			/*
1695			 * Allocate a SCB and initialize mailbox
1696			 */
1697			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1698				scp->result = (DID_ERROR << 16);
1699				*busy = 1;
1700				return NULL;
1701			}
1702			ccb			= (mbox_ccb_t *)scb->ccb;
1703			scb->dev_channel	= 0xFF;
1704			scb->dev_target		= target;
1705			mbox			= ccb->mbox;
1706			mbox64			= ccb->mbox64;
1707			mbox->logdrv		= target;
1708
1709			/*
1710			 * A little HACK: 2nd bit is zero for all scsi read
1711			 * commands and is set for all scsi write commands
1712			 */
1713			mbox->cmd = (scp->cmnd[0] & 0x02) ?  MBOXCMD_LWRITE64:
1714					MBOXCMD_LREAD64 ;
1715
1716			/*
1717			 * 6-byte READ(0x08) or WRITE(0x0A) cdb
1718			 */
1719			if (scp->cmd_len == 6) {
1720				mbox->numsectors = (uint32_t)scp->cmnd[4];
1721				mbox->lba =
1722					((uint32_t)scp->cmnd[1] << 16)	|
1723					((uint32_t)scp->cmnd[2] << 8)	|
1724					(uint32_t)scp->cmnd[3];
1725
1726				mbox->lba &= 0x1FFFFF;
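				/* the 6-byte CDB encodes only a 21-bit LBA, hence the mask above */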
1727			}
1728
1729			/*
1730			 * 10-byte READ(0x28) or WRITE(0x2A) cdb
1731			 */
1732			else if (scp->cmd_len == 10) {
1733				mbox->numsectors =
1734					(uint32_t)scp->cmnd[8] |
1735					((uint32_t)scp->cmnd[7] << 8);
1736				mbox->lba =
1737					((uint32_t)scp->cmnd[2] << 24) |
1738					((uint32_t)scp->cmnd[3] << 16) |
1739					((uint32_t)scp->cmnd[4] << 8) |
1740					(uint32_t)scp->cmnd[5];
1741			}
1742
1743			/*
1744			 * 12-byte READ(0xA8) or WRITE(0xAA) cdb
1745			 */
1746			else if (scp->cmd_len == 12) {
1747				mbox->lba =
1748					((uint32_t)scp->cmnd[2] << 24) |
1749					((uint32_t)scp->cmnd[3] << 16) |
1750					((uint32_t)scp->cmnd[4] << 8) |
1751					(uint32_t)scp->cmnd[5];
1752
1753				mbox->numsectors =
1754					((uint32_t)scp->cmnd[6] << 24) |
1755					((uint32_t)scp->cmnd[7] << 16) |
1756					((uint32_t)scp->cmnd[8] << 8) |
1757					(uint32_t)scp->cmnd[9];
1758			}
1759			else {
1760				con_log(CL_ANN, (KERN_WARNING
1761					"megaraid: unsupported CDB length\n"));
1762
1763				megaraid_dealloc_scb(adapter, scb);
1764
1765				scp->result = (DID_ERROR << 16);
1766				return NULL;
1767			}
1768
1769			scb->dma_direction = scp->sc_data_direction;
1770
1771			// Calculate Scatter-Gather info
1772			mbox64->xferaddr_lo	= (uint32_t )ccb->sgl_dma_h;
1773			mbox->numsge		= megaraid_mbox_mksgl(adapter,
1774							scb);
1775			mbox->xferaddr		= 0xFFFFFFFF;
1776			mbox64->xferaddr_hi	= 0;
1777
1778			return scb;
1779
1780		case RESERVE:
1781		case RELEASE:
1782			/*
1783			 * Do we support clustering and is the support enabled
1784			 */
1785			if (!adapter->ha) {
1786				scp->result = (DID_BAD_TARGET << 16);
1787				return NULL;
1788			}
1789
1790			/*
1791			 * Allocate a SCB and initialize mailbox
1792			 */
1793			if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1794				scp->result = (DID_ERROR << 16);
1795				*busy = 1;
1796				return NULL;
1797			}
1798
1799			ccb			= (mbox_ccb_t *)scb->ccb;
1800			scb->dev_channel	= 0xFF;
1801			scb->dev_target		= target;
1802			ccb->raw_mbox[0]	= CLUSTER_CMD;
1803			ccb->raw_mbox[2]	=  (scp->cmnd[0] == RESERVE) ?
1804						RESERVE_LD : RELEASE_LD;
1805
1806			ccb->raw_mbox[3]	= target;
1807			scb->dma_direction	= scp->sc_data_direction;
1808
1809			return scb;
1810
1811		default:
1812			scp->result = (DID_BAD_TARGET << 16);
1813			return NULL;
1814		}
1815	}
1816	else { // Passthru device commands
1817
1818		// Do not allow access to target id > 15 or LUN > 7
1819		if (target > 15 || SCP2LUN(scp) > 7) {
1820			scp->result = (DID_BAD_TARGET << 16);
1821			return NULL;
1822		}
1823
1824		// if fast load option was set and scan for last device is
1825		// over, reset the fast_load flag so that during a possible
1826		// next scan, devices can be made available
1827		if (rdev->fast_load && (target == 15) &&
1828			(SCP2CHANNEL(scp) == adapter->max_channel -1)) {
1829
1830			con_log(CL_ANN, (KERN_INFO
1831			"megaraid[%d]: physical device scan re-enabled\n",
1832				adapter->host->host_no));
1833			rdev->fast_load = 0;
1834		}
1835
1836		/*
1837		 * Display the channel scan for physical devices
1838		 */
1839		if (!(rdev->last_disp & (1L << SCP2CHANNEL(scp)))) {
1840
1841			ss = rdev->fast_load ? skip : scan;
1842
1843			con_log(CL_ANN, (KERN_INFO
1844				"scsi[%d]: %s scsi channel %d [Phy %d]",
1845				adapter->host->host_no, ss, SCP2CHANNEL(scp),
1846				channel));
1847
1848			con_log(CL_ANN, (
1849				" for non-raid devices\n"));
1850
1851			rdev->last_disp |= (1L << SCP2CHANNEL(scp));
1852		}
1853
1854		// disable channel sweep if fast load option given
1855		if (rdev->fast_load) {
1856			scp->result = (DID_BAD_TARGET << 16);
1857			return NULL;
1858		}
1859
1860		// Allocate a SCB and initialize passthru
1861		if (!(scb = megaraid_alloc_scb(adapter, scp))) {
1862			scp->result = (DID_ERROR << 16);
1863			*busy = 1;
1864			return NULL;
1865		}
1866
1867		ccb			= (mbox_ccb_t *)scb->ccb;
1868		scb->dev_channel	= channel;
1869		scb->dev_target		= target;
1870		scb->dma_direction	= scp->sc_data_direction;
1871		mbox			= ccb->mbox;
1872		mbox64			= ccb->mbox64;
1873
1874		// Does this firmware support extended CDBs
1875		if (adapter->max_cdb_sz == 16) {
1876			mbox->cmd		= MBOXCMD_EXTPTHRU;
1877
1878			megaraid_mbox_prepare_epthru(adapter, scb, scp);
1879
1880			mbox64->xferaddr_lo	= (uint32_t)ccb->epthru_dma_h;
1881			mbox64->xferaddr_hi	= 0;
1882			mbox->xferaddr		= 0xFFFFFFFF;
1883		}
1884		else {
1885			mbox->cmd = MBOXCMD_PASSTHRU64;
1886
1887			megaraid_mbox_prepare_pthru(adapter, scb, scp);
1888
1889			mbox64->xferaddr_lo	= (uint32_t)ccb->pthru_dma_h;
1890			mbox64->xferaddr_hi	= 0;
1891			mbox->xferaddr		= 0xFFFFFFFF;
1892		}
1893		return scb;
1894	}
1895
1896	// NOT REACHED
1897}
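
/*
 * Illustrative sketch (not compiled into the driver): the big-endian byte
 * layout that the READ/WRITE arms of the switch above decode from the CDB.
 * The helper and the sample CDB below are hypothetical and only restate the
 * 10-byte case; the 6- and 12-byte cases differ only in field widths.
 */
#if 0
#include <stdint.h>

static void sample_decode_rw10(const uint8_t cdb[10],
			       uint32_t *lba, uint32_t *nsect)
{
	/* bytes 2..5: 32-bit logical block address, MSB first */
	*lba = ((uint32_t)cdb[2] << 24) | ((uint32_t)cdb[3] << 16) |
	       ((uint32_t)cdb[4] << 8)  |  (uint32_t)cdb[5];

	/* bytes 7..8: 16-bit transfer length in sectors */
	*nsect = ((uint32_t)cdb[7] << 8) | (uint32_t)cdb[8];
}

/*
 * e.g. a READ(10) for 8 sectors at LBA 0x12345,
 * cdb[] = { 0x28, 0x00, 0x00, 0x01, 0x23, 0x45, 0x00, 0x00, 0x08, 0x00 },
 * yields *lba == 0x12345 and *nsect == 8 -- exactly the values written to
 * mbox->lba and mbox->numsectors in the READ_10/WRITE_10 case above.
 */
#endif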
1898
1899
1900/**
1901 * megaraid_mbox_runpendq - execute commands queued in the pending queue
1902 * @adapter	: controller's soft state
1903 * @scb_q	: SCB to be queued in the pending list
1904 *
1905 * Scan the pending list for commands which are not yet issued and try to
1906 * post to the controller. The SCB can be a null pointer, which indicates that
1907 * there is no new SCB to queue; just execute the ones already on the pending list.
1908 *
1909 * NOTE: We do not actually traverse the pending list. The SCBs are plucked
1910 * out from the head of the pending list. If it is successfully issued, the
1911 * next SCB is at the head now.
1912 */
1913static void
1914megaraid_mbox_runpendq(adapter_t *adapter, scb_t *scb_q)
1915{
1916	scb_t			*scb;
1917	unsigned long		flags;
1918
1919	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
1920
1921	if (scb_q) {
1922		scb_q->state = SCB_PENDQ;
1923		list_add_tail(&scb_q->list, &adapter->pend_list);
1924	}
1925
1926	// if the adapter is not in quiescent mode, post the commands to FW
1927	if (adapter->quiescent) {
1928		spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
1929		return;
1930	}
1931
1932	while (!list_empty(&adapter->pend_list)) {
1933
1934		assert_spin_locked(PENDING_LIST_LOCK(adapter));
1935
1936		scb = list_entry(adapter->pend_list.next, scb_t, list);
1937
1938		// remove the scb from the pending list and try to
1939		// issue. If we are unable to issue it, put back in
1940		// the pending list and return
1941
1942		list_del_init(&scb->list);
1943
1944		spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
1945
1946		// if mailbox was busy, return SCB back to pending
1947		// list. Make sure to add at the head, since that's
1948		// where it would have been removed from
1949
1950		scb->state = SCB_ISSUED;
1951
1952		if (mbox_post_cmd(adapter, scb) != 0) {
1953
1954			spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
1955
1956			scb->state = SCB_PENDQ;
1957
1958			list_add(&scb->list, &adapter->pend_list);
1959
1960			spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
1961				flags);
1962
1963			return;
1964		}
1965
1966		spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
1967	}
1968
1969	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
1970
1971
1972	return;
1973}
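
/*
 * Illustrative sketch (not compiled into the driver): the two ways the
 * routine above is used. The submission path hands in a freshly built SCB,
 * while completion paths (the ISR, the CMM done routine) pass NULL merely to
 * drain whatever is already pending. 'adapter' and 'scb' stand for the
 * driver's own adapter_t and scb_t objects.
 */
#if 0
	/* I/O submission: queue the new command and kick the pending list */
	megaraid_mbox_runpendq(adapter, scb);

	/* interrupt/completion context: nothing new to queue, only drain */
	megaraid_mbox_runpendq(adapter, NULL);
#endif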
1974
1975
1976/**
1977 * megaraid_mbox_prepare_pthru - prepare a command for physical devices
1978 * @adapter	: pointer to controller's soft state
1979 * @scb		: scsi control block
1980 * @scp		: scsi command from the mid-layer
1981 *
1982 * Prepare a command for the scsi physical devices.
1983 */
1984static void
1985megaraid_mbox_prepare_pthru(adapter_t *adapter, scb_t *scb,
1986		struct scsi_cmnd *scp)
1987{
1988	mbox_ccb_t		*ccb;
1989	mraid_passthru_t	*pthru;
1990	uint8_t			channel;
1991	uint8_t			target;
1992
1993	ccb	= (mbox_ccb_t *)scb->ccb;
1994	pthru	= ccb->pthru;
1995	channel	= scb->dev_channel;
1996	target	= scb->dev_target;
1997
1998	// 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
1999	pthru->timeout		= 4;	
2000	pthru->ars		= 1;
2001	pthru->islogical	= 0;
2002	pthru->channel		= 0;
2003	pthru->target		= (channel << 4) | target;
2004	pthru->logdrv		= SCP2LUN(scp);
2005	pthru->reqsenselen	= 14;
2006	pthru->cdblen		= scp->cmd_len;
2007
2008	memcpy(pthru->cdb, scp->cmnd, scp->cmd_len);
2009
2010	if (scsi_bufflen(scp)) {
2011		pthru->dataxferlen	= scsi_bufflen(scp);
2012		pthru->dataxferaddr	= ccb->sgl_dma_h;
2013		pthru->numsge		= megaraid_mbox_mksgl(adapter, scb);
2014	}
2015	else {
2016		pthru->dataxferaddr	= 0;
2017		pthru->dataxferlen	= 0;
2018		pthru->numsge		= 0;
2019	}
2020	return;
2021}
2022
2023
2024/**
2025 * megaraid_mbox_prepare_epthru - prepare a command for physical devices
2026 * @adapter	: pointer to controller's soft state
2027 * @scb		: scsi control block
2028 * @scp		: scsi command from the mid-layer
2029 *
2030 * Prepare a command for the scsi physical devices. This routine prepares
2031 * commands for devices which can take extended CDBs (>10 bytes).
2032 */
2033static void
2034megaraid_mbox_prepare_epthru(adapter_t *adapter, scb_t *scb,
2035		struct scsi_cmnd *scp)
2036{
2037	mbox_ccb_t		*ccb;
2038	mraid_epassthru_t	*epthru;
2039	uint8_t			channel;
2040	uint8_t			target;
2041
2042	ccb	= (mbox_ccb_t *)scb->ccb;
2043	epthru	= ccb->epthru;
2044	channel	= scb->dev_channel;
2045	target	= scb->dev_target;
2046
2047	// 0=6sec, 1=60sec, 2=10min, 3=3hrs, 4=NO timeout
2048	epthru->timeout		= 4;	
2049	epthru->ars		= 1;
2050	epthru->islogical	= 0;
2051	epthru->channel		= 0;
2052	epthru->target		= (channel << 4) | target;
2053	epthru->logdrv		= SCP2LUN(scp);
2054	epthru->reqsenselen	= 14;
2055	epthru->cdblen		= scp->cmd_len;
2056
2057	memcpy(epthru->cdb, scp->cmnd, scp->cmd_len);
2058
2059	if (scsi_bufflen(scp)) {
2060		epthru->dataxferlen	= scsi_bufflen(scp);
2061		epthru->dataxferaddr	= ccb->sgl_dma_h;
2062		epthru->numsge		= megaraid_mbox_mksgl(adapter, scb);
2063	}
2064	else {
2065		epthru->dataxferaddr	= 0;
2066		epthru->dataxferlen	= 0;
2067		epthru->numsge		= 0;
2068	}
2069	return;
2070}
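
/*
 * Illustrative sketch (not compiled into the driver): the packed 'target'
 * byte used by both passthru formats above -- channel in the high nibble,
 * target ID in the low nibble. The values are hypothetical.
 */
#if 0
	/* physical device at channel 2, target 5 */
	epthru->target = (2 << 4) | 5;		/* 0x25 */
	pthru->target  = (2 << 4) | 5;		/* same encoding for pthru */
#endif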
2071
2072
2073/**
2074 * megaraid_ack_sequence - interrupt ack sequence for memory mapped HBAs
2075 * @adapter	: controller's soft state
2076 *
2077 * Interrupt acknowledgement sequence for memory mapped HBAs. Find the
2078 * completed commands and put them on the completed list for later processing.
2079 *
2080 * Returns:	1 if the interrupt is valid, 0 otherwise
2081 */
2082static int
2083megaraid_ack_sequence(adapter_t *adapter)
2084{
2085	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
2086	mbox_t			*mbox;
2087	scb_t			*scb;
2088	uint8_t			nstatus;
2089	uint8_t			completed[MBOX_MAX_FIRMWARE_STATUS];
2090	struct list_head	clist;
2091	int			handled;
2092	uint32_t		dword;
2093	unsigned long		flags;
2094	int			i, j;
2095
2096
2097	mbox	= raid_dev->mbox;
2098
2099	// move the SCBs from the firmware completed array to our local list
2100	INIT_LIST_HEAD(&clist);
2101
2102	// loop till F/W has more commands for us to complete
2103	handled = 0;
2104	spin_lock_irqsave(MAILBOX_LOCK(raid_dev), flags);
2105	do {
2106		/*
2107		 * Check if a valid interrupt is pending. If found, force the
2108		 * interrupt line low.
2109		 */
2110		dword = RDOUTDOOR(raid_dev);
2111		if (dword != 0x10001234) break;
2112
2113		handled = 1;
2114
2115		WROUTDOOR(raid_dev, 0x10001234);
2116
2117		nstatus = 0;
2118		// wait for valid numstatus to post
2119		for (i = 0; i < 0xFFFFF; i++) {
2120			if (mbox->numstatus != 0xFF) {
2121				nstatus = mbox->numstatus;
2122				break;
2123			}
2124			rmb();
2125		}
2126		mbox->numstatus = 0xFF;
2127
2128		adapter->outstanding_cmds -= nstatus;
2129
2130		for (i = 0; i < nstatus; i++) {
2131
2132			// wait for valid command index to post
2133			for (j = 0; j < 0xFFFFF; j++) {
2134				if (mbox->completed[i] != 0xFF) break;
2135				rmb();
2136			}
2137			completed[i]		= mbox->completed[i];
2138			mbox->completed[i]	= 0xFF;
2139
2140			if (completed[i] == 0xFF) {
2141				con_log(CL_ANN, (KERN_CRIT
2142				"megaraid: command posting timed out\n"));
2143
2144				BUG();
2145				continue;
2146			}
2147
2148			// Get SCB associated with this command id
2149			if (completed[i] >= MBOX_MAX_SCSI_CMDS) {
2150				// a cmm command
2151				scb = adapter->uscb_list + (completed[i] -
2152						MBOX_MAX_SCSI_CMDS);
2153			}
2154			else {
2155				// an os command
2156				scb = adapter->kscb_list + completed[i];
2157			}
2158
2159			scb->status = mbox->status;
2160			list_add_tail(&scb->list, &clist);
2161		}
2162
2163		// Acknowledge interrupt
2164		WRINDOOR(raid_dev, 0x02);
2165
2166	} while(1);
2167
2168	spin_unlock_irqrestore(MAILBOX_LOCK(raid_dev), flags);
2169
2170
2171	// put the completed commands in the completed list. DPC would
2172	// complete these commands later
2173	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2174
2175	list_splice(&clist, &adapter->completed_list);
2176
2177	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2178
2179
2180	// schedule the DPC if there is some work for it
2181	if (handled)
2182		tasklet_schedule(&adapter->dpc_h);
2183
2184	return handled;
2185}
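
/*
 * Illustrative sketch (not compiled into the driver): how a completed
 * command index returned by the firmware selects an SCB in the routine
 * above. Indices below MBOX_MAX_SCSI_CMDS address the kernel (mid-layer)
 * SCB pool; the rest address the management-module pool. This mirrors the
 * 'sno' assignment done when the pools are set up (see
 * megaraid_cmm_register() below).
 */
#if 0
static scb_t *sample_cmdid_to_scb(adapter_t *adapter, uint8_t cmdid)
{
	if (cmdid >= MBOX_MAX_SCSI_CMDS)	/* CMM/ioctl command */
		return adapter->uscb_list + (cmdid - MBOX_MAX_SCSI_CMDS);

	return adapter->kscb_list + cmdid;	/* regular SCSI command */
}
#endif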
2186
2187
2188/**
2189 * megaraid_isr - isr for memory based mailbox based controllers
2190 * @irq		: irq
2191 * @devp	: pointer to our soft state
2192 *
2193 * Interrupt service routine for memory-mapped mailbox controllers.
2194 */
2195static irqreturn_t
2196megaraid_isr(int irq, void *devp)
2197{
2198	adapter_t	*adapter = devp;
2199	int		handled;
2200
2201	handled = megaraid_ack_sequence(adapter);
2202
2203	/* Loop through any pending requests */
2204	if (!adapter->quiescent) {
2205		megaraid_mbox_runpendq(adapter, NULL);
2206	}
2207
2208	return IRQ_RETVAL(handled);
2209}
2210
2211
2212/**
2213 * megaraid_mbox_sync_scb - sync kernel buffers
2214 * @adapter	: controller's soft state
2215 * @scb		: pointer to the resource packet
2216 *
2217 * DMA sync if required.
2218 */
2219static void
2220megaraid_mbox_sync_scb(adapter_t *adapter, scb_t *scb)
2221{
2222	mbox_ccb_t	*ccb;
2223
2224	ccb	= (mbox_ccb_t *)scb->ccb;
2225
2226	if (scb->dma_direction == PCI_DMA_FROMDEVICE)
2227		pci_dma_sync_sg_for_cpu(adapter->pdev,
2228					scsi_sglist(scb->scp),
2229					scsi_sg_count(scb->scp),
2230					PCI_DMA_FROMDEVICE);
2231
2232	scsi_dma_unmap(scb->scp);
2233	return;
2234}
2235
2236
2237/**
2238 * megaraid_mbox_dpc - the tasklet to complete the commands from completed list
2239 * @devp	: pointer to HBA soft state
2240 *
2241 * Pick up the commands from the completed list and send them back to the owners.
2242 * This is a reentrant function and does not assume any locks are held while
2243 * it is being called.
2244 */
2245static void
2246megaraid_mbox_dpc(unsigned long devp)
2247{
2248	adapter_t		*adapter = (adapter_t *)devp;
2249	mraid_device_t		*raid_dev;
2250	struct list_head	clist;
2251	struct scatterlist	*sgl;
2252	scb_t			*scb;
2253	scb_t			*tmp;
2254	struct scsi_cmnd	*scp;
2255	mraid_passthru_t	*pthru;
2256	mraid_epassthru_t	*epthru;
2257	mbox_ccb_t		*ccb;
2258	int			islogical;
2259	int			pdev_index;
2260	int			pdev_state;
2261	mbox_t			*mbox;
2262	unsigned long		flags;
2263	uint8_t			c;
2264	int			status;
2265	uioc_t			*kioc;
2266
2267
2268	if (!adapter) return;
2269
2270	raid_dev = ADAP2RAIDDEV(adapter);
2271
2272	// move the SCBs from the completed list to our local list
2273	INIT_LIST_HEAD(&clist);
2274
2275	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2276
2277	list_splice_init(&adapter->completed_list, &clist);
2278
2279	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2280
2281
2282	list_for_each_entry_safe(scb, tmp, &clist, list) {
2283
2284		status		= scb->status;
2285		scp		= scb->scp;
2286		ccb		= (mbox_ccb_t *)scb->ccb;
2287		pthru		= ccb->pthru;
2288		epthru		= ccb->epthru;
2289		mbox		= ccb->mbox;
2290
2291		// Make sure f/w has completed a valid command
2292		if (scb->state != SCB_ISSUED) {
2293			con_log(CL_ANN, (KERN_CRIT
2294			"megaraid critical err: invalid command %d:%d:%p\n",
2295				scb->sno, scb->state, scp));
2296			BUG();
2297			continue;	// Must never happen!
2298		}
2299
2300		// check for the management command and complete it right away
2301		if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
2302			scb->state	= SCB_FREE;
2303			scb->status	= status;
2304
2305			// remove from local clist
2306			list_del_init(&scb->list);
2307
2308			kioc			= (uioc_t *)scb->gp;
2309			kioc->status		= 0;
2310
2311			megaraid_mbox_mm_done(adapter, scb);
2312
2313			continue;
2314		}
2315
2316		// Was an abort issued for this command earlier
2317		if (scb->state & SCB_ABORT) {
2318			con_log(CL_ANN, (KERN_NOTICE
2319			"megaraid: aborted cmd [%x] completed\n",
2320				scb->sno));
2321		}
2322
2323		/*
2324		 * If the inquiry came from a disk drive which is not part of
2325		 * any RAID array, expose it to the kernel. For this to be
2326		 * enabled, user must set the "megaraid_expose_unconf_disks"
2327		 * flag to 1 by specifying it on module parameter list.
2328		 * This would enable data migration off drives from other
2329		 * configurations.
2330		 */
2331		islogical = MRAID_IS_LOGICAL(adapter, scp);
2332		if (scp->cmnd[0] == INQUIRY && status == 0 && islogical == 0
2333				&& IS_RAID_CH(raid_dev, scb->dev_channel)) {
2334
2335			sgl = scsi_sglist(scp);
2336			if (sg_page(sgl)) {
2337				c = *(unsigned char *) sg_virt(&sgl[0]);
2338			} else {
2339				con_log(CL_ANN, (KERN_WARNING
2340						 "megaraid mailbox: invalid sg:%d\n",
2341						 __LINE__));
2342				c = 0;
2343			}
2344
2345			if ((c & 0x1F ) == TYPE_DISK) {
2346				pdev_index = (scb->dev_channel * 16) +
2347					scb->dev_target;
2348				pdev_state =
2349					raid_dev->pdrv_state[pdev_index] & 0x0F;
2350
2351				if (pdev_state == PDRV_ONLINE		||
2352					pdev_state == PDRV_FAILED	||
2353					pdev_state == PDRV_RBLD		||
2354					pdev_state == PDRV_HOTSPARE	||
2355					megaraid_expose_unconf_disks == 0) {
2356
2357					status = 0xF0;
2358				}
2359			}
2360		}
2361
2362		// Convert MegaRAID status to Linux error code
2363		switch (status) {
2364
2365		case 0x00:
2366
2367			scp->result = (DID_OK << 16);
2368			break;
2369
2370		case 0x02:
2371
2372			/* set sense_buffer and result fields */
2373			if (mbox->cmd == MBOXCMD_PASSTHRU ||
2374				mbox->cmd == MBOXCMD_PASSTHRU64) {
2375
2376				memcpy(scp->sense_buffer, pthru->reqsensearea,
2377						14);
2378
2379				scp->result = DRIVER_SENSE << 24 |
2380					DID_OK << 16 | CHECK_CONDITION << 1;
2381			}
2382			else {
2383				if (mbox->cmd == MBOXCMD_EXTPTHRU) {
2384
2385					memcpy(scp->sense_buffer,
2386						epthru->reqsensearea, 14);
2387
2388					scp->result = DRIVER_SENSE << 24 |
2389						DID_OK << 16 |
2390						CHECK_CONDITION << 1;
2391				} else {
2392					scp->sense_buffer[0] = 0x70;
2393					scp->sense_buffer[2] = ABORTED_COMMAND;
2394					scp->result = CHECK_CONDITION << 1;
2395				}
2396			}
2397			break;
2398
2399		case 0x08:
2400
2401			scp->result = DID_BUS_BUSY << 16 | status;
2402			break;
2403
2404		default:
2405
2406			/*
2407			 * If TEST_UNIT_READY fails, we know RESERVATION_STATUS
2408			 * failed
2409			 */
2410			if (scp->cmnd[0] == TEST_UNIT_READY) {
2411				scp->result = DID_ERROR << 16 |
2412					RESERVATION_CONFLICT << 1;
2413			}
2414			else
2415			/*
2416			 * Error code returned is 1 if Reserve or Release
2417			 * failed or the input parameter is invalid
2418			 */
2419			if (status == 1 && (scp->cmnd[0] == RESERVE ||
2420					 scp->cmnd[0] == RELEASE)) {
2421
2422				scp->result = DID_ERROR << 16 |
2423					RESERVATION_CONFLICT << 1;
2424			}
2425			else {
2426				scp->result = DID_BAD_TARGET << 16 | status;
2427			}
2428		}
2429
2430		// print a debug message for all failed commands
2431		if (status) {
2432			megaraid_mbox_display_scb(adapter, scb);
2433		}
2434
2435		// Free our internal resources and call the mid-layer callback
2436		// routine
2437		megaraid_mbox_sync_scb(adapter, scb);
2438
2439		// remove from local clist
2440		list_del_init(&scb->list);
2441
2442		// put back in free list
2443		megaraid_dealloc_scb(adapter, scb);
2444
2445		// send the scsi packet back to kernel
2446		scp->scsi_done(scp);
2447	}
2448
2449	return;
2450}
2451
2452
2453/**
2454 * megaraid_abort_handler - abort the scsi command
2455 * @scp		: command to be aborted
2456 *
2457 * Abort a previous SCSI request. Only commands on the pending list can be
2458 * aborted. All the commands issued to the F/W must complete.
2459 **/
2460static int
2461megaraid_abort_handler(struct scsi_cmnd *scp)
2462{
2463	adapter_t		*adapter;
2464	mraid_device_t		*raid_dev;
2465	scb_t			*scb;
2466	scb_t			*tmp;
2467	int			found;
2468	unsigned long		flags;
2469	int			i;
2470
2471
2472	adapter		= SCP2ADAPTER(scp);
2473	raid_dev	= ADAP2RAIDDEV(adapter);
2474
2475	con_log(CL_ANN, (KERN_WARNING
2476		"megaraid: aborting cmd=%x <c=%d t=%d l=%d>\n",
2477		scp->cmnd[0], SCP2CHANNEL(scp),
2478		SCP2TARGET(scp), SCP2LUN(scp)));
2479
2480	// If FW has stopped responding, simply return failure
2481	if (raid_dev->hw_error) {
2482		con_log(CL_ANN, (KERN_NOTICE
2483			"megaraid: hw error, not aborting\n"));
2484		return FAILED;
2485	}
2486
2487	// There might be a race here, where the command was completed by the
2488	// firmware and now it is on the completed list. Before we could
2489	// complete the command to the kernel in dpc, the abort came.
2490	// Find out if this is the case to avoid the race.
2491	scb = NULL;
2492	spin_lock_irqsave(COMPLETED_LIST_LOCK(adapter), flags);
2493	list_for_each_entry_safe(scb, tmp, &adapter->completed_list, list) {
2494
2495		if (scb->scp == scp) {	// Found command
2496
2497			list_del_init(&scb->list);	// from completed list
2498
2499			con_log(CL_ANN, (KERN_WARNING
2500			"megaraid: %d[%d:%d], abort from completed list\n",
2501				scb->sno, scb->dev_channel, scb->dev_target));
2502
2503			scp->result = (DID_ABORT << 16);
2504			scp->scsi_done(scp);
2505
2506			megaraid_dealloc_scb(adapter, scb);
2507
2508			spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter),
2509				flags);
2510
2511			return SUCCESS;
2512		}
2513	}
2514	spin_unlock_irqrestore(COMPLETED_LIST_LOCK(adapter), flags);
2515
2516
2517	// Find out if this command is still on the pending list. If it is and
2518	// was never issued, abort and return success. If the command is owned
2519	// by the firmware, we must wait for the firmware to complete it.
2520	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2521	list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2522
2523		if (scb->scp == scp) {	// Found command
2524
2525			list_del_init(&scb->list);	// from pending list
2526
2527			ASSERT(!(scb->state & SCB_ISSUED));
2528
2529			con_log(CL_ANN, (KERN_WARNING
2530				"megaraid abort: [%d:%d], driver owner\n",
2531				scb->dev_channel, scb->dev_target));
2532
2533			scp->result = (DID_ABORT << 16);
2534			scp->scsi_done(scp);
2535
2536			megaraid_dealloc_scb(adapter, scb);
2537
2538			spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter),
2539				flags);
2540
2541			return SUCCESS;
2542		}
2543	}
2544	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2545
2546
2547	// Check whether this command is ours at all; if it is, it must be
2548	// owned by the firmware at this point. The only way to locate a
2549	// firmware-owned SCB is to traverse the list of all SCBs, since the
2550	// driver does not maintain these SCBs on any list
2551	found = 0;
2552	spin_lock_irq(&adapter->lock);
2553	for (i = 0; i < MBOX_MAX_SCSI_CMDS; i++) {
2554		scb = adapter->kscb_list + i;
2555
2556		if (scb->scp == scp) {
2557
2558			found = 1;
2559
2560			if (!(scb->state & SCB_ISSUED)) {
2561				con_log(CL_ANN, (KERN_WARNING
2562				"megaraid abort: %d[%d:%d], invalid state\n",
2563				scb->sno, scb->dev_channel, scb->dev_target));
2564				BUG();
2565			}
2566			else {
2567				con_log(CL_ANN, (KERN_WARNING
2568				"megaraid abort: %d[%d:%d], fw owner\n",
2569				scb->sno, scb->dev_channel, scb->dev_target));
2570			}
2571		}
2572	}
2573	spin_unlock_irq(&adapter->lock);
2574
2575	if (!found) {
2576		con_log(CL_ANN, (KERN_WARNING "megaraid abort: do not own\n"));
2577
2578		// FIXME: Should there be a callback for this command?
2579		return SUCCESS;
2580	}
2581
2582	// We cannot actually abort a command owned by firmware, return
2583	// failure and wait for reset. In host reset handler, we will find out
2584	// if the HBA is still live
2585	return FAILED;
2586}
2587
2588/**
2589 * megaraid_reset_handler - device reset handler for mailbox based driver
2590 * @scp		: reference command
2591 *
2592 * Reset handler for the mailbox based controller. First try to find out if
2593 * the FW is still live, in which case the outstanding commands counter must go
2594 * down to 0. If that happens, also issue the reservation reset command to
2595 * relinquish (possible) reservations on the logical drives connected to this
2596 * host.
2597 **/
2598static int
2599megaraid_reset_handler(struct scsi_cmnd *scp)
2600{
2601	adapter_t	*adapter;
2602	scb_t		*scb;
2603	scb_t		*tmp;
2604	mraid_device_t	*raid_dev;
2605	unsigned long	flags;
2606	uint8_t		raw_mbox[sizeof(mbox_t)];
2607	int		rval;
2608	int		recovery_window;
2609	int		recovering;
2610	int		i;
2611	uioc_t		*kioc;
2612
2613	adapter		= SCP2ADAPTER(scp);
2614	raid_dev	= ADAP2RAIDDEV(adapter);
2615
2616	// return failure if adapter is not responding
2617	if (raid_dev->hw_error) {
2618		con_log(CL_ANN, (KERN_NOTICE
2619			"megaraid: hw error, cannot reset\n"));
2620		return FAILED;
2621	}
2622
2623
2624	// Under exceptional conditions, FW can take up to 3 minutes to
2625	// complete command processing. Wait for additional 2 minutes for the
2626	// pending commands counter to go down to 0. If it doesn't, let the
2627	// controller be marked offline
2628	// Also, reset all the commands currently owned by the driver
2629	spin_lock_irqsave(PENDING_LIST_LOCK(adapter), flags);
2630	list_for_each_entry_safe(scb, tmp, &adapter->pend_list, list) {
2631		list_del_init(&scb->list);	// from pending list
2632
2633		if (scb->sno >= MBOX_MAX_SCSI_CMDS) {
2634			con_log(CL_ANN, (KERN_WARNING
2635			"megaraid: IOCTL packet with %d[%d:%d] being reset\n",
2636			scb->sno, scb->dev_channel, scb->dev_target));
2637
2638			scb->status = -1;
2639
2640			kioc			= (uioc_t *)scb->gp;
2641			kioc->status		= -EFAULT;
2642
2643			megaraid_mbox_mm_done(adapter, scb);
2644		} else {
2645			if (scb->scp == scp) {	// Found command
2646				con_log(CL_ANN, (KERN_WARNING
2647					"megaraid: %d[%d:%d], reset from pending list\n",
2648					scb->sno, scb->dev_channel, scb->dev_target));
2649			} else {
2650				con_log(CL_ANN, (KERN_WARNING
2651				"megaraid: IO packet with %d[%d:%d] being reset\n",
2652				scb->sno, scb->dev_channel, scb->dev_target));
2653			}
2654
2655			scb->scp->result = (DID_RESET << 16);
2656			scb->scp->scsi_done(scb->scp);
2657
2658			megaraid_dealloc_scb(adapter, scb);
2659		}
2660	}
2661	spin_unlock_irqrestore(PENDING_LIST_LOCK(adapter), flags);
2662
2663	if (adapter->outstanding_cmds) {
2664		con_log(CL_ANN, (KERN_NOTICE
2665			"megaraid: %d outstanding commands. Max wait %d sec\n",
2666			adapter->outstanding_cmds,
2667			(MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT)));
2668	}
2669
2670	recovery_window = MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
2671
2672	recovering = adapter->outstanding_cmds;
2673
2674	for (i = 0; i < recovery_window; i++) {
2675
2676		megaraid_ack_sequence(adapter);
2677
2678		// print a message once every 5 seconds only
2679		if (!(i % 5)) {
2680			con_log(CL_ANN, (
2681			"megaraid mbox: Wait for %d commands to complete:%d\n",
2682				adapter->outstanding_cmds,
2683				(MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT) - i));
2684		}
2685
2686		// break out early once all outstanding commands have completed
2687		if (adapter->outstanding_cmds == 0) {
2688			break;
2689		}
2690
2691		msleep(1000);
2692	}
2693
2694	spin_lock(&adapter->lock);
2695
2696	// If still outstanding commands, bail out
2697	if (adapter->outstanding_cmds) {
2698		con_log(CL_ANN, (KERN_WARNING
2699			"megaraid mbox: critical hardware error!\n"));
2700
2701		raid_dev->hw_error = 1;
2702
2703		rval = FAILED;
2704		goto out;
2705	}
2706	else {
2707		con_log(CL_ANN, (KERN_NOTICE
2708		"megaraid mbox: reset sequence completed successfully\n"));
2709	}
2710
2711
2712	// If the controller supports clustering, reset reservations
2713	if (!adapter->ha) {
2714		rval = SUCCESS;
2715		goto out;
2716	}
2717
2718	// clear reservations if any
2719	raw_mbox[0] = CLUSTER_CMD;
2720	raw_mbox[2] = RESET_RESERVATIONS;
2721
2722	rval = SUCCESS;
2723	if (mbox_post_sync_cmd_fast(adapter, raw_mbox) == 0) {
2724		con_log(CL_ANN,
2725			(KERN_INFO "megaraid: reservation reset\n"));
2726	}
2727	else {
2728		rval = FAILED;
2729		con_log(CL_ANN, (KERN_WARNING
2730				"megaraid: reservation reset failed\n"));
2731	}
2732
2733 out:
2734	spin_unlock(&adapter->lock);
2735	return rval;
2736}
2737
2738/*
2739 * START: internal commands library
2740 *
2741 * This section of the driver has the common routine used by the driver and
2742 * also has all the FW routines
2743 */
2744
2745/**
2746 * mbox_post_sync_cmd() - blocking command to the mailbox based controllers
2747 * @adapter	: controller's soft state
2748 * @raw_mbox	: the mailbox
2749 *
2750 * Issue a scb in synchronous and non-interrupt mode for mailbox based
2751 * controllers.
2752 */
2753static int
2754mbox_post_sync_cmd(adapter_t *adapter, uint8_t raw_mbox[])
2755{
2756	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
2757	mbox64_t	*mbox64;
2758	mbox_t		*mbox;
2759	uint8_t		status;
2760	int		i;
2761
2762
2763	mbox64	= raid_dev->mbox64;
2764	mbox	= raid_dev->mbox;
2765
2766	/*
2767	 * Wait until mailbox is free
2768	 */
2769	if (megaraid_busywait_mbox(raid_dev) != 0)
2770		goto blocked_mailbox;
2771
2772	/*
2773	 * Copy mailbox data into host structure
2774	 */
2775	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
2776	mbox->cmdid		= 0xFE;
2777	mbox->busy		= 1;
2778	mbox->poll		= 0;
2779	mbox->ack		= 0;
2780	mbox->numstatus		= 0xFF;
2781	mbox->status		= 0xFF;
2782
2783	wmb();
2784	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
2785
2786	// wait for maximum 1 second for status to post. If the status is not
2787	// available within 1 second, assume FW is initializing and wait
2788	// for an extended amount of time
2789	if (mbox->numstatus == 0xFF) {	// status not yet available
2790		udelay(25);
2791
2792		for (i = 0; mbox->numstatus == 0xFF && i < 1000; i++) {
2793			rmb();
2794			msleep(1);
2795		}
2796
2797
2798		if (i == 1000) {
2799			con_log(CL_ANN, (KERN_NOTICE
2800				"megaraid mailbox: wait for FW to boot      "));
2801
2802			for (i = 0; (mbox->numstatus == 0xFF) &&
2803					(i < MBOX_RESET_WAIT); i++) {
2804				rmb();
2805				con_log(CL_ANN, ("\b\b\b\b\b[%03d]",
2806							MBOX_RESET_WAIT - i));
2807				msleep(1000);
2808			}
2809
2810			if (i == MBOX_RESET_WAIT) {
2811
2812				con_log(CL_ANN, (
2813				"\nmegaraid mailbox: status not available\n"));
2814
2815				return -1;
2816			}
2817			con_log(CL_ANN, ("\b\b\b\b\b[ok] \n"));
2818		}
2819	}
2820
2821	// wait for maximum 1 second for poll semaphore
2822	if (mbox->poll != 0x77) {
2823		udelay(25);
2824
2825		for (i = 0; (mbox->poll != 0x77) && (i < 1000); i++) {
2826			rmb();
2827			msleep(1);
2828		}
2829
2830		if (i == 1000) {
2831			con_log(CL_ANN, (KERN_WARNING
2832			"megaraid mailbox: could not get poll semaphore\n"));
2833			return -1;
2834		}
2835	}
2836
2837	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
2838	wmb();
2839
2840	// wait for maximum 1 second for acknowledgement
2841	if (RDINDOOR(raid_dev) & 0x2) {
2842		udelay(25);
2843
2844		for (i = 0; (RDINDOOR(raid_dev) & 0x2) && (i < 1000); i++) {
2845			rmb();
2846			msleep(1);
2847		}
2848
2849		if (i == 1000) {
2850			con_log(CL_ANN, (KERN_WARNING
2851				"megaraid mailbox: could not acknowledge\n"));
2852			return -1;
2853		}
2854	}
2855	mbox->poll	= 0;
2856	mbox->ack	= 0x77;
2857
2858	status = mbox->status;
2859
2860	// invalidate the completed command id array. After command
2861	// completion, firmware would write the valid id.
2862	mbox->numstatus	= 0xFF;
2863	mbox->status	= 0xFF;
2864	for (i = 0; i < MBOX_MAX_FIRMWARE_STATUS; i++) {
2865		mbox->completed[i] = 0xFF;
2866	}
2867
2868	return status;
2869
2870blocked_mailbox:
2871
2872	con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n") );
2873	return -1;
2874}
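
/*
 * Illustrative sketch (not compiled into the driver): the calling
 * convention for the synchronous mailbox interface above, as used by the
 * configuration helpers further down (megaraid_mbox_get_max_sg() is the
 * model here). The opcode/sub-opcode pair is a placeholder for whichever
 * command the caller needs.
 */
#if 0
{
	uint8_t	raw_mbox[sizeof(mbox_t)];
	mbox_t	*mbox = (mbox_t *)raw_mbox;

	memset(raw_mbox, 0, sizeof(raw_mbox));

	mbox->xferaddr	= (uint32_t)adapter->ibuf_dma_h;  /* reply DMA buffer */
	raw_mbox[0]	= MAIN_MISC_OPCODE;		  /* command opcode */
	raw_mbox[2]	= GET_MAX_SG_SUPPORT;		  /* sub-opcode */

	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
		/* the firmware's reply is now available in adapter->ibuf */
	}
}
#endif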
2875
2876
2877/**
2878 * mbox_post_sync_cmd_fast - blocking command to the mailbox based controllers
2879 * @adapter	: controller's soft state
2880 * @raw_mbox	: the mailbox
2881 *
2882 * Issue a scb in synchronous and non-interrupt mode for mailbox based
2883 * controllers. This is a faster version of the synchronous command and
2884 * therefore can be called in interrupt-context as well.
2885 */
2886static int
2887mbox_post_sync_cmd_fast(adapter_t *adapter, uint8_t raw_mbox[])
2888{
2889	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
2890	mbox_t		*mbox;
2891	long		i;
2892
2893
2894	mbox	= raid_dev->mbox;
2895
2896	// return immediately if the mailbox is busy
2897	if (mbox->busy) return -1;
2898
2899	// Copy mailbox data into host structure
2900	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 14);
2901	mbox->cmdid		= 0xFE;
2902	mbox->busy		= 1;
2903	mbox->poll		= 0;
2904	mbox->ack		= 0;
2905	mbox->numstatus		= 0xFF;
2906	mbox->status		= 0xFF;
2907
2908	wmb();
2909	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
2910
2911	for (i = 0; i < MBOX_SYNC_WAIT_CNT; i++) {
2912		if (mbox->numstatus != 0xFF) break;
2913		rmb();
2914		udelay(MBOX_SYNC_DELAY_200);
2915	}
2916
2917	if (i == MBOX_SYNC_WAIT_CNT) {
2918		// We may need to re-calibrate the counter
2919		con_log(CL_ANN, (KERN_CRIT
2920			"megaraid: fast sync command timed out\n"));
2921	}
2922
2923	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x2);
2924	wmb();
2925
2926	return mbox->status;
2927}
2928
2929
2930/**
2931 * megaraid_busywait_mbox() - Wait until the controller's mailbox is available
2932 * @raid_dev	: RAID device (HBA) soft state
2933 *
2934 * Wait until the controller's mailbox is available to accept more commands.
2935 * Wait for at most 1 second.
2936 */
2937static int
2938megaraid_busywait_mbox(mraid_device_t *raid_dev)
2939{
2940	mbox_t	*mbox = raid_dev->mbox;
2941	int	i = 0;
2942
2943	if (mbox->busy) {
2944		udelay(25);
2945		for (i = 0; mbox->busy && i < 1000; i++)
2946			msleep(1);
2947	}
2948
2949	if (i < 1000) return 0;
2950	else return -1;
2951}
2952
2953
2954/**
2955 * megaraid_mbox_product_info - some static information about the controller
2956 * @adapter	: our soft state
2957 *
2958 * Issue commands to the controller to grab some parameters required by our
2959 * caller.
2960 */
2961static int
2962megaraid_mbox_product_info(adapter_t *adapter)
2963{
2964	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
2965	mbox_t			*mbox;
2966	uint8_t			raw_mbox[sizeof(mbox_t)];
2967	mraid_pinfo_t		*pinfo;
2968	dma_addr_t		pinfo_dma_h;
2969	mraid_inquiry3_t	*mraid_inq3;
2970	int			i;
2971
2972
2973	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
2974	mbox = (mbox_t *)raw_mbox;
2975
2976	/*
2977	 * Issue an ENQUIRY3 command to find out certain adapter parameters,
2978	 * e.g., max channels, max commands etc.
2979	 */
2980	pinfo = pci_alloc_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
2981			&pinfo_dma_h);
2982
2983	if (pinfo == NULL) {
2984		con_log(CL_ANN, (KERN_WARNING
2985			"megaraid: out of memory, %s %d\n", __func__,
2986			__LINE__));
2987
2988		return -1;
2989	}
2990	memset(pinfo, 0, sizeof(mraid_pinfo_t));
2991
2992	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
2993	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
2994
2995	raw_mbox[0] = FC_NEW_CONFIG;
2996	raw_mbox[2] = NC_SUBOP_ENQUIRY3;
2997	raw_mbox[3] = ENQ3_GET_SOLICITED_FULL;
2998
2999	// Issue the command
3000	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3001
3002		con_log(CL_ANN, (KERN_WARNING "megaraid: Inquiry3 failed\n"));
3003
3004		pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3005			pinfo, pinfo_dma_h);
3006
3007		return -1;
3008	}
3009
3010	/*
3011	 * Collect information about state of each physical drive
3012	 * attached to the controller. We will expose all the disks
3013	 * which are not part of RAID
3014	 */
3015	mraid_inq3 = (mraid_inquiry3_t *)adapter->ibuf;
3016	for (i = 0; i < MBOX_MAX_PHYSICAL_DRIVES; i++) {
3017		raid_dev->pdrv_state[i] = mraid_inq3->pdrv_state[i];
3018	}
3019
3020	/*
3021	 * Get product info for information like number of channels,
3022	 * maximum commands supported.
3023	 */
3024	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3025	mbox->xferaddr = (uint32_t)pinfo_dma_h;
3026
3027	raw_mbox[0] = FC_NEW_CONFIG;
3028	raw_mbox[2] = NC_SUBOP_PRODUCT_INFO;
3029
3030	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3031
3032		con_log(CL_ANN, (KERN_WARNING
3033			"megaraid: product info failed\n"));
3034
3035		pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t),
3036			pinfo, pinfo_dma_h);
3037
3038		return -1;
3039	}
3040
3041	/*
3042	 * Setup some parameters for host, as required by our caller
3043	 */
3044	adapter->max_channel = pinfo->nchannels;
3045
3046	/*
3047	 * we will export all the logical drives on a single channel.
3048	 * Add 1 since inquiries do not come for the initiator ID
3049	 */
3050	adapter->max_target	= MAX_LOGICAL_DRIVES_40LD + 1;
3051	adapter->max_lun	= 8;	// up to 8 LUNs for non-disk devices
3052
3053	/*
3054	 * These are the maximum outstanding commands for the scsi-layer
3055	 */
3056	adapter->max_cmds	= MBOX_MAX_SCSI_CMDS;
3057
3058	memset(adapter->fw_version, 0, VERSION_SIZE);
3059	memset(adapter->bios_version, 0, VERSION_SIZE);
3060
3061	memcpy(adapter->fw_version, pinfo->fw_version, 4);
3062	adapter->fw_version[4] = 0;
3063
3064	memcpy(adapter->bios_version, pinfo->bios_version, 4);
3065	adapter->bios_version[4] = 0;
3066
3067	con_log(CL_ANN, (KERN_NOTICE
3068		"megaraid: fw version:[%s] bios version:[%s]\n",
3069		adapter->fw_version, adapter->bios_version));
3070
3071	pci_free_consistent(adapter->pdev, sizeof(mraid_pinfo_t), pinfo,
3072			pinfo_dma_h);
3073
3074	return 0;
3075}
3076
3077
3078
3079/**
3080 * megaraid_mbox_extended_cdb - check for support for extended CDBs
3081 * @adapter	: soft state for the controller
3082 *
3083 * This routine checks whether the controller in question supports extended
3084 * ( > 10 bytes ) CDBs.
3085 */
3086static int
3087megaraid_mbox_extended_cdb(adapter_t *adapter)
3088{
3089	mbox_t		*mbox;
3090	uint8_t		raw_mbox[sizeof(mbox_t)];
3091	int		rval;
3092
3093	mbox = (mbox_t *)raw_mbox;
3094
3095	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3096	mbox->xferaddr	= (uint32_t)adapter->ibuf_dma_h;
3097
3098	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3099
3100	raw_mbox[0] = MAIN_MISC_OPCODE;
3101	raw_mbox[2] = SUPPORT_EXT_CDB;
3102
3103	/*
3104	 * Issue the command
3105	 */
3106	rval = 0;
3107	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3108		rval = -1;
3109	}
3110
3111	return rval;
3112}
3113
3114
3115/**
3116 * megaraid_mbox_support_ha - Do we support clustering
3117 * @adapter	: soft state for the controller
3118 * @init_id	: ID of the initiator
3119 *
3120 * Determine if the firmware supports clustering and the ID of the initiator.
3121 */
3122static int
3123megaraid_mbox_support_ha(adapter_t *adapter, uint16_t *init_id)
3124{
3125	mbox_t		*mbox;
3126	uint8_t		raw_mbox[sizeof(mbox_t)];
3127	int		rval;
3128
3129
3130	mbox = (mbox_t *)raw_mbox;
3131
3132	memset((caddr_t)raw_mbox, 0, sizeof(raw_mbox));
3133
3134	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3135
3136	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3137
3138	raw_mbox[0] = GET_TARGET_ID;
3139
3140	// Issue the command
3141	*init_id = 7;
3142	rval =  -1;
3143	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3144
3145		*init_id = *(uint8_t *)adapter->ibuf;
3146
3147		con_log(CL_ANN, (KERN_INFO
3148			"megaraid: cluster firmware, initiator ID: %d\n",
3149			*init_id));
3150
3151		rval =  0;
3152	}
3153
3154	return rval;
3155}
3156
3157
3158/**
3159 * megaraid_mbox_support_random_del - Do we support random deletion
3160 * @adapter	: soft state for the controller
3161 *
3162 * Determine if the firmware supports random deletion.
3163 * Return:	1 if the operation is supported, 0 otherwise
3164 */
3165static int
3166megaraid_mbox_support_random_del(adapter_t *adapter)
3167{
3168	mbox_t		*mbox;
3169	uint8_t		raw_mbox[sizeof(mbox_t)];
3170	int		rval;
3171
3172	 * Newer firmware on Dell CERC expects a different
3173	 * Newer firmware on Dell CERC expect a different
3174	 * random deletion handling, so disable it.
3175	 */
3176	if (adapter->pdev->vendor == PCI_VENDOR_ID_AMI &&
3177	    adapter->pdev->device == PCI_DEVICE_ID_AMI_MEGARAID3 &&
3178	    adapter->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
3179	    adapter->pdev->subsystem_device == PCI_SUBSYS_ID_CERC_ATA100_4CH &&
3180	    (adapter->fw_version[0] > '6' ||
3181	     (adapter->fw_version[0] == '6' &&
3182	      adapter->fw_version[2] > '6') ||
3183	     (adapter->fw_version[0] == '6'
3184	      && adapter->fw_version[2] == '6'
3185	      && adapter->fw_version[3] > '1'))) {
3186		con_log(CL_DLEVEL1, ("megaraid: disable random deletion\n"));
3187		return 0;
3188	}
3189
3190	mbox = (mbox_t *)raw_mbox;
3191
3192	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3193
3194	raw_mbox[0] = FC_DEL_LOGDRV;
3195	raw_mbox[2] = OP_SUP_DEL_LOGDRV;
3196
3197	// Issue the command
3198	rval = 0;
3199	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3200
3201		con_log(CL_DLEVEL1, ("megaraid: supports random deletion\n"));
3202
3203		rval =  1;
3204	}
3205
3206	return rval;
3207}
3208
3209
3210/**
3211 * megaraid_mbox_get_max_sg - maximum sg elements supported by the firmware
3212 * @adapter	: soft state for the controller
3213 *
3214 * Find out the maximum number of scatter-gather elements supported by the
3215 * firmware.
3216 */
3217static int
3218megaraid_mbox_get_max_sg(adapter_t *adapter)
3219{
3220	mbox_t		*mbox;
3221	uint8_t		raw_mbox[sizeof(mbox_t)];
3222	int		nsg;
3223
3224
3225	mbox = (mbox_t *)raw_mbox;
3226
3227	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3228
3229	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3230
3231	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3232
3233	raw_mbox[0] = MAIN_MISC_OPCODE;
3234	raw_mbox[2] = GET_MAX_SG_SUPPORT;
3235
3236	// Issue the command
3237	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3238		nsg =  *(uint8_t *)adapter->ibuf;
3239	}
3240	else {
3241		nsg =  MBOX_DEFAULT_SG_SIZE;
3242	}
3243
3244	if (nsg > MBOX_MAX_SG_SIZE) nsg = MBOX_MAX_SG_SIZE;
3245
3246	return nsg;
3247}
3248
3249
3250/**
3251 * megaraid_mbox_enum_raid_scsi - enumerate the RAID and SCSI channels
3252 * @adapter	: soft state for the controller
3253 *
3254 * Enumerate the RAID and SCSI channels for ROMB platforms so that channels
3255 * can be exported as regular SCSI channels.
3256 */
3257static void
3258megaraid_mbox_enum_raid_scsi(adapter_t *adapter)
3259{
3260	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3261	mbox_t		*mbox;
3262	uint8_t		raw_mbox[sizeof(mbox_t)];
3263
3264
3265	mbox = (mbox_t *)raw_mbox;
3266
3267	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3268
3269	mbox->xferaddr = (uint32_t)adapter->ibuf_dma_h;
3270
3271	memset((void *)adapter->ibuf, 0, MBOX_IBUF_SIZE);
3272
3273	raw_mbox[0] = CHNL_CLASS;
3274	raw_mbox[2] = GET_CHNL_CLASS;
3275
3276	// Issue the command. If the command fails, all channels are RAID
3277	// channels
3278	raid_dev->channel_class = 0xFF;
3279	if (mbox_post_sync_cmd(adapter, raw_mbox) == 0) {
3280		raid_dev->channel_class =  *(uint8_t *)adapter->ibuf;
3281	}
3282
3283	return;
3284}
3285
3286
3287/**
3288 * megaraid_mbox_flush_cache - flush adapter and disks cache
3289 * @adapter		: soft state for the controller
3290 *
3291 * Flush adapter cache followed by disks cache.
3292 */
3293static void
3294megaraid_mbox_flush_cache(adapter_t *adapter)
3295{
3296	mbox_t	*mbox;
3297	uint8_t	raw_mbox[sizeof(mbox_t)];
3298
3299
3300	mbox = (mbox_t *)raw_mbox;
3301
3302	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3303
3304	raw_mbox[0] = FLUSH_ADAPTER;
3305
3306	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3307		con_log(CL_ANN, ("megaraid: flush adapter failed\n"));
3308	}
3309
3310	raw_mbox[0] = FLUSH_SYSTEM;
3311
3312	if (mbox_post_sync_cmd(adapter, raw_mbox) != 0) {
3313		con_log(CL_ANN, ("megaraid: flush disks cache failed\n"));
3314	}
3315
3316	return;
3317}
3318
3319
3320/**
3321 * megaraid_mbox_fire_sync_cmd - fire the sync cmd
3322 * @adapter		: soft state for the controller
3323 *
3324 * Clears the pending cmds in FW and reinits its RAID structs.
3325 */
3326static int
3327megaraid_mbox_fire_sync_cmd(adapter_t *adapter)
3328{
3329	mbox_t	*mbox;
3330	uint8_t	raw_mbox[sizeof(mbox_t)];
3331	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3332	mbox64_t *mbox64;
3333	int	status = 0;
3334	int i;
3335	uint32_t dword;
3336
3337	mbox = (mbox_t *)raw_mbox;
3338
3339	memset((caddr_t)raw_mbox, 0, sizeof(mbox_t));
3340
3341	raw_mbox[0] = 0xFF;
3342
3343	mbox64	= raid_dev->mbox64;
3344	mbox	= raid_dev->mbox;
3345
3346	/* Wait until mailbox is free */
3347	if (megaraid_busywait_mbox(raid_dev) != 0) {
3348		status = 1;
3349		goto blocked_mailbox;
3350	}
3351
3352	/* Copy mailbox data into host structure */
3353	memcpy((caddr_t)mbox, (caddr_t)raw_mbox, 16);
3354	mbox->cmdid		= 0xFE;
3355	mbox->busy		= 1;
3356	mbox->poll		= 0;
3357	mbox->ack		= 0;
3358	mbox->numstatus		= 0;
3359	mbox->status		= 0;
3360
3361	wmb();
3362	WRINDOOR(raid_dev, raid_dev->mbox_dma | 0x1);
3363
3364	/* Wait for a maximum of 1 minute for the status to post.
3365	 * If the firmware supports this command,
3366	 * mbox->cmd will be set to 0;
3367	 * otherwise,
3368	 * the firmware will reject the command with
3369	 * mbox->numstatus set to 1.
3370	 */
3371
3372	i = 0;
3373	status = 0;
3374	while (!mbox->numstatus && mbox->cmd == 0xFF) {
3375		rmb();
3376		msleep(1);
3377		i++;
3378		if (i > 1000 * 60) {
3379			status = 1;
3380			break;
3381		}
3382	}
3383	if (mbox->numstatus == 1)
3384		status = 1; /*cmd not supported*/
3385
3386	/* Check for interrupt line */
3387	dword = RDOUTDOOR(raid_dev);
3388	WROUTDOOR(raid_dev, dword);
3389	WRINDOOR(raid_dev,2);
3390
3391	return status;
3392
3393blocked_mailbox:
3394	con_log(CL_ANN, (KERN_WARNING "megaraid: blocked mailbox\n"));
3395	return status;
3396}
3397
3398/**
3399 * megaraid_mbox_display_scb - display SCB information, mostly debug purposes
3400 * @adapter		: controller's soft state
3401 * @scb			: SCB to be displayed
3403 *
3404 * Display information about the given SCB iff the current debug level is
3405 * verbose.
3406 */
3407static void
3408megaraid_mbox_display_scb(adapter_t *adapter, scb_t *scb)
3409{
3410	mbox_ccb_t		*ccb;
3411	struct scsi_cmnd	*scp;
3412	mbox_t			*mbox;
3413	int			level;
3414	int			i;
3415
3416
3417	ccb	= (mbox_ccb_t *)scb->ccb;
3418	scp	= scb->scp;
3419	mbox	= ccb->mbox;
3420
3421	level = CL_DLEVEL3;
3422
3423	con_log(level, (KERN_NOTICE
3424		"megaraid mailbox: status:%#x cmd:%#x id:%#x ", scb->status,
3425		mbox->cmd, scb->sno));
3426
3427	con_log(level, ("sec:%#x lba:%#x addr:%#x ld:%d sg:%d\n",
3428		mbox->numsectors, mbox->lba, mbox->xferaddr, mbox->logdrv,
3429		mbox->numsge));
3430
3431	if (!scp) return;
3432
3433	con_log(level, (KERN_NOTICE "scsi cmnd: "));
3434
3435	for (i = 0; i < scp->cmd_len; i++) {
3436		con_log(level, ("%#2.02x ", scp->cmnd[i]));
3437	}
3438
3439	con_log(level, ("\n"));
3440
3441	return;
3442}
3443
3444
3445/**
3446 * megaraid_mbox_setup_device_map - manage device ids
3447 * @adapter	: Driver's soft state
3448 *
3449 * Manage the device ids to have an appropriate mapping between the kernel
3450 * scsi addresses and megaraid scsi and logical drive addresses. We export
3451 * scsi devices on their actual addresses, whereas the logical drives are
3452 * exported on a virtual scsi channel.
3453 */
3454static void
3455megaraid_mbox_setup_device_map(adapter_t *adapter)
3456{
3457	uint8_t		c;
3458	uint8_t		t;
3459
3460	/*
3461	 * First fill the values on the logical drive channel
3462	 */
3463	for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
3464		adapter->device_ids[adapter->max_channel][t] =
3465			(t < adapter->init_id) ?  t : t - 1;
3466
3467	adapter->device_ids[adapter->max_channel][adapter->init_id] = 0xFF;
3468
3469	/*
3470	 * Fill the values on the physical devices channels
3471	 */
3472	for (c = 0; c < adapter->max_channel; c++)
3473		for (t = 0; t < LSI_MAX_LOGICAL_DRIVES_64LD; t++)
3474			adapter->device_ids[c][t] = (c << 8) | t;
3475}
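
/*
 * Illustrative sketch (not compiled into the driver): what the map built
 * above contains for a controller whose initiator ID is 7. On the virtual
 * (logical drive) channel the initiator's own target ID is skipped, so
 * kernel target t maps to logical drive t for t < 7 and t - 1 for t > 7;
 * on the physical channels the entry is simply (channel << 8) | target.
 */
#if 0
	adapter->device_ids[adapter->max_channel][6] == 6;	/* LD 6 */
	adapter->device_ids[adapter->max_channel][7] == 0xFF;	/* initiator */
	adapter->device_ids[adapter->max_channel][8] == 7;	/* LD 7 */

	adapter->device_ids[1][4] == ((1 << 8) | 4);		/* 0x0104 */
#endif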
3476
3477
3478/*
3479 * END: internal commands library
3480 */
3481
3482/*
3483 * START: Interface for the common management module
3484 *
3485 * This is the module, which interfaces with the common management module to
3486 * provide support for ioctl and sysfs
3487 */
3488
3489/**
3490 * megaraid_cmm_register - register with the management module
3491 * @adapter		: HBA soft state
3492 *
3493 * Register with the management module, which allows applications to issue
3494 * ioctl calls to the drivers. This interface is used by the management module
3495 * to setup sysfs support as well.
3496 */
3497static int
3498megaraid_cmm_register(adapter_t *adapter)
3499{
3500	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3501	mraid_mmadp_t	adp;
3502	scb_t		*scb;
3503	mbox_ccb_t	*ccb;
3504	int		rval;
3505	int		i;
3506
3507	// Allocate memory for the base list of scb for management module.
3508	adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
3509
3510	if (adapter->uscb_list == NULL) {
3511		con_log(CL_ANN, (KERN_WARNING
3512			"megaraid: out of memory, %s %d\n", __func__,
3513			__LINE__));
3514		return -1;
3515	}
3516
3517
3518	// Initialize the synchronization parameters for resources for
3519	// commands for management module
3520	INIT_LIST_HEAD(&adapter->uscb_pool);
3521
3522	spin_lock_init(USER_FREE_LIST_LOCK(adapter));
3523
3524
3525
3526	// link all the packets. Note that for CCBs of commands coming from the
3527	// common management module, the mailbox physical addresses are already
3528	// set up by it. We just need a placeholder for them in our local command
3529	// control blocks
3530	for (i = 0; i < MBOX_MAX_USER_CMDS; i++) {
3531
3532		scb			= adapter->uscb_list + i;
3533		ccb			= raid_dev->uccb_list + i;
3534
3535		scb->ccb		= (caddr_t)ccb;
3536		ccb->mbox64		= raid_dev->umbox64 + i;
3537		ccb->mbox		= &ccb->mbox64->mbox32;
3538		ccb->raw_mbox		= (uint8_t *)ccb->mbox;
3539
3540		scb->gp			= 0;
3541
3542		// COMMAND ID 0 - (MBOX_MAX_SCSI_CMDS-1) ARE RESERVED FOR
3543		// COMMANDS COMING FROM IO SUBSYSTEM (MID-LAYER)
3544		scb->sno		= i + MBOX_MAX_SCSI_CMDS;
3545
3546		scb->scp		= NULL;
3547		scb->state		= SCB_FREE;
3548		scb->dma_direction	= PCI_DMA_NONE;
3549		scb->dma_type		= MRAID_DMA_NONE;
3550		scb->dev_channel	= -1;
3551		scb->dev_target		= -1;
3552
3553		// put scb in the free pool
3554		list_add_tail(&scb->list, &adapter->uscb_pool);
3555	}
3556
3557	adp.unique_id		= adapter->unique_id;
3558	adp.drvr_type		= DRVRTYPE_MBOX;
3559	adp.drvr_data		= (unsigned long)adapter;
3560	adp.pdev		= adapter->pdev;
3561	adp.issue_uioc		= megaraid_mbox_mm_handler;
3562	adp.timeout		= MBOX_RESET_WAIT + MBOX_RESET_EXT_WAIT;
3563	adp.max_kioc		= MBOX_MAX_USER_CMDS;
3564
3565	if ((rval = mraid_mm_register_adp(&adp)) != 0) {
3566
3567		con_log(CL_ANN, (KERN_WARNING
3568			"megaraid mbox: did not register with CMM\n"));
3569
3570		kfree(adapter->uscb_list);
3571	}
3572
3573	return rval;
3574}
3575
3576
3577/**
3578 * megaraid_cmm_unregister - un-register with the management module
3579 * @adapter		: HBA soft state
3580 *
3581 * Un-register with the management module.
3582 * FIXME: mgmt module must return failure for unregister if it has pending
3583 * commands in LLD.
3584 */
3585static int
3586megaraid_cmm_unregister(adapter_t *adapter)
3587{
3588	kfree(adapter->uscb_list);
3589	mraid_mm_unregister_adp(adapter->unique_id);
3590	return 0;
3591}
3592
3593
3594/**
3595 * megaraid_mbox_mm_handler - interface for CMM to issue commands to LLD
3596 * @drvr_data		: LLD specific data
3597 * @kioc		: CMM interface packet
3598 * @action		: command action
3599 *
3600 * This routine is invoked whenever the Common Management Module (CMM) has a
3601 * command for us. The 'action' parameter specifies if this is a new command
3602 * or otherwise.
3603 */
3604static int
3605megaraid_mbox_mm_handler(unsigned long drvr_data, uioc_t *kioc, uint32_t action)
3606{
3607	adapter_t *adapter;
3608
3609	if (action != IOCTL_ISSUE) {
3610		con_log(CL_ANN, (KERN_WARNING
3611			"megaraid: unsupported management action:%#2x\n",
3612			action));
3613		return (-ENOTSUPP);
3614	}
3615
3616	adapter = (adapter_t *)drvr_data;
3617
3618	// make sure this adapter is not being detached right now.
3619	if (atomic_read(&adapter->being_detached)) {
3620		con_log(CL_ANN, (KERN_WARNING
3621			"megaraid: reject management request, detaching\n"));
3622		return (-ENODEV);
3623	}
3624
3625	switch (kioc->opcode) {
3626
3627	case GET_ADAP_INFO:
3628
3629		kioc->status =  gather_hbainfo(adapter, (mraid_hba_info_t *)
3630					(unsigned long)kioc->buf_vaddr);
3631
3632		kioc->done(kioc);
3633
3634		return kioc->status;
3635
3636	case MBOX_CMD:
3637
3638		return megaraid_mbox_mm_command(adapter, kioc);
3639
3640	default:
3641		kioc->status = (-EINVAL);
3642		kioc->done(kioc);
3643		return (-EINVAL);
3644	}
3645
3646	return 0;	// not reached
3647}
3648
3649/**
3650 * megaraid_mbox_mm_command - issues commands routed through CMM
3651 * @adapter		: HBA soft state
3652 * @kioc		: management command packet
3653 *
3654 * Issues commands, which are routed through the management module.
3655 */
3656static int
3657megaraid_mbox_mm_command(adapter_t *adapter, uioc_t *kioc)
3658{
3659	struct list_head	*head = &adapter->uscb_pool;
3660	mbox64_t		*mbox64;
3661	uint8_t			*raw_mbox;
3662	scb_t			*scb;
3663	mbox_ccb_t		*ccb;
3664	unsigned long		flags;
3665
3666	// detach one scb from free pool
3667	spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
3668
3669	if (list_empty(head)) {	// should never happen because of CMM
3670
3671		con_log(CL_ANN, (KERN_WARNING
3672			"megaraid mbox: bug in cmm handler, lost resources\n"));
3673
3674		spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3675
3676		return (-EINVAL);
3677	}
3678
3679	scb = list_entry(head->next, scb_t, list);
3680	list_del_init(&scb->list);
3681
3682	spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3683
3684	scb->state		= SCB_ACTIVE;
3685	scb->dma_type		= MRAID_DMA_NONE;
3686	scb->dma_direction	= PCI_DMA_NONE;
3687
3688	ccb		= (mbox_ccb_t *)scb->ccb;
3689	mbox64		= (mbox64_t *)(unsigned long)kioc->cmdbuf;
3690	raw_mbox	= (uint8_t *)&mbox64->mbox32;
3691
3692	memcpy(ccb->mbox64, mbox64, sizeof(mbox64_t));
3693
3694	scb->gp		= (unsigned long)kioc;
3695
3696	/*
3697	 * If it is a logdrv random delete operation, we have to wait till
3698	 * there are no outstanding cmds at the fw and then issue it directly
3699	 */
3700	if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
3701
3702		if (wait_till_fw_empty(adapter)) {
3703			con_log(CL_ANN, (KERN_NOTICE
3704				"megaraid mbox: LD delete, timed out\n"));
3705
3706			kioc->status = -ETIME;
3707
3708			scb->status = -1;
3709
3710			megaraid_mbox_mm_done(adapter, scb);
3711
3712			return (-ETIME);
3713		}
3714
3715		INIT_LIST_HEAD(&scb->list);
3716
3717		scb->state = SCB_ISSUED;
3718		if (mbox_post_cmd(adapter, scb) != 0) {
3719
3720			con_log(CL_ANN, (KERN_NOTICE
3721				"megaraid mbox: LD delete, mailbox busy\n"));
3722
3723			kioc->status = -EBUSY;
3724
3725			scb->status = -1;
3726
3727			megaraid_mbox_mm_done(adapter, scb);
3728
3729			return (-EBUSY);
3730		}
3731
3732		return 0;
3733	}
3734
3735	// put the command on the pending list and execute
3736	megaraid_mbox_runpendq(adapter, scb);
3737
3738	return 0;
3739}
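
/*
 * Illustrative sketch (not compiled into the driver): the quiescent-counter
 * pairing behind the logical-drive delete path above. wait_till_fw_empty()
 * bumps adapter->quiescent before the delete is posted directly to the
 * firmware, and megaraid_mbox_mm_done() drops it again and restarts the
 * pending queue once the delete completes. Error handling is omitted here.
 */
#if 0
	wait_till_fw_empty(adapter);		/* adapter->quiescent++ */
	/* ... FC_DEL_LOGDRV / OP_DEL_LOGDRV posted via mbox_post_cmd() ... */
	megaraid_mbox_mm_done(adapter, scb);	/* adapter->quiescent--, runpendq */
#endif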
3740
3741
3742static int
3743wait_till_fw_empty(adapter_t *adapter)
3744{
3745	unsigned long	flags = 0;
3746	int		i;
3747
3748
3749	/*
3750	 * Set the quiescent flag to stop issuing cmds to FW.
3751	 */
3752	spin_lock_irqsave(&adapter->lock, flags);
3753	adapter->quiescent++;
3754	spin_unlock_irqrestore(&adapter->lock, flags);
3755
3756	/*
3757	 * Wait till there are no more cmds outstanding at FW. Try for at most
3758	 * 60 seconds
3759	 */
3760	for (i = 0; i < 60 && adapter->outstanding_cmds; i++) {
3761		con_log(CL_DLEVEL1, (KERN_INFO
3762			"megaraid: FW has %d pending commands\n",
3763			adapter->outstanding_cmds));
3764
3765		msleep(1000);
3766	}
3767
3768	return adapter->outstanding_cmds;
3769}
3770
3771
3772/**
3773 * megaraid_mbox_mm_done - callback for CMM commands
3774 * @adapter	: HBA soft state
3775 * @scb		: completed command
3776 *
3777 * Callback routine for internal commands originating from the management
3778 * module.
3779 */
3780static void
3781megaraid_mbox_mm_done(adapter_t *adapter, scb_t *scb)
3782{
3783	uioc_t			*kioc;
3784	mbox64_t		*mbox64;
3785	uint8_t			*raw_mbox;
3786	unsigned long		flags;
3787
3788	kioc			= (uioc_t *)scb->gp;
3789	mbox64			= (mbox64_t *)(unsigned long)kioc->cmdbuf;
3790	mbox64->mbox32.status	= scb->status;
3791	raw_mbox		= (uint8_t *)&mbox64->mbox32;
3792
3793
3794	// put scb in the free pool
3795	scb->state	= SCB_FREE;
3796	scb->scp	= NULL;
3797
3798	spin_lock_irqsave(USER_FREE_LIST_LOCK(adapter), flags);
3799
3800	list_add(&scb->list, &adapter->uscb_pool);
3801
3802	spin_unlock_irqrestore(USER_FREE_LIST_LOCK(adapter), flags);
3803
3804	// if a delete logical drive operation succeeded, restart the
3805	// controller
3806	if (raw_mbox[0] == FC_DEL_LOGDRV && raw_mbox[2] == OP_DEL_LOGDRV) {
3807
3808		adapter->quiescent--;
3809
3810		megaraid_mbox_runpendq(adapter, NULL);
3811	}
3812
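	// invoke the completion callback registered by the packet's originator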
3813	kioc->done(kioc);
3814
3815	return;
3816}
3817
3818
3819/**
3820 * gather_hbainfo - HBA characteristics for the applications
3821 * @adapter		: HBA soft state
3822 * @hinfo		: pointer to the caller's host info structure
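 *
 * Always returns 0.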
3823 */
3824static int
3825gather_hbainfo(adapter_t *adapter, mraid_hba_info_t *hinfo)
3826{
3827	uint8_t	dmajor;
3828
3829	dmajor			= megaraid_mbox_version[0];
3830
3831	hinfo->pci_vendor_id	= adapter->pdev->vendor;
3832	hinfo->pci_device_id	= adapter->pdev->device;
3833	hinfo->subsys_vendor_id	= adapter->pdev->subsystem_vendor;
3834	hinfo->subsys_device_id	= adapter->pdev->subsystem_device;
3835
3836	hinfo->pci_bus		= adapter->pdev->bus->number;
3837	hinfo->pci_dev_fn	= adapter->pdev->devfn;
3838	hinfo->pci_slot		= PCI_SLOT(adapter->pdev->devfn);
3839	hinfo->irq		= adapter->host->irq;
3840	hinfo->baseport		= ADAP2RAIDDEV(adapter)->baseport;
3841
3842	hinfo->unique_id	= (hinfo->pci_bus << 8) | adapter->pdev->devfn;
3843	hinfo->host_no		= adapter->host->host_no;
3844
3845	return 0;
3846}
3847
3848/*
3849 * END: Interface for the common management module
3850 */
3851
3852
3853
3854/**
3855 * megaraid_sysfs_alloc_resources - allocate sysfs related resources
3856 * @adapter	: controller's soft state
3857 *
3858 * Allocate the packets required to issue FW calls whenever the sysfs
3859 * attributes are read, since these attributes need up-to-date information
3860 * from the FW. Also set up the mutex that serializes access to these
3861 * resources and the wait queue used to wait for command completion.
3862 *
3863 * Return 0 on success.
3864 * Return -ERROR_CODE on failure.
3865 */
3866static int
3867megaraid_sysfs_alloc_resources(adapter_t *adapter)
3868{
3869	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3870	int		rval = 0;
3871
3872	raid_dev->sysfs_uioc = kmalloc(sizeof(uioc_t), GFP_KERNEL);
3873
3874	raid_dev->sysfs_mbox64 = kmalloc(sizeof(mbox64_t), GFP_KERNEL);
3875
3876	raid_dev->sysfs_buffer = pci_alloc_consistent(adapter->pdev,
3877			PAGE_SIZE, &raid_dev->sysfs_buffer_dma);
3878
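	// if any of the allocations failed, release whatever did succeed;
	// the free routine below copes with NULL pointers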
3879	if (!raid_dev->sysfs_uioc || !raid_dev->sysfs_mbox64 ||
3880		!raid_dev->sysfs_buffer) {
3881
3882		con_log(CL_ANN, (KERN_WARNING
3883			"megaraid: out of memory, %s %d\n", __func__,
3884			__LINE__));
3885
3886		rval = -ENOMEM;
3887
3888		megaraid_sysfs_free_resources(adapter);
3889	}
3890
3891	mutex_init(&raid_dev->sysfs_mtx);
3892
3893	init_waitqueue_head(&raid_dev->sysfs_wait_q);
3894
3895	return rval;
3896}
3897
3898
3899/**
3900 * megaraid_sysfs_free_resources - free sysfs related resources
3901 * @adapter	: controller's soft state
3902 *
3903 * Free the packets allocated for sysfs FW commands.
3904 */
3905static void
3906megaraid_sysfs_free_resources(adapter_t *adapter)
3907{
3908	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3909
3910	kfree(raid_dev->sysfs_uioc);
3911	kfree(raid_dev->sysfs_mbox64);
3912
3913	if (raid_dev->sysfs_buffer) {
3914		pci_free_consistent(adapter->pdev, PAGE_SIZE,
3915			raid_dev->sysfs_buffer, raid_dev->sysfs_buffer_dma);
3916	}
3917}
3918
3919
3920/**
3921 * megaraid_sysfs_get_ldmap_done - callback for get ldmap
3922 * @uioc	: completed packet
3923 *
3924 * Callback routine called in the ISR/tasklet context for the get ldmap call
3925 */
3926static void
3927megaraid_sysfs_get_ldmap_done(uioc_t *uioc)
3928{
3929	adapter_t	*adapter = (adapter_t *)uioc->buf_vaddr;
3930	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3931
3932	uioc->status = 0;
3933
3934	wake_up(&raid_dev->sysfs_wait_q);
3935}
3936
3937
3938/**
3939 * megaraid_sysfs_get_ldmap_timeout - timeout handling for get ldmap
3940 * @data	: timed out packet
3941 *
3942 * Timeout routine to recover and return to the application in case the
3943 * adapter has stopped responding. A timeout of 60 seconds for this command
3944 * seems like a good value.
3945 */
3946static void
3947megaraid_sysfs_get_ldmap_timeout(unsigned long data)
3948{
3949	uioc_t		*uioc = (uioc_t *)data;
3950	adapter_t	*adapter = (adapter_t *)uioc->buf_vaddr;
3951	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
3952
3953	uioc->status = -ETIME;
3954
3955	wake_up(&raid_dev->sysfs_wait_q);
3956}
3957
3958
3959/**
3960 * megaraid_sysfs_get_ldmap - get the updated logical drive map
3961 * @adapter	: controller's soft state
3962 *
3963 * This routine is called whenever a user reads the logical drive
3964 * attributes; it fetches the current logical drive mapping table from the
3965 * firmware. We use the management APIs to issue commands to the controller.
3966 *
3967 * NOTE: The command issuance functionality is not generalized and is
3968 * implemented only in the context of the "get ld map" command. If required,
3969 * the command issuance logic can be trivially pulled out and implemented as
3970 * a standalone library. For now, this should suffice since there is no
3971 * other user of this interface.
3972 *
3973 * Return 0 on success.
3974 * Return -1 on failure.
3975 */
3976static int
3977megaraid_sysfs_get_ldmap(adapter_t *adapter)
3978{
3979	mraid_device_t		*raid_dev = ADAP2RAIDDEV(adapter);
3980	uioc_t			*uioc;
3981	mbox64_t		*mbox64;
3982	mbox_t			*mbox;
3983	char			*raw_mbox;
3984	struct timer_list	sysfs_timer;
3985	struct timer_list	*timerp;
3986	caddr_t			ldmap;
3987	int			rval = 0;
3988
3989	/*
3990	 * Allow only one read at a time to go through the sysfs attributes
3991	 */
3992	mutex_lock(&raid_dev->sysfs_mtx);
3993
3994	uioc	= raid_dev->sysfs_uioc;
3995	mbox64	= raid_dev->sysfs_mbox64;
3996	ldmap	= raid_dev->sysfs_buffer;
3997
3998	memset(uioc, 0, sizeof(uioc_t));
3999	memset(mbox64, 0, sizeof(mbox64_t));
4000	memset(ldmap, 0, sizeof(raid_dev->curr_ldmap));
4001
4002	mbox		= &mbox64->mbox32;
4003	raw_mbox	= (char *)mbox;
4004	uioc->cmdbuf    = (uint64_t)(unsigned long)mbox64;
4005	uioc->buf_vaddr	= (caddr_t)adapter;
4006	uioc->status	= -ENODATA;
4007	uioc->done	= megaraid_sysfs_get_ldmap_done;
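	// status stays -ENODATA until the completion or timeout callback
	// fires; the wait_event() below keys off this value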
4008
4009	/*
4010	 * Prepare the mailbox packet to get the current logical drive mapping
4011	 * table
4012	 */
4013	mbox->xferaddr = (uint32_t)raid_dev->sysfs_buffer_dma;
4014
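	// the LD map is read using the FC_DEL_LOGDRV command class with the
	// OP_GET_LDID_MAP sub-opcode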
4015	raw_mbox[0] = FC_DEL_LOGDRV;
4016	raw_mbox[2] = OP_GET_LDID_MAP;
4017
4018	/*
4019	 * Setup a timer to recover from a non-responding controller
4020	 */
4021	timerp	= &sysfs_timer;
4022	init_timer(timerp);
4023
4024	timerp->function	= megaraid_sysfs_get_ldmap_timeout;
4025	timerp->data		= (unsigned long)uioc;
4026	timerp->expires		= jiffies + 60 * HZ;
4027
4028	add_timer(timerp);
4029
4030	/*
4031	 * Send the command to the firmware
4032	 */
4033	rval = megaraid_mbox_mm_command(adapter, uioc);
4034
4035	if (rval == 0) {	// command successfully issued
4036		wait_event(raid_dev->sysfs_wait_q, (uioc->status != -ENODATA));
4037
4038		/*
4039		 * Check if the command timed out
4040		 */
4041		if (uioc->status == -ETIME) {
4042			con_log(CL_ANN, (KERN_NOTICE
4043				"megaraid: sysfs get ld map timed out\n"));
4044
4045			rval = -ETIME;
4046		}
4047		else {
4048			rval = mbox->status;
4049		}
4050
4051		if (rval == 0) {
4052			memcpy(raid_dev->curr_ldmap, ldmap,
4053				sizeof(raid_dev->curr_ldmap));
4054		}
4055		else {
4056			con_log(CL_ANN, (KERN_NOTICE
4057				"megaraid: get ld map failed with %x\n", rval));
4058		}
4059	}
4060	else {
4061		con_log(CL_ANN, (KERN_NOTICE
4062			"megaraid: could not issue ldmap command:%x\n", rval));
4063	}
4064
4065
4066	del_timer_sync(timerp);
4067
4068	mutex_unlock(&raid_dev->sysfs_mtx);
4069
4070	return rval;
4071}
4072
4073
4074/**
4075 * megaraid_sysfs_show_app_hndl - display application handle for this adapter
4076 * @dev		: device object representation for the host
4077 * @buf		: buffer to send data to
4078 *
4079 * Display the handle used by the applications while executing management
4080 * tasks on the adapter. We invoke a management module API to get the adapter
4081 * handle, since we do not interface with applications directly.
4082 */
4083static ssize_t
4084megaraid_sysfs_show_app_hndl(struct device *dev, struct device_attribute *attr,
4085			     char *buf)
4086{
4087	struct Scsi_Host *shost = class_to_shost(dev);
4088	adapter_t	*adapter = (adapter_t *)SCSIHOST2ADAP(shost);
4089	uint32_t	app_hndl;
4090
4091	app_hndl = mraid_mm_adapter_app_handle(adapter->unique_id);
4092
4093	return snprintf(buf, 8, "%u\n", app_hndl);
4094}
4095
4096
4097/**
4098 * megaraid_sysfs_show_ldnum - display the logical drive number for this device
4099 * @dev		: device object representation for the scsi device
4100 * @attr	: device attribute to show
4101 * @buf		: buffer to send data to
4102 *
4103 * Display the logical drive number for the device in question, if it is a
4104 * valid logical drive. For physical devices, "-1" is returned.
4105 *
4106 * The logical drive number is displayed in the following format:
4107 *
4108 * <SCSI ID> <LD NUM> <LD STICKY ID> <APP ADAPTER HANDLE>
4109 *
4110 *   <int>     <int>       <int>            <int>
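 *
 * For example, a read might return "2 1 1 4" (illustrative values only).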
4111 */
4112static ssize_t
4113megaraid_sysfs_show_ldnum(struct device *dev, struct device_attribute *attr, char *buf)
4114{
4115	struct scsi_device *sdev = to_scsi_device(dev);
4116	adapter_t	*adapter = (adapter_t *)SCSIHOST2ADAP(sdev->host);
4117	mraid_device_t	*raid_dev = ADAP2RAIDDEV(adapter);
4118	int		scsi_id = -1;
4119	int		logical_drv = -1;
4120	int		ldid_map = -1;
4121	uint32_t	app_hndl = 0;
4122	int		mapped_sdev_id;
4123	int		rval;
4124	int		i;
4125
4126	if (raid_dev->random_del_supported &&
4127			MRAID_IS_LOGICAL_SDEV(adapter, sdev)) {
4128
4129		rval = megaraid_sysfs_get_ldmap(adapter);
4130		if (rval == 0) {
4131
4132			for (i = 0; i < MAX_LOGICAL_DRIVES_40LD; i++) {
4133
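				// target IDs above the initiator's own ID map one entry
				// lower in the LD map, since the initiator occupies a slot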
4134				mapped_sdev_id = sdev->id;
4135
4136				if (sdev->id > adapter->init_id) {
4137					mapped_sdev_id -= 1;
4138				}
4139
4140				if (raid_dev->curr_ldmap[i] == mapped_sdev_id) {
4141
4142					scsi_id = sdev->id;
4143
4144					logical_drv = i;
4145
4146					ldid_map = raid_dev->curr_ldmap[i];
4147
4148					app_hndl = mraid_mm_adapter_app_handle(
4149							adapter->unique_id);
4150
4151					break;
4152				}
4153			}
4154		}
4155		else {
4156			con_log(CL_ANN, (KERN_NOTICE
4157				"megaraid: sysfs get ld map failed: %x\n",
4158				rval));
4159		}
4160	}
4161
4162	return snprintf(buf, 36, "%d %d %d %d\n", scsi_id, logical_drv,
4163			ldid_map, app_hndl);
4164}
4165
4166
4167/*
4168 * END: Mailbox Low Level Driver
4169 */
4170module_init(megaraid_init);
4171module_exit(megaraid_exit);
4172
4173/* vim: set ts=8 sw=8 tw=78 ai si: */