   1/*
   2 *    Disk Array driver for Compaq SMART2 Controllers
   3 *    Copyright 1998 Compaq Computer Corporation
   4 *
   5 *    This program is free software; you can redistribute it and/or modify
   6 *    it under the terms of the GNU General Public License as published by
   7 *    the Free Software Foundation; either version 2 of the License, or
   8 *    (at your option) any later version.
   9 *
  10 *    This program is distributed in the hope that it will be useful,
  11 *    but WITHOUT ANY WARRANTY; without even the implied warranty of
  12 *    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
  13 *    NON INFRINGEMENT.  See the GNU General Public License for more details.
  14 *
  15 *    You should have received a copy of the GNU General Public License
  16 *    along with this program; if not, write to the Free Software
  17 *    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  18 *
  19 *    Questions/Comments/Bugfixes to iss_storagedev@hp.com
  20 *
  21 */
  22#include <linux/module.h>
  23#include <linux/types.h>
  24#include <linux/pci.h>
  25#include <linux/bio.h>
  26#include <linux/interrupt.h>
  27#include <linux/kernel.h>
  28#include <linux/slab.h>
  29#include <linux/delay.h>
  30#include <linux/major.h>
  31#include <linux/fs.h>
  32#include <linux/blkpg.h>
  33#include <linux/timer.h>
  34#include <linux/proc_fs.h>
  35#include <linux/seq_file.h>
  36#include <linux/init.h>
  37#include <linux/hdreg.h>
  38#include <linux/mutex.h>
  39#include <linux/spinlock.h>
  40#include <linux/blkdev.h>
  41#include <linux/genhd.h>
  42#include <linux/scatterlist.h>
  43#include <asm/uaccess.h>
  44#include <asm/io.h>
  45
  46
  47#define SMART2_DRIVER_VERSION(maj,min,submin) ((maj<<16)|(min<<8)|(submin))
  48
  49#define DRIVER_NAME "Compaq SMART2 Driver (v 2.6.0)"
  50#define DRIVER_VERSION SMART2_DRIVER_VERSION(2,6,0)
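/* e.g. DRIVER_VERSION == SMART2_DRIVER_VERSION(2,6,0) == 0x00020600:
 * major in bits 16 and up, minor in bits 8-15, subminor in bits 0-7. */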
  51
  52/* Embedded module documentation macros - see modules.h */
  53/* Original author Chris Frantz - Compaq Computer Corporation */
  54MODULE_AUTHOR("Compaq Computer Corporation");
  55MODULE_DESCRIPTION("Driver for Compaq Smart2 Array Controllers version 2.6.0");
  56MODULE_LICENSE("GPL");
  57
  58#include "cpqarray.h"
  59#include "ida_cmd.h"
  60#include "smart1,2.h"
  61#include "ida_ioctl.h"
  62
  63#define READ_AHEAD	128
  64#define NR_CMDS		128 /* This could probably go as high as ~400 */
  65
  66#define MAX_CTLR	8
  67#define CTLR_SHIFT	8
  68
  69#define CPQARRAY_DMA_MASK	0xFFFFFFFF	/* 32 bit DMA */
  70
  71static DEFINE_MUTEX(cpqarray_mutex);
  72static int nr_ctlr;
  73static ctlr_info_t *hba[MAX_CTLR];
  74
  75static int eisa[8];
  76
  77#define NR_PRODUCTS ARRAY_SIZE(products)
  78
  79/*  board_id = Subsystem Device ID & Vendor ID
  80 *  product = Marketing Name for the board
  81 *  access = Address of the struct of function pointers
  82 */
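/* e.g. 0x40300E11 decodes as subsystem device ID 0x4030 in the high half and
 * subsystem vendor ID 0x0E11 (Compaq) in the low half -- the SMART-2/P entry
 * below and in the PCI device table. */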
  83static struct board_type products[] = {
  84	{ 0x0040110E, "IDA",			&smart1_access },
  85	{ 0x0140110E, "IDA-2",			&smart1_access },
  86	{ 0x1040110E, "IAES",			&smart1_access },
  87	{ 0x2040110E, "SMART",			&smart1_access },
  88	{ 0x3040110E, "SMART-2/E",		&smart2e_access },
  89	{ 0x40300E11, "SMART-2/P",		&smart2_access },
  90	{ 0x40310E11, "SMART-2SL",		&smart2_access },
  91	{ 0x40320E11, "Smart Array 3200",	&smart2_access },
  92	{ 0x40330E11, "Smart Array 3100ES",	&smart2_access },
  93	{ 0x40340E11, "Smart Array 221",	&smart2_access },
  94	{ 0x40400E11, "Integrated Array",	&smart4_access },
  95	{ 0x40480E11, "Compaq Raid LC2",        &smart4_access },
  96	{ 0x40500E11, "Smart Array 4200",	&smart4_access },
  97	{ 0x40510E11, "Smart Array 4250ES",	&smart4_access },
  98	{ 0x40580E11, "Smart Array 431",	&smart4_access },
  99};
 100
 101/* define the PCI info for the PCI cards this driver can control */
 102static const struct pci_device_id cpqarray_pci_device_id[] =
 103{
 104	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
 105		0x0E11, 0x4058, 0, 0, 0},       /* SA431 */
 106	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
 107		0x0E11, 0x4051, 0, 0, 0},      /* SA4250ES */
 108	{ PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_COMPAQ_42XX,
 109		0x0E11, 0x4050, 0, 0, 0},      /* SA4200 */
 110	{ PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
 111		0x0E11, 0x4048, 0, 0, 0},       /* LC2 */
 112	{ PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C1510,
 113		0x0E11, 0x4040, 0, 0, 0},      /* Integrated Array */
 114	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
 115		0x0E11, 0x4034, 0, 0, 0},       /* SA 221 */
 116	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
 117		0x0E11, 0x4033, 0, 0, 0},       /* SA 3100ES*/
 118	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
 119		0x0E11, 0x4032, 0, 0, 0},       /* SA 3200*/
 120	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
 121		0x0E11, 0x4031, 0, 0, 0},       /* SA 2SL*/
 122	{ PCI_VENDOR_ID_COMPAQ, PCI_DEVICE_ID_COMPAQ_SMART2P,
 123		0x0E11, 0x4030, 0, 0, 0},       /* SA 2P */
 124	{ 0 }
 125};
 126
 127MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
 128
 129static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
 130
 131/* Debug... */
 132#define DBG(s)	do { s } while(0)
 133/* Debug (general info)... */
 134#define DBGINFO(s) do { } while(0)
 135/* Debug Paranoid... */
 136#define DBGP(s)  do { } while(0)
 137/* Debug Extra Paranoid... */
 138#define DBGPX(s) do { } while(0)
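/* Only DBG() executes its argument; DBGINFO(), DBGP() and DBGPX() are
 * compiled out and simply discard theirs. */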
 139
 140static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
 141static void __iomem *remap_pci_mem(ulong base, ulong size);
 142static int cpqarray_eisa_detect(void);
 143static int pollcomplete(int ctlr);
 144static void getgeometry(int ctlr);
 145static void start_fwbk(int ctlr);
 146
 147static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
 148static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
 149
 150static void free_hba(int i);
 151static int alloc_cpqarray_hba(void);
 152
 153static int sendcmd(
 154	__u8	cmd,
 155	int	ctlr,
 156	void	*buff,
 157	size_t	size,
 158	unsigned int blk,
 159	unsigned int blkcnt,
 160	unsigned int log_unit );
 161
 162static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
 163static int ida_release(struct gendisk *disk, fmode_t mode);
 164static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
 165static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 166static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
 167
 168static void do_ida_request(struct request_queue *q);
 169static void start_io(ctlr_info_t *h);
 170
 171static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
 172static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
 173static inline void complete_command(cmdlist_t *cmd, int timeout);
 174
 175static irqreturn_t do_ida_intr(int irq, void *dev_id);
 176static void ida_timer(unsigned long tdata);
 177static int ida_revalidate(struct gendisk *disk);
 178static int revalidate_allvol(ctlr_info_t *host);
 179static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
 180
 181#ifdef CONFIG_PROC_FS
 182static void ida_procinit(int i);
 183#else
 184static void ida_procinit(int i) {}
 185#endif
 186
 187static inline drv_info_t *get_drv(struct gendisk *disk)
 188{
 189	return disk->private_data;
 190}
 191
 192static inline ctlr_info_t *get_host(struct gendisk *disk)
 193{
 194	return disk->queue->queuedata;
 195}
 196
 197
 198static const struct block_device_operations ida_fops  = {
 199	.owner		= THIS_MODULE,
 200	.open		= ida_unlocked_open,
 201	.release	= ida_release,
 202	.ioctl		= ida_ioctl,
 203	.getgeo		= ida_getgeo,
 204	.revalidate_disk= ida_revalidate,
 205};
 206
 207
 208#ifdef CONFIG_PROC_FS
 209
 210static struct proc_dir_entry *proc_array;
 211static const struct file_operations ida_proc_fops;
 212
 213/*
 214 * Get us a file in /proc/array that says something about each controller.
 215 * Create /proc/array if it doesn't exist yet.
 216 */
 217static void __init ida_procinit(int i)
 218{
 219	if (proc_array == NULL) {
 220		proc_array = proc_mkdir("driver/cpqarray", NULL);
 221		if (!proc_array) return;
 222	}
 223
 224	proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
 225}
 226
 227/*
 228 * Report information about this controller.
 229 */
 230static int ida_proc_show(struct seq_file *m, void *v)
 231{
 232	int i, ctlr;
 233	ctlr_info_t *h = (ctlr_info_t*)m->private;
 234	drv_info_t *drv;
 235#ifdef CPQ_PROC_PRINT_QUEUES
 236	cmdlist_t *c;
 237	unsigned long flags;
 238#endif
 239
 240	ctlr = h->ctlr;
 241	seq_printf(m, "%s:  Compaq %s Controller\n"
 242		"       Board ID: 0x%08lx\n"
 243		"       Firmware Revision: %c%c%c%c\n"
 244		"       Controller Sig: 0x%08lx\n"
 245		"       Memory Address: 0x%08lx\n"
 246		"       I/O Port: 0x%04x\n"
 247		"       IRQ: %d\n"
 248		"       Logical drives: %d\n"
 249		"       Physical drives: %d\n\n"
 250		"       Current Q depth: %d\n"
 251		"       Max Q depth since init: %d\n\n",
 252		h->devname, 
 253		h->product_name,
 254		(unsigned long)h->board_id,
 255		h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
 256		(unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
 257		(unsigned int) h->io_mem_addr, (unsigned int)h->intr,
 258		h->log_drives, h->phys_drives,
 259		h->Qdepth, h->maxQsinceinit);
 260
 261	seq_puts(m, "Logical Drive Info:\n");
 262
 263	for(i=0; i<h->log_drives; i++) {
 264		drv = &h->drv[i];
 265		seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
 266				ctlr, i, drv->blk_size, drv->nr_blks);
 267	}
 268
 269#ifdef CPQ_PROC_PRINT_QUEUES
 270	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); 
 271	seq_puts(m, "\nCurrent Queues:\n");
 272
 273	c = h->reqQ;
 274	seq_printf(m, "reqQ = %p", c);
 275	if (c) c=c->next;
 276	while(c && c != h->reqQ) {
 277		seq_printf(m, "->%p", c);
 278		c=c->next;
 279	}
 280
 281	c = h->cmpQ;
 282	seq_printf(m, "\ncmpQ = %p", c);
 283	if (c) c=c->next;
 284	while(c && c != h->cmpQ) {
 285		seq_printf(m, "->%p", c);
 286		c=c->next;
 287	}
 288
 289	seq_putc(m, '\n');
 290	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); 
 291#endif
 292	seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
 293			h->nr_allocs, h->nr_frees);
 294	return 0;
 295}
 296
 297static int ida_proc_open(struct inode *inode, struct file *file)
 298{
 299	return single_open(file, ida_proc_show, PDE(inode)->data);
 300}
 301
 302static const struct file_operations ida_proc_fops = {
 303	.owner		= THIS_MODULE,
 304	.open		= ida_proc_open,
 305	.read		= seq_read,
 306	.llseek		= seq_lseek,
 307	.release	= single_release,
 308};
 309#endif /* CONFIG_PROC_FS */
 310
 311module_param_array(eisa, int, NULL, 0);
 312
 313static void release_io_mem(ctlr_info_t *c)
 314{
 315	/* if IO mem was not protected do nothing */
 316	if( c->io_mem_addr == 0)
 317		return;
 318	release_region(c->io_mem_addr, c->io_mem_length);
 319	c->io_mem_addr = 0;
 320	c->io_mem_length = 0;
 321}
 322
 323static void __devexit cpqarray_remove_one(int i)
 324{
 325	int j;
 326	char buff[4];
 327
 328	/* sendcmd will turn off interrupt, and send the flush...
 329	 * To write all data in the battery backed cache to disks
 330	 * no data returned, but don't want to send NULL to sendcmd */
 331	if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
 332	{
 333		printk(KERN_WARNING "Unable to flush cache on controller %d\n",
 334				i);
 335	}
 336	free_irq(hba[i]->intr, hba[i]);
 337	iounmap(hba[i]->vaddr);
 338	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
 339	del_timer(&hba[i]->timer);
 340	remove_proc_entry(hba[i]->devname, proc_array);
 341	pci_free_consistent(hba[i]->pci_dev,
 342			NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
 343			hba[i]->cmd_pool_dhandle);
 344	kfree(hba[i]->cmd_pool_bits);
 345	for(j = 0; j < NWD; j++) {
 346		if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
 347			del_gendisk(ida_gendisk[i][j]);
 348		put_disk(ida_gendisk[i][j]);
 349	}
 350	blk_cleanup_queue(hba[i]->queue);
 351	release_io_mem(hba[i]);
 352	free_hba(i);
 353}
 354
 355static void __devexit cpqarray_remove_one_pci (struct pci_dev *pdev)
 356{
 357	int i;
 358	ctlr_info_t *tmp_ptr;
 359
 360	if (pci_get_drvdata(pdev) == NULL) {
 361		printk( KERN_ERR "cpqarray: Unable to remove device \n");
 362		return;
 363	}
 364
 365	tmp_ptr = pci_get_drvdata(pdev);
 366	i = tmp_ptr->ctlr;
 367	if (hba[i] == NULL) {
 368		printk(KERN_ERR "cpqarray: controller %d appears to have"
  369			" already been removed\n", i);
 370		return;
 371        }
 372	pci_set_drvdata(pdev, NULL);
 373
 374	cpqarray_remove_one(i);
 375}
 376
 377/* removing an instance that was not removed automatically..
 378 * must be an eisa card.
 379 */
 380static void __devexit cpqarray_remove_one_eisa (int i)
 381{
 382	if (hba[i] == NULL) {
 383		printk(KERN_ERR "cpqarray: controller %d appears to have"
  384			" already been removed\n", i);
 385		return;
 386        }
 387	cpqarray_remove_one(i);
 388}
 389
 390/* pdev is NULL for eisa */
 391static int __devinit cpqarray_register_ctlr( int i, struct pci_dev *pdev)
 392{
 393	struct request_queue *q;
 394	int j;
 395
 396	/* 
 397	 * register block devices
 398	 * Find disks and fill in structs
 399	 * Get an interrupt, set the Q depth and get into /proc
 400	 */
 401
  402	/* If this is successful it should ensure that we are the only */
 403	/* instance of the driver */
 404	if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
 405		goto Enomem4;
 406	}
 407	hba[i]->access.set_intr_mask(hba[i], 0);
 408	if (request_irq(hba[i]->intr, do_ida_intr,
 409		IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
 410	{
 411		printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
 412				hba[i]->intr, hba[i]->devname);
 413		goto Enomem3;
 414	}
 415		
 416	for (j=0; j<NWD; j++) {
 417		ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
 418		if (!ida_gendisk[i][j])
 419			goto Enomem2;
 420	}
 421
 422	hba[i]->cmd_pool = pci_alloc_consistent(
 423		hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
 424		&(hba[i]->cmd_pool_dhandle));
 425	hba[i]->cmd_pool_bits = kcalloc(
 426		DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
 427		GFP_KERNEL);
 428
 429	if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
 430			goto Enomem1;
 431
 432	memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
 433	printk(KERN_INFO "cpqarray: Finding drives on %s",
 434		hba[i]->devname);
 435
 436	spin_lock_init(&hba[i]->lock);
 437	q = blk_init_queue(do_ida_request, &hba[i]->lock);
 438	if (!q)
 439		goto Enomem1;
 440
 441	hba[i]->queue = q;
 442	q->queuedata = hba[i];
 443
 444	getgeometry(i);
 445	start_fwbk(i);
 446
 447	ida_procinit(i);
 448
 449	if (pdev)
 450		blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
 451
 452	/* This is a hardware imposed limit. */
 453	blk_queue_max_segments(q, SG_MAX);
 454
 455	init_timer(&hba[i]->timer);
 456	hba[i]->timer.expires = jiffies + IDA_TIMER;
 457	hba[i]->timer.data = (unsigned long)hba[i];
 458	hba[i]->timer.function = ida_timer;
 459	add_timer(&hba[i]->timer);
 460
 461	/* Enable IRQ now that spinlock and rate limit timer are set up */
 462	hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
 463
 464	for(j=0; j<NWD; j++) {
 465		struct gendisk *disk = ida_gendisk[i][j];
 466		drv_info_t *drv = &hba[i]->drv[j];
 467		sprintf(disk->disk_name, "ida/c%dd%d", i, j);
 468		disk->major = COMPAQ_SMART2_MAJOR + i;
 469		disk->first_minor = j<<NWD_SHIFT;
 470		disk->fops = &ida_fops;
 471		if (j && !drv->nr_blks)
 472			continue;
 473		blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
 474		set_capacity(disk, drv->nr_blks);
 475		disk->queue = hba[i]->queue;
 476		disk->private_data = drv;
 477		add_disk(disk);
 478	}
 479
 480	/* done ! */
 481	return(i);
 482
 483Enomem1:
 484	nr_ctlr = i; 
 485	kfree(hba[i]->cmd_pool_bits);
 486	if (hba[i]->cmd_pool)
 487		pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t), 
 488				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
 489Enomem2:
 490	while (j--) {
 491		put_disk(ida_gendisk[i][j]);
 492		ida_gendisk[i][j] = NULL;
 493	}
 494	free_irq(hba[i]->intr, hba[i]);
 495Enomem3:
 496	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
 497Enomem4:
 498	if (pdev)
 499		pci_set_drvdata(pdev, NULL);
 500	release_io_mem(hba[i]);
 501	free_hba(i);
 502
  503	printk( KERN_ERR "cpqarray: out of memory\n");
 504
 505	return -1;
 506}
 507
 508static int __devinit cpqarray_init_one( struct pci_dev *pdev,
 509	const struct pci_device_id *ent)
 510{
 511	int i;
 512
 513	printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
 514			" bus %d dev %d func %d\n",
 515			pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
 516			PCI_FUNC(pdev->devfn));
 517	i = alloc_cpqarray_hba();
 518	if( i < 0 )
 519		return (-1);
 520	memset(hba[i], 0, sizeof(ctlr_info_t));
 521	sprintf(hba[i]->devname, "ida%d", i);
 522	hba[i]->ctlr = i;
 523	/* Initialize the pdev driver private data */
 524	pci_set_drvdata(pdev, hba[i]);
 525
 526	if (cpqarray_pci_init(hba[i], pdev) != 0) {
 527		pci_set_drvdata(pdev, NULL);
 528		release_io_mem(hba[i]);
 529		free_hba(i);
 530		return -1;
 531	}
 532
 533	return (cpqarray_register_ctlr(i, pdev));
 534}
 535
 536static struct pci_driver cpqarray_pci_driver = {
 537	.name = "cpqarray",
 538	.probe = cpqarray_init_one,
 539	.remove = __devexit_p(cpqarray_remove_one_pci),
 540	.id_table = cpqarray_pci_device_id,
 541};
 542
 543/*
 544 *  This is it.  Find all the controllers and register them.
 545 *  returns the number of block devices registered.
 546 */
 547static int __init cpqarray_init(void)
 548{
 549	int num_cntlrs_reg = 0;
 550	int i;
 551	int rc = 0;
 552
 553	/* detect controllers */
 554	printk(DRIVER_NAME "\n");
 555
 556	rc = pci_register_driver(&cpqarray_pci_driver);
 557	if (rc)
 558		return rc;
 559	cpqarray_eisa_detect();
 560	
 561	for (i=0; i < MAX_CTLR; i++) {
 562		if (hba[i] != NULL)
 563			num_cntlrs_reg++;
 564	}
 565
 566	if (num_cntlrs_reg)
 567		return 0;
 568	else {
 569		pci_unregister_driver(&cpqarray_pci_driver);
 570		return -ENODEV;
 571	}
 572}
 573
 574/* Function to find the first free pointer into our hba[] array */
 575/* Returns -1 if no free entries are left.  */
 576static int alloc_cpqarray_hba(void)
 577{
 578	int i;
 579
 580	for(i=0; i< MAX_CTLR; i++) {
 581		if (hba[i] == NULL) {
 582			hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
 583			if(hba[i]==NULL) {
 584				printk(KERN_ERR "cpqarray: out of memory.\n");
 585				return (-1);
 586			}
 587			return (i);
 588		}
 589	}
 590	printk(KERN_WARNING "cpqarray: This driver supports a maximum"
 591		" of 8 controllers.\n");
 592	return(-1);
 593}
 594
 595static void free_hba(int i)
 596{
 597	kfree(hba[i]);
 598	hba[i]=NULL;
 599}
 600
 601/*
 602 * Find the IO address of the controller, its IRQ and so forth.  Fill
 603 * in some basic stuff into the ctlr_info_t structure.
 604 */
 605static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 606{
 607	ushort vendor_id, device_id, command;
 608	unchar cache_line_size, latency_timer;
 609	unchar irq, revision;
 610	unsigned long addr[6];
 611	__u32 board_id;
 612
 613	int i;
 614
 615	c->pci_dev = pdev;
 616	pci_set_master(pdev);
 617	if (pci_enable_device(pdev)) {
 618		printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
 619		return -1;
 620	}
 621	vendor_id = pdev->vendor;
 622	device_id = pdev->device;
 
 623	irq = pdev->irq;
 624
 625	for(i=0; i<6; i++)
 626		addr[i] = pci_resource_start(pdev, i);
 627
 628	if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
 629	{
 630		printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
 631		return -1;
 632	}
 633
 634	pci_read_config_word(pdev, PCI_COMMAND, &command);
 635	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
 636	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
 637	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
 638
 639	pci_read_config_dword(pdev, 0x2c, &board_id);
 640
 641	/* check to see if controller has been disabled */
 642	if(!(command & 0x02)) {
 643		printk(KERN_WARNING
 644			"cpqarray: controller appears to be disabled\n");
 645		return(-1);
 646	}
 647
 648DBGINFO(
 649	printk("vendor_id = %x\n", vendor_id);
 650	printk("device_id = %x\n", device_id);
 651	printk("command = %x\n", command);
 652	for(i=0; i<6; i++)
 653		printk("addr[%d] = %lx\n", i, addr[i]);
 654	printk("revision = %x\n", revision);
 655	printk("irq = %x\n", irq);
 656	printk("cache_line_size = %x\n", cache_line_size);
 657	printk("latency_timer = %x\n", latency_timer);
 658	printk("board_id = %x\n", board_id);
 659);
 660
 661	c->intr = irq;
 662
 663	for(i=0; i<6; i++) {
 664		if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
 665		{ /* IO space */
 666			c->io_mem_addr = addr[i];
 667			c->io_mem_length = pci_resource_end(pdev, i)
 668				- pci_resource_start(pdev, i) + 1;
 669			if(!request_region( c->io_mem_addr, c->io_mem_length,
 670				"cpqarray"))
 671			{
 672				printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
 673				c->io_mem_addr = 0;
 674				c->io_mem_length = 0;
 675			}
 676			break;
 677		}
 678	}
 679
 680	c->paddr = 0;
 681	for(i=0; i<6; i++)
 682		if (!(pci_resource_flags(pdev, i) &
 683				PCI_BASE_ADDRESS_SPACE_IO)) {
 684			c->paddr = pci_resource_start (pdev, i);
 685			break;
 686		}
 687	if (!c->paddr)
 688		return -1;
 689	c->vaddr = remap_pci_mem(c->paddr, 128);
 690	if (!c->vaddr)
 691		return -1;
 692	c->board_id = board_id;
 693
 694	for(i=0; i<NR_PRODUCTS; i++) {
 695		if (board_id == products[i].board_id) {
 696			c->product_name = products[i].product_name;
 697			c->access = *(products[i].access);
 698			break;
 699		}
 700	}
 701	if (i == NR_PRODUCTS) {
 702		printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
 703			" to access the SMART Array controller %08lx\n", 
 704				(unsigned long)board_id);
 705		return -1;
 706	}
 707
 708	return 0;
 709}
 710
 711/*
 712 * Map (physical) PCI mem into (virtual) kernel space
 713 */
 714static void __iomem *remap_pci_mem(ulong base, ulong size)
 715{
 716        ulong page_base        = ((ulong) base) & PAGE_MASK;
 717        ulong page_offs        = ((ulong) base) - page_base;
 718        void __iomem *page_remapped    = ioremap(page_base, page_offs+size);
 719
 720        return (page_remapped ? (page_remapped + page_offs) : NULL);
 721}
 722
 723#ifndef MODULE
 724/*
 725 * Config string is a comma separated set of i/o addresses of EISA cards.
 726 */
 727static int cpqarray_setup(char *str)
 728{
 729	int i, ints[9];
 730
 731	(void)get_options(str, ARRAY_SIZE(ints), ints);
 732
 733	for(i=0; i<ints[0] && i<8; i++)
 734		eisa[i] = ints[i+1];
 735	return 1;
 736}
 737
 738__setup("smart2=", cpqarray_setup);
 739
 740#endif
 741
 742/*
 743 * Find an EISA controller's signature.  Set up an hba if we find it.
 744 */
 745static int __devinit cpqarray_eisa_detect(void)
 746{
 747	int i=0, j;
 748	__u32 board_id;
 749	int intr;
 750	int ctlr;
 751	int num_ctlr = 0;
 752
 753	while(i<8 && eisa[i]) {
 754		ctlr = alloc_cpqarray_hba();
 755		if(ctlr == -1)
 756			break;
 757		board_id = inl(eisa[i]+0xC80);
 758		for(j=0; j < NR_PRODUCTS; j++)
 759			if (board_id == products[j].board_id) 
 760				break;
 761
 762		if (j == NR_PRODUCTS) {
 763			printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
  764				" to access the SMART Array controller %08lx\n", (unsigned long)board_id);
 765			continue;
 766		}
 767
 768		memset(hba[ctlr], 0, sizeof(ctlr_info_t));
 769		hba[ctlr]->io_mem_addr = eisa[i];
 770		hba[ctlr]->io_mem_length = 0x7FF;
 771		if(!request_region(hba[ctlr]->io_mem_addr,
 772				hba[ctlr]->io_mem_length,
 773				"cpqarray"))
 774		{
 775			printk(KERN_WARNING "cpqarray: I/O range already in "
 776					"use addr = %lx length = %ld\n",
 777					hba[ctlr]->io_mem_addr,
 778					hba[ctlr]->io_mem_length);
 779			free_hba(ctlr);
 780			continue;
 781		}
 782
 783		/*
 784		 * Read the config register to find our interrupt
 785		 */
 786		intr = inb(eisa[i]+0xCC0) >> 4;
 787		if (intr & 1) intr = 11;
 788		else if (intr & 2) intr = 10;
 789		else if (intr & 4) intr = 14;
 790		else if (intr & 8) intr = 15;
 791		
 792		hba[ctlr]->intr = intr;
 793		sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
 794		hba[ctlr]->product_name = products[j].product_name;
 795		hba[ctlr]->access = *(products[j].access);
 796		hba[ctlr]->ctlr = ctlr;
 797		hba[ctlr]->board_id = board_id;
 798		hba[ctlr]->pci_dev = NULL; /* not PCI */
 799
 800DBGINFO(
 801	printk("i = %d, j = %d\n", i, j);
 802	printk("irq = %x\n", intr);
 803	printk("product name = %s\n", products[j].product_name);
 804	printk("board_id = %x\n", board_id);
 805);
 806
 807		num_ctlr++;
 808		i++;
 809
 810		if (cpqarray_register_ctlr(ctlr, NULL) == -1)
 811			printk(KERN_WARNING
 812				"cpqarray: Can't register EISA controller %d\n",
 813				ctlr);
 814
 815	}
 816
 817	return num_ctlr;
 818}
 819
 820/*
 821 * Open.  Make sure the device is really there.
 822 */
 823static int ida_open(struct block_device *bdev, fmode_t mode)
 824{
 825	drv_info_t *drv = get_drv(bdev->bd_disk);
 826	ctlr_info_t *host = get_host(bdev->bd_disk);
 827
 828	DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name));
 829	/*
 830	 * Root is allowed to open raw volume zero even if it's not configured
 831	 * so array config can still work.  I don't think I really like this,
  832	 * but I'm already using way too many device nodes to claim another one
 833	 * for "raw controller".
 834	 */
 835	if (!drv->nr_blks) {
 836		if (!capable(CAP_SYS_RAWIO))
 837			return -ENXIO;
 838		if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
 839			return -ENXIO;
 840	}
 841	host->usage_count++;
 842	return 0;
 843}
 844
 845static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
 846{
 847	int ret;
 848
 849	mutex_lock(&cpqarray_mutex);
 850	ret = ida_open(bdev, mode);
 851	mutex_unlock(&cpqarray_mutex);
 852
 853	return ret;
 854}
 855
 856/*
 857 * Close.  Sync first.
 858 */
 859static int ida_release(struct gendisk *disk, fmode_t mode)
 860{
 861	ctlr_info_t *host;
 862
 863	mutex_lock(&cpqarray_mutex);
 864	host = get_host(disk);
 865	host->usage_count--;
 866	mutex_unlock(&cpqarray_mutex);
 867
 868	return 0;
 869}
 870
 871/*
 872 * Enqueuing and dequeuing functions for cmdlists.
 873 */
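/*
 * reqQ and cmpQ are circular doubly-linked lists.  *Qptr points at the head;
 * addQ links a command in just before the head (i.e. at the tail), and
 * removeQ unlinks an element, setting *Qptr to NULL when the list empties.
 */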
 874static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
 875{
 876	if (*Qptr == NULL) {
 877		*Qptr = c;
 878		c->next = c->prev = c;
 879	} else {
 880		c->prev = (*Qptr)->prev;
 881		c->next = (*Qptr);
 882		(*Qptr)->prev->next = c;
 883		(*Qptr)->prev = c;
 884	}
 885}
 886
 887static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
 888{
 889	if (c && c->next != c) {
 890		if (*Qptr == c) *Qptr = c->next;
 891		c->prev->next = c->next;
 892		c->next->prev = c->prev;
 893	} else {
 894		*Qptr = NULL;
 895	}
 896	return c;
 897}
 898
 899/*
 900 * Get a request and submit it to the controller.
 901 * This routine needs to grab all the requests it possibly can from the
 902 * req Q and submit them.  Interrupts are off (and need to be off) when you
 903 * are in here (either via the dummy do_ida_request functions or by being
  904 * called from the interrupt handler).
 905 */
 906static void do_ida_request(struct request_queue *q)
 907{
 908	ctlr_info_t *h = q->queuedata;
 909	cmdlist_t *c;
 910	struct request *creq;
 911	struct scatterlist tmp_sg[SG_MAX];
 912	int i, dir, seg;
 913
 914queue_next:
 915	creq = blk_peek_request(q);
 916	if (!creq)
 917		goto startio;
 918
 919	BUG_ON(creq->nr_phys_segments > SG_MAX);
 920
 921	if ((c = cmd_alloc(h,1)) == NULL)
 922		goto startio;
 923
 924	blk_start_request(creq);
 925
 926	c->ctlr = h->ctlr;
 927	c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
 928	c->hdr.size = sizeof(rblk_t) >> 2;
 929	c->size += sizeof(rblk_t);
 930
 931	c->req.hdr.blk = blk_rq_pos(creq);
 932	c->rq = creq;
 933DBGPX(
 934	printk("sector=%d, nr_sectors=%u\n",
 935	       blk_rq_pos(creq), blk_rq_sectors(creq));
 936);
 937	sg_init_table(tmp_sg, SG_MAX);
 938	seg = blk_rq_map_sg(q, creq, tmp_sg);
 939
 940	/* Now do all the DMA Mappings */
 941	if (rq_data_dir(creq) == READ)
 942		dir = PCI_DMA_FROMDEVICE;
 943	else
 944		dir = PCI_DMA_TODEVICE;
 945	for( i=0; i < seg; i++)
 946	{
 947		c->req.sg[i].size = tmp_sg[i].length;
 948		c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
 949						 sg_page(&tmp_sg[i]),
 950						 tmp_sg[i].offset,
 951						 tmp_sg[i].length, dir);
 952	}
 953DBGPX(	printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
 954	c->req.hdr.sg_cnt = seg;
 955	c->req.hdr.blk_cnt = blk_rq_sectors(creq);
 956	c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
 957	c->type = CMD_RWREQ;
 958
 959	/* Put the request on the tail of the request queue */
 960	addQ(&h->reqQ, c);
 961	h->Qdepth++;
 962	if (h->Qdepth > h->maxQsinceinit) 
 963		h->maxQsinceinit = h->Qdepth;
 964
 965	goto queue_next;
 966
 967startio:
 968	start_io(h);
 969}
 970
 971/* 
 972 * start_io submits everything on a controller's request queue
 973 * and moves it to the completion queue.
 974 *
 975 * Interrupts had better be off if you're in here
 976 */
 977static void start_io(ctlr_info_t *h)
 978{
 979	cmdlist_t *c;
 980
 981	while((c = h->reqQ) != NULL) {
 982		/* Can't do anything if we're busy */
 983		if (h->access.fifo_full(h) == 0)
 984			return;
 985
 986		/* Get the first entry from the request Q */
 987		removeQ(&h->reqQ, c);
 988		h->Qdepth--;
 989	
 990		/* Tell the controller to do our bidding */
 991		h->access.submit_command(h, c);
 992
 993		/* Get onto the completion Q */
 994		addQ(&h->cmpQ, c);
 995	}
 996}
 997
 998/*
 999 * Mark all buffers that cmd was responsible for
1000 */
1001static inline void complete_command(cmdlist_t *cmd, int timeout)
1002{
1003	struct request *rq = cmd->rq;
1004	int error = 0;
1005	int i, ddir;
1006
1007	if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1008	   (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1009		printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1010				cmd->ctlr, cmd->hdr.unit);
1011		hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1012	}
1013	if (cmd->req.hdr.rcode & RCODE_FATAL) {
1014		printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1015				cmd->ctlr, cmd->hdr.unit);
1016		error = -EIO;
1017	}
1018	if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1019				printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1020				cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1021				cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1022				cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1023		error = -EIO;
1024	}
1025	if (timeout)
1026		error = -EIO;
1027	/* unmap the DMA mapping for all the scatter gather elements */
1028	if (cmd->req.hdr.cmd == IDA_READ)
1029		ddir = PCI_DMA_FROMDEVICE;
1030	else
1031		ddir = PCI_DMA_TODEVICE;
1032        for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1033                pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1034				cmd->req.sg[i].size, ddir);
1035
1036	DBGPX(printk("Done with %p\n", rq););
1037	__blk_end_request_all(rq, error);
1038}
1039
1040/*
1041 *  The controller will interrupt us upon completion of commands.
1042 *  Find the command on the completion queue, remove it, tell the OS and
1043 *  try to queue up more IO
1044 */
1045static irqreturn_t do_ida_intr(int irq, void *dev_id)
1046{
1047	ctlr_info_t *h = dev_id;
1048	cmdlist_t *c;
1049	unsigned long istat;
1050	unsigned long flags;
1051	__u32 a,a1;
1052
1053	istat = h->access.intr_pending(h);
1054	/* Is this interrupt for us? */
1055	if (istat == 0)
1056		return IRQ_NONE;
1057
1058	/*
1059	 * If there are completed commands in the completion queue,
1060	 * we had better do something about it.
1061	 */
1062	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1063	if (istat & FIFO_NOT_EMPTY) {
1064		while((a = h->access.command_completed(h))) {
1065			a1 = a; a &= ~3;
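			/* The low two bits of the completion word are status
			 * flags; clear them to recover the command's bus
			 * address.  a1 keeps the raw value for the error
			 * check further down. */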
1066			if ((c = h->cmpQ) == NULL)
1067			{  
1068				printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1069				continue;	
1070			} 
1071			while(c->busaddr != a) {
1072				c = c->next;
1073				if (c == h->cmpQ) 
1074					break;
1075			}
1076			/*
1077			 * If we've found the command, take it off the
1078			 * completion Q and free it
1079			 */
1080			if (c->busaddr == a) {
1081				removeQ(&h->cmpQ, c);
1082				/*  Check for invalid command.
1083                                 *  Controller returns command error,
1084                                 *  But rcode = 0.
1085                                 */
1086
1087				if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1088                                {
1089                                	c->req.hdr.rcode = RCODE_INVREQ;
1090                                }
1091				if (c->type == CMD_RWREQ) {
1092					complete_command(c, 0);
1093					cmd_free(h, c, 1);
1094				} else if (c->type == CMD_IOCTL_PEND) {
1095					c->type = CMD_IOCTL_DONE;
1096				}
1097				continue;
1098			}
1099		}
1100	}
1101
1102	/*
1103	 * See if we can queue up some more IO
1104	 */
1105	do_ida_request(h->queue);
1106	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); 
1107	return IRQ_HANDLED;
1108}
1109
1110/*
1111 * This timer was for timing out requests that haven't happened after
1112 * IDA_TIMEOUT.  That wasn't such a good idea.  This timer is used to
1113 * reset a flags structure so we don't flood the user with
1114 * "Non-Fatal error" messages.
1115 */
1116static void ida_timer(unsigned long tdata)
1117{
1118	ctlr_info_t *h = (ctlr_info_t*)tdata;
1119
1120	h->timer.expires = jiffies + IDA_TIMER;
1121	add_timer(&h->timer);
1122	h->misc_tflags = 0;
1123}
1124
1125static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1126{
1127	drv_info_t *drv = get_drv(bdev->bd_disk);
1128
1129	if (drv->cylinders) {
1130		geo->heads = drv->heads;
1131		geo->sectors = drv->sectors;
1132		geo->cylinders = drv->cylinders;
1133	} else {
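		/* No geometry reported by the firmware: fake 255 heads x 63
		 * sectors and derive the cylinder count from the capacity. */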
1134		geo->heads = 0xff;
1135		geo->sectors = 0x3f;
1136		geo->cylinders = drv->nr_blks / (0xff*0x3f);
1137	}
1138
1139	return 0;
1140}
1141
1142/*
1143 *  ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1144 *  setting readahead and submitting commands from userspace to the controller.
1145 */
1146static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
1147{
1148	drv_info_t *drv = get_drv(bdev->bd_disk);
1149	ctlr_info_t *host = get_host(bdev->bd_disk);
1150	int error;
1151	ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1152	ida_ioctl_t *my_io;
1153
1154	switch(cmd) {
1155	case IDAGETDRVINFO:
1156		if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1157			return -EFAULT;
1158		return 0;
1159	case IDAPASSTHRU:
1160		if (!capable(CAP_SYS_RAWIO))
1161			return -EPERM;
1162		my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1163		if (!my_io)
1164			return -ENOMEM;
1165		error = -EFAULT;
1166		if (copy_from_user(my_io, io, sizeof(*my_io)))
1167			goto out_passthru;
1168		error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1169		if (error)
1170			goto out_passthru;
1171		error = -EFAULT;
1172		if (copy_to_user(io, my_io, sizeof(*my_io)))
1173			goto out_passthru;
1174		error = 0;
1175out_passthru:
1176		kfree(my_io);
1177		return error;
1178	case IDAGETCTLRSIG:
1179		if (!arg) return -EINVAL;
1180		if (put_user(host->ctlr_sig, (int __user *)arg))
1181			return -EFAULT;
1182		return 0;
1183	case IDAREVALIDATEVOLS:
1184		if (MINOR(bdev->bd_dev) != 0)
1185			return -ENXIO;
1186		return revalidate_allvol(host);
1187	case IDADRIVERVERSION:
1188		if (!arg) return -EINVAL;
1189		if (put_user(DRIVER_VERSION, (unsigned long __user *)arg))
1190			return -EFAULT;
1191		return 0;
1192	case IDAGETPCIINFO:
1193	{
1194		
1195		ida_pci_info_struct pciinfo;
1196
1197		if (!arg) return -EINVAL;
1198		pciinfo.bus = host->pci_dev->bus->number;
1199		pciinfo.dev_fn = host->pci_dev->devfn;
1200		pciinfo.board_id = host->board_id;
1201		if(copy_to_user((void __user *) arg, &pciinfo,  
1202			sizeof( ida_pci_info_struct)))
1203				return -EFAULT;
1204		return(0);
1205	}	
1206
1207	default:
1208		return -EINVAL;
1209	}
1210		
1211}
1212
1213static int ida_ioctl(struct block_device *bdev, fmode_t mode,
1214			     unsigned int cmd, unsigned long param)
1215{
1216	int ret;
1217
1218	mutex_lock(&cpqarray_mutex);
1219	ret = ida_locked_ioctl(bdev, mode, cmd, param);
1220	mutex_unlock(&cpqarray_mutex);
1221
1222	return ret;
1223}
1224
1225/*
1226 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1227 * The command block (io) has already been copied to kernel space for us,
1228 * however, any elements in the sglist need to be copied to kernel space
1229 * or copied back to userspace.
1230 *
1231 * Only root may perform a controller passthru command, however I'm not doing
1232 * any serious sanity checking on the arguments.  Doing an IDA_WRITE_MEDIA and
1233 * putting a 64M buffer in the sglist is probably a *bad* idea.
1234 */
1235static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1236{
1237	int ctlr = h->ctlr;
1238	cmdlist_t *c;
1239	void *p = NULL;
1240	unsigned long flags;
1241	int error;
1242
1243	if ((c = cmd_alloc(h, 0)) == NULL)
1244		return -ENOMEM;
1245	c->ctlr = ctlr;
1246	c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1247	c->hdr.size = sizeof(rblk_t) >> 2;
1248	c->size += sizeof(rblk_t);
1249
1250	c->req.hdr.cmd = io->cmd;
1251	c->req.hdr.blk = io->blk;
1252	c->req.hdr.blk_cnt = io->blk_cnt;
1253	c->type = CMD_IOCTL_PEND;
1254
1255	/* Pre submit processing */
1256	switch(io->cmd) {
1257	case PASSTHRU_A:
1258		p = memdup_user(io->sg[0].addr, io->sg[0].size);
1259		if (IS_ERR(p)) {
1260			error = PTR_ERR(p);
1261			cmd_free(h, c, 0);
1262			return error;
1263		}
1264		c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c), 
1265				sizeof(ida_ioctl_t), 
1266				PCI_DMA_BIDIRECTIONAL);
1267		c->req.sg[0].size = io->sg[0].size;
1268		c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
1269			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1270		c->req.hdr.sg_cnt = 1;
1271		break;
1272	case IDA_READ:
1273	case READ_FLASH_ROM:
1274	case SENSE_CONTROLLER_PERFORMANCE:
1275		p = kmalloc(io->sg[0].size, GFP_KERNEL);
1276		if (!p) 
1277		{ 
1278                        error = -ENOMEM; 
1279                        cmd_free(h, c, 0);
1280                        return(error);
1281                }
1282
1283		c->req.sg[0].size = io->sg[0].size;
1284		c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
1285			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); 
1286		c->req.hdr.sg_cnt = 1;
1287		break;
1288	case IDA_WRITE:
1289	case IDA_WRITE_MEDIA:
1290	case DIAG_PASS_THRU:
1291	case COLLECT_BUFFER:
1292	case WRITE_FLASH_ROM:
1293		p = memdup_user(io->sg[0].addr, io->sg[0].size);
1294		if (IS_ERR(p)) {
1295			error = PTR_ERR(p);
1296			cmd_free(h, c, 0);
1297			return error;
1298                }
1299		c->req.sg[0].size = io->sg[0].size;
1300		c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
1301			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); 
1302		c->req.hdr.sg_cnt = 1;
1303		break;
1304	default:
1305		c->req.sg[0].size = sizeof(io->c);
1306		c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c, 
1307			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1308		c->req.hdr.sg_cnt = 1;
1309	}
1310	
1311	/* Put the request on the tail of the request queue */
1312	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1313	addQ(&h->reqQ, c);
1314	h->Qdepth++;
1315	start_io(h);
1316	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1317
1318	/* Wait for completion */
1319	while(c->type != CMD_IOCTL_DONE)
1320		schedule();
1321
1322	/* Unmap the DMA  */
1323	pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size, 
1324		PCI_DMA_BIDIRECTIONAL);
1325	/* Post submit processing */
1326	switch(io->cmd) {
1327	case PASSTHRU_A:
1328		pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1329                                sizeof(ida_ioctl_t),
1330                                PCI_DMA_BIDIRECTIONAL);
1331	case IDA_READ:
1332	case DIAG_PASS_THRU:
1333	case SENSE_CONTROLLER_PERFORMANCE:
1334	case READ_FLASH_ROM:
1335		if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1336			kfree(p);
1337			return -EFAULT;
1338		}
1339		/* fall through and free p */
1340	case IDA_WRITE:
1341	case IDA_WRITE_MEDIA:
1342	case COLLECT_BUFFER:
1343	case WRITE_FLASH_ROM:
1344		kfree(p);
1345		break;
1346	default:;
1347		/* Nothing to do */
1348	}
1349
1350	io->rcode = c->req.hdr.rcode;
1351	cmd_free(h, c, 0);
1352	return(0);
1353}
1354
1355/*
1356 * Commands are pre-allocated in a large block.  Here we use a simple bitmap
 1357 * scheme to suballocate them to the driver.  Operations that are not time
 1358 * critical (and can wait for kmalloc and possibly sleep) can pass 0 as the
 1359 * get_from_pool argument to get a freshly DMA-allocated command instead.
1360 */
1361static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1362{
1363	cmdlist_t * c;
1364	int i;
1365	dma_addr_t cmd_dhandle;
1366
1367	if (!get_from_pool) {
1368		c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev, 
1369			sizeof(cmdlist_t), &cmd_dhandle);
1370		if(c==NULL)
1371			return NULL;
1372	} else {
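		/* If another context claims the same bit between the search
		 * and the atomic set, test_and_set_bit() returns non-zero and
		 * we search again. */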
1373		do {
1374			i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1375			if (i == NR_CMDS)
1376				return NULL;
1377		} while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1378		c = h->cmd_pool + i;
1379		cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1380		h->nr_allocs++;
1381	}
1382
1383	memset(c, 0, sizeof(cmdlist_t));
1384	c->busaddr = cmd_dhandle; 
1385	return c;
1386}
1387
1388static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1389{
1390	int i;
1391
1392	if (!got_from_pool) {
1393		pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1394			c->busaddr);
1395	} else {
1396		i = c - h->cmd_pool;
1397		clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1398		h->nr_frees++;
1399	}
1400}
1401
1402/***********************************************************************
1403    name:        sendcmd
1404    Send a command to an IDA using the memory mapped FIFO interface
1405    and wait for it to complete.  
1406    This routine should only be called at init time.
1407***********************************************************************/
1408static int sendcmd(
1409	__u8	cmd,
1410	int	ctlr,
1411	void	*buff,
1412	size_t	size,
1413	unsigned int blk,
1414	unsigned int blkcnt,
1415	unsigned int log_unit )
1416{
1417	cmdlist_t *c;
1418	int complete;
1419	unsigned long temp;
1420	unsigned long i;
1421	ctlr_info_t *info_p = hba[ctlr];
1422
1423	c = cmd_alloc(info_p, 1);
1424	if(!c)
1425		return IO_ERROR;
1426	c->ctlr = ctlr;
1427	c->hdr.unit = log_unit;
1428	c->hdr.prio = 0;
1429	c->hdr.size = sizeof(rblk_t) >> 2;
1430	c->size += sizeof(rblk_t);
1431
1432	/* The request information. */
1433	c->req.hdr.next = 0;
1434	c->req.hdr.rcode = 0;
1435	c->req.bp = 0;
1436	c->req.hdr.sg_cnt = 1;
1437	c->req.hdr.reserved = 0;
1438	
1439	if (size == 0)
1440		c->req.sg[0].size = 512;
1441	else
1442		c->req.sg[0].size = size;
1443
1444	c->req.hdr.blk = blk;
1445	c->req.hdr.blk_cnt = blkcnt;
1446	c->req.hdr.cmd = (unsigned char) cmd;
1447	c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev, 
1448		buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1449	/*
1450	 * Disable interrupt
1451	 */
1452	info_p->access.set_intr_mask(info_p, 0);
1453	/* Make sure there is room in the command FIFO */
1454	/* Actually it should be completely empty at this time. */
1455	for (i = 200000; i > 0; i--) {
1456		temp = info_p->access.fifo_full(info_p);
1457		if (temp != 0) {
1458			break;
1459		}
1460		udelay(10);
1461DBG(
1462		printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1463			" waiting!\n", ctlr);
1464);
1465	} 
1466	/*
1467	 * Send the cmd
1468	 */
1469	info_p->access.submit_command(info_p, c);
1470	complete = pollcomplete(ctlr);
1471	
1472	pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, 
1473		c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1474	if (complete != 1) {
1475		if (complete != c->busaddr) {
1476			printk( KERN_WARNING
1477			"cpqarray ida%d: idaSendPciCmd "
1478		      "Invalid command list address returned! (%08lx)\n",
1479				ctlr, (unsigned long)complete);
1480			cmd_free(info_p, c, 1);
1481			return (IO_ERROR);
1482		}
1483	} else {
1484		printk( KERN_WARNING
 1485			"cpqarray ida%d: idaSendPciCmd Timed out, "
1486			"No command list address returned!\n",
1487			ctlr);
1488		cmd_free(info_p, c, 1);
1489		return (IO_ERROR);
1490	}
1491
1492	if (c->req.hdr.rcode & 0x00FE) {
1493		if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1494			printk( KERN_WARNING
1495			"cpqarray ida%d: idaSendPciCmd, error: "
1496				"Controller failed at init time "
1497				"cmd: 0x%x, return code = 0x%x\n",
1498				ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1499
1500			cmd_free(info_p, c, 1);
1501			return (IO_ERROR);
1502		}
1503	}
1504	cmd_free(info_p, c, 1);
1505	return (IO_OK);
1506}
1507
1508/*
1509 * revalidate_allvol is for online array config utilities.  After a
1510 * utility reconfigures the drives in the array, it can use this function
1511 * (through an ioctl) to make the driver zap any previous disk structs for
1512 * that controller and get new ones.
1513 *
1514 * Right now I'm using the getgeometry() function to do this, but this
1515 * function should probably be finer grained and allow you to revalidate one
 1516 * particular logical volume (instead of all of them on a particular
1517 * controller).
1518 */
1519static int revalidate_allvol(ctlr_info_t *host)
1520{
1521	int ctlr = host->ctlr;
1522	int i;
1523	unsigned long flags;
1524
1525	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1526	if (host->usage_count > 1) {
1527		spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1528		printk(KERN_WARNING "cpqarray: Device busy for volume"
1529			" revalidation (usage=%d)\n", host->usage_count);
1530		return -EBUSY;
1531	}
1532	host->usage_count++;
1533	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1534
1535	/*
1536	 * Set the partition and block size structures for all volumes
1537	 * on this controller to zero.  We will reread all of this data
1538	 */
1539	set_capacity(ida_gendisk[ctlr][0], 0);
1540	for (i = 1; i < NWD; i++) {
1541		struct gendisk *disk = ida_gendisk[ctlr][i];
1542		if (disk->flags & GENHD_FL_UP)
1543			del_gendisk(disk);
1544	}
1545	memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1546
1547	/*
1548	 * Tell the array controller not to give us any interrupts while
1549	 * we check the new geometry.  Then turn interrupts back on when
1550	 * we're done.
1551	 */
1552	host->access.set_intr_mask(host, 0);
1553	getgeometry(ctlr);
1554	host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1555
1556	for(i=0; i<NWD; i++) {
1557		struct gendisk *disk = ida_gendisk[ctlr][i];
1558		drv_info_t *drv = &host->drv[i];
1559		if (i && !drv->nr_blks)
1560			continue;
1561		blk_queue_logical_block_size(host->queue, drv->blk_size);
1562		set_capacity(disk, drv->nr_blks);
1563		disk->queue = host->queue;
1564		disk->private_data = drv;
1565		if (i)
1566			add_disk(disk);
1567	}
1568
1569	host->usage_count--;
1570	return 0;
1571}
1572
1573static int ida_revalidate(struct gendisk *disk)
1574{
1575	drv_info_t *drv = disk->private_data;
1576	set_capacity(disk, drv->nr_blks);
1577	return 0;
1578}
1579
1580/********************************************************************
1581    name: pollcomplete
1582    Wait polling for a command to complete.
1583    The memory mapped FIFO is polled for the completion.
1584    Used only at init time, interrupts disabled.
1585 ********************************************************************/
1586static int pollcomplete(int ctlr)
1587{
1588	int done;
1589	int i;
1590
1591	/* Wait (up to 2 seconds) for a command to complete */
1592
1593	for (i = 200000; i > 0; i--) {
1594		done = hba[ctlr]->access.command_completed(hba[ctlr]);
1595		if (done == 0) {
1596			udelay(10);	/* a short fixed delay */
1597		} else
1598			return (done);
1599	}
1600	/* Invalid address to tell caller we ran out of time */
1601	return 1;
1602}
1603/*****************************************************************
1604    start_fwbk
 1605    Starts the controller firmware's background processing.
1606    Currently only the Integrated Raid controller needs this done.
1607    If the PCI mem address registers are written to after this, 
1608	 data corruption may occur
1609*****************************************************************/
1610static void start_fwbk(int ctlr)
1611{
 1612	id_ctlr_t *id_ctlr_buf;
1613	int ret_code;
1614
1615	if(	(hba[ctlr]->board_id != 0x40400E11)
1616		&& (hba[ctlr]->board_id != 0x40480E11) )
1617
 1618	/* Not an Integrated Raid, so there is nothing for us to do */
1619		return;
1620	printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1621		" processing\n");
1622	/* Command does not return anything, but idasend command needs a 
1623		buffer */
1624	id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1625	if(id_ctlr_buf==NULL)
1626	{
1627		printk(KERN_WARNING "cpqarray: Out of memory. "
1628			"Unable to start background processing.\n");
1629		return;
1630	}		
1631	ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr, 
1632		id_ctlr_buf, 0, 0, 0, 0);
1633	if(ret_code != IO_OK)
1634		printk(KERN_WARNING "cpqarray: Unable to start"
1635			" background processing\n");
1636
1637	kfree(id_ctlr_buf);
1638}
1639/*****************************************************************
1640    getgeometry
1641    Get ida logical volume geometry from the controller 
 1642    This is a large bit of code which once existed in two flavors.
1643    It is used only at init time.
1644*****************************************************************/
1645static void getgeometry(int ctlr)
1646{				
1647	id_log_drv_t *id_ldrive;
1648	id_ctlr_t *id_ctlr_buf;
1649	sense_log_drv_stat_t *id_lstatus_buf;
1650	config_t *sense_config_buf;
1651	unsigned int log_unit, log_index;
1652	int ret_code, size;
1653	drv_info_t *drv;
1654	ctlr_info_t *info_p = hba[ctlr];
1655	int i;
1656
1657	info_p->log_drv_map = 0;	
1658	
1659	id_ldrive = kzalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1660	if (!id_ldrive)	{
1661		printk( KERN_ERR "cpqarray:  out of memory.\n");
1662		goto err_0;
1663	}
1664
1665	id_ctlr_buf = kzalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1666	if (!id_ctlr_buf) {
1667		printk( KERN_ERR "cpqarray:  out of memory.\n");
1668		goto err_1;
1669	}
1670
1671	id_lstatus_buf = kzalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1672	if (!id_lstatus_buf) {
1673		printk( KERN_ERR "cpqarray:  out of memory.\n");
1674		goto err_2;
1675	}
1676
1677	sense_config_buf = kzalloc(sizeof(config_t), GFP_KERNEL);
1678	if (!sense_config_buf) {
1679		printk( KERN_ERR "cpqarray:  out of memory.\n");
1680		goto err_3;
1681	}
1682
1683	info_p->phys_drives = 0;
1684	info_p->log_drv_map = 0;
1685	info_p->drv_assign_map = 0;
1686	info_p->drv_spare_map = 0;
1687	info_p->mp_failed_drv_map = 0;	/* only initialized here */
1688	/* Get controllers info for this logical drive */
1689	ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1690	if (ret_code == IO_ERROR) {
1691		/*
1692		 * If can't get controller info, set the logical drive map to 0,
1693		 * so the idastubopen will fail on all logical drives
1694		 * on the controller.
1695		 */
1696		printk(KERN_ERR "cpqarray: error sending ID controller\n");
1697                goto err_4;
1698        }
1699
1700	info_p->log_drives = id_ctlr_buf->nr_drvs;
1701	for(i=0;i<4;i++)
1702		info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1703	info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1704
1705	printk(" (%s)\n", info_p->product_name);
1706	/*
1707	 * Initialize logical drive map to zero
1708	 */
1709	log_index = 0;
1710	/*
1711	 * Get drive geometry for all logical drives
1712	 */
1713	if (id_ctlr_buf->nr_drvs > 16)
1714		printk(KERN_WARNING "cpqarray ida%d:  This driver supports "
 1715			"16 logical drives per controller.\n"
 1716			"  Additional drives will not be "
 1717			"detected.\n", ctlr);
1718
1719	for (log_unit = 0;
1720	     (log_index < id_ctlr_buf->nr_drvs)
1721	     && (log_unit < NWD);
1722	     log_unit++) {
1723		size = sizeof(sense_log_drv_stat_t);
1724
1725		/*
1726		   Send "Identify logical drive status" cmd
1727		 */
1728		ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1729			     ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1730		if (ret_code == IO_ERROR) {
1731			/*
1732			   If can't get logical drive status, set
1733			   the logical drive map to 0, so the
1734			   idastubopen will fail for all logical drives
1735			   on the controller. 
1736			 */
1737			info_p->log_drv_map = 0;	
1738			printk( KERN_WARNING
1739			     "cpqarray ida%d: idaGetGeometry - Controller"
1740				" failed to report status of logical drive %d\n"
1741			 "Access to this controller has been disabled\n",
1742				ctlr, log_unit);
1743                	goto err_4;
1744		}
1745		/*
1746		   Make sure the logical drive is configured
1747		 */
1748		if (id_lstatus_buf->status != LOG_NOT_CONF) {
1749			ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1750			       sizeof(id_log_drv_t), 0, 0, log_unit);
1751			/*
1752			   If error, the bit for this
1753			   logical drive won't be set and
1754			   idastubopen will return error. 
1755			 */
1756			if (ret_code != IO_ERROR) {
1757				drv = &info_p->drv[log_unit];
1758				drv->blk_size = id_ldrive->blk_size;
1759				drv->nr_blks = id_ldrive->nr_blks;
1760				drv->cylinders = id_ldrive->drv.cyl;
1761				drv->heads = id_ldrive->drv.heads;
1762				drv->sectors = id_ldrive->drv.sect_per_track;
1763				info_p->log_drv_map |=	(1 << log_unit);
1764
1765	printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1766		ctlr, log_unit, drv->blk_size, drv->nr_blks);
1767				ret_code = sendcmd(SENSE_CONFIG,
1768						  ctlr, sense_config_buf,
1769				 sizeof(config_t), 0, 0, log_unit);
1770				if (ret_code == IO_ERROR) {
1771					info_p->log_drv_map = 0;
1772                			printk(KERN_ERR "cpqarray: error sending sense config\n");
1773                			goto err_4;
1774				}
1775
1776				info_p->phys_drives =
1777				    sense_config_buf->ctlr_phys_drv;
1778				info_p->drv_assign_map
1779				    |= sense_config_buf->drv_asgn_map;
1780				info_p->drv_assign_map
1781				    |= sense_config_buf->spare_asgn_map;
1782				info_p->drv_spare_map
1783				    |= sense_config_buf->spare_asgn_map;
1784			}	/* end of if no error on id_ldrive */
1785			log_index = log_index + 1;
1786		}		/* end of if logical drive configured */
1787	}			/* end of for log_unit */
1788
1789	/* Free all the buffers and return */
1790err_4:
1791	kfree(sense_config_buf);
1792err_3:
1793  	kfree(id_lstatus_buf);
1794err_2:
1795	kfree(id_ctlr_buf);
1796err_1:
1797  	kfree(id_ldrive);
1798err_0:
1799	return;
1800}
1801
1802static void __exit cpqarray_exit(void)
1803{
1804	int i;
1805
1806	pci_unregister_driver(&cpqarray_pci_driver);
1807
1808	/* Double check that all controller entries have been removed */
1809	for(i=0; i<MAX_CTLR; i++) {
1810		if (hba[i] != NULL) {
1811			printk(KERN_WARNING "cpqarray: Removing EISA "
1812					"controller %d\n", i);
1813			cpqarray_remove_one_eisa(i);
1814		}
1815	}
1816
1817	remove_proc_entry("driver/cpqarray", NULL);
1818}
1819
1820module_init(cpqarray_init)
1821module_exit(cpqarray_exit)
 124	{ 0 }
 125};
 126
 127MODULE_DEVICE_TABLE(pci, cpqarray_pci_device_id);
 128
 129static struct gendisk *ida_gendisk[MAX_CTLR][NWD];
 130
 131/* Debug... */
 132#define DBG(s)	do { s } while(0)
 133/* Debug (general info)... */
 134#define DBGINFO(s) do { } while(0)
 135/* Debug Paranoid... */
 136#define DBGP(s)  do { } while(0)
 137/* Debug Extra Paranoid... */
 138#define DBGPX(s) do { } while(0)
 139
 140static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev);
 141static void __iomem *remap_pci_mem(ulong base, ulong size);
 142static int cpqarray_eisa_detect(void);
 143static int pollcomplete(int ctlr);
 144static void getgeometry(int ctlr);
 145static void start_fwbk(int ctlr);
 146
 147static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool);
 148static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool);
 149
 150static void free_hba(int i);
 151static int alloc_cpqarray_hba(void);
 152
 153static int sendcmd(
 154	__u8	cmd,
 155	int	ctlr,
 156	void	*buff,
 157	size_t	size,
 158	unsigned int blk,
 159	unsigned int blkcnt,
 160	unsigned int log_unit );
 161
 162static int ida_unlocked_open(struct block_device *bdev, fmode_t mode);
 163static void ida_release(struct gendisk *disk, fmode_t mode);
 164static int ida_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg);
 165static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo);
 166static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io);
 167
 168static void do_ida_request(struct request_queue *q);
 169static void start_io(ctlr_info_t *h);
 170
 171static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c);
 172static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c);
 173static inline void complete_command(cmdlist_t *cmd, int timeout);
 174
 175static irqreturn_t do_ida_intr(int irq, void *dev_id);
 176static void ida_timer(unsigned long tdata);
 177static int ida_revalidate(struct gendisk *disk);
 178static int revalidate_allvol(ctlr_info_t *host);
 179static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
 180
 181#ifdef CONFIG_PROC_FS
 182static void ida_procinit(int i);
 183#else
 184static void ida_procinit(int i) {}
 185#endif
 186
 187static inline drv_info_t *get_drv(struct gendisk *disk)
 188{
 189	return disk->private_data;
 190}
 191
 192static inline ctlr_info_t *get_host(struct gendisk *disk)
 193{
 194	return disk->queue->queuedata;
 195}
 196
 197
 198static const struct block_device_operations ida_fops  = {
 199	.owner		= THIS_MODULE,
 200	.open		= ida_unlocked_open,
 201	.release	= ida_release,
 202	.ioctl		= ida_ioctl,
 203	.getgeo		= ida_getgeo,
 204	.revalidate_disk= ida_revalidate,
 205};
 206
 207
 208#ifdef CONFIG_PROC_FS
 209
 210static struct proc_dir_entry *proc_array;
 211static const struct file_operations ida_proc_fops;
 212
 213/*
 214 * Get us a file in /proc/array that says something about each controller.
 215 * Create /proc/array if it doesn't exist yet.
 216 */
 217static void __init ida_procinit(int i)
 218{
 219	if (proc_array == NULL) {
 220		proc_array = proc_mkdir("driver/cpqarray", NULL);
 221		if (!proc_array) return;
 222	}
 223
 224	proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
 225}
 226
 227/*
 228 * Report information about this controller.
 229 */
 230static int ida_proc_show(struct seq_file *m, void *v)
 231{
 232	int i, ctlr;
 233	ctlr_info_t *h = (ctlr_info_t*)m->private;
 234	drv_info_t *drv;
 235#ifdef CPQ_PROC_PRINT_QUEUES
 236	cmdlist_t *c;
 237	unsigned long flags;
 238#endif
 239
 240	ctlr = h->ctlr;
 241	seq_printf(m, "%s:  Compaq %s Controller\n"
 242		"       Board ID: 0x%08lx\n"
 243		"       Firmware Revision: %c%c%c%c\n"
 244		"       Controller Sig: 0x%08lx\n"
 245		"       Memory Address: 0x%08lx\n"
 246		"       I/O Port: 0x%04x\n"
 247		"       IRQ: %d\n"
 248		"       Logical drives: %d\n"
 249		"       Physical drives: %d\n\n"
 250		"       Current Q depth: %d\n"
 251		"       Max Q depth since init: %d\n\n",
 252		h->devname, 
 253		h->product_name,
 254		(unsigned long)h->board_id,
 255		h->firm_rev[0], h->firm_rev[1], h->firm_rev[2], h->firm_rev[3],
 256		(unsigned long)h->ctlr_sig, (unsigned long)h->vaddr,
 257		(unsigned int) h->io_mem_addr, (unsigned int)h->intr,
 258		h->log_drives, h->phys_drives,
 259		h->Qdepth, h->maxQsinceinit);
 260
 261	seq_puts(m, "Logical Drive Info:\n");
 262
 263	for(i=0; i<h->log_drives; i++) {
 264		drv = &h->drv[i];
 265		seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
 266				ctlr, i, drv->blk_size, drv->nr_blks);
 267	}
 268
 269#ifdef CPQ_PROC_PRINT_QUEUES
 270	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); 
 271	seq_puts(m, "\nCurrent Queues:\n");
 272
 273	c = h->reqQ;
 274	seq_printf(m, "reqQ = %p", c);
 275	if (c) c=c->next;
 276	while(c && c != h->reqQ) {
 277		seq_printf(m, "->%p", c);
 278		c=c->next;
 279	}
 280
 281	c = h->cmpQ;
 282	seq_printf(m, "\ncmpQ = %p", c);
 283	if (c) c=c->next;
 284	while(c && c != h->cmpQ) {
 285		seq_printf(m, "->%p", c);
 286		c=c->next;
 287	}
 288
 289	seq_putc(m, '\n');
 290	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); 
 291#endif
 292	seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
 293			h->nr_allocs, h->nr_frees);
 294	return 0;
 295}
 296
 297static int ida_proc_open(struct inode *inode, struct file *file)
 298{
 299	return single_open(file, ida_proc_show, PDE_DATA(inode));
 300}
 301
 302static const struct file_operations ida_proc_fops = {
 303	.owner		= THIS_MODULE,
 304	.open		= ida_proc_open,
 305	.read		= seq_read,
 306	.llseek		= seq_lseek,
 307	.release	= single_release,
 308};
 309#endif /* CONFIG_PROC_FS */
 310
 311module_param_array(eisa, int, NULL, 0);
 312
 313static void release_io_mem(ctlr_info_t *c)
 314{
 315	/* if IO mem was not protected do nothing */
 316	if( c->io_mem_addr == 0)
 317		return;
 318	release_region(c->io_mem_addr, c->io_mem_length);
 319	c->io_mem_addr = 0;
 320	c->io_mem_length = 0;
 321}
 322
 323static void cpqarray_remove_one(int i)
 324{
 325	int j;
 326	char buff[4];
 327
 328	/* sendcmd will turn off interrupt, and send the flush...
 329	 * To write all data in the battery backed cache to disks
 330	 * no data returned, but don't want to send NULL to sendcmd */
 331	if( sendcmd(FLUSH_CACHE, i, buff, 4, 0, 0, 0))
 332	{
 333		printk(KERN_WARNING "Unable to flush cache on controller %d\n",
 334				i);
 335	}
 336	free_irq(hba[i]->intr, hba[i]);
 337	iounmap(hba[i]->vaddr);
 338	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
 339	del_timer(&hba[i]->timer);
 340	remove_proc_entry(hba[i]->devname, proc_array);
 341	pci_free_consistent(hba[i]->pci_dev,
 342			NR_CMDS * sizeof(cmdlist_t), (hba[i]->cmd_pool),
 343			hba[i]->cmd_pool_dhandle);
 344	kfree(hba[i]->cmd_pool_bits);
 345	for(j = 0; j < NWD; j++) {
 346		if (ida_gendisk[i][j]->flags & GENHD_FL_UP)
 347			del_gendisk(ida_gendisk[i][j]);
 348		put_disk(ida_gendisk[i][j]);
 349	}
 350	blk_cleanup_queue(hba[i]->queue);
 351	release_io_mem(hba[i]);
 352	free_hba(i);
 353}
 354
 355static void cpqarray_remove_one_pci(struct pci_dev *pdev)
 356{
 357	int i;
 358	ctlr_info_t *tmp_ptr;
 359
 360	if (pci_get_drvdata(pdev) == NULL) {
 361		printk( KERN_ERR "cpqarray: Unable to remove device \n");
 362		return;
 363	}
 364
 365	tmp_ptr = pci_get_drvdata(pdev);
 366	i = tmp_ptr->ctlr;
 367	if (hba[i] == NULL) {
 368		printk(KERN_ERR "cpqarray: controller %d appears to have"
 369			"already been removed \n", i);
 370		return;
 371        }
 372	pci_set_drvdata(pdev, NULL);
 373
 374	cpqarray_remove_one(i);
 375}
 376
 377/* removing an instance that was not removed automatically..
 378 * must be an eisa card.
 379 */
 380static void cpqarray_remove_one_eisa(int i)
 381{
 382	if (hba[i] == NULL) {
 383		printk(KERN_ERR "cpqarray: controller %d appears to have"
 384			"already been removed \n", i);
 385		return;
 386        }
 387	cpqarray_remove_one(i);
 388}
 389
 390/* pdev is NULL for eisa */
 391static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
 392{
 393	struct request_queue *q;
 394	int j;
 395
 396	/* 
 397	 * register block devices
 398	 * Find disks and fill in structs
 399	 * Get an interrupt, set the Q depth and get into /proc
 400	 */
 401
 402	/* If this successful it should insure that we are the only */
 403	/* instance of the driver */
 404	if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
 405		goto Enomem4;
 406	}
 407	hba[i]->access.set_intr_mask(hba[i], 0);
 408	if (request_irq(hba[i]->intr, do_ida_intr,
 409		IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
 410	{
 411		printk(KERN_ERR "cpqarray: Unable to get irq %d for %s\n",
 412				hba[i]->intr, hba[i]->devname);
 413		goto Enomem3;
 414	}
 415		
 416	for (j=0; j<NWD; j++) {
 417		ida_gendisk[i][j] = alloc_disk(1 << NWD_SHIFT);
 418		if (!ida_gendisk[i][j])
 419			goto Enomem2;
 420	}
 421
 422	hba[i]->cmd_pool = pci_alloc_consistent(
 423		hba[i]->pci_dev, NR_CMDS * sizeof(cmdlist_t),
 424		&(hba[i]->cmd_pool_dhandle));
 425	hba[i]->cmd_pool_bits = kcalloc(
 426		DIV_ROUND_UP(NR_CMDS, BITS_PER_LONG), sizeof(unsigned long),
 427		GFP_KERNEL);
 428
 429	if (!hba[i]->cmd_pool_bits || !hba[i]->cmd_pool)
 430			goto Enomem1;
 431
 432	memset(hba[i]->cmd_pool, 0, NR_CMDS * sizeof(cmdlist_t));
 433	printk(KERN_INFO "cpqarray: Finding drives on %s",
 434		hba[i]->devname);
 435
 436	spin_lock_init(&hba[i]->lock);
 437	q = blk_init_queue(do_ida_request, &hba[i]->lock);
 438	if (!q)
 439		goto Enomem1;
 440
 441	hba[i]->queue = q;
 442	q->queuedata = hba[i];
 443
 444	getgeometry(i);
 445	start_fwbk(i);
 446
 447	ida_procinit(i);
 448
 449	if (pdev)
 450		blk_queue_bounce_limit(q, hba[i]->pci_dev->dma_mask);
 451
 452	/* This is a hardware imposed limit. */
 453	blk_queue_max_segments(q, SG_MAX);
 454
 455	init_timer(&hba[i]->timer);
 456	hba[i]->timer.expires = jiffies + IDA_TIMER;
 457	hba[i]->timer.data = (unsigned long)hba[i];
 458	hba[i]->timer.function = ida_timer;
 459	add_timer(&hba[i]->timer);
 460
 461	/* Enable IRQ now that spinlock and rate limit timer are set up */
 462	hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
 463
 464	for(j=0; j<NWD; j++) {
 465		struct gendisk *disk = ida_gendisk[i][j];
 466		drv_info_t *drv = &hba[i]->drv[j];
 467		sprintf(disk->disk_name, "ida/c%dd%d", i, j);
 468		disk->major = COMPAQ_SMART2_MAJOR + i;
 469		disk->first_minor = j<<NWD_SHIFT;
 470		disk->fops = &ida_fops;
 471		if (j && !drv->nr_blks)
 472			continue;
 473		blk_queue_logical_block_size(hba[i]->queue, drv->blk_size);
 474		set_capacity(disk, drv->nr_blks);
 475		disk->queue = hba[i]->queue;
 476		disk->private_data = drv;
 477		add_disk(disk);
 478	}
 479
 480	/* done ! */
 481	return(i);
 482
 483Enomem1:
 484	nr_ctlr = i; 
 485	kfree(hba[i]->cmd_pool_bits);
 486	if (hba[i]->cmd_pool)
 487		pci_free_consistent(hba[i]->pci_dev, NR_CMDS*sizeof(cmdlist_t), 
 488				    hba[i]->cmd_pool, hba[i]->cmd_pool_dhandle);
 489Enomem2:
 490	while (j--) {
 491		put_disk(ida_gendisk[i][j]);
 492		ida_gendisk[i][j] = NULL;
 493	}
 494	free_irq(hba[i]->intr, hba[i]);
 495Enomem3:
 496	unregister_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname);
 497Enomem4:
 498	if (pdev)
 499		pci_set_drvdata(pdev, NULL);
 500	release_io_mem(hba[i]);
 501	free_hba(i);
 502
 503	printk( KERN_ERR "cpqarray: out of memory");
 504
 505	return -1;
 506}
 507
 508static int cpqarray_init_one(struct pci_dev *pdev,
 509			     const struct pci_device_id *ent)
 510{
 511	int i;
 512
 513	printk(KERN_DEBUG "cpqarray: Device 0x%x has been found at"
 514			" bus %d dev %d func %d\n",
 515			pdev->device, pdev->bus->number, PCI_SLOT(pdev->devfn),
 516			PCI_FUNC(pdev->devfn));
 517	i = alloc_cpqarray_hba();
 518	if( i < 0 )
 519		return (-1);
 520	memset(hba[i], 0, sizeof(ctlr_info_t));
 521	sprintf(hba[i]->devname, "ida%d", i);
 522	hba[i]->ctlr = i;
 523	/* Initialize the pdev driver private data */
 524	pci_set_drvdata(pdev, hba[i]);
 525
 526	if (cpqarray_pci_init(hba[i], pdev) != 0) {
 527		pci_set_drvdata(pdev, NULL);
 528		release_io_mem(hba[i]);
 529		free_hba(i);
 530		return -1;
 531	}
 532
 533	return (cpqarray_register_ctlr(i, pdev));
 534}
 535
 536static struct pci_driver cpqarray_pci_driver = {
 537	.name = "cpqarray",
 538	.probe = cpqarray_init_one,
 539	.remove = cpqarray_remove_one_pci,
 540	.id_table = cpqarray_pci_device_id,
 541};
 542
 543/*
 544 *  This is it.  Find all the controllers and register them.
 545 *  returns the number of block devices registered.
 546 */
 547static int __init cpqarray_init(void)
 548{
 549	int num_cntlrs_reg = 0;
 550	int i;
 551	int rc = 0;
 552
 553	/* detect controllers */
 554	printk(DRIVER_NAME "\n");
 555
 556	rc = pci_register_driver(&cpqarray_pci_driver);
 557	if (rc)
 558		return rc;
 559	cpqarray_eisa_detect();
 560	
 561	for (i=0; i < MAX_CTLR; i++) {
 562		if (hba[i] != NULL)
 563			num_cntlrs_reg++;
 564	}
 565
 566	if (num_cntlrs_reg)
 567		return 0;
 568	else {
 569		pci_unregister_driver(&cpqarray_pci_driver);
 570		return -ENODEV;
 571	}
 572}
 573
 574/* Function to find the first free pointer into our hba[] array */
 575/* Returns -1 if no free entries are left.  */
 576static int alloc_cpqarray_hba(void)
 577{
 578	int i;
 579
 580	for(i=0; i< MAX_CTLR; i++) {
 581		if (hba[i] == NULL) {
 582			hba[i] = kmalloc(sizeof(ctlr_info_t), GFP_KERNEL);
 583			if(hba[i]==NULL) {
 584				printk(KERN_ERR "cpqarray: out of memory.\n");
 585				return (-1);
 586			}
 587			return (i);
 588		}
 589	}
 590	printk(KERN_WARNING "cpqarray: This driver supports a maximum"
 591		" of 8 controllers.\n");
 592	return(-1);
 593}
 594
 595static void free_hba(int i)
 596{
 597	kfree(hba[i]);
 598	hba[i]=NULL;
 599}
 600
 601/*
 602 * Find the IO address of the controller, its IRQ and so forth.  Fill
 603 * in some basic stuff into the ctlr_info_t structure.
 604 */
 605static int cpqarray_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
 606{
 607	ushort vendor_id, device_id, command;
 608	unchar cache_line_size, latency_timer;
 609	unchar irq, revision;
 610	unsigned long addr[6];
 611	__u32 board_id;
 612
 613	int i;
 614
 615	c->pci_dev = pdev;
 616	pci_set_master(pdev);
 617	if (pci_enable_device(pdev)) {
 618		printk(KERN_ERR "cpqarray: Unable to Enable PCI device\n");
 619		return -1;
 620	}
 621	vendor_id = pdev->vendor;
 622	device_id = pdev->device;
 623	revision  = pdev->revision;
 624	irq = pdev->irq;
 625
 626	for(i=0; i<6; i++)
 627		addr[i] = pci_resource_start(pdev, i);
 628
 629	if (pci_set_dma_mask(pdev, CPQARRAY_DMA_MASK) != 0)
 630	{
 631		printk(KERN_ERR "cpqarray: Unable to set DMA mask\n");
 632		return -1;
 633	}
 634
 635	pci_read_config_word(pdev, PCI_COMMAND, &command);
 
 636	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache_line_size);
 637	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_timer);
 638
 639	pci_read_config_dword(pdev, 0x2c, &board_id);
 640
 641	/* check to see if controller has been disabled */
 642	if(!(command & 0x02)) {
 643		printk(KERN_WARNING
 644			"cpqarray: controller appears to be disabled\n");
 645		return(-1);
 646	}
 647
 648DBGINFO(
 649	printk("vendor_id = %x\n", vendor_id);
 650	printk("device_id = %x\n", device_id);
 651	printk("command = %x\n", command);
 652	for(i=0; i<6; i++)
 653		printk("addr[%d] = %lx\n", i, addr[i]);
 654	printk("revision = %x\n", revision);
 655	printk("irq = %x\n", irq);
 656	printk("cache_line_size = %x\n", cache_line_size);
 657	printk("latency_timer = %x\n", latency_timer);
 658	printk("board_id = %x\n", board_id);
 659);
 660
 661	c->intr = irq;
 662
 663	for(i=0; i<6; i++) {
 664		if (pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)
 665		{ /* IO space */
 666			c->io_mem_addr = addr[i];
 667			c->io_mem_length = pci_resource_end(pdev, i)
 668				- pci_resource_start(pdev, i) + 1;
 669			if(!request_region( c->io_mem_addr, c->io_mem_length,
 670				"cpqarray"))
 671			{
 672				printk( KERN_WARNING "cpqarray I/O memory range already in use addr %lx length = %ld\n", c->io_mem_addr, c->io_mem_length);
 673				c->io_mem_addr = 0;
 674				c->io_mem_length = 0;
 675			}
 676			break;
 677		}
 678	}
 679
 680	c->paddr = 0;
 681	for(i=0; i<6; i++)
 682		if (!(pci_resource_flags(pdev, i) &
 683				PCI_BASE_ADDRESS_SPACE_IO)) {
 684			c->paddr = pci_resource_start (pdev, i);
 685			break;
 686		}
 687	if (!c->paddr)
 688		return -1;
 689	c->vaddr = remap_pci_mem(c->paddr, 128);
 690	if (!c->vaddr)
 691		return -1;
 692	c->board_id = board_id;
 693
 694	for(i=0; i<NR_PRODUCTS; i++) {
 695		if (board_id == products[i].board_id) {
 696			c->product_name = products[i].product_name;
 697			c->access = *(products[i].access);
 698			break;
 699		}
 700	}
 701	if (i == NR_PRODUCTS) {
 702		printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
 703			" to access the SMART Array controller %08lx\n", 
 704				(unsigned long)board_id);
 705		return -1;
 706	}
 707
 708	return 0;
 709}
 710
 711/*
 712 * Map (physical) PCI mem into (virtual) kernel space
 713 */
 714static void __iomem *remap_pci_mem(ulong base, ulong size)
 715{
 716        ulong page_base        = ((ulong) base) & PAGE_MASK;
 717        ulong page_offs        = ((ulong) base) - page_base;
 718        void __iomem *page_remapped    = ioremap(page_base, page_offs+size);
 719
 720        return (page_remapped ? (page_remapped + page_offs) : NULL);
 721}
 722
 723#ifndef MODULE
 724/*
 725 * Config string is a comma separated set of i/o addresses of EISA cards.
 726 */
 727static int cpqarray_setup(char *str)
 728{
 729	int i, ints[9];
 730
 731	(void)get_options(str, ARRAY_SIZE(ints), ints);
 732
 733	for(i=0; i<ints[0] && i<8; i++)
 734		eisa[i] = ints[i+1];
 735	return 1;
 736}
 737
 738__setup("smart2=", cpqarray_setup);
 739
 740#endif
 741
 742/*
 743 * Find an EISA controller's signature.  Set up an hba if we find it.
 744 */
 745static int cpqarray_eisa_detect(void)
 746{
 747	int i=0, j;
 748	__u32 board_id;
 749	int intr;
 750	int ctlr;
 751	int num_ctlr = 0;
 752
 753	while(i<8 && eisa[i]) {
 754		ctlr = alloc_cpqarray_hba();
 755		if(ctlr == -1)
 756			break;
 757		board_id = inl(eisa[i]+0xC80);
 758		for(j=0; j < NR_PRODUCTS; j++)
 759			if (board_id == products[j].board_id) 
 760				break;
 761
 762		if (j == NR_PRODUCTS) {
 763			printk(KERN_WARNING "cpqarray: Sorry, I don't know how"
 764				" to access the SMART Array controller %08lx\n",				 (unsigned long)board_id);
 765			continue;
 766		}
 767
 768		memset(hba[ctlr], 0, sizeof(ctlr_info_t));
 769		hba[ctlr]->io_mem_addr = eisa[i];
 770		hba[ctlr]->io_mem_length = 0x7FF;
 771		if(!request_region(hba[ctlr]->io_mem_addr,
 772				hba[ctlr]->io_mem_length,
 773				"cpqarray"))
 774		{
 775			printk(KERN_WARNING "cpqarray: I/O range already in "
 776					"use addr = %lx length = %ld\n",
 777					hba[ctlr]->io_mem_addr,
 778					hba[ctlr]->io_mem_length);
 779			free_hba(ctlr);
 780			continue;
 781		}
 782
 783		/*
 784		 * Read the config register to find our interrupt
 785		 */
 786		intr = inb(eisa[i]+0xCC0) >> 4;
 787		if (intr & 1) intr = 11;
 788		else if (intr & 2) intr = 10;
 789		else if (intr & 4) intr = 14;
 790		else if (intr & 8) intr = 15;
 791		
 792		hba[ctlr]->intr = intr;
 793		sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
 794		hba[ctlr]->product_name = products[j].product_name;
 795		hba[ctlr]->access = *(products[j].access);
 796		hba[ctlr]->ctlr = ctlr;
 797		hba[ctlr]->board_id = board_id;
 798		hba[ctlr]->pci_dev = NULL; /* not PCI */
 799
 800DBGINFO(
 801	printk("i = %d, j = %d\n", i, j);
 802	printk("irq = %x\n", intr);
 803	printk("product name = %s\n", products[j].product_name);
 804	printk("board_id = %x\n", board_id);
 805);
 806
 807		num_ctlr++;
 808		i++;
 809
 810		if (cpqarray_register_ctlr(ctlr, NULL) == -1)
 811			printk(KERN_WARNING
 812				"cpqarray: Can't register EISA controller %d\n",
 813				ctlr);
 814
 815	}
 816
 817	return num_ctlr;
 818}
 819
 820/*
 821 * Open.  Make sure the device is really there.
 822 */
 823static int ida_open(struct block_device *bdev, fmode_t mode)
 824{
 825	drv_info_t *drv = get_drv(bdev->bd_disk);
 826	ctlr_info_t *host = get_host(bdev->bd_disk);
 827
 828	DBGINFO(printk("ida_open %s\n", bdev->bd_disk->disk_name));
 829	/*
 830	 * Root is allowed to open raw volume zero even if it's not configured
 831	 * so array config can still work.  I don't think I really like this,
 832	 * but I'm already using way to many device nodes to claim another one
 833	 * for "raw controller".
 834	 */
 835	if (!drv->nr_blks) {
 836		if (!capable(CAP_SYS_RAWIO))
 837			return -ENXIO;
 838		if (!capable(CAP_SYS_ADMIN) && drv != host->drv)
 839			return -ENXIO;
 840	}
 841	host->usage_count++;
 842	return 0;
 843}
 844
 845static int ida_unlocked_open(struct block_device *bdev, fmode_t mode)
 846{
 847	int ret;
 848
 849	mutex_lock(&cpqarray_mutex);
 850	ret = ida_open(bdev, mode);
 851	mutex_unlock(&cpqarray_mutex);
 852
 853	return ret;
 854}
 855
 856/*
 857 * Close.  Sync first.
 858 */
 859static void ida_release(struct gendisk *disk, fmode_t mode)
 860{
 861	ctlr_info_t *host;
 862
 863	mutex_lock(&cpqarray_mutex);
 864	host = get_host(disk);
 865	host->usage_count--;
 866	mutex_unlock(&cpqarray_mutex);
 
 
 867}
 868
 869/*
 870 * Enqueuing and dequeuing functions for cmdlists.
 871 */
 872static inline void addQ(cmdlist_t **Qptr, cmdlist_t *c)
 873{
 874	if (*Qptr == NULL) {
 875		*Qptr = c;
 876		c->next = c->prev = c;
 877	} else {
 878		c->prev = (*Qptr)->prev;
 879		c->next = (*Qptr);
 880		(*Qptr)->prev->next = c;
 881		(*Qptr)->prev = c;
 882	}
 883}
 884
 885static inline cmdlist_t *removeQ(cmdlist_t **Qptr, cmdlist_t *c)
 886{
 887	if (c && c->next != c) {
 888		if (*Qptr == c) *Qptr = c->next;
 889		c->prev->next = c->next;
 890		c->next->prev = c->prev;
 891	} else {
 892		*Qptr = NULL;
 893	}
 894	return c;
 895}
 896
 897/*
 898 * Get a request and submit it to the controller.
 899 * This routine needs to grab all the requests it possibly can from the
 900 * req Q and submit them.  Interrupts are off (and need to be off) when you
 901 * are in here (either via the dummy do_ida_request functions or by being
 902 * called from the interrupt handler
 903 */
 904static void do_ida_request(struct request_queue *q)
 905{
 906	ctlr_info_t *h = q->queuedata;
 907	cmdlist_t *c;
 908	struct request *creq;
 909	struct scatterlist tmp_sg[SG_MAX];
 910	int i, dir, seg;
 911
 912queue_next:
 913	creq = blk_peek_request(q);
 914	if (!creq)
 915		goto startio;
 916
 917	BUG_ON(creq->nr_phys_segments > SG_MAX);
 918
 919	if ((c = cmd_alloc(h,1)) == NULL)
 920		goto startio;
 921
 922	blk_start_request(creq);
 923
 924	c->ctlr = h->ctlr;
 925	c->hdr.unit = (drv_info_t *)(creq->rq_disk->private_data) - h->drv;
 926	c->hdr.size = sizeof(rblk_t) >> 2;
 927	c->size += sizeof(rblk_t);
 928
 929	c->req.hdr.blk = blk_rq_pos(creq);
 930	c->rq = creq;
 931DBGPX(
 932	printk("sector=%d, nr_sectors=%u\n",
 933	       blk_rq_pos(creq), blk_rq_sectors(creq));
 934);
 935	sg_init_table(tmp_sg, SG_MAX);
 936	seg = blk_rq_map_sg(q, creq, tmp_sg);
 937
 938	/* Now do all the DMA Mappings */
 939	if (rq_data_dir(creq) == READ)
 940		dir = PCI_DMA_FROMDEVICE;
 941	else
 942		dir = PCI_DMA_TODEVICE;
 943	for( i=0; i < seg; i++)
 944	{
 945		c->req.sg[i].size = tmp_sg[i].length;
 946		c->req.sg[i].addr = (__u32) pci_map_page(h->pci_dev,
 947						 sg_page(&tmp_sg[i]),
 948						 tmp_sg[i].offset,
 949						 tmp_sg[i].length, dir);
 950	}
 951DBGPX(	printk("Submitting %u sectors in %d segments\n", blk_rq_sectors(creq), seg); );
 952	c->req.hdr.sg_cnt = seg;
 953	c->req.hdr.blk_cnt = blk_rq_sectors(creq);
 954	c->req.hdr.cmd = (rq_data_dir(creq) == READ) ? IDA_READ : IDA_WRITE;
 955	c->type = CMD_RWREQ;
 956
 957	/* Put the request on the tail of the request queue */
 958	addQ(&h->reqQ, c);
 959	h->Qdepth++;
 960	if (h->Qdepth > h->maxQsinceinit) 
 961		h->maxQsinceinit = h->Qdepth;
 962
 963	goto queue_next;
 964
 965startio:
 966	start_io(h);
 967}
 968
 969/* 
 970 * start_io submits everything on a controller's request queue
 971 * and moves it to the completion queue.
 972 *
 973 * Interrupts had better be off if you're in here
 974 */
 975static void start_io(ctlr_info_t *h)
 976{
 977	cmdlist_t *c;
 978
 979	while((c = h->reqQ) != NULL) {
 980		/* Can't do anything if we're busy */
 981		if (h->access.fifo_full(h) == 0)
 982			return;
 983
 984		/* Get the first entry from the request Q */
 985		removeQ(&h->reqQ, c);
 986		h->Qdepth--;
 987	
 988		/* Tell the controller to do our bidding */
 989		h->access.submit_command(h, c);
 990
 991		/* Get onto the completion Q */
 992		addQ(&h->cmpQ, c);
 993	}
 994}
 995
 996/*
 997 * Mark all buffers that cmd was responsible for
 998 */
 999static inline void complete_command(cmdlist_t *cmd, int timeout)
1000{
1001	struct request *rq = cmd->rq;
1002	int error = 0;
1003	int i, ddir;
1004
1005	if (cmd->req.hdr.rcode & RCODE_NONFATAL &&
1006	   (hba[cmd->ctlr]->misc_tflags & MISC_NONFATAL_WARN) == 0) {
1007		printk(KERN_NOTICE "Non Fatal error on ida/c%dd%d\n",
1008				cmd->ctlr, cmd->hdr.unit);
1009		hba[cmd->ctlr]->misc_tflags |= MISC_NONFATAL_WARN;
1010	}
1011	if (cmd->req.hdr.rcode & RCODE_FATAL) {
1012		printk(KERN_WARNING "Fatal error on ida/c%dd%d\n",
1013				cmd->ctlr, cmd->hdr.unit);
1014		error = -EIO;
1015	}
1016	if (cmd->req.hdr.rcode & RCODE_INVREQ) {
1017				printk(KERN_WARNING "Invalid request on ida/c%dd%d = (cmd=%x sect=%d cnt=%d sg=%d ret=%x)\n",
1018				cmd->ctlr, cmd->hdr.unit, cmd->req.hdr.cmd,
1019				cmd->req.hdr.blk, cmd->req.hdr.blk_cnt,
1020				cmd->req.hdr.sg_cnt, cmd->req.hdr.rcode);
1021		error = -EIO;
1022	}
1023	if (timeout)
1024		error = -EIO;
1025	/* unmap the DMA mapping for all the scatter gather elements */
1026	if (cmd->req.hdr.cmd == IDA_READ)
1027		ddir = PCI_DMA_FROMDEVICE;
1028	else
1029		ddir = PCI_DMA_TODEVICE;
1030        for(i=0; i<cmd->req.hdr.sg_cnt; i++)
1031                pci_unmap_page(hba[cmd->ctlr]->pci_dev, cmd->req.sg[i].addr,
1032				cmd->req.sg[i].size, ddir);
1033
1034	DBGPX(printk("Done with %p\n", rq););
1035	__blk_end_request_all(rq, error);
1036}
1037
1038/*
1039 *  The controller will interrupt us upon completion of commands.
1040 *  Find the command on the completion queue, remove it, tell the OS and
1041 *  try to queue up more IO
1042 */
1043static irqreturn_t do_ida_intr(int irq, void *dev_id)
1044{
1045	ctlr_info_t *h = dev_id;
1046	cmdlist_t *c;
1047	unsigned long istat;
1048	unsigned long flags;
1049	__u32 a,a1;
1050
1051	istat = h->access.intr_pending(h);
1052	/* Is this interrupt for us? */
1053	if (istat == 0)
1054		return IRQ_NONE;
1055
1056	/*
1057	 * If there are completed commands in the completion queue,
1058	 * we had better do something about it.
1059	 */
1060	spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
1061	if (istat & FIFO_NOT_EMPTY) {
1062		while((a = h->access.command_completed(h))) {
1063			a1 = a; a &= ~3;
1064			if ((c = h->cmpQ) == NULL)
1065			{  
1066				printk(KERN_WARNING "cpqarray: Completion of %08lx ignored\n", (unsigned long)a1);
1067				continue;	
1068			} 
1069			while(c->busaddr != a) {
1070				c = c->next;
1071				if (c == h->cmpQ) 
1072					break;
1073			}
1074			/*
1075			 * If we've found the command, take it off the
1076			 * completion Q and free it
1077			 */
1078			if (c->busaddr == a) {
1079				removeQ(&h->cmpQ, c);
1080				/*  Check for invalid command.
1081                                 *  Controller returns command error,
1082                                 *  But rcode = 0.
1083                                 */
1084
1085				if((a1 & 0x03) && (c->req.hdr.rcode == 0))
1086                                {
1087                                	c->req.hdr.rcode = RCODE_INVREQ;
1088                                }
1089				if (c->type == CMD_RWREQ) {
1090					complete_command(c, 0);
1091					cmd_free(h, c, 1);
1092				} else if (c->type == CMD_IOCTL_PEND) {
1093					c->type = CMD_IOCTL_DONE;
1094				}
1095				continue;
1096			}
1097		}
1098	}
1099
1100	/*
1101	 * See if we can queue up some more IO
1102	 */
1103	do_ida_request(h->queue);
1104	spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); 
1105	return IRQ_HANDLED;
1106}
1107
1108/*
1109 * This timer was for timing out requests that haven't happened after
1110 * IDA_TIMEOUT.  That wasn't such a good idea.  This timer is used to
1111 * reset a flags structure so we don't flood the user with
1112 * "Non-Fatal error" messages.
1113 */
1114static void ida_timer(unsigned long tdata)
1115{
1116	ctlr_info_t *h = (ctlr_info_t*)tdata;
1117
1118	h->timer.expires = jiffies + IDA_TIMER;
1119	add_timer(&h->timer);
1120	h->misc_tflags = 0;
1121}
1122
1123static int ida_getgeo(struct block_device *bdev, struct hd_geometry *geo)
1124{
1125	drv_info_t *drv = get_drv(bdev->bd_disk);
1126
1127	if (drv->cylinders) {
1128		geo->heads = drv->heads;
1129		geo->sectors = drv->sectors;
1130		geo->cylinders = drv->cylinders;
1131	} else {
1132		geo->heads = 0xff;
1133		geo->sectors = 0x3f;
1134		geo->cylinders = drv->nr_blks / (0xff*0x3f);
1135	}
1136
1137	return 0;
1138}
1139
1140/*
1141 *  ida_ioctl does some miscellaneous stuff like reporting drive geometry,
1142 *  setting readahead and submitting commands from userspace to the controller.
1143 */
1144static int ida_locked_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg)
1145{
1146	drv_info_t *drv = get_drv(bdev->bd_disk);
1147	ctlr_info_t *host = get_host(bdev->bd_disk);
1148	int error;
1149	ida_ioctl_t __user *io = (ida_ioctl_t __user *)arg;
1150	ida_ioctl_t *my_io;
1151
1152	switch(cmd) {
1153	case IDAGETDRVINFO:
1154		if (copy_to_user(&io->c.drv, drv, sizeof(drv_info_t)))
1155			return -EFAULT;
1156		return 0;
1157	case IDAPASSTHRU:
1158		if (!capable(CAP_SYS_RAWIO))
1159			return -EPERM;
1160		my_io = kmalloc(sizeof(ida_ioctl_t), GFP_KERNEL);
1161		if (!my_io)
1162			return -ENOMEM;
1163		error = -EFAULT;
1164		if (copy_from_user(my_io, io, sizeof(*my_io)))
1165			goto out_passthru;
1166		error = ida_ctlr_ioctl(host, drv - host->drv, my_io);
1167		if (error)
1168			goto out_passthru;
1169		error = -EFAULT;
1170		if (copy_to_user(io, my_io, sizeof(*my_io)))
1171			goto out_passthru;
1172		error = 0;
1173out_passthru:
1174		kfree(my_io);
1175		return error;
1176	case IDAGETCTLRSIG:
1177		if (!arg) return -EINVAL;
1178		if (put_user(host->ctlr_sig, (int __user *)arg))
1179			return -EFAULT;
1180		return 0;
1181	case IDAREVALIDATEVOLS:
1182		if (MINOR(bdev->bd_dev) != 0)
1183			return -ENXIO;
1184		return revalidate_allvol(host);
1185	case IDADRIVERVERSION:
1186		if (!arg) return -EINVAL;
1187		if (put_user(DRIVER_VERSION, (unsigned long __user *)arg))
1188			return -EFAULT;
1189		return 0;
1190	case IDAGETPCIINFO:
1191	{
1192		
1193		ida_pci_info_struct pciinfo;
1194
1195		if (!arg) return -EINVAL;
1196		memset(&pciinfo, 0, sizeof(pciinfo));
1197		pciinfo.bus = host->pci_dev->bus->number;
1198		pciinfo.dev_fn = host->pci_dev->devfn;
1199		pciinfo.board_id = host->board_id;
1200		if(copy_to_user((void __user *) arg, &pciinfo,  
1201			sizeof( ida_pci_info_struct)))
1202				return -EFAULT;
1203		return(0);
1204	}	
1205
1206	default:
1207		return -EINVAL;
1208	}
1209		
1210}
1211
1212static int ida_ioctl(struct block_device *bdev, fmode_t mode,
1213			     unsigned int cmd, unsigned long param)
1214{
1215	int ret;
1216
1217	mutex_lock(&cpqarray_mutex);
1218	ret = ida_locked_ioctl(bdev, mode, cmd, param);
1219	mutex_unlock(&cpqarray_mutex);
1220
1221	return ret;
1222}
1223
1224/*
1225 * ida_ctlr_ioctl is for passing commands to the controller from userspace.
1226 * The command block (io) has already been copied to kernel space for us,
1227 * however, any elements in the sglist need to be copied to kernel space
1228 * or copied back to userspace.
1229 *
1230 * Only root may perform a controller passthru command, however I'm not doing
1231 * any serious sanity checking on the arguments.  Doing an IDA_WRITE_MEDIA and
1232 * putting a 64M buffer in the sglist is probably a *bad* idea.
1233 */
1234static int ida_ctlr_ioctl(ctlr_info_t *h, int dsk, ida_ioctl_t *io)
1235{
1236	int ctlr = h->ctlr;
1237	cmdlist_t *c;
1238	void *p = NULL;
1239	unsigned long flags;
1240	int error;
1241
1242	if ((c = cmd_alloc(h, 0)) == NULL)
1243		return -ENOMEM;
1244	c->ctlr = ctlr;
1245	c->hdr.unit = (io->unit & UNITVALID) ? (io->unit & ~UNITVALID) : dsk;
1246	c->hdr.size = sizeof(rblk_t) >> 2;
1247	c->size += sizeof(rblk_t);
1248
1249	c->req.hdr.cmd = io->cmd;
1250	c->req.hdr.blk = io->blk;
1251	c->req.hdr.blk_cnt = io->blk_cnt;
1252	c->type = CMD_IOCTL_PEND;
1253
1254	/* Pre submit processing */
1255	switch(io->cmd) {
1256	case PASSTHRU_A:
1257		p = memdup_user(io->sg[0].addr, io->sg[0].size);
1258		if (IS_ERR(p)) {
1259			error = PTR_ERR(p);
1260			cmd_free(h, c, 0);
1261			return error;
1262		}
1263		c->req.hdr.blk = pci_map_single(h->pci_dev, &(io->c), 
1264				sizeof(ida_ioctl_t), 
1265				PCI_DMA_BIDIRECTIONAL);
1266		c->req.sg[0].size = io->sg[0].size;
1267		c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
1268			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1269		c->req.hdr.sg_cnt = 1;
1270		break;
1271	case IDA_READ:
1272	case READ_FLASH_ROM:
1273	case SENSE_CONTROLLER_PERFORMANCE:
1274		p = kmalloc(io->sg[0].size, GFP_KERNEL);
1275		if (!p) 
1276		{ 
1277                        error = -ENOMEM; 
1278                        cmd_free(h, c, 0);
1279                        return(error);
1280                }
1281
1282		c->req.sg[0].size = io->sg[0].size;
1283		c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
1284			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); 
1285		c->req.hdr.sg_cnt = 1;
1286		break;
1287	case IDA_WRITE:
1288	case IDA_WRITE_MEDIA:
1289	case DIAG_PASS_THRU:
1290	case COLLECT_BUFFER:
1291	case WRITE_FLASH_ROM:
1292		p = memdup_user(io->sg[0].addr, io->sg[0].size);
1293		if (IS_ERR(p)) {
1294			error = PTR_ERR(p);
1295			cmd_free(h, c, 0);
1296			return error;
1297                }
1298		c->req.sg[0].size = io->sg[0].size;
1299		c->req.sg[0].addr = pci_map_single(h->pci_dev, p, 
1300			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL); 
1301		c->req.hdr.sg_cnt = 1;
1302		break;
1303	default:
1304		c->req.sg[0].size = sizeof(io->c);
1305		c->req.sg[0].addr = pci_map_single(h->pci_dev,&io->c, 
1306			c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1307		c->req.hdr.sg_cnt = 1;
1308	}
1309	
1310	/* Put the request on the tail of the request queue */
1311	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1312	addQ(&h->reqQ, c);
1313	h->Qdepth++;
1314	start_io(h);
1315	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1316
1317	/* Wait for completion */
1318	while(c->type != CMD_IOCTL_DONE)
1319		schedule();
1320
1321	/* Unmap the DMA  */
1322	pci_unmap_single(h->pci_dev, c->req.sg[0].addr, c->req.sg[0].size, 
1323		PCI_DMA_BIDIRECTIONAL);
1324	/* Post submit processing */
1325	switch(io->cmd) {
1326	case PASSTHRU_A:
1327		pci_unmap_single(h->pci_dev, c->req.hdr.blk,
1328                                sizeof(ida_ioctl_t),
1329                                PCI_DMA_BIDIRECTIONAL);
1330	case IDA_READ:
1331	case DIAG_PASS_THRU:
1332	case SENSE_CONTROLLER_PERFORMANCE:
1333	case READ_FLASH_ROM:
1334		if (copy_to_user(io->sg[0].addr, p, io->sg[0].size)) {
1335			kfree(p);
1336			return -EFAULT;
1337		}
1338		/* fall through and free p */
1339	case IDA_WRITE:
1340	case IDA_WRITE_MEDIA:
1341	case COLLECT_BUFFER:
1342	case WRITE_FLASH_ROM:
1343		kfree(p);
1344		break;
1345	default:;
1346		/* Nothing to do */
1347	}
1348
1349	io->rcode = c->req.hdr.rcode;
1350	cmd_free(h, c, 0);
1351	return(0);
1352}
1353
1354/*
1355 * Commands are pre-allocated in a large block.  Here we use a simple bitmap
1356 * scheme to suballocte them to the driver.  Operations that are not time
1357 * critical (and can wait for kmalloc and possibly sleep) can pass in NULL
1358 * as the first argument to get a new command.
1359 */
1360static cmdlist_t * cmd_alloc(ctlr_info_t *h, int get_from_pool)
1361{
1362	cmdlist_t * c;
1363	int i;
1364	dma_addr_t cmd_dhandle;
1365
1366	if (!get_from_pool) {
1367		c = (cmdlist_t*)pci_alloc_consistent(h->pci_dev, 
1368			sizeof(cmdlist_t), &cmd_dhandle);
1369		if(c==NULL)
1370			return NULL;
1371	} else {
1372		do {
1373			i = find_first_zero_bit(h->cmd_pool_bits, NR_CMDS);
1374			if (i == NR_CMDS)
1375				return NULL;
1376		} while(test_and_set_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG)) != 0);
1377		c = h->cmd_pool + i;
1378		cmd_dhandle = h->cmd_pool_dhandle + i*sizeof(cmdlist_t);
1379		h->nr_allocs++;
1380	}
1381
1382	memset(c, 0, sizeof(cmdlist_t));
1383	c->busaddr = cmd_dhandle; 
1384	return c;
1385}
1386
1387static void cmd_free(ctlr_info_t *h, cmdlist_t *c, int got_from_pool)
1388{
1389	int i;
1390
1391	if (!got_from_pool) {
1392		pci_free_consistent(h->pci_dev, sizeof(cmdlist_t), c,
1393			c->busaddr);
1394	} else {
1395		i = c - h->cmd_pool;
1396		clear_bit(i&(BITS_PER_LONG-1), h->cmd_pool_bits+(i/BITS_PER_LONG));
1397		h->nr_frees++;
1398	}
1399}
1400
1401/***********************************************************************
1402    name:        sendcmd
1403    Send a command to an IDA using the memory mapped FIFO interface
1404    and wait for it to complete.  
1405    This routine should only be called at init time.
1406***********************************************************************/
1407static int sendcmd(
1408	__u8	cmd,
1409	int	ctlr,
1410	void	*buff,
1411	size_t	size,
1412	unsigned int blk,
1413	unsigned int blkcnt,
1414	unsigned int log_unit )
1415{
1416	cmdlist_t *c;
1417	int complete;
1418	unsigned long temp;
1419	unsigned long i;
1420	ctlr_info_t *info_p = hba[ctlr];
1421
1422	c = cmd_alloc(info_p, 1);
1423	if(!c)
1424		return IO_ERROR;
1425	c->ctlr = ctlr;
1426	c->hdr.unit = log_unit;
1427	c->hdr.prio = 0;
1428	c->hdr.size = sizeof(rblk_t) >> 2;
1429	c->size += sizeof(rblk_t);
1430
1431	/* The request information. */
1432	c->req.hdr.next = 0;
1433	c->req.hdr.rcode = 0;
1434	c->req.bp = 0;
1435	c->req.hdr.sg_cnt = 1;
1436	c->req.hdr.reserved = 0;
1437	
1438	if (size == 0)
1439		c->req.sg[0].size = 512;
1440	else
1441		c->req.sg[0].size = size;
1442
1443	c->req.hdr.blk = blk;
1444	c->req.hdr.blk_cnt = blkcnt;
1445	c->req.hdr.cmd = (unsigned char) cmd;
1446	c->req.sg[0].addr = (__u32) pci_map_single(info_p->pci_dev, 
1447		buff, c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1448	/*
1449	 * Disable interrupt
1450	 */
1451	info_p->access.set_intr_mask(info_p, 0);
1452	/* Make sure there is room in the command FIFO */
1453	/* Actually it should be completely empty at this time. */
1454	for (i = 200000; i > 0; i--) {
1455		temp = info_p->access.fifo_full(info_p);
1456		if (temp != 0) {
1457			break;
1458		}
1459		udelay(10);
1460DBG(
1461		printk(KERN_WARNING "cpqarray ida%d: idaSendPciCmd FIFO full,"
1462			" waiting!\n", ctlr);
1463);
1464	} 
1465	/*
1466	 * Send the cmd
1467	 */
1468	info_p->access.submit_command(info_p, c);
1469	complete = pollcomplete(ctlr);
1470	
1471	pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr, 
1472		c->req.sg[0].size, PCI_DMA_BIDIRECTIONAL);
1473	if (complete != 1) {
1474		if (complete != c->busaddr) {
1475			printk( KERN_WARNING
1476			"cpqarray ida%d: idaSendPciCmd "
1477		      "Invalid command list address returned! (%08lx)\n",
1478				ctlr, (unsigned long)complete);
1479			cmd_free(info_p, c, 1);
1480			return (IO_ERROR);
1481		}
1482	} else {
1483		printk( KERN_WARNING
1484			"cpqarray ida%d: idaSendPciCmd Timeout out, "
1485			"No command list address returned!\n",
1486			ctlr);
1487		cmd_free(info_p, c, 1);
1488		return (IO_ERROR);
1489	}
1490
1491	if (c->req.hdr.rcode & 0x00FE) {
1492		if (!(c->req.hdr.rcode & BIG_PROBLEM)) {
1493			printk( KERN_WARNING
1494			"cpqarray ida%d: idaSendPciCmd, error: "
1495				"Controller failed at init time "
1496				"cmd: 0x%x, return code = 0x%x\n",
1497				ctlr, c->req.hdr.cmd, c->req.hdr.rcode);
1498
1499			cmd_free(info_p, c, 1);
1500			return (IO_ERROR);
1501		}
1502	}
1503	cmd_free(info_p, c, 1);
1504	return (IO_OK);
1505}
1506
1507/*
1508 * revalidate_allvol is for online array config utilities.  After a
1509 * utility reconfigures the drives in the array, it can use this function
1510 * (through an ioctl) to make the driver zap any previous disk structs for
1511 * that controller and get new ones.
1512 *
1513 * Right now I'm using the getgeometry() function to do this, but this
1514 * function should probably be finer grained and allow you to revalidate one
1515 * particualar logical volume (instead of all of them on a particular
1516 * controller).
1517 */
1518static int revalidate_allvol(ctlr_info_t *host)
1519{
1520	int ctlr = host->ctlr;
1521	int i;
1522	unsigned long flags;
1523
1524	spin_lock_irqsave(IDA_LOCK(ctlr), flags);
1525	if (host->usage_count > 1) {
1526		spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1527		printk(KERN_WARNING "cpqarray: Device busy for volume"
1528			" revalidation (usage=%d)\n", host->usage_count);
1529		return -EBUSY;
1530	}
1531	host->usage_count++;
1532	spin_unlock_irqrestore(IDA_LOCK(ctlr), flags);
1533
1534	/*
1535	 * Set the partition and block size structures for all volumes
1536	 * on this controller to zero.  We will reread all of this data
1537	 */
1538	set_capacity(ida_gendisk[ctlr][0], 0);
1539	for (i = 1; i < NWD; i++) {
1540		struct gendisk *disk = ida_gendisk[ctlr][i];
1541		if (disk->flags & GENHD_FL_UP)
1542			del_gendisk(disk);
1543	}
1544	memset(host->drv, 0, sizeof(drv_info_t)*NWD);
1545
1546	/*
1547	 * Tell the array controller not to give us any interrupts while
1548	 * we check the new geometry.  Then turn interrupts back on when
1549	 * we're done.
1550	 */
1551	host->access.set_intr_mask(host, 0);
1552	getgeometry(ctlr);
1553	host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
1554
1555	for(i=0; i<NWD; i++) {
1556		struct gendisk *disk = ida_gendisk[ctlr][i];
1557		drv_info_t *drv = &host->drv[i];
1558		if (i && !drv->nr_blks)
1559			continue;
1560		blk_queue_logical_block_size(host->queue, drv->blk_size);
1561		set_capacity(disk, drv->nr_blks);
1562		disk->queue = host->queue;
1563		disk->private_data = drv;
1564		if (i)
1565			add_disk(disk);
1566	}
1567
1568	host->usage_count--;
1569	return 0;
1570}
1571
1572static int ida_revalidate(struct gendisk *disk)
1573{
1574	drv_info_t *drv = disk->private_data;
1575	set_capacity(disk, drv->nr_blks);
1576	return 0;
1577}
1578
1579/********************************************************************
1580    name: pollcomplete
1581    Wait polling for a command to complete.
1582    The memory mapped FIFO is polled for the completion.
1583    Used only at init time, interrupts disabled.
1584 ********************************************************************/
1585static int pollcomplete(int ctlr)
1586{
1587	int done;
1588	int i;
1589
1590	/* Wait (up to 2 seconds) for a command to complete */
1591
1592	for (i = 200000; i > 0; i--) {
1593		done = hba[ctlr]->access.command_completed(hba[ctlr]);
1594		if (done == 0) {
1595			udelay(10);	/* a short fixed delay */
1596		} else
1597			return (done);
1598	}
1599	/* Invalid address to tell caller we ran out of time */
1600	return 1;
1601}
1602/*****************************************************************
1603    start_fwbk
1604    Starts controller firmwares background processing. 
1605    Currently only the Integrated Raid controller needs this done.
1606    If the PCI mem address registers are written to after this, 
1607	 data corruption may occur
1608*****************************************************************/
1609static void start_fwbk(int ctlr)
1610{
1611		id_ctlr_t *id_ctlr_buf; 
1612	int ret_code;
1613
1614	if(	(hba[ctlr]->board_id != 0x40400E11)
1615		&& (hba[ctlr]->board_id != 0x40480E11) )
1616
1617	/* Not a Integrated Raid, so there is nothing for us to do */
1618		return;
1619	printk(KERN_DEBUG "cpqarray: Starting firmware's background"
1620		" processing\n");
1621	/* Command does not return anything, but idasend command needs a 
1622		buffer */
1623	id_ctlr_buf = kmalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1624	if(id_ctlr_buf==NULL)
1625	{
1626		printk(KERN_WARNING "cpqarray: Out of memory. "
1627			"Unable to start background processing.\n");
1628		return;
1629	}		
1630	ret_code = sendcmd(RESUME_BACKGROUND_ACTIVITY, ctlr, 
1631		id_ctlr_buf, 0, 0, 0, 0);
1632	if(ret_code != IO_OK)
1633		printk(KERN_WARNING "cpqarray: Unable to start"
1634			" background processing\n");
1635
1636	kfree(id_ctlr_buf);
1637}
1638/*****************************************************************
1639    getgeometry
1640    Get ida logical volume geometry from the controller 
1641    This is a large bit of code which once existed in two flavors,
1642    It is used only at init time.
1643*****************************************************************/
1644static void getgeometry(int ctlr)
1645{				
1646	id_log_drv_t *id_ldrive;
1647	id_ctlr_t *id_ctlr_buf;
1648	sense_log_drv_stat_t *id_lstatus_buf;
1649	config_t *sense_config_buf;
1650	unsigned int log_unit, log_index;
1651	int ret_code, size;
1652	drv_info_t *drv;
1653	ctlr_info_t *info_p = hba[ctlr];
1654	int i;
1655
1656	info_p->log_drv_map = 0;	
1657	
1658	id_ldrive = kzalloc(sizeof(id_log_drv_t), GFP_KERNEL);
1659	if (!id_ldrive)	{
1660		printk( KERN_ERR "cpqarray:  out of memory.\n");
1661		goto err_0;
1662	}
1663
1664	id_ctlr_buf = kzalloc(sizeof(id_ctlr_t), GFP_KERNEL);
1665	if (!id_ctlr_buf) {
1666		printk( KERN_ERR "cpqarray:  out of memory.\n");
1667		goto err_1;
1668	}
1669
1670	id_lstatus_buf = kzalloc(sizeof(sense_log_drv_stat_t), GFP_KERNEL);
1671	if (!id_lstatus_buf) {
1672		printk( KERN_ERR "cpqarray:  out of memory.\n");
1673		goto err_2;
1674	}
1675
1676	sense_config_buf = kzalloc(sizeof(config_t), GFP_KERNEL);
1677	if (!sense_config_buf) {
1678		printk( KERN_ERR "cpqarray:  out of memory.\n");
1679		goto err_3;
1680	}
1681
1682	info_p->phys_drives = 0;
1683	info_p->log_drv_map = 0;
1684	info_p->drv_assign_map = 0;
1685	info_p->drv_spare_map = 0;
1686	info_p->mp_failed_drv_map = 0;	/* only initialized here */
1687	/* Get controllers info for this logical drive */
1688	ret_code = sendcmd(ID_CTLR, ctlr, id_ctlr_buf, 0, 0, 0, 0);
1689	if (ret_code == IO_ERROR) {
1690		/*
1691		 * If can't get controller info, set the logical drive map to 0,
1692		 * so the idastubopen will fail on all logical drives
1693		 * on the controller.
1694		 */
1695		printk(KERN_ERR "cpqarray: error sending ID controller\n");
1696                goto err_4;
1697        }
1698
1699	info_p->log_drives = id_ctlr_buf->nr_drvs;
1700	for(i=0;i<4;i++)
1701		info_p->firm_rev[i] = id_ctlr_buf->firm_rev[i];
1702	info_p->ctlr_sig = id_ctlr_buf->cfg_sig;
1703
1704	printk(" (%s)\n", info_p->product_name);
1705	/*
1706	 * Initialize logical drive map to zero
1707	 */
1708	log_index = 0;
1709	/*
1710	 * Get drive geometry for all logical drives
1711	 */
1712	if (id_ctlr_buf->nr_drvs > 16)
1713		printk(KERN_WARNING "cpqarray ida%d:  This driver supports "
1714			"16 logical drives per controller.\n.  "
1715			" Additional drives will not be "
1716			"detected\n", ctlr);
1717
1718	for (log_unit = 0;
1719	     (log_index < id_ctlr_buf->nr_drvs)
1720	     && (log_unit < NWD);
1721	     log_unit++) {
1722		size = sizeof(sense_log_drv_stat_t);
1723
1724		/*
1725		   Send "Identify logical drive status" cmd
1726		 */
1727		ret_code = sendcmd(SENSE_LOG_DRV_STAT,
1728			     ctlr, id_lstatus_buf, size, 0, 0, log_unit);
1729		if (ret_code == IO_ERROR) {
1730			/*
1731			   If can't get logical drive status, set
1732			   the logical drive map to 0, so the
1733			   idastubopen will fail for all logical drives
1734			   on the controller. 
1735			 */
1736			info_p->log_drv_map = 0;	
1737			printk( KERN_WARNING
1738			     "cpqarray ida%d: idaGetGeometry - Controller"
1739				" failed to report status of logical drive %d\n"
1740			 "Access to this controller has been disabled\n",
1741				ctlr, log_unit);
1742                	goto err_4;
1743		}
1744		/*
1745		   Make sure the logical drive is configured
1746		 */
1747		if (id_lstatus_buf->status != LOG_NOT_CONF) {
1748			ret_code = sendcmd(ID_LOG_DRV, ctlr, id_ldrive,
1749			       sizeof(id_log_drv_t), 0, 0, log_unit);
1750			/*
1751			   If error, the bit for this
1752			   logical drive won't be set and
1753			   idastubopen will return error. 
1754			 */
1755			if (ret_code != IO_ERROR) {
1756				drv = &info_p->drv[log_unit];
1757				drv->blk_size = id_ldrive->blk_size;
1758				drv->nr_blks = id_ldrive->nr_blks;
1759				drv->cylinders = id_ldrive->drv.cyl;
1760				drv->heads = id_ldrive->drv.heads;
1761				drv->sectors = id_ldrive->drv.sect_per_track;
1762				info_p->log_drv_map |=	(1 << log_unit);
1763
1764	printk(KERN_INFO "cpqarray ida/c%dd%d: blksz=%d nr_blks=%d\n",
1765		ctlr, log_unit, drv->blk_size, drv->nr_blks);
1766				ret_code = sendcmd(SENSE_CONFIG,
1767						  ctlr, sense_config_buf,
1768				 sizeof(config_t), 0, 0, log_unit);
1769				if (ret_code == IO_ERROR) {
1770					info_p->log_drv_map = 0;
1771                			printk(KERN_ERR "cpqarray: error sending sense config\n");
1772                			goto err_4;
1773				}
1774
1775				info_p->phys_drives =
1776				    sense_config_buf->ctlr_phys_drv;
1777				info_p->drv_assign_map
1778				    |= sense_config_buf->drv_asgn_map;
1779				info_p->drv_assign_map
1780				    |= sense_config_buf->spare_asgn_map;
1781				info_p->drv_spare_map
1782				    |= sense_config_buf->spare_asgn_map;
1783			}	/* end of if no error on id_ldrive */
1784			log_index = log_index + 1;
1785		}		/* end of if logical drive configured */
1786	}			/* end of for log_unit */
1787
1788	/* Free all the buffers and return */
1789err_4:
1790	kfree(sense_config_buf);
1791err_3:
1792  	kfree(id_lstatus_buf);
1793err_2:
1794	kfree(id_ctlr_buf);
1795err_1:
1796  	kfree(id_ldrive);
1797err_0:
1798	return;
1799}
1800
1801static void __exit cpqarray_exit(void)
1802{
1803	int i;
1804
1805	pci_unregister_driver(&cpqarray_pci_driver);
1806
1807	/* Double check that all controller entries have been removed */
1808	for(i=0; i<MAX_CTLR; i++) {
1809		if (hba[i] != NULL) {
1810			printk(KERN_WARNING "cpqarray: Removing EISA "
1811					"controller %d\n", i);
1812			cpqarray_remove_one_eisa(i);
1813		}
1814	}
1815
1816	remove_proc_entry("driver/cpqarray", NULL);
1817}
1818
1819module_init(cpqarray_init)
1820module_exit(cpqarray_exit)