Linux Audio

Check our new training course

Loading...
Note: File does not exist in v6.13.7.
   1/*
   2 * VME Bridge Framework
   3 *
   4 * Author: Martyn Welch <martyn.welch@ge.com>
   5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
   6 *
   7 * Based on work by Tom Armistead and Ajit Prem
   8 * Copyright 2004 Motorola Inc.
   9 *
  10 * This program is free software; you can redistribute  it and/or modify it
  11 * under  the terms of  the GNU General  Public License as published by the
  12 * Free Software Foundation;  either version 2 of the  License, or (at your
  13 * option) any later version.
  14 */
  15
  16#include <linux/init.h>
  17#include <linux/export.h>
  18#include <linux/mm.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/errno.h>
  22#include <linux/pci.h>
  23#include <linux/poll.h>
  24#include <linux/highmem.h>
  25#include <linux/interrupt.h>
  26#include <linux/pagemap.h>
  27#include <linux/device.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/syscalls.h>
  30#include <linux/mutex.h>
  31#include <linux/spinlock.h>
  32#include <linux/slab.h>
  33#include <linux/vme.h>
  34
  35#include "vme_bridge.h"
  36
  37/* Bitmask and list of registered buses both protected by common mutex */
  38static unsigned int vme_bus_numbers;
  39static LIST_HEAD(vme_bus_list);
  40static DEFINE_MUTEX(vme_buses_lock);
  41
  42static int __init vme_init(void);
  43
/* Map a generic struct device back to the vme_dev that embeds it. */
static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}
  48
  49/*
  50 * Find the bridge that the resource is associated with.
  51 */
  52static struct vme_bridge *find_bridge(struct vme_resource *resource)
  53{
  54	/* Get list to search */
  55	switch (resource->type) {
  56	case VME_MASTER:
  57		return list_entry(resource->entry, struct vme_master_resource,
  58			list)->parent;
  59		break;
  60	case VME_SLAVE:
  61		return list_entry(resource->entry, struct vme_slave_resource,
  62			list)->parent;
  63		break;
  64	case VME_DMA:
  65		return list_entry(resource->entry, struct vme_dma_resource,
  66			list)->parent;
  67		break;
  68	case VME_LM:
  69		return list_entry(resource->entry, struct vme_lm_resource,
  70			list)->parent;
  71		break;
  72	default:
  73		printk(KERN_ERR "Unknown resource type\n");
  74		return NULL;
  75		break;
  76	}
  77}
  78
  79/*
  80 * Allocate a contiguous block of memory for use by the driver. This is used to
  81 * create the buffers for the slave windows.
  82 */
  83void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
  84	dma_addr_t *dma)
  85{
  86	struct vme_bridge *bridge;
  87
  88	if (resource == NULL) {
  89		printk(KERN_ERR "No resource\n");
  90		return NULL;
  91	}
  92
  93	bridge = find_bridge(resource);
  94	if (bridge == NULL) {
  95		printk(KERN_ERR "Can't find bridge\n");
  96		return NULL;
  97	}
  98
  99	if (bridge->parent == NULL) {
 100		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
 101		return NULL;
 102	}
 103
 104	if (bridge->alloc_consistent == NULL) {
 105		printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
 106		       bridge->name);
 107		return NULL;
 108	}
 109
 110	return bridge->alloc_consistent(bridge->parent, size, dma);
 111}
 112EXPORT_SYMBOL(vme_alloc_consistent);
 113
 114/*
 115 * Free previously allocated contiguous block of memory.
 116 */
 117void vme_free_consistent(struct vme_resource *resource, size_t size,
 118	void *vaddr, dma_addr_t dma)
 119{
 120	struct vme_bridge *bridge;
 121
 122	if (resource == NULL) {
 123		printk(KERN_ERR "No resource\n");
 124		return;
 125	}
 126
 127	bridge = find_bridge(resource);
 128	if (bridge == NULL) {
 129		printk(KERN_ERR "Can't find bridge\n");
 130		return;
 131	}
 132
 133	if (bridge->parent == NULL) {
 134		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
 135		return;
 136	}
 137
 138	if (bridge->free_consistent == NULL) {
 139		printk(KERN_ERR "free_consistent not supported by bridge %s\n",
 140		       bridge->name);
 141		return;
 142	}
 143
 144	bridge->free_consistent(bridge->parent, size, vaddr, dma);
 145}
 146EXPORT_SYMBOL(vme_free_consistent);
 147
 148size_t vme_get_size(struct vme_resource *resource)
 149{
 150	int enabled, retval;
 151	unsigned long long base, size;
 152	dma_addr_t buf_base;
 153	u32 aspace, cycle, dwidth;
 154
 155	switch (resource->type) {
 156	case VME_MASTER:
 157		retval = vme_master_get(resource, &enabled, &base, &size,
 158			&aspace, &cycle, &dwidth);
 159		if (retval)
 160			return 0;
 161
 162		return size;
 163		break;
 164	case VME_SLAVE:
 165		retval = vme_slave_get(resource, &enabled, &base, &size,
 166			&buf_base, &aspace, &cycle);
 167		if (retval)
 168			return 0;
 169
 170		return size;
 171		break;
 172	case VME_DMA:
 173		return 0;
 174		break;
 175	default:
 176		printk(KERN_ERR "Unknown resource type\n");
 177		return 0;
 178		break;
 179	}
 180}
 181EXPORT_SYMBOL(vme_get_size);
 182
 183int vme_check_window(u32 aspace, unsigned long long vme_base,
 184		     unsigned long long size)
 185{
 186	int retval = 0;
 187
 188	switch (aspace) {
 189	case VME_A16:
 190		if (((vme_base + size) > VME_A16_MAX) ||
 191				(vme_base > VME_A16_MAX))
 192			retval = -EFAULT;
 193		break;
 194	case VME_A24:
 195		if (((vme_base + size) > VME_A24_MAX) ||
 196				(vme_base > VME_A24_MAX))
 197			retval = -EFAULT;
 198		break;
 199	case VME_A32:
 200		if (((vme_base + size) > VME_A32_MAX) ||
 201				(vme_base > VME_A32_MAX))
 202			retval = -EFAULT;
 203		break;
 204	case VME_A64:
 205		if ((size != 0) && (vme_base > U64_MAX + 1 - size))
 206			retval = -EFAULT;
 207		break;
 208	case VME_CRCSR:
 209		if (((vme_base + size) > VME_CRCSR_MAX) ||
 210				(vme_base > VME_CRCSR_MAX))
 211			retval = -EFAULT;
 212		break;
 213	case VME_USER1:
 214	case VME_USER2:
 215	case VME_USER3:
 216	case VME_USER4:
 217		/* User Defined */
 218		break;
 219	default:
 220		printk(KERN_ERR "Invalid address space\n");
 221		retval = -EINVAL;
 222		break;
 223	}
 224
 225	return retval;
 226}
 227EXPORT_SYMBOL(vme_check_window);
 228
/*
 * Translate a VME address modifier (AM) code into the framework's
 * address-space flag.  Returns 0 for AM codes not covered here.
 *
 * NOTE(review): groupings look like the standard VME64 AM code table
 * (0x29/0x2D A16, 0x38-0x3F A24, 0x08-0x0F A32, 0x00/0x01/0x03 A64) -
 * verify against the VME64 specification.
 */
static u32 vme_get_aspace(int am)
{
	switch (am) {
	case 0x29:
	case 0x2D:
		return VME_A16;
	case 0x38:
	case 0x39:
	case 0x3A:
	case 0x3B:
	case 0x3C:
	case 0x3D:
	case 0x3E:
	case 0x3F:
		return VME_A24;
	case 0x8:
	case 0x9:
	case 0xA:
	case 0xB:
	case 0xC:
	case 0xD:
	case 0xE:
	case 0xF:
		return VME_A32;
	case 0x0:
	case 0x1:
	case 0x3:
		return VME_A64;
	}

	/* Unrecognised AM code */
	return 0;
}
 261
/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 *
 * Walks the bridge's slave resources for one that is unlocked and whose
 * supported address-space and cycle attribute masks are a superset of
 * those requested.  The matching image is marked locked under its mutex
 * and wrapped in a freshly allocated struct vme_resource handle.
 * Returns NULL if no image matches or allocation fails.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
	u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {

			/* Claim the image before dropping its mutex */
			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
 329
 330int vme_slave_set(struct vme_resource *resource, int enabled,
 331	unsigned long long vme_base, unsigned long long size,
 332	dma_addr_t buf_base, u32 aspace, u32 cycle)
 333{
 334	struct vme_bridge *bridge = find_bridge(resource);
 335	struct vme_slave_resource *image;
 336	int retval;
 337
 338	if (resource->type != VME_SLAVE) {
 339		printk(KERN_ERR "Not a slave resource\n");
 340		return -EINVAL;
 341	}
 342
 343	image = list_entry(resource->entry, struct vme_slave_resource, list);
 344
 345	if (bridge->slave_set == NULL) {
 346		printk(KERN_ERR "Function not supported\n");
 347		return -ENOSYS;
 348	}
 349
 350	if (!(((image->address_attr & aspace) == aspace) &&
 351		((image->cycle_attr & cycle) == cycle))) {
 352		printk(KERN_ERR "Invalid attributes\n");
 353		return -EINVAL;
 354	}
 355
 356	retval = vme_check_window(aspace, vme_base, size);
 357	if (retval)
 358		return retval;
 359
 360	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
 361		aspace, cycle);
 362}
 363EXPORT_SYMBOL(vme_slave_set);
 364
 365int vme_slave_get(struct vme_resource *resource, int *enabled,
 366	unsigned long long *vme_base, unsigned long long *size,
 367	dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
 368{
 369	struct vme_bridge *bridge = find_bridge(resource);
 370	struct vme_slave_resource *image;
 371
 372	if (resource->type != VME_SLAVE) {
 373		printk(KERN_ERR "Not a slave resource\n");
 374		return -EINVAL;
 375	}
 376
 377	image = list_entry(resource->entry, struct vme_slave_resource, list);
 378
 379	if (bridge->slave_get == NULL) {
 380		printk(KERN_ERR "vme_slave_get not supported\n");
 381		return -EINVAL;
 382	}
 383
 384	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
 385		aspace, cycle);
 386}
 387EXPORT_SYMBOL(vme_slave_get);
 388
/*
 * Release a slave window handle obtained from vme_slave_request():
 * clears the image's locked flag and frees the wrapper.  Warns (but
 * continues) if the image was not locked.
 */
void vme_slave_free(struct vme_resource *resource)
{
	struct vme_slave_resource *slave_image;

	if (resource->type != VME_SLAVE) {
		printk(KERN_ERR "Not a slave resource\n");
		return;
	}

	slave_image = list_entry(resource->entry, struct vme_slave_resource,
		list);
	if (slave_image == NULL) {
		printk(KERN_ERR "Can't find slave resource\n");
		return;
	}

	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	if (slave_image->locked == 0)
		printk(KERN_ERR "Image is already free\n");

	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);

	/* Free up resource memory */
	kfree(resource);
}
EXPORT_SYMBOL(vme_slave_free);
 417
/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 *
 * Walks the bridge's master resources for one that is unlocked and
 * whose address-space, cycle and data-width attribute masks are a
 * superset of those requested.  The image's spinlock guards the locked
 * flag; the claimed image is wrapped in a freshly allocated struct
 * vme_resource handle.  Returns NULL on failure.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {

			/* Claim the image before dropping its lock */
			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
 488
 489int vme_master_set(struct vme_resource *resource, int enabled,
 490	unsigned long long vme_base, unsigned long long size, u32 aspace,
 491	u32 cycle, u32 dwidth)
 492{
 493	struct vme_bridge *bridge = find_bridge(resource);
 494	struct vme_master_resource *image;
 495	int retval;
 496
 497	if (resource->type != VME_MASTER) {
 498		printk(KERN_ERR "Not a master resource\n");
 499		return -EINVAL;
 500	}
 501
 502	image = list_entry(resource->entry, struct vme_master_resource, list);
 503
 504	if (bridge->master_set == NULL) {
 505		printk(KERN_WARNING "vme_master_set not supported\n");
 506		return -EINVAL;
 507	}
 508
 509	if (!(((image->address_attr & aspace) == aspace) &&
 510		((image->cycle_attr & cycle) == cycle) &&
 511		((image->width_attr & dwidth) == dwidth))) {
 512		printk(KERN_WARNING "Invalid attributes\n");
 513		return -EINVAL;
 514	}
 515
 516	retval = vme_check_window(aspace, vme_base, size);
 517	if (retval)
 518		return retval;
 519
 520	return bridge->master_set(image, enabled, vme_base, size, aspace,
 521		cycle, dwidth);
 522}
 523EXPORT_SYMBOL(vme_master_set);
 524
 525int vme_master_get(struct vme_resource *resource, int *enabled,
 526	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
 527	u32 *cycle, u32 *dwidth)
 528{
 529	struct vme_bridge *bridge = find_bridge(resource);
 530	struct vme_master_resource *image;
 531
 532	if (resource->type != VME_MASTER) {
 533		printk(KERN_ERR "Not a master resource\n");
 534		return -EINVAL;
 535	}
 536
 537	image = list_entry(resource->entry, struct vme_master_resource, list);
 538
 539	if (bridge->master_get == NULL) {
 540		printk(KERN_WARNING "%s not supported\n", __func__);
 541		return -EINVAL;
 542	}
 543
 544	return bridge->master_get(image, enabled, vme_base, size, aspace,
 545		cycle, dwidth);
 546}
 547EXPORT_SYMBOL(vme_master_get);
 548
 549/*
 550 * Read data out of VME space into a buffer.
 551 */
 552ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
 553	loff_t offset)
 554{
 555	struct vme_bridge *bridge = find_bridge(resource);
 556	struct vme_master_resource *image;
 557	size_t length;
 558
 559	if (bridge->master_read == NULL) {
 560		printk(KERN_WARNING "Reading from resource not supported\n");
 561		return -EINVAL;
 562	}
 563
 564	if (resource->type != VME_MASTER) {
 565		printk(KERN_ERR "Not a master resource\n");
 566		return -EINVAL;
 567	}
 568
 569	image = list_entry(resource->entry, struct vme_master_resource, list);
 570
 571	length = vme_get_size(resource);
 572
 573	if (offset > length) {
 574		printk(KERN_WARNING "Invalid Offset\n");
 575		return -EFAULT;
 576	}
 577
 578	if ((offset + count) > length)
 579		count = length - offset;
 580
 581	return bridge->master_read(image, buf, count, offset);
 582
 583}
 584EXPORT_SYMBOL(vme_master_read);
 585
 586/*
 587 * Write data out to VME space from a buffer.
 588 */
 589ssize_t vme_master_write(struct vme_resource *resource, void *buf,
 590	size_t count, loff_t offset)
 591{
 592	struct vme_bridge *bridge = find_bridge(resource);
 593	struct vme_master_resource *image;
 594	size_t length;
 595
 596	if (bridge->master_write == NULL) {
 597		printk(KERN_WARNING "Writing to resource not supported\n");
 598		return -EINVAL;
 599	}
 600
 601	if (resource->type != VME_MASTER) {
 602		printk(KERN_ERR "Not a master resource\n");
 603		return -EINVAL;
 604	}
 605
 606	image = list_entry(resource->entry, struct vme_master_resource, list);
 607
 608	length = vme_get_size(resource);
 609
 610	if (offset > length) {
 611		printk(KERN_WARNING "Invalid Offset\n");
 612		return -EFAULT;
 613	}
 614
 615	if ((offset + count) > length)
 616		count = length - offset;
 617
 618	return bridge->master_write(image, buf, count, offset);
 619}
 620EXPORT_SYMBOL(vme_master_write);
 621
 622/*
 623 * Perform RMW cycle to provided location.
 624 */
 625unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
 626	unsigned int compare, unsigned int swap, loff_t offset)
 627{
 628	struct vme_bridge *bridge = find_bridge(resource);
 629	struct vme_master_resource *image;
 630
 631	if (bridge->master_rmw == NULL) {
 632		printk(KERN_WARNING "Writing to resource not supported\n");
 633		return -EINVAL;
 634	}
 635
 636	if (resource->type != VME_MASTER) {
 637		printk(KERN_ERR "Not a master resource\n");
 638		return -EINVAL;
 639	}
 640
 641	image = list_entry(resource->entry, struct vme_master_resource, list);
 642
 643	return bridge->master_rmw(image, mask, compare, swap, offset);
 644}
 645EXPORT_SYMBOL(vme_master_rmw);
 646
/*
 * Map a master window into userspace.  vma->vm_pgoff is interpreted as
 * a page offset into the window's bus resource; the mapping is made
 * non-cacheable since it targets bus I/O space.
 */
int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
{
	struct vme_master_resource *image;
	phys_addr_t phys_addr;
	unsigned long vma_size;

	if (resource->type != VME_MASTER) {
		pr_err("Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);
	/* NOTE(review): assumes vm_pgoff << PAGE_SHIFT and the sum below
	 * cannot overflow for any window a bridge exposes - confirm. */
	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
	vma_size = vma->vm_end - vma->vm_start;

	/* Mapping must lie entirely inside the window's bus resource */
	if (phys_addr + vma_size > image->bus_resource.end + 1) {
		pr_err("Map size cannot exceed the window size\n");
		return -EFAULT;
	}

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
}
EXPORT_SYMBOL(vme_master_mmap);
 672
 673void vme_master_free(struct vme_resource *resource)
 674{
 675	struct vme_master_resource *master_image;
 676
 677	if (resource->type != VME_MASTER) {
 678		printk(KERN_ERR "Not a master resource\n");
 679		return;
 680	}
 681
 682	master_image = list_entry(resource->entry, struct vme_master_resource,
 683		list);
 684	if (master_image == NULL) {
 685		printk(KERN_ERR "Can't find master resource\n");
 686		return;
 687	}
 688
 689	/* Unlock image */
 690	spin_lock(&master_image->lock);
 691	if (master_image->locked == 0)
 692		printk(KERN_ERR "Image is already free\n");
 693
 694	master_image->locked = 0;
 695	spin_unlock(&master_image->lock);
 696
 697	/* Free up resource memory */
 698	kfree(resource);
 699}
 700EXPORT_SYMBOL(vme_master_free);
 701
/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 *
 * Walks the bridge's DMA resources for an unlocked controller whose
 * route attribute mask is a superset of @route, claims it under its
 * mutex, and wraps it in a freshly allocated struct vme_resource.
 * Returns NULL on failure.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {

			/* Claim the controller before dropping its mutex */
			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
 770
 771/*
 772 * Start new list
 773 */
 774struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
 775{
 776	struct vme_dma_resource *ctrlr;
 777	struct vme_dma_list *dma_list;
 778
 779	if (resource->type != VME_DMA) {
 780		printk(KERN_ERR "Not a DMA resource\n");
 781		return NULL;
 782	}
 783
 784	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
 785
 786	dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
 787	if (dma_list == NULL) {
 788		printk(KERN_ERR "Unable to allocate memory for new DMA list\n");
 789		return NULL;
 790	}
 791	INIT_LIST_HEAD(&dma_list->entries);
 792	dma_list->parent = ctrlr;
 793	mutex_init(&dma_list->mtx);
 794
 795	return dma_list;
 796}
 797EXPORT_SYMBOL(vme_new_dma_list);
 798
 799/*
 800 * Create "Pattern" type attributes
 801 */
 802struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
 803{
 804	struct vme_dma_attr *attributes;
 805	struct vme_dma_pattern *pattern_attr;
 806
 807	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
 808	if (attributes == NULL) {
 809		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
 810		goto err_attr;
 811	}
 812
 813	pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
 814	if (pattern_attr == NULL) {
 815		printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
 816		goto err_pat;
 817	}
 818
 819	attributes->type = VME_DMA_PATTERN;
 820	attributes->private = (void *)pattern_attr;
 821
 822	pattern_attr->pattern = pattern;
 823	pattern_attr->type = type;
 824
 825	return attributes;
 826
 827err_pat:
 828	kfree(attributes);
 829err_attr:
 830	return NULL;
 831}
 832EXPORT_SYMBOL(vme_dma_pattern_attribute);
 833
 834/*
 835 * Create "PCI" type attributes
 836 */
 837struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
 838{
 839	struct vme_dma_attr *attributes;
 840	struct vme_dma_pci *pci_attr;
 841
 842	/* XXX Run some sanity checks here */
 843
 844	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
 845	if (attributes == NULL) {
 846		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
 847		goto err_attr;
 848	}
 849
 850	pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
 851	if (pci_attr == NULL) {
 852		printk(KERN_ERR "Unable to allocate memory for PCI attributes\n");
 853		goto err_pci;
 854	}
 855
 856
 857
 858	attributes->type = VME_DMA_PCI;
 859	attributes->private = (void *)pci_attr;
 860
 861	pci_attr->address = address;
 862
 863	return attributes;
 864
 865err_pci:
 866	kfree(attributes);
 867err_attr:
 868	return NULL;
 869}
 870EXPORT_SYMBOL(vme_dma_pci_attribute);
 871
 872/*
 873 * Create "VME" type attributes
 874 */
 875struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
 876	u32 aspace, u32 cycle, u32 dwidth)
 877{
 878	struct vme_dma_attr *attributes;
 879	struct vme_dma_vme *vme_attr;
 880
 881	attributes = kmalloc(
 882		sizeof(struct vme_dma_attr), GFP_KERNEL);
 883	if (attributes == NULL) {
 884		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
 885		goto err_attr;
 886	}
 887
 888	vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
 889	if (vme_attr == NULL) {
 890		printk(KERN_ERR "Unable to allocate memory for VME attributes\n");
 891		goto err_vme;
 892	}
 893
 894	attributes->type = VME_DMA_VME;
 895	attributes->private = (void *)vme_attr;
 896
 897	vme_attr->address = address;
 898	vme_attr->aspace = aspace;
 899	vme_attr->cycle = cycle;
 900	vme_attr->dwidth = dwidth;
 901
 902	return attributes;
 903
 904err_vme:
 905	kfree(attributes);
 906err_attr:
 907	return NULL;
 908}
 909EXPORT_SYMBOL(vme_dma_vme_attribute);
 910
/*
 * Free attribute
 *
 * Releases both the generic wrapper and the type-specific private data
 * allocated by one of the vme_dma_*_attribute() constructors.
 */
void vme_dma_free_attribute(struct vme_dma_attr *attributes)
{
	kfree(attributes->private);
	kfree(attributes);
}
EXPORT_SYMBOL(vme_dma_free_attribute);
 920
/*
 * Append a src->dest transfer of @count bytes to a DMA link list via
 * the bridge driver.  Returns 0 or a negative error code.
 *
 * NOTE(review): mutex_trylock failure is reported as "already
 * submitted"; presumably the list mutex is held for the duration of an
 * executing list - confirm against vme_dma_list_exec().
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
 944
/*
 * Submit a DMA link list for execution.  The list mutex is held across
 * the bridge call, serialising execution against concurrent add/free.
 * Returns the bridge driver's result or a negative error code.
 */
int vme_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_exec == NULL) {
		printk(KERN_ERR "Link List DMA execution not supported\n");
		return -EINVAL;
	}

	mutex_lock(&list->mtx);

	retval = bridge->dma_list_exec(list);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_exec);
 964
/*
 * Empty and free a DMA link list.  Fails with -EINVAL if the list is
 * currently in use (mutex held elsewhere), and propagates any error
 * from the driver-level empty operation without freeing the list.
 */
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the DMA list. We need to go to the
	 * low level driver as DMA entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&list->mtx);
		return retval;
	}
	mutex_unlock(&list->mtx);
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);
 996
/*
 * Release a DMA controller handle obtained from vme_dma_request().
 * Fails with -EBUSY if the controller is in use or still has pending or
 * running transfers; otherwise clears the locked flag and frees the
 * wrapper.
 */
int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return -EINVAL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	if (!mutex_trylock(&ctrlr->mtx)) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return -EBUSY;
	}

	/* Refuse to free while transfers are queued or in flight */
	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
		printk(KERN_WARNING "Resource still processing transfers\n");
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}

	ctrlr->locked = 0;

	mutex_unlock(&ctrlr->mtx);

	kfree(resource);

	return 0;
}
EXPORT_SYMBOL(vme_dma_free);
1028
1029void vme_bus_error_handler(struct vme_bridge *bridge,
1030			   unsigned long long address, int am)
1031{
1032	struct list_head *handler_pos = NULL;
1033	struct vme_error_handler *handler;
1034	int handler_triggered = 0;
1035	u32 aspace = vme_get_aspace(am);
1036
1037	list_for_each(handler_pos, &bridge->vme_error_handlers) {
1038		handler = list_entry(handler_pos, struct vme_error_handler,
1039				     list);
1040		if ((aspace == handler->aspace) &&
1041		    (address >= handler->start) &&
1042		    (address < handler->end)) {
1043			if (!handler->num_errors)
1044				handler->first_error = address;
1045			if (handler->num_errors != UINT_MAX)
1046				handler->num_errors++;
1047			handler_triggered = 1;
1048		}
1049	}
1050
1051	if (!handler_triggered)
1052		dev_err(bridge->parent,
1053			"Unhandled VME access error at address 0x%llx\n",
1054			address);
1055}
1056EXPORT_SYMBOL(vme_bus_error_handler);
1057
/*
 * Register a bus-error handler covering [address, address + len) in the
 * given address space.  Returns the handler (for later unregistration)
 * or NULL on allocation failure.
 *
 * NOTE(review): the handler list is modified without an explicit lock
 * here; presumably callers serialise registration - confirm.
 */
struct vme_error_handler *vme_register_error_handler(
	struct vme_bridge *bridge, u32 aspace,
	unsigned long long address, size_t len)
{
	struct vme_error_handler *handler;

	handler = kmalloc(sizeof(*handler), GFP_KERNEL);
	if (!handler)
		return NULL;

	handler->aspace = aspace;
	handler->start = address;
	handler->end = address + len;
	handler->num_errors = 0;
	handler->first_error = 0;
	list_add_tail(&handler->list, &bridge->vme_error_handlers);

	return handler;
}
EXPORT_SYMBOL(vme_register_error_handler);
1078
/*
 * Remove a handler registered with vme_register_error_handler() from
 * its bridge's list and free it.
 */
void vme_unregister_error_handler(struct vme_error_handler *handler)
{
	list_del(&handler->list);
	kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);
1085
1086void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
1087{
1088	void (*call)(int, int, void *);
1089	void *priv_data;
1090
1091	call = bridge->irq[level - 1].callback[statid].func;
1092	priv_data = bridge->irq[level - 1].callback[statid].priv_data;
1093
1094	if (call != NULL)
1095		call(level, statid, priv_data);
1096	else
1097		printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
1098		       level, statid);
1099}
1100EXPORT_SYMBOL(vme_irq_handler);
1101
/*
 * Install @callback for VME interrupt @level (1-7) / @statid and enable
 * that interrupt level on the bridge.  Returns -EBUSY if a callback is
 * already installed for that level/statid pair.
 *
 * NOTE(review): statid is used as an array index without a range check
 * here; presumably callers pass a valid vector - confirm the bounds of
 * the callback array.
 */
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	/* VME interrupt levels are 1 through 7 */
	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	mutex_lock(&bridge->irq_mtx);

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&bridge->irq_mtx);

	return 0;
}
EXPORT_SYMBOL(vme_irq_request);
1144
/*
 * Remove the callback for VME interrupt @level (1-7) / @statid, and
 * disable the interrupt level on the bridge once no callbacks remain
 * attached at that level.
 */
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	/* VME interrupt levels are 1 through 7 */
	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	mutex_lock(&bridge->irq_mtx);

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level*/
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
1179
1180int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
1181{
1182	struct vme_bridge *bridge;
1183
1184	bridge = vdev->bridge;
1185	if (bridge == NULL) {
1186		printk(KERN_ERR "Can't find VME bus\n");
1187		return -EINVAL;
1188	}
1189
1190	if ((level < 1) || (level > 7)) {
1191		printk(KERN_WARNING "Invalid interrupt level\n");
1192		return -EINVAL;
1193	}
1194
1195	if (bridge->irq_generate == NULL) {
1196		printk(KERN_WARNING "Interrupt generation not supported\n");
1197		return -EINVAL;
1198	}
1199
1200	return bridge->irq_generate(bridge, level, statid);
1201}
1202EXPORT_SYMBOL(vme_irq_generate);
1203
1204/*
1205 * Request the location monitor, return resource or NULL
1206 */
1207struct vme_resource *vme_lm_request(struct vme_dev *vdev)
1208{
1209	struct vme_bridge *bridge;
1210	struct list_head *lm_pos = NULL;
1211	struct vme_lm_resource *allocated_lm = NULL;
1212	struct vme_lm_resource *lm = NULL;
1213	struct vme_resource *resource = NULL;
1214
1215	bridge = vdev->bridge;
1216	if (bridge == NULL) {
1217		printk(KERN_ERR "Can't find VME bus\n");
1218		goto err_bus;
1219	}
1220
1221	/* Loop through DMA resources */
1222	list_for_each(lm_pos, &bridge->lm_resources) {
1223		lm = list_entry(lm_pos,
1224			struct vme_lm_resource, list);
1225
1226		if (lm == NULL) {
1227			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
1228			continue;
1229		}
1230
1231		/* Find an unlocked controller */
1232		mutex_lock(&lm->mtx);
1233		if (lm->locked == 0) {
1234			lm->locked = 1;
1235			mutex_unlock(&lm->mtx);
1236			allocated_lm = lm;
1237			break;
1238		}
1239		mutex_unlock(&lm->mtx);
1240	}
1241
1242	/* Check to see if we found a resource */
1243	if (allocated_lm == NULL)
1244		goto err_lm;
1245
1246	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
1247	if (resource == NULL) {
1248		printk(KERN_ERR "Unable to allocate resource structure\n");
1249		goto err_alloc;
1250	}
1251	resource->type = VME_LM;
1252	resource->entry = &allocated_lm->list;
1253
1254	return resource;
1255
1256err_alloc:
1257	/* Unlock image */
1258	mutex_lock(&lm->mtx);
1259	lm->locked = 0;
1260	mutex_unlock(&lm->mtx);
1261err_lm:
1262err_bus:
1263	return NULL;
1264}
1265EXPORT_SYMBOL(vme_lm_request);
1266
1267int vme_lm_count(struct vme_resource *resource)
1268{
1269	struct vme_lm_resource *lm;
1270
1271	if (resource->type != VME_LM) {
1272		printk(KERN_ERR "Not a Location Monitor resource\n");
1273		return -EINVAL;
1274	}
1275
1276	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1277
1278	return lm->monitors;
1279}
1280EXPORT_SYMBOL(vme_lm_count);
1281
1282int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1283	u32 aspace, u32 cycle)
1284{
1285	struct vme_bridge *bridge = find_bridge(resource);
1286	struct vme_lm_resource *lm;
1287
1288	if (resource->type != VME_LM) {
1289		printk(KERN_ERR "Not a Location Monitor resource\n");
1290		return -EINVAL;
1291	}
1292
1293	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1294
1295	if (bridge->lm_set == NULL) {
1296		printk(KERN_ERR "vme_lm_set not supported\n");
1297		return -EINVAL;
1298	}
1299
1300	return bridge->lm_set(lm, lm_base, aspace, cycle);
1301}
1302EXPORT_SYMBOL(vme_lm_set);
1303
1304int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1305	u32 *aspace, u32 *cycle)
1306{
1307	struct vme_bridge *bridge = find_bridge(resource);
1308	struct vme_lm_resource *lm;
1309
1310	if (resource->type != VME_LM) {
1311		printk(KERN_ERR "Not a Location Monitor resource\n");
1312		return -EINVAL;
1313	}
1314
1315	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1316
1317	if (bridge->lm_get == NULL) {
1318		printk(KERN_ERR "vme_lm_get not supported\n");
1319		return -EINVAL;
1320	}
1321
1322	return bridge->lm_get(lm, lm_base, aspace, cycle);
1323}
1324EXPORT_SYMBOL(vme_lm_get);
1325
1326int vme_lm_attach(struct vme_resource *resource, int monitor,
1327	void (*callback)(void *), void *data)
1328{
1329	struct vme_bridge *bridge = find_bridge(resource);
1330	struct vme_lm_resource *lm;
1331
1332	if (resource->type != VME_LM) {
1333		printk(KERN_ERR "Not a Location Monitor resource\n");
1334		return -EINVAL;
1335	}
1336
1337	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1338
1339	if (bridge->lm_attach == NULL) {
1340		printk(KERN_ERR "vme_lm_attach not supported\n");
1341		return -EINVAL;
1342	}
1343
1344	return bridge->lm_attach(lm, monitor, callback, data);
1345}
1346EXPORT_SYMBOL(vme_lm_attach);
1347
1348int vme_lm_detach(struct vme_resource *resource, int monitor)
1349{
1350	struct vme_bridge *bridge = find_bridge(resource);
1351	struct vme_lm_resource *lm;
1352
1353	if (resource->type != VME_LM) {
1354		printk(KERN_ERR "Not a Location Monitor resource\n");
1355		return -EINVAL;
1356	}
1357
1358	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1359
1360	if (bridge->lm_detach == NULL) {
1361		printk(KERN_ERR "vme_lm_detach not supported\n");
1362		return -EINVAL;
1363	}
1364
1365	return bridge->lm_detach(lm, monitor);
1366}
1367EXPORT_SYMBOL(vme_lm_detach);
1368
/**
 * vme_lm_free - Release a location monitor resource.
 * @resource: resource returned by vme_lm_request()
 *
 * Unlocks the underlying location monitor so it can be requested again,
 * and frees the vme_resource wrapper allocated by vme_lm_request().
 */
void vme_lm_free(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	mutex_lock(&lm->mtx);

	/* XXX
	 * Check to see that there aren't any callbacks still attached, if
	 * there are we should probably be detaching them!
	 */

	lm->locked = 0;

	mutex_unlock(&lm->mtx);

	kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
1394
1395int vme_slot_num(struct vme_dev *vdev)
1396{
1397	struct vme_bridge *bridge;
1398
1399	bridge = vdev->bridge;
1400	if (bridge == NULL) {
1401		printk(KERN_ERR "Can't find VME bus\n");
1402		return -EINVAL;
1403	}
1404
1405	if (bridge->slot_get == NULL) {
1406		printk(KERN_WARNING "vme_slot_num not supported\n");
1407		return -EINVAL;
1408	}
1409
1410	return bridge->slot_get(bridge);
1411}
1412EXPORT_SYMBOL(vme_slot_num);
1413
1414int vme_bus_num(struct vme_dev *vdev)
1415{
1416	struct vme_bridge *bridge;
1417
1418	bridge = vdev->bridge;
1419	if (bridge == NULL) {
1420		pr_err("Can't find VME bus\n");
1421		return -EINVAL;
1422	}
1423
1424	return bridge->num;
1425}
1426EXPORT_SYMBOL(vme_bus_num);
1427
1428/* - Bridge Registration --------------------------------------------------- */
1429
/*
 * Device-model release callback: frees the struct vme_dev embedding the
 * device once the last reference is dropped. This is why error paths
 * must use put_device() rather than kfree() on a registered vme_dev.
 */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}
1434
/*
 * Common bridge initialization: set up the empty resource lists and the
 * interrupt mutex embedded in the bridge structure. Bridge drivers call
 * this before populating the lists and registering the bridge. Returns
 * its argument so the call can be chained.
 */
struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
{
	INIT_LIST_HEAD(&bridge->vme_error_handlers);
	INIT_LIST_HEAD(&bridge->master_resources);
	INIT_LIST_HEAD(&bridge->slave_resources);
	INIT_LIST_HEAD(&bridge->dma_resources);
	INIT_LIST_HEAD(&bridge->lm_resources);
	mutex_init(&bridge->irq_mtx);

	return bridge;
}
EXPORT_SYMBOL(vme_init_bridge);
1448
1449int vme_register_bridge(struct vme_bridge *bridge)
1450{
1451	int i;
1452	int ret = -1;
1453
1454	mutex_lock(&vme_buses_lock);
1455	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1456		if ((vme_bus_numbers & (1 << i)) == 0) {
1457			vme_bus_numbers |= (1 << i);
1458			bridge->num = i;
1459			INIT_LIST_HEAD(&bridge->devices);
1460			list_add_tail(&bridge->bus_list, &vme_bus_list);
1461			ret = 0;
1462			break;
1463		}
1464	}
1465	mutex_unlock(&vme_buses_lock);
1466
1467	return ret;
1468}
1469EXPORT_SYMBOL(vme_register_bridge);
1470
/*
 * Remove a bridge from the framework: return its bus number to the
 * bitmask, unregister every VME device still attached to it, and drop
 * it from the global bus list. Holding vme_buses_lock throughout keeps
 * driver registration (__vme_register_driver) from racing with teardown.
 */
void vme_unregister_bridge(struct vme_bridge *bridge)
{
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	mutex_lock(&vme_buses_lock);
	vme_bus_numbers &= ~(1 << bridge->num);
	list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	list_del(&bridge->bus_list);
	mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);
1487
1488/* - Driver Registration --------------------------------------------------- */
1489
1490static int __vme_register_driver_bus(struct vme_driver *drv,
1491	struct vme_bridge *bridge, unsigned int ndevs)
1492{
1493	int err;
1494	unsigned int i;
1495	struct vme_dev *vdev;
1496	struct vme_dev *tmp;
1497
1498	for (i = 0; i < ndevs; i++) {
1499		vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
1500		if (!vdev) {
1501			err = -ENOMEM;
1502			goto err_devalloc;
1503		}
1504		vdev->num = i;
1505		vdev->bridge = bridge;
1506		vdev->dev.platform_data = drv;
1507		vdev->dev.release = vme_dev_release;
1508		vdev->dev.parent = bridge->parent;
1509		vdev->dev.bus = &vme_bus_type;
1510		dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
1511			vdev->num);
1512
1513		err = device_register(&vdev->dev);
1514		if (err)
1515			goto err_reg;
1516
1517		if (vdev->dev.platform_data) {
1518			list_add_tail(&vdev->drv_list, &drv->devices);
1519			list_add_tail(&vdev->bridge_list, &bridge->devices);
1520		} else
1521			device_unregister(&vdev->dev);
1522	}
1523	return 0;
1524
1525err_reg:
1526	put_device(&vdev->dev);
1527	kfree(vdev);
1528err_devalloc:
1529	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
1530		list_del(&vdev->drv_list);
1531		list_del(&vdev->bridge_list);
1532		device_unregister(&vdev->dev);
1533	}
1534	return err;
1535}
1536
1537static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1538{
1539	struct vme_bridge *bridge;
1540	int err = 0;
1541
1542	mutex_lock(&vme_buses_lock);
1543	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
1544		/*
1545		 * This cannot cause trouble as we already have vme_buses_lock
1546		 * and if the bridge is removed, it will have to go through
1547		 * vme_unregister_bridge() to do it (which calls remove() on
1548		 * the bridge which in turn tries to acquire vme_buses_lock and
1549		 * will have to wait).
1550		 */
1551		err = __vme_register_driver_bus(drv, bridge, ndevs);
1552		if (err)
1553			break;
1554	}
1555	mutex_unlock(&vme_buses_lock);
1556	return err;
1557}
1558
1559int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1560{
1561	int err;
1562
1563	drv->driver.name = drv->name;
1564	drv->driver.bus = &vme_bus_type;
1565	INIT_LIST_HEAD(&drv->devices);
1566
1567	err = driver_register(&drv->driver);
1568	if (err)
1569		return err;
1570
1571	err = __vme_register_driver(drv, ndevs);
1572	if (err)
1573		driver_unregister(&drv->driver);
1574
1575	return err;
1576}
1577EXPORT_SYMBOL(vme_register_driver);
1578
1579void vme_unregister_driver(struct vme_driver *drv)
1580{
1581	struct vme_dev *dev, *dev_tmp;
1582
1583	mutex_lock(&vme_buses_lock);
1584	list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
1585		list_del(&dev->drv_list);
1586		list_del(&dev->bridge_list);
1587		device_unregister(&dev->dev);
1588	}
1589	mutex_unlock(&vme_buses_lock);
1590
1591	driver_unregister(&drv->driver);
1592}
1593EXPORT_SYMBOL(vme_unregister_driver);
1594
1595/* - Bus Registration ------------------------------------------------------ */
1596
/*
 * Bus match callback: a device matches only the driver it was created
 * for (__vme_register_driver_bus() stores the driver in
 * dev->platform_data), and only if that driver's match() accepts it.
 * On rejection platform_data is cleared so __vme_register_driver_bus()
 * can recognise the unclaimed device and unregister it.
 */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_driver *vme_drv;

	vme_drv = container_of(drv, struct vme_driver, driver);

	if (dev->platform_data == vme_drv) {
		struct vme_dev *vdev = dev_to_vme_dev(dev);

		if (vme_drv->match && vme_drv->match(vdev))
			return 1;

		/* Rejected: mark the device as unclaimed by this driver */
		dev->platform_data = NULL;
	}
	return 0;
}
1613
1614static int vme_bus_probe(struct device *dev)
1615{
1616	int retval = -ENODEV;
1617	struct vme_driver *driver;
1618	struct vme_dev *vdev = dev_to_vme_dev(dev);
1619
1620	driver = dev->platform_data;
1621
1622	if (driver->probe != NULL)
1623		retval = driver->probe(vdev);
1624
1625	return retval;
1626}
1627
1628static int vme_bus_remove(struct device *dev)
1629{
1630	int retval = -ENODEV;
1631	struct vme_driver *driver;
1632	struct vme_dev *vdev = dev_to_vme_dev(dev);
1633
1634	driver = dev->platform_data;
1635
1636	if (driver->remove != NULL)
1637		retval = driver->remove(vdev);
1638
1639	return retval;
1640}
1641
/* The VME bus type, registered with the driver core in vme_init(). */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
1649
/* Register the VME bus type with the driver core at subsystem init. */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
subsys_initcall(vme_init);