Linux Audio

Check our new training course

Loading...
v5.9
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * VME Bridge Framework
   4 *
   5 * Author: Martyn Welch <martyn.welch@ge.com>
   6 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
   7 *
   8 * Based on work by Tom Armistead and Ajit Prem
   9 * Copyright 2004 Motorola Inc.
 
 
 
 
 
  10 */
  11
  12#include <linux/init.h>
  13#include <linux/export.h>
  14#include <linux/mm.h>
  15#include <linux/types.h>
  16#include <linux/kernel.h>
  17#include <linux/errno.h>
  18#include <linux/pci.h>
  19#include <linux/poll.h>
  20#include <linux/highmem.h>
  21#include <linux/interrupt.h>
  22#include <linux/pagemap.h>
  23#include <linux/device.h>
  24#include <linux/dma-mapping.h>
  25#include <linux/syscalls.h>
  26#include <linux/mutex.h>
  27#include <linux/spinlock.h>
  28#include <linux/slab.h>
  29#include <linux/vme.h>
  30
  31#include "vme_bridge.h"
  32
  33/* Bitmask and list of registered buses both protected by common mutex */
  34static unsigned int vme_bus_numbers;
  35static LIST_HEAD(vme_bus_list);
  36static DEFINE_MUTEX(vme_buses_lock);
  37
 
  38static int __init vme_init(void);
  39
/* Translate an embedded struct device back to the struct vme_dev holding it. */
static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}
  44
  45/*
  46 * Find the bridge that the resource is associated with.
  47 */
  48static struct vme_bridge *find_bridge(struct vme_resource *resource)
  49{
  50	/* Get list to search */
  51	switch (resource->type) {
  52	case VME_MASTER:
  53		return list_entry(resource->entry, struct vme_master_resource,
  54			list)->parent;
  55		break;
  56	case VME_SLAVE:
  57		return list_entry(resource->entry, struct vme_slave_resource,
  58			list)->parent;
  59		break;
  60	case VME_DMA:
  61		return list_entry(resource->entry, struct vme_dma_resource,
  62			list)->parent;
  63		break;
  64	case VME_LM:
  65		return list_entry(resource->entry, struct vme_lm_resource,
  66			list)->parent;
  67		break;
  68	default:
  69		printk(KERN_ERR "Unknown resource type\n");
  70		return NULL;
  71		break;
  72	}
  73}
  74
/**
 * vme_alloc_consistent - Allocate contiguous memory.
 * @resource: Pointer to VME resource.
 * @size: Size of allocation required.
 * @dma: Pointer to variable to store physical address of allocation.
 *
 * Allocate a contiguous block of memory for use by the driver. This is used to
 * create the buffers for the slave windows.
 *
 * Return: Virtual address of allocation on success, NULL on failure.
 */
void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
	dma_addr_t *dma)
{
	struct vme_bridge *bridge;

	if (!resource) {
		printk(KERN_ERR "No resource\n");
		return NULL;
	}

	bridge = find_bridge(resource);
	if (!bridge) {
		printk(KERN_ERR "Can't find bridge\n");
		return NULL;
	}

	/* The bridge driver needs its parent device to do the allocation. */
	if (!bridge->parent) {
		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
		return NULL;
	}

	/* Allocation is delegated to the bridge driver's callback. */
	if (!bridge->alloc_consistent) {
		printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
		       bridge->name);
		return NULL;
	}

	return bridge->alloc_consistent(bridge->parent, size, dma);
}
EXPORT_SYMBOL(vme_alloc_consistent);
 116
 117/**
 118 * vme_free_consistent - Free previously allocated memory.
 119 * @resource: Pointer to VME resource.
 120 * @size: Size of allocation to free.
 121 * @vaddr: Virtual address of allocation.
 122 * @dma: Physical address of allocation.
 123 *
 124 * Free previously allocated block of contiguous memory.
 125 */
 126void vme_free_consistent(struct vme_resource *resource, size_t size,
 127	void *vaddr, dma_addr_t dma)
 128{
 129	struct vme_bridge *bridge;
 130
 131	if (!resource) {
 132		printk(KERN_ERR "No resource\n");
 133		return;
 134	}
 135
 136	bridge = find_bridge(resource);
 137	if (!bridge) {
 138		printk(KERN_ERR "Can't find bridge\n");
 139		return;
 140	}
 141
 142	if (!bridge->parent) {
 143		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
 144		return;
 145	}
 146
 147	if (!bridge->free_consistent) {
 148		printk(KERN_ERR "free_consistent not supported by bridge %s\n",
 149		       bridge->name);
 150		return;
 151	}
 152
 153	bridge->free_consistent(bridge->parent, size, vaddr, dma);
 154}
 155EXPORT_SYMBOL(vme_free_consistent);
 156
 157/**
 158 * vme_get_size - Helper function returning size of a VME window
 159 * @resource: Pointer to VME slave or master resource.
 160 *
 161 * Determine the size of the VME window provided. This is a helper
 162 * function, wrappering the call to vme_master_get or vme_slave_get
 163 * depending on the type of window resource handed to it.
 164 *
 165 * Return: Size of the window on success, zero on failure.
 166 */
 167size_t vme_get_size(struct vme_resource *resource)
 168{
 169	int enabled, retval;
 170	unsigned long long base, size;
 171	dma_addr_t buf_base;
 172	u32 aspace, cycle, dwidth;
 173
 174	switch (resource->type) {
 175	case VME_MASTER:
 176		retval = vme_master_get(resource, &enabled, &base, &size,
 177			&aspace, &cycle, &dwidth);
 178		if (retval)
 179			return 0;
 180
 181		return size;
 182		break;
 183	case VME_SLAVE:
 184		retval = vme_slave_get(resource, &enabled, &base, &size,
 185			&buf_base, &aspace, &cycle);
 186		if (retval)
 187			return 0;
 188
 189		return size;
 190		break;
 191	case VME_DMA:
 192		return 0;
 193		break;
 194	default:
 195		printk(KERN_ERR "Unknown resource type\n");
 196		return 0;
 197		break;
 198	}
 199}
 200EXPORT_SYMBOL(vme_get_size);
 201
 202int vme_check_window(u32 aspace, unsigned long long vme_base,
 203		     unsigned long long size)
 204{
 205	int retval = 0;
 206
 207	if (vme_base + size < size)
 208		return -EINVAL;
 209
 210	switch (aspace) {
 211	case VME_A16:
 212		if (vme_base + size > VME_A16_MAX)
 
 213			retval = -EFAULT;
 214		break;
 215	case VME_A24:
 216		if (vme_base + size > VME_A24_MAX)
 
 217			retval = -EFAULT;
 218		break;
 219	case VME_A32:
 220		if (vme_base + size > VME_A32_MAX)
 
 221			retval = -EFAULT;
 222		break;
 223	case VME_A64:
 224		/* The VME_A64_MAX limit is actually U64_MAX + 1 */
 
 
 
 225		break;
 226	case VME_CRCSR:
 227		if (vme_base + size > VME_CRCSR_MAX)
 
 228			retval = -EFAULT;
 229		break;
 230	case VME_USER1:
 231	case VME_USER2:
 232	case VME_USER3:
 233	case VME_USER4:
 234		/* User Defined */
 235		break;
 236	default:
 237		printk(KERN_ERR "Invalid address space\n");
 238		retval = -EINVAL;
 239		break;
 240	}
 241
 242	return retval;
 243}
 244EXPORT_SYMBOL(vme_check_window);
 245
 246static u32 vme_get_aspace(int am)
 247{
 248	switch (am) {
 249	case 0x29:
 250	case 0x2D:
 251		return VME_A16;
 252	case 0x38:
 253	case 0x39:
 254	case 0x3A:
 255	case 0x3B:
 256	case 0x3C:
 257	case 0x3D:
 258	case 0x3E:
 259	case 0x3F:
 260		return VME_A24;
 261	case 0x8:
 262	case 0x9:
 263	case 0xA:
 264	case 0xB:
 265	case 0xC:
 266	case 0xD:
 267	case 0xE:
 268	case 0xF:
 269		return VME_A32;
 270	case 0x0:
 271	case 0x1:
 272	case 0x3:
 273		return VME_A64;
 274	}
 275
 276	return 0;
 277}
 278
/**
 * vme_slave_request - Request a VME slave window resource.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @address: Required VME address space.
 * @cycle: Required VME data transfer cycle type.
 *
 * Request use of a VME window resource capable of being set for the requested
 * address space and data transfer cycle. The returned resource must be
 * released with vme_slave_free().
 *
 * Return: Pointer to VME resource on success, NULL on failure.
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
	u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (!slave_image) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		/* The "locked" flag is claimed under the image mutex so two
		 * requesters cannot grab the same window. */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {

			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (!allocated_image)
		goto err_image;

	/* Wrapper handed to the caller; freed again in vme_slave_free(). */
	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource)
		goto err_alloc;

	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
 352
 353/**
 354 * vme_slave_set - Set VME slave window configuration.
 355 * @resource: Pointer to VME slave resource.
 356 * @enabled: State to which the window should be configured.
 357 * @vme_base: Base address for the window.
 358 * @size: Size of the VME window.
 359 * @buf_base: Based address of buffer used to provide VME slave window storage.
 360 * @aspace: VME address space for the VME window.
 361 * @cycle: VME data transfer cycle type for the VME window.
 362 *
 363 * Set configuration for provided VME slave window.
 364 *
 365 * Return: Zero on success, -EINVAL if operation is not supported on this
 366 *         device, if an invalid resource has been provided or invalid
 367 *         attributes are provided. Hardware specific errors may also be
 368 *         returned.
 369 */
 370int vme_slave_set(struct vme_resource *resource, int enabled,
 371	unsigned long long vme_base, unsigned long long size,
 372	dma_addr_t buf_base, u32 aspace, u32 cycle)
 373{
 374	struct vme_bridge *bridge = find_bridge(resource);
 375	struct vme_slave_resource *image;
 376	int retval;
 377
 378	if (resource->type != VME_SLAVE) {
 379		printk(KERN_ERR "Not a slave resource\n");
 380		return -EINVAL;
 381	}
 382
 383	image = list_entry(resource->entry, struct vme_slave_resource, list);
 384
 385	if (!bridge->slave_set) {
 386		printk(KERN_ERR "Function not supported\n");
 387		return -ENOSYS;
 388	}
 389
 390	if (!(((image->address_attr & aspace) == aspace) &&
 391		((image->cycle_attr & cycle) == cycle))) {
 392		printk(KERN_ERR "Invalid attributes\n");
 393		return -EINVAL;
 394	}
 395
 396	retval = vme_check_window(aspace, vme_base, size);
 397	if (retval)
 398		return retval;
 399
 400	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
 401		aspace, cycle);
 402}
 403EXPORT_SYMBOL(vme_slave_set);
 404
 405/**
 406 * vme_slave_get - Retrieve VME slave window configuration.
 407 * @resource: Pointer to VME slave resource.
 408 * @enabled: Pointer to variable for storing state.
 409 * @vme_base: Pointer to variable for storing window base address.
 410 * @size: Pointer to variable for storing window size.
 411 * @buf_base: Pointer to variable for storing slave buffer base address.
 412 * @aspace: Pointer to variable for storing VME address space.
 413 * @cycle: Pointer to variable for storing VME data transfer cycle type.
 414 *
 415 * Return configuration for provided VME slave window.
 416 *
 417 * Return: Zero on success, -EINVAL if operation is not supported on this
 418 *         device or if an invalid resource has been provided.
 419 */
 420int vme_slave_get(struct vme_resource *resource, int *enabled,
 421	unsigned long long *vme_base, unsigned long long *size,
 422	dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
 423{
 424	struct vme_bridge *bridge = find_bridge(resource);
 425	struct vme_slave_resource *image;
 426
 427	if (resource->type != VME_SLAVE) {
 428		printk(KERN_ERR "Not a slave resource\n");
 429		return -EINVAL;
 430	}
 431
 432	image = list_entry(resource->entry, struct vme_slave_resource, list);
 433
 434	if (!bridge->slave_get) {
 435		printk(KERN_ERR "vme_slave_get not supported\n");
 436		return -EINVAL;
 437	}
 438
 439	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
 440		aspace, cycle);
 441}
 442EXPORT_SYMBOL(vme_slave_get);
 443
 444/**
 445 * vme_slave_free - Free VME slave window
 446 * @resource: Pointer to VME slave resource.
 447 *
 448 * Free the provided slave resource so that it may be reallocated.
 449 */
 450void vme_slave_free(struct vme_resource *resource)
 451{
 452	struct vme_slave_resource *slave_image;
 453
 454	if (resource->type != VME_SLAVE) {
 455		printk(KERN_ERR "Not a slave resource\n");
 456		return;
 457	}
 458
 459	slave_image = list_entry(resource->entry, struct vme_slave_resource,
 460		list);
 461	if (!slave_image) {
 462		printk(KERN_ERR "Can't find slave resource\n");
 463		return;
 464	}
 465
 466	/* Unlock image */
 467	mutex_lock(&slave_image->mtx);
 468	if (slave_image->locked == 0)
 469		printk(KERN_ERR "Image is already free\n");
 470
 471	slave_image->locked = 0;
 472	mutex_unlock(&slave_image->mtx);
 473
 474	/* Free up resource memory */
 475	kfree(resource);
 476}
 477EXPORT_SYMBOL(vme_slave_free);
 478
/**
 * vme_master_request - Request a VME master window resource.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @address: Required VME address space.
 * @cycle: Required VME data transfer cycle type.
 * @dwidth: Required VME data transfer width.
 *
 * Request use of a VME window resource capable of being set for the requested
 * address space, data transfer cycle and width. The returned resource must be
 * released with vme_master_free().
 *
 * Return: Pointer to VME resource on success, NULL on failure.
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (!master_image) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/* Find an unlocked and compatible image */
		/* The "locked" flag is claimed under the image spinlock so two
		 * requesters cannot grab the same window. */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {

			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (!allocated_image) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	/* Wrapper handed to the caller; freed again in vme_master_free(). */
	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource)
		goto err_alloc;

	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
 556
 557/**
 558 * vme_master_set - Set VME master window configuration.
 559 * @resource: Pointer to VME master resource.
 560 * @enabled: State to which the window should be configured.
 561 * @vme_base: Base address for the window.
 562 * @size: Size of the VME window.
 563 * @aspace: VME address space for the VME window.
 564 * @cycle: VME data transfer cycle type for the VME window.
 565 * @dwidth: VME data transfer width for the VME window.
 566 *
 567 * Set configuration for provided VME master window.
 568 *
 569 * Return: Zero on success, -EINVAL if operation is not supported on this
 570 *         device, if an invalid resource has been provided or invalid
 571 *         attributes are provided. Hardware specific errors may also be
 572 *         returned.
 573 */
 574int vme_master_set(struct vme_resource *resource, int enabled,
 575	unsigned long long vme_base, unsigned long long size, u32 aspace,
 576	u32 cycle, u32 dwidth)
 577{
 578	struct vme_bridge *bridge = find_bridge(resource);
 579	struct vme_master_resource *image;
 580	int retval;
 581
 582	if (resource->type != VME_MASTER) {
 583		printk(KERN_ERR "Not a master resource\n");
 584		return -EINVAL;
 585	}
 586
 587	image = list_entry(resource->entry, struct vme_master_resource, list);
 588
 589	if (!bridge->master_set) {
 590		printk(KERN_WARNING "vme_master_set not supported\n");
 591		return -EINVAL;
 592	}
 593
 594	if (!(((image->address_attr & aspace) == aspace) &&
 595		((image->cycle_attr & cycle) == cycle) &&
 596		((image->width_attr & dwidth) == dwidth))) {
 597		printk(KERN_WARNING "Invalid attributes\n");
 598		return -EINVAL;
 599	}
 600
 601	retval = vme_check_window(aspace, vme_base, size);
 602	if (retval)
 603		return retval;
 604
 605	return bridge->master_set(image, enabled, vme_base, size, aspace,
 606		cycle, dwidth);
 607}
 608EXPORT_SYMBOL(vme_master_set);
 609
 610/**
 611 * vme_master_get - Retrieve VME master window configuration.
 612 * @resource: Pointer to VME master resource.
 613 * @enabled: Pointer to variable for storing state.
 614 * @vme_base: Pointer to variable for storing window base address.
 615 * @size: Pointer to variable for storing window size.
 616 * @aspace: Pointer to variable for storing VME address space.
 617 * @cycle: Pointer to variable for storing VME data transfer cycle type.
 618 * @dwidth: Pointer to variable for storing VME data transfer width.
 619 *
 620 * Return configuration for provided VME master window.
 621 *
 622 * Return: Zero on success, -EINVAL if operation is not supported on this
 623 *         device or if an invalid resource has been provided.
 624 */
 625int vme_master_get(struct vme_resource *resource, int *enabled,
 626	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
 627	u32 *cycle, u32 *dwidth)
 628{
 629	struct vme_bridge *bridge = find_bridge(resource);
 630	struct vme_master_resource *image;
 631
 632	if (resource->type != VME_MASTER) {
 633		printk(KERN_ERR "Not a master resource\n");
 634		return -EINVAL;
 635	}
 636
 637	image = list_entry(resource->entry, struct vme_master_resource, list);
 638
 639	if (!bridge->master_get) {
 640		printk(KERN_WARNING "%s not supported\n", __func__);
 641		return -EINVAL;
 642	}
 643
 644	return bridge->master_get(image, enabled, vme_base, size, aspace,
 645		cycle, dwidth);
 646}
 647EXPORT_SYMBOL(vme_master_get);
 648
/**
 * vme_master_read - Read data from VME space into a buffer.
 * @resource: Pointer to VME master resource.
 * @buf: Pointer to buffer where data should be transferred.
 * @count: Number of bytes to transfer.
 * @offset: Offset into VME master window at which to start transfer.
 *
 * Perform read of count bytes of data from location on VME bus which maps into
 * the VME master window at offset to buf.
 *
 * Return: Number of bytes read, -EINVAL if resource is not a VME master
 *         resource or read operation is not supported. -EFAULT returned if
 *         invalid offset is provided. Hardware specific errors may also be
 *         returned.
 */
ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
	loff_t offset)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_master_resource *image;
	size_t length;

	if (!bridge->master_read) {
		printk(KERN_WARNING "Reading from resource not supported\n");
		return -EINVAL;
	}

	if (resource->type != VME_MASTER) {
		printk(KERN_ERR "Not a master resource\n");
		return -EINVAL;
	}

	image = list_entry(resource->entry, struct vme_master_resource, list);

	/* vme_get_size() returns 0 on failure, rejecting any offset > 0. */
	length = vme_get_size(resource);

	if (offset > length) {
		printk(KERN_WARNING "Invalid Offset\n");
		return -EFAULT;
	}

	/* Clamp the transfer so it does not run past the end of the window. */
	if ((offset + count) > length)
		count = length - offset;

	return bridge->master_read(image, buf, count, offset);

}
EXPORT_SYMBOL(vme_master_read);
 697
 698/**
 699 * vme_master_write - Write data out to VME space from a buffer.
 700 * @resource: Pointer to VME master resource.
 701 * @buf: Pointer to buffer holding data to transfer.
 702 * @count: Number of bytes to transfer.
 703 * @offset: Offset into VME master window at which to start transfer.
 704 *
 705 * Perform write of count bytes of data from buf to location on VME bus which
 706 * maps into the VME master window at offset.
 707 *
 708 * Return: Number of bytes written, -EINVAL if resource is not a VME master
 709 *         resource or write operation is not supported. -EFAULT returned if
 710 *         invalid offset is provided. Hardware specific errors may also be
 711 *         returned.
 712 */
 713ssize_t vme_master_write(struct vme_resource *resource, void *buf,
 714	size_t count, loff_t offset)
 715{
 716	struct vme_bridge *bridge = find_bridge(resource);
 717	struct vme_master_resource *image;
 718	size_t length;
 719
 720	if (!bridge->master_write) {
 721		printk(KERN_WARNING "Writing to resource not supported\n");
 722		return -EINVAL;
 723	}
 724
 725	if (resource->type != VME_MASTER) {
 726		printk(KERN_ERR "Not a master resource\n");
 727		return -EINVAL;
 728	}
 729
 730	image = list_entry(resource->entry, struct vme_master_resource, list);
 731
 732	length = vme_get_size(resource);
 733
 734	if (offset > length) {
 735		printk(KERN_WARNING "Invalid Offset\n");
 736		return -EFAULT;
 737	}
 738
 739	if ((offset + count) > length)
 740		count = length - offset;
 741
 742	return bridge->master_write(image, buf, count, offset);
 743}
 744EXPORT_SYMBOL(vme_master_write);
 745
 746/**
 747 * vme_master_rmw - Perform read-modify-write cycle.
 748 * @resource: Pointer to VME master resource.
 749 * @mask: Bits to be compared and swapped in operation.
 750 * @compare: Bits to be compared with data read from offset.
 751 * @swap: Bits to be swapped in data read from offset.
 752 * @offset: Offset into VME master window at which to perform operation.
 753 *
 754 * Perform read-modify-write cycle on provided location:
 755 * - Location on VME bus is read.
 756 * - Bits selected by mask are compared with compare.
 757 * - Where a selected bit matches that in compare and are selected in swap,
 758 * the bit is swapped.
 759 * - Result written back to location on VME bus.
 760 *
 761 * Return: Bytes written on success, -EINVAL if resource is not a VME master
 762 *         resource or RMW operation is not supported. Hardware specific
 763 *         errors may also be returned.
 764 */
 765unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
 766	unsigned int compare, unsigned int swap, loff_t offset)
 767{
 768	struct vme_bridge *bridge = find_bridge(resource);
 769	struct vme_master_resource *image;
 770
 771	if (!bridge->master_rmw) {
 772		printk(KERN_WARNING "Writing to resource not supported\n");
 773		return -EINVAL;
 774	}
 775
 776	if (resource->type != VME_MASTER) {
 777		printk(KERN_ERR "Not a master resource\n");
 778		return -EINVAL;
 779	}
 780
 781	image = list_entry(resource->entry, struct vme_master_resource, list);
 782
 783	return bridge->master_rmw(image, mask, compare, swap, offset);
 784}
 785EXPORT_SYMBOL(vme_master_rmw);
 786
 787/**
 788 * vme_master_mmap - Mmap region of VME master window.
 789 * @resource: Pointer to VME master resource.
 790 * @vma: Pointer to definition of user mapping.
 791 *
 792 * Memory map a region of the VME master window into user space.
 793 *
 794 * Return: Zero on success, -EINVAL if resource is not a VME master
 795 *         resource or -EFAULT if map exceeds window size. Other generic mmap
 796 *         errors may also be returned.
 797 */
 798int vme_master_mmap(struct vme_resource *resource, struct vm_area_struct *vma)
 799{
 800	struct vme_master_resource *image;
 801	phys_addr_t phys_addr;
 802	unsigned long vma_size;
 803
 804	if (resource->type != VME_MASTER) {
 805		pr_err("Not a master resource\n");
 806		return -EINVAL;
 807	}
 808
 809	image = list_entry(resource->entry, struct vme_master_resource, list);
 810	phys_addr = image->bus_resource.start + (vma->vm_pgoff << PAGE_SHIFT);
 811	vma_size = vma->vm_end - vma->vm_start;
 812
 813	if (phys_addr + vma_size > image->bus_resource.end + 1) {
 814		pr_err("Map size cannot exceed the window size\n");
 815		return -EFAULT;
 816	}
 817
 818	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
 819
 820	return vm_iomap_memory(vma, phys_addr, vma->vm_end - vma->vm_start);
 821}
 822EXPORT_SYMBOL(vme_master_mmap);
 823
 824/**
 825 * vme_master_free - Free VME master window
 826 * @resource: Pointer to VME master resource.
 827 *
 828 * Free the provided master resource so that it may be reallocated.
 829 */
 830void vme_master_free(struct vme_resource *resource)
 831{
 832	struct vme_master_resource *master_image;
 833
 834	if (resource->type != VME_MASTER) {
 835		printk(KERN_ERR "Not a master resource\n");
 836		return;
 837	}
 838
 839	master_image = list_entry(resource->entry, struct vme_master_resource,
 840		list);
 841	if (!master_image) {
 842		printk(KERN_ERR "Can't find master resource\n");
 843		return;
 844	}
 845
 846	/* Unlock image */
 847	spin_lock(&master_image->lock);
 848	if (master_image->locked == 0)
 849		printk(KERN_ERR "Image is already free\n");
 850
 851	master_image->locked = 0;
 852	spin_unlock(&master_image->lock);
 853
 854	/* Free up resource memory */
 855	kfree(resource);
 856}
 857EXPORT_SYMBOL(vme_master_free);
 858
/**
 * vme_dma_request - Request a DMA controller.
 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
 * @route: Required src/destination combination.
 *
 * Request a VME DMA controller with capability to perform transfers between
 * requested source/destination combination. The returned resource must be
 * released with vme_dma_free().
 *
 * Return: Pointer to VME DMA resource on success, NULL on failure.
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (!bridge) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);
		if (!dma_ctrlr) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/* Find an unlocked and compatible controller */
		/* The "locked" flag is claimed under the controller mutex so
		 * two requesters cannot grab the same controller. */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {

			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (!allocated_ctrlr)
		goto err_ctrlr;

	/* Wrapper handed to the caller. */
	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
	if (!resource)
		goto err_alloc;

	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
 931
 932/**
 933 * vme_new_dma_list - Create new VME DMA list.
 934 * @resource: Pointer to VME DMA resource.
 935 *
 936 * Create a new VME DMA list. It is the responsibility of the user to free
 937 * the list once it is no longer required with vme_dma_list_free().
 938 *
 939 * Return: Pointer to new VME DMA list, NULL on allocation failure or invalid
 940 *         VME DMA resource.
 941 */
 942struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
 943{
 
 944	struct vme_dma_list *dma_list;
 945
 946	if (resource->type != VME_DMA) {
 947		printk(KERN_ERR "Not a DMA resource\n");
 948		return NULL;
 949	}
 950
 951	dma_list = kmalloc(sizeof(*dma_list), GFP_KERNEL);
 952	if (!dma_list)
 953		return NULL;
 954
 
 
 
 
 
 955	INIT_LIST_HEAD(&dma_list->entries);
 956	dma_list->parent = list_entry(resource->entry,
 957				      struct vme_dma_resource,
 958				      list);
 959	mutex_init(&dma_list->mtx);
 960
 961	return dma_list;
 962}
 963EXPORT_SYMBOL(vme_new_dma_list);
 964
 965/**
 966 * vme_dma_pattern_attribute - Create "Pattern" type VME DMA list attribute.
 967 * @pattern: Value to use used as pattern
 968 * @type: Type of pattern to be written.
 969 *
 970 * Create VME DMA list attribute for pattern generation. It is the
 971 * responsibility of the user to free used attributes using
 972 * vme_dma_free_attribute().
 973 *
 974 * Return: Pointer to VME DMA attribute, NULL on failure.
 975 */
 976struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
 977{
 978	struct vme_dma_attr *attributes;
 979	struct vme_dma_pattern *pattern_attr;
 980
 981	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
 982	if (!attributes)
 
 983		goto err_attr;
 
 984
 985	pattern_attr = kmalloc(sizeof(*pattern_attr), GFP_KERNEL);
 986	if (!pattern_attr)
 
 987		goto err_pat;
 
 988
 989	attributes->type = VME_DMA_PATTERN;
 990	attributes->private = (void *)pattern_attr;
 991
 992	pattern_attr->pattern = pattern;
 993	pattern_attr->type = type;
 994
 995	return attributes;
 996
 997err_pat:
 998	kfree(attributes);
 999err_attr:
1000	return NULL;
1001}
1002EXPORT_SYMBOL(vme_dma_pattern_attribute);
1003
1004/**
1005 * vme_dma_pci_attribute - Create "PCI" type VME DMA list attribute.
1006 * @address: PCI base address for DMA transfer.
1007 *
1008 * Create VME DMA list attribute pointing to a location on PCI for DMA
1009 * transfers. It is the responsibility of the user to free used attributes
1010 * using vme_dma_free_attribute().
1011 *
1012 * Return: Pointer to VME DMA attribute, NULL on failure.
1013 */
1014struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
1015{
1016	struct vme_dma_attr *attributes;
1017	struct vme_dma_pci *pci_attr;
1018
1019	/* XXX Run some sanity checks here */
1020
1021	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
1022	if (!attributes)
 
1023		goto err_attr;
 
1024
1025	pci_attr = kmalloc(sizeof(*pci_attr), GFP_KERNEL);
1026	if (!pci_attr)
 
1027		goto err_pci;
 
 
 
1028
1029	attributes->type = VME_DMA_PCI;
1030	attributes->private = (void *)pci_attr;
1031
1032	pci_attr->address = address;
1033
1034	return attributes;
1035
1036err_pci:
1037	kfree(attributes);
1038err_attr:
1039	return NULL;
1040}
1041EXPORT_SYMBOL(vme_dma_pci_attribute);
1042
1043/**
1044 * vme_dma_vme_attribute - Create "VME" type VME DMA list attribute.
1045 * @address: VME base address for DMA transfer.
1046 * @aspace: VME address space to use for DMA transfer.
1047 * @cycle: VME bus cycle to use for DMA transfer.
1048 * @dwidth: VME data width to use for DMA transfer.
1049 *
1050 * Create VME DMA list attribute pointing to a location on the VME bus for DMA
1051 * transfers. It is the responsibility of the user to free used attributes
1052 * using vme_dma_free_attribute().
1053 *
1054 * Return: Pointer to VME DMA attribute, NULL on failure.
1055 */
1056struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
1057	u32 aspace, u32 cycle, u32 dwidth)
1058{
1059	struct vme_dma_attr *attributes;
1060	struct vme_dma_vme *vme_attr;
1061
1062	attributes = kmalloc(sizeof(*attributes), GFP_KERNEL);
1063	if (!attributes)
 
 
1064		goto err_attr;
 
1065
1066	vme_attr = kmalloc(sizeof(*vme_attr), GFP_KERNEL);
1067	if (!vme_attr)
 
1068		goto err_vme;
 
1069
1070	attributes->type = VME_DMA_VME;
1071	attributes->private = (void *)vme_attr;
1072
1073	vme_attr->address = address;
1074	vme_attr->aspace = aspace;
1075	vme_attr->cycle = cycle;
1076	vme_attr->dwidth = dwidth;
1077
1078	return attributes;
1079
1080err_vme:
1081	kfree(attributes);
1082err_attr:
1083	return NULL;
1084}
1085EXPORT_SYMBOL(vme_dma_vme_attribute);
1086
1087/**
1088 * vme_dma_free_attribute - Free DMA list attribute.
1089 * @attributes: Pointer to DMA list attribute.
1090 *
1091 * Free VME DMA list attribute. VME DMA list attributes can be safely freed
1092 * once vme_dma_list_add() has returned.
1093 */
1094void vme_dma_free_attribute(struct vme_dma_attr *attributes)
1095{
1096	kfree(attributes->private);
1097	kfree(attributes);
1098}
1099EXPORT_SYMBOL(vme_dma_free_attribute);
1100
/**
 * vme_dma_list_add - Add entry to a VME DMA list.
 * @list: Pointer to VME list.
 * @src: Pointer to DMA list attribute to use as source.
 * @dest: Pointer to DMA list attribute to use as destination.
 * @count: Number of bytes to transfer.
 *
 * Add an entry to the provided VME DMA list. Entry requires pointers to source
 * and destination DMA attributes and a count.
 *
 * Please note, the attributes supported as source and destinations for
 * transfers are hardware dependent.
 *
 * Return: Zero on success, -EINVAL if operation is not supported on this
 *         device or if the link list has already been submitted for execution.
 *         Hardware specific errors also possible.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (!bridge->dma_list_add) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	/*
	 * The list mutex is held for the duration of vme_dma_list_exec();
	 * a failed trylock therefore means the list is currently being
	 * executed, so refuse to modify it rather than block.
	 */
	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	/* Entry format is bridge specific; delegate to the bridge driver. */
	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
1141
1142/**
1143 * vme_dma_list_exec - Queue a VME DMA list for execution.
1144 * @list: Pointer to VME list.
1145 *
1146 * Queue the provided VME DMA list for execution. The call will return once the
1147 * list has been executed.
1148 *
1149 * Return: Zero on success, -EINVAL if operation is not supported on this
1150 *         device. Hardware specific errors also possible.
1151 */
1152int vme_dma_list_exec(struct vme_dma_list *list)
1153{
1154	struct vme_bridge *bridge = list->parent->parent;
1155	int retval;
1156
1157	if (!bridge->dma_list_exec) {
1158		printk(KERN_ERR "Link List DMA execution not supported\n");
1159		return -EINVAL;
1160	}
1161
1162	mutex_lock(&list->mtx);
1163
1164	retval = bridge->dma_list_exec(list);
1165
1166	mutex_unlock(&list->mtx);
1167
1168	return retval;
1169}
1170EXPORT_SYMBOL(vme_dma_list_exec);
1171
1172/**
1173 * vme_dma_list_free - Free a VME DMA list.
1174 * @list: Pointer to VME list.
1175 *
1176 * Free the provided DMA list and all its entries.
1177 *
1178 * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
1179 *         is still in use. Hardware specific errors also possible.
1180 */
1181int vme_dma_list_free(struct vme_dma_list *list)
1182{
1183	struct vme_bridge *bridge = list->parent->parent;
1184	int retval;
1185
1186	if (!bridge->dma_list_empty) {
1187		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
1188		return -EINVAL;
1189	}
1190
1191	if (!mutex_trylock(&list->mtx)) {
1192		printk(KERN_ERR "Link List in use\n");
1193		return -EBUSY;
1194	}
1195
1196	/*
1197	 * Empty out all of the entries from the DMA list. We need to go to the
1198	 * low level driver as DMA entries are driver specific.
1199	 */
1200	retval = bridge->dma_list_empty(list);
1201	if (retval) {
1202		printk(KERN_ERR "Unable to empty link-list entries\n");
1203		mutex_unlock(&list->mtx);
1204		return retval;
1205	}
1206	mutex_unlock(&list->mtx);
1207	kfree(list);
1208
1209	return retval;
1210}
1211EXPORT_SYMBOL(vme_dma_list_free);
1212
1213/**
1214 * vme_dma_free - Free a VME DMA resource.
1215 * @resource: Pointer to VME DMA resource.
1216 *
1217 * Free the provided DMA resource so that it may be reallocated.
1218 *
1219 * Return: Zero on success, -EINVAL on invalid VME resource, -EBUSY if resource
1220 *         is still active.
1221 */
1222int vme_dma_free(struct vme_resource *resource)
1223{
1224	struct vme_dma_resource *ctrlr;
1225
1226	if (resource->type != VME_DMA) {
1227		printk(KERN_ERR "Not a DMA resource\n");
1228		return -EINVAL;
1229	}
1230
1231	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
1232
1233	if (!mutex_trylock(&ctrlr->mtx)) {
1234		printk(KERN_ERR "Resource busy, can't free\n");
1235		return -EBUSY;
1236	}
1237
1238	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
1239		printk(KERN_WARNING "Resource still processing transfers\n");
1240		mutex_unlock(&ctrlr->mtx);
1241		return -EBUSY;
1242	}
1243
1244	ctrlr->locked = 0;
1245
1246	mutex_unlock(&ctrlr->mtx);
1247
1248	kfree(resource);
1249
1250	return 0;
1251}
1252EXPORT_SYMBOL(vme_dma_free);
1253
1254void vme_bus_error_handler(struct vme_bridge *bridge,
1255			   unsigned long long address, int am)
1256{
1257	struct list_head *handler_pos = NULL;
1258	struct vme_error_handler *handler;
1259	int handler_triggered = 0;
1260	u32 aspace = vme_get_aspace(am);
1261
1262	list_for_each(handler_pos, &bridge->vme_error_handlers) {
1263		handler = list_entry(handler_pos, struct vme_error_handler,
1264				     list);
1265		if ((aspace == handler->aspace) &&
1266		    (address >= handler->start) &&
1267		    (address < handler->end)) {
1268			if (!handler->num_errors)
1269				handler->first_error = address;
1270			if (handler->num_errors != UINT_MAX)
1271				handler->num_errors++;
1272			handler_triggered = 1;
1273		}
1274	}
1275
1276	if (!handler_triggered)
1277		dev_err(bridge->parent,
1278			"Unhandled VME access error at address 0x%llx\n",
1279			address);
1280}
1281EXPORT_SYMBOL(vme_bus_error_handler);
1282
1283struct vme_error_handler *vme_register_error_handler(
1284	struct vme_bridge *bridge, u32 aspace,
1285	unsigned long long address, size_t len)
1286{
1287	struct vme_error_handler *handler;
1288
1289	handler = kmalloc(sizeof(*handler), GFP_ATOMIC);
1290	if (!handler)
1291		return NULL;
1292
1293	handler->aspace = aspace;
1294	handler->start = address;
1295	handler->end = address + len;
1296	handler->num_errors = 0;
1297	handler->first_error = 0;
1298	list_add_tail(&handler->list, &bridge->vme_error_handlers);
1299
1300	return handler;
1301}
1302EXPORT_SYMBOL(vme_register_error_handler);
1303
/* Remove @handler from its bridge's error-handler list and free it. */
void vme_unregister_error_handler(struct vme_error_handler *handler)
{
	list_del(&handler->list);
	kfree(handler);
}
EXPORT_SYMBOL(vme_unregister_error_handler);
1310
1311void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
1312{
1313	void (*call)(int, int, void *);
1314	void *priv_data;
1315
1316	call = bridge->irq[level - 1].callback[statid].func;
1317	priv_data = bridge->irq[level - 1].callback[statid].priv_data;
1318	if (call)
 
1319		call(level, statid, priv_data);
1320	else
1321		printk(KERN_WARNING "Spurious VME interrupt, level:%x, vector:%x\n",
1322		       level, statid);
1323}
1324EXPORT_SYMBOL(vme_irq_handler);
1325
1326/**
1327 * vme_irq_request - Request a specific VME interrupt.
1328 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1329 * @level: Interrupt priority being requested.
1330 * @statid: Interrupt vector being requested.
1331 * @callback: Pointer to callback function called when VME interrupt/vector
1332 *            received.
1333 * @priv_data: Generic pointer that will be passed to the callback function.
1334 *
1335 * Request callback to be attached as a handler for VME interrupts with provided
1336 * level and statid.
1337 *
1338 * Return: Zero on success, -EINVAL on invalid vme device, level or if the
1339 *         function is not supported, -EBUSY if the level/statid combination is
1340 *         already in use. Hardware specific errors also possible.
1341 */
1342int vme_irq_request(struct vme_dev *vdev, int level, int statid,
1343	void (*callback)(int, int, void *),
1344	void *priv_data)
1345{
1346	struct vme_bridge *bridge;
1347
1348	bridge = vdev->bridge;
1349	if (!bridge) {
1350		printk(KERN_ERR "Can't find VME bus\n");
1351		return -EINVAL;
1352	}
1353
1354	if ((level < 1) || (level > 7)) {
1355		printk(KERN_ERR "Invalid interrupt level\n");
1356		return -EINVAL;
1357	}
1358
1359	if (!bridge->irq_set) {
1360		printk(KERN_ERR "Configuring interrupts not supported\n");
1361		return -EINVAL;
1362	}
1363
1364	mutex_lock(&bridge->irq_mtx);
1365
1366	if (bridge->irq[level - 1].callback[statid].func) {
1367		mutex_unlock(&bridge->irq_mtx);
1368		printk(KERN_WARNING "VME Interrupt already taken\n");
1369		return -EBUSY;
1370	}
1371
1372	bridge->irq[level - 1].count++;
1373	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
1374	bridge->irq[level - 1].callback[statid].func = callback;
1375
1376	/* Enable IRQ level */
1377	bridge->irq_set(bridge, level, 1, 1);
1378
1379	mutex_unlock(&bridge->irq_mtx);
1380
1381	return 0;
1382}
1383EXPORT_SYMBOL(vme_irq_request);
1384
1385/**
1386 * vme_irq_free - Free a VME interrupt.
1387 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1388 * @level: Interrupt priority of interrupt being freed.
1389 * @statid: Interrupt vector of interrupt being freed.
1390 *
1391 * Remove previously attached callback from VME interrupt priority/vector.
1392 */
1393void vme_irq_free(struct vme_dev *vdev, int level, int statid)
1394{
1395	struct vme_bridge *bridge;
1396
1397	bridge = vdev->bridge;
1398	if (!bridge) {
1399		printk(KERN_ERR "Can't find VME bus\n");
1400		return;
1401	}
1402
1403	if ((level < 1) || (level > 7)) {
1404		printk(KERN_ERR "Invalid interrupt level\n");
1405		return;
1406	}
1407
1408	if (!bridge->irq_set) {
1409		printk(KERN_ERR "Configuring interrupts not supported\n");
1410		return;
1411	}
1412
1413	mutex_lock(&bridge->irq_mtx);
1414
1415	bridge->irq[level - 1].count--;
1416
1417	/* Disable IRQ level if no more interrupts attached at this level*/
1418	if (bridge->irq[level - 1].count == 0)
1419		bridge->irq_set(bridge, level, 0, 1);
1420
1421	bridge->irq[level - 1].callback[statid].func = NULL;
1422	bridge->irq[level - 1].callback[statid].priv_data = NULL;
1423
1424	mutex_unlock(&bridge->irq_mtx);
1425}
1426EXPORT_SYMBOL(vme_irq_free);
1427
1428/**
1429 * vme_irq_generate - Generate VME interrupt.
1430 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1431 * @level: Interrupt priority at which to assert the interrupt.
1432 * @statid: Interrupt vector to associate with the interrupt.
1433 *
1434 * Generate a VME interrupt of the provided level and with the provided
1435 * statid.
1436 *
1437 * Return: Zero on success, -EINVAL on invalid vme device, level or if the
1438 *         function is not supported. Hardware specific errors also possible.
1439 */
1440int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
1441{
1442	struct vme_bridge *bridge;
1443
1444	bridge = vdev->bridge;
1445	if (!bridge) {
1446		printk(KERN_ERR "Can't find VME bus\n");
1447		return -EINVAL;
1448	}
1449
1450	if ((level < 1) || (level > 7)) {
1451		printk(KERN_WARNING "Invalid interrupt level\n");
1452		return -EINVAL;
1453	}
1454
1455	if (!bridge->irq_generate) {
1456		printk(KERN_WARNING "Interrupt generation not supported\n");
1457		return -EINVAL;
1458	}
1459
1460	return bridge->irq_generate(bridge, level, statid);
1461}
1462EXPORT_SYMBOL(vme_irq_generate);
1463
1464/**
1465 * vme_lm_request - Request a VME location monitor
1466 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1467 *
1468 * Allocate a location monitor resource to the driver. A location monitor
1469 * allows the driver to monitor accesses to a contiguous number of
1470 * addresses on the VME bus.
1471 *
1472 * Return: Pointer to a VME resource on success or NULL on failure.
1473 */
1474struct vme_resource *vme_lm_request(struct vme_dev *vdev)
1475{
1476	struct vme_bridge *bridge;
1477	struct list_head *lm_pos = NULL;
1478	struct vme_lm_resource *allocated_lm = NULL;
1479	struct vme_lm_resource *lm = NULL;
1480	struct vme_resource *resource = NULL;
1481
1482	bridge = vdev->bridge;
1483	if (!bridge) {
1484		printk(KERN_ERR "Can't find VME bus\n");
1485		goto err_bus;
1486	}
1487
1488	/* Loop through LM resources */
1489	list_for_each(lm_pos, &bridge->lm_resources) {
1490		lm = list_entry(lm_pos,
1491			struct vme_lm_resource, list);
1492		if (!lm) {
 
1493			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
1494			continue;
1495		}
1496
1497		/* Find an unlocked controller */
1498		mutex_lock(&lm->mtx);
1499		if (lm->locked == 0) {
1500			lm->locked = 1;
1501			mutex_unlock(&lm->mtx);
1502			allocated_lm = lm;
1503			break;
1504		}
1505		mutex_unlock(&lm->mtx);
1506	}
1507
1508	/* Check to see if we found a resource */
1509	if (!allocated_lm)
1510		goto err_lm;
1511
1512	resource = kmalloc(sizeof(*resource), GFP_KERNEL);
1513	if (!resource)
 
1514		goto err_alloc;
1515
1516	resource->type = VME_LM;
1517	resource->entry = &allocated_lm->list;
1518
1519	return resource;
1520
1521err_alloc:
1522	/* Unlock image */
1523	mutex_lock(&lm->mtx);
1524	lm->locked = 0;
1525	mutex_unlock(&lm->mtx);
1526err_lm:
1527err_bus:
1528	return NULL;
1529}
1530EXPORT_SYMBOL(vme_lm_request);
1531
1532/**
1533 * vme_lm_count - Determine number of VME Addresses monitored
1534 * @resource: Pointer to VME location monitor resource.
1535 *
1536 * The number of contiguous addresses monitored is hardware dependent.
1537 * Return the number of contiguous addresses monitored by the
1538 * location monitor.
1539 *
1540 * Return: Count of addresses monitored or -EINVAL when provided with an
1541 *	   invalid location monitor resource.
1542 */
1543int vme_lm_count(struct vme_resource *resource)
1544{
1545	struct vme_lm_resource *lm;
1546
1547	if (resource->type != VME_LM) {
1548		printk(KERN_ERR "Not a Location Monitor resource\n");
1549		return -EINVAL;
1550	}
1551
1552	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1553
1554	return lm->monitors;
1555}
1556EXPORT_SYMBOL(vme_lm_count);
1557
1558/**
1559 * vme_lm_set - Configure location monitor
1560 * @resource: Pointer to VME location monitor resource.
1561 * @lm_base: Base address to monitor.
1562 * @aspace: VME address space to monitor.
1563 * @cycle: VME bus cycle type to monitor.
1564 *
1565 * Set the base address, address space and cycle type of accesses to be
1566 * monitored by the location monitor.
1567 *
1568 * Return: Zero on success, -EINVAL when provided with an invalid location
1569 *	   monitor resource or function is not supported. Hardware specific
1570 *	   errors may also be returned.
1571 */
1572int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1573	u32 aspace, u32 cycle)
1574{
1575	struct vme_bridge *bridge = find_bridge(resource);
1576	struct vme_lm_resource *lm;
1577
1578	if (resource->type != VME_LM) {
1579		printk(KERN_ERR "Not a Location Monitor resource\n");
1580		return -EINVAL;
1581	}
1582
1583	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1584
1585	if (!bridge->lm_set) {
1586		printk(KERN_ERR "vme_lm_set not supported\n");
1587		return -EINVAL;
1588	}
1589
1590	return bridge->lm_set(lm, lm_base, aspace, cycle);
1591}
1592EXPORT_SYMBOL(vme_lm_set);
1593
1594/**
1595 * vme_lm_get - Retrieve location monitor settings
1596 * @resource: Pointer to VME location monitor resource.
1597 * @lm_base: Pointer used to output the base address monitored.
1598 * @aspace: Pointer used to output the address space monitored.
1599 * @cycle: Pointer used to output the VME bus cycle type monitored.
1600 *
1601 * Retrieve the base address, address space and cycle type of accesses to
1602 * be monitored by the location monitor.
1603 *
1604 * Return: Zero on success, -EINVAL when provided with an invalid location
1605 *	   monitor resource or function is not supported. Hardware specific
1606 *	   errors may also be returned.
1607 */
1608int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1609	u32 *aspace, u32 *cycle)
1610{
1611	struct vme_bridge *bridge = find_bridge(resource);
1612	struct vme_lm_resource *lm;
1613
1614	if (resource->type != VME_LM) {
1615		printk(KERN_ERR "Not a Location Monitor resource\n");
1616		return -EINVAL;
1617	}
1618
1619	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1620
1621	if (!bridge->lm_get) {
1622		printk(KERN_ERR "vme_lm_get not supported\n");
1623		return -EINVAL;
1624	}
1625
1626	return bridge->lm_get(lm, lm_base, aspace, cycle);
1627}
1628EXPORT_SYMBOL(vme_lm_get);
1629
/**
 * vme_lm_attach - Provide callback for location monitor address
 * @resource: Pointer to VME location monitor resource.
 * @monitor: Offset to which callback should be attached.
 * @callback: Pointer to callback function called when triggered.
 * @data: Generic pointer that will be passed to the callback function.
 *
 * Attach a callback to the specified offset into the location monitors
 * monitored addresses. A generic pointer is provided to allow data to be
 * passed to the callback when called.
 *
 * Return: Zero on success, -EINVAL when provided with an invalid location
 *	   monitor resource or function is not supported. Hardware specific
 *	   errors may also be returned.
 */
int vme_lm_attach(struct vme_resource *resource, int monitor,
	void (*callback)(void *), void *data)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (!bridge->lm_attach) {
		printk(KERN_ERR "vme_lm_attach not supported\n");
		return -EINVAL;
	}

	/* Attachment bookkeeping is bridge specific. */
	return bridge->lm_attach(lm, monitor, callback, data);
}
EXPORT_SYMBOL(vme_lm_attach);
1666
/**
 * vme_lm_detach - Remove callback for location monitor address
 * @resource: Pointer to VME location monitor resource.
 * @monitor: Offset to which callback should be removed.
 *
 * Remove the callback associated with the specified offset into the
 * location monitors monitored addresses.
 *
 * Return: Zero on success, -EINVAL when provided with an invalid location
 *	   monitor resource or function is not supported. Hardware specific
 *	   errors may also be returned.
 */
int vme_lm_detach(struct vme_resource *resource, int monitor)
{
	struct vme_bridge *bridge = find_bridge(resource);
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return -EINVAL;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	if (!bridge->lm_detach) {
		printk(KERN_ERR "vme_lm_detach not supported\n");
		return -EINVAL;
	}

	return bridge->lm_detach(lm, monitor);
}
EXPORT_SYMBOL(vme_lm_detach);
1699
/**
 * vme_lm_free - Free allocated VME location monitor
 * @resource: Pointer to VME location monitor resource.
 *
 * Free allocation of a VME location monitor.
 *
 * WARNING: This function currently expects that any callbacks that have
 *          been attached to the location monitor have been removed.
 *
 * Note: this function returns void; an invalid (non-LM) resource is
 * reported via printk and the call is a no-op in that case.
 */
void vme_lm_free(struct vme_resource *resource)
{
	struct vme_lm_resource *lm;

	if (resource->type != VME_LM) {
		printk(KERN_ERR "Not a Location Monitor resource\n");
		return;
	}

	lm = list_entry(resource->entry, struct vme_lm_resource, list);

	mutex_lock(&lm->mtx);

	/* XXX
	 * Check to see that there aren't any callbacks still attached, if
	 * there are we should probably be detaching them!
	 */

	/* Mark the controller reusable by the next vme_lm_request(). */
	lm->locked = 0;

	mutex_unlock(&lm->mtx);

	kfree(resource);
}
EXPORT_SYMBOL(vme_lm_free);
1737
1738/**
1739 * vme_slot_num - Retrieve slot ID
1740 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1741 *
1742 * Retrieve the slot ID associated with the provided VME device.
1743 *
1744 * Return: The slot ID on success, -EINVAL if VME bridge cannot be determined
1745 *         or the function is not supported. Hardware specific errors may also
1746 *         be returned.
1747 */
1748int vme_slot_num(struct vme_dev *vdev)
1749{
1750	struct vme_bridge *bridge;
1751
1752	bridge = vdev->bridge;
1753	if (!bridge) {
1754		printk(KERN_ERR "Can't find VME bus\n");
1755		return -EINVAL;
1756	}
1757
1758	if (!bridge->slot_get) {
1759		printk(KERN_WARNING "vme_slot_num not supported\n");
1760		return -EINVAL;
1761	}
1762
1763	return bridge->slot_get(bridge);
1764}
1765EXPORT_SYMBOL(vme_slot_num);
1766
1767/**
1768 * vme_bus_num - Retrieve bus number
1769 * @vdev: Pointer to VME device struct vme_dev assigned to driver instance.
1770 *
1771 * Retrieve the bus enumeration associated with the provided VME device.
1772 *
1773 * Return: The bus number on success, -EINVAL if VME bridge cannot be
1774 *         determined.
1775 */
1776int vme_bus_num(struct vme_dev *vdev)
1777{
1778	struct vme_bridge *bridge;
1779
1780	bridge = vdev->bridge;
1781	if (!bridge) {
1782		pr_err("Can't find VME bus\n");
1783		return -EINVAL;
1784	}
1785
1786	return bridge->num;
1787}
1788EXPORT_SYMBOL(vme_bus_num);
1789
1790/* - Bridge Registration --------------------------------------------------- */
1791
/* Device-model release hook: frees the containing struct vme_dev. */
static void vme_dev_release(struct device *dev)
{
	kfree(dev_to_vme_dev(dev));
}
1796
1797/* Common bridge initialization */
1798struct vme_bridge *vme_init_bridge(struct vme_bridge *bridge)
1799{
1800	INIT_LIST_HEAD(&bridge->vme_error_handlers);
1801	INIT_LIST_HEAD(&bridge->master_resources);
1802	INIT_LIST_HEAD(&bridge->slave_resources);
1803	INIT_LIST_HEAD(&bridge->dma_resources);
1804	INIT_LIST_HEAD(&bridge->lm_resources);
1805	mutex_init(&bridge->irq_mtx);
1806
1807	return bridge;
1808}
1809EXPORT_SYMBOL(vme_init_bridge);
1810
1811int vme_register_bridge(struct vme_bridge *bridge)
1812{
1813	int i;
1814	int ret = -1;
1815
1816	mutex_lock(&vme_buses_lock);
1817	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1818		if ((vme_bus_numbers & (1 << i)) == 0) {
1819			vme_bus_numbers |= (1 << i);
1820			bridge->num = i;
1821			INIT_LIST_HEAD(&bridge->devices);
1822			list_add_tail(&bridge->bus_list, &vme_bus_list);
1823			ret = 0;
1824			break;
1825		}
1826	}
1827	mutex_unlock(&vme_buses_lock);
1828
1829	return ret;
1830}
1831EXPORT_SYMBOL(vme_register_bridge);
1832
/*
 * Remove @bridge from the VME subsystem: release its bus number,
 * unregister every device still attached to it (dropping each from both
 * its driver's and the bridge's device lists), then take the bridge off
 * the global bus list. All of this happens under vme_buses_lock so it
 * cannot race with driver registration.
 */
void vme_unregister_bridge(struct vme_bridge *bridge)
{
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	mutex_lock(&vme_buses_lock);
	vme_bus_numbers &= ~(1 << bridge->num);
	/* _safe variant: device_unregister() frees vdev via vme_dev_release. */
	list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	list_del(&bridge->bus_list);
	mutex_unlock(&vme_buses_lock);
}
EXPORT_SYMBOL(vme_unregister_bridge);
1849
1850/* - Driver Registration --------------------------------------------------- */
1851
/*
 * Create and register up to @ndevs devices for @drv on @bridge.
 *
 * Each device is registered with the driver as platform_data; the bus
 * match routine (vme_bus_match) clears platform_data when the driver's
 * match() rejects the device, which is why it is re-checked after
 * device_register() — rejected devices are unregistered immediately.
 *
 * Error handling: a failed device_register() drops the failed device's
 * reference with put_device(), then every device already created for
 * @drv (across all bridges) is torn down before returning the error.
 */
static int __vme_register_driver_bus(struct vme_driver *drv,
	struct vme_bridge *bridge, unsigned int ndevs)
{
	int err;
	unsigned int i;
	struct vme_dev *vdev;
	struct vme_dev *tmp;

	for (i = 0; i < ndevs; i++) {
		vdev = kzalloc(sizeof(*vdev), GFP_KERNEL);
		if (!vdev) {
			err = -ENOMEM;
			goto err_devalloc;
		}
		vdev->num = i;
		vdev->bridge = bridge;
		vdev->dev.platform_data = drv;
		vdev->dev.release = vme_dev_release;
		vdev->dev.parent = bridge->parent;
		vdev->dev.bus = &vme_bus_type;
		/* Name format: "<driver>.<bus>-<device>" */
		dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
			vdev->num);

		err = device_register(&vdev->dev);
		if (err)
			goto err_reg;

		if (vdev->dev.platform_data) {
			list_add_tail(&vdev->drv_list, &drv->devices);
			list_add_tail(&vdev->bridge_list, &bridge->devices);
		} else
			device_unregister(&vdev->dev);
	}
	return 0;

err_reg:
	put_device(&vdev->dev);
err_devalloc:
	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
		list_del(&vdev->drv_list);
		list_del(&vdev->bridge_list);
		device_unregister(&vdev->dev);
	}
	return err;
}
1897
/*
 * Enumerate @drv's devices on every registered bridge, stopping at the
 * first bridge that fails. Runs under vme_buses_lock; devices created
 * on earlier bridges are cleaned up by __vme_register_driver_bus()'s
 * error path, which walks the whole drv->devices list.
 */
static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
{
	struct vme_bridge *bridge;
	int err = 0;

	mutex_lock(&vme_buses_lock);
	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
		/*
		 * This cannot cause trouble as we already have vme_buses_lock
		 * and if the bridge is removed, it will have to go through
		 * vme_unregister_bridge() to do it (which calls remove() on
		 * the bridge which in turn tries to acquire vme_buses_lock and
		 * will have to wait).
		 */
		err = __vme_register_driver_bus(drv, bridge, ndevs);
		if (err)
			break;
	}
	mutex_unlock(&vme_buses_lock);
	return err;
}
1919
1920/**
1921 * vme_register_driver - Register a VME driver
1922 * @drv: Pointer to VME driver structure to register.
1923 * @ndevs: Maximum number of devices to allow to be enumerated.
1924 *
1925 * Register a VME device driver with the VME subsystem.
1926 *
1927 * Return: Zero on success, error value on registration failure.
1928 */
1929int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1930{
1931	int err;
1932
1933	drv->driver.name = drv->name;
1934	drv->driver.bus = &vme_bus_type;
1935	INIT_LIST_HEAD(&drv->devices);
1936
1937	err = driver_register(&drv->driver);
1938	if (err)
1939		return err;
1940
1941	err = __vme_register_driver(drv, ndevs);
1942	if (err)
1943		driver_unregister(&drv->driver);
1944
1945	return err;
1946}
1947EXPORT_SYMBOL(vme_register_driver);
1948
1949/**
1950 * vme_unregister_driver - Unregister a VME driver
1951 * @drv: Pointer to VME driver structure to unregister.
1952 *
1953 * Unregister a VME device driver from the VME subsystem.
1954 */
1955void vme_unregister_driver(struct vme_driver *drv)
1956{
1957	struct vme_dev *dev, *dev_tmp;
1958
1959	mutex_lock(&vme_buses_lock);
1960	list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
1961		list_del(&dev->drv_list);
1962		list_del(&dev->bridge_list);
1963		device_unregister(&dev->dev);
1964	}
1965	mutex_unlock(&vme_buses_lock);
1966
1967	driver_unregister(&drv->driver);
1968}
1969EXPORT_SYMBOL(vme_unregister_driver);
1970
1971/* - Bus Registration ------------------------------------------------------ */
1972
/*
 * Bus match callback: a device matches only the driver recorded in its
 * platform_data, and only if that driver's match() accepts it.
 *
 * Side effect: when match() rejects the device, platform_data is
 * cleared — __vme_register_driver_bus() uses this to detect and drop
 * unmatched devices after device_register().
 */
static int vme_bus_match(struct device *dev, struct device_driver *drv)
{
	struct vme_driver *vme_drv;

	vme_drv = container_of(drv, struct vme_driver, driver);

	if (dev->platform_data == vme_drv) {
		struct vme_dev *vdev = dev_to_vme_dev(dev);

		if (vme_drv->match && vme_drv->match(vdev))
			return 1;

		dev->platform_data = NULL;
	}
	return 0;
}
1989
1990static int vme_bus_probe(struct device *dev)
1991{
 
1992	struct vme_driver *driver;
1993	struct vme_dev *vdev = dev_to_vme_dev(dev);
1994
1995	driver = dev->platform_data;
1996	if (driver->probe)
1997		return driver->probe(vdev);
1998
1999	return -ENODEV;
 
 
 
2000}
2001
2002static int vme_bus_remove(struct device *dev)
2003{
 
2004	struct vme_driver *driver;
2005	struct vme_dev *vdev = dev_to_vme_dev(dev);
2006
2007	driver = dev->platform_data;
2008	if (driver->remove)
2009		return driver->remove(vdev);
2010
2011	return -ENODEV;
 
 
 
2012}
2013
/* The VME bus type registered with the driver core at subsys init. */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
2021
/* Register the VME bus type early (subsys_initcall) so bridge and
 * device drivers initialised later can attach to it.
 */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
subsys_initcall(vme_init);
 
 
 
 
 
 
 
 
 
 
 
v3.5.6
 
   1/*
   2 * VME Bridge Framework
   3 *
   4 * Author: Martyn Welch <martyn.welch@ge.com>
   5 * Copyright 2008 GE Intelligent Platforms Embedded Systems, Inc.
   6 *
   7 * Based on work by Tom Armistead and Ajit Prem
   8 * Copyright 2004 Motorola Inc.
   9 *
  10 * This program is free software; you can redistribute  it and/or modify it
  11 * under  the terms of  the GNU General  Public License as published by the
  12 * Free Software Foundation;  either version 2 of the  License, or (at your
  13 * option) any later version.
  14 */
  15
  16#include <linux/module.h>
  17#include <linux/moduleparam.h>
  18#include <linux/mm.h>
  19#include <linux/types.h>
  20#include <linux/kernel.h>
  21#include <linux/errno.h>
  22#include <linux/pci.h>
  23#include <linux/poll.h>
  24#include <linux/highmem.h>
  25#include <linux/interrupt.h>
  26#include <linux/pagemap.h>
  27#include <linux/device.h>
  28#include <linux/dma-mapping.h>
  29#include <linux/syscalls.h>
  30#include <linux/mutex.h>
  31#include <linux/spinlock.h>
  32#include <linux/slab.h>
  33#include <linux/vme.h>
  34
  35#include "vme_bridge.h"
  36
  37/* Bitmask and list of registered buses both protected by common mutex */
  38static unsigned int vme_bus_numbers;
  39static LIST_HEAD(vme_bus_list);
  40static DEFINE_MUTEX(vme_buses_lock);
  41
  42static void __exit vme_exit(void);
  43static int __init vme_init(void);
  44
/* Convert an embedded struct device back to its struct vme_dev. */
static struct vme_dev *dev_to_vme_dev(struct device *dev)
{
	return container_of(dev, struct vme_dev, dev);
}
  49
  50/*
  51 * Find the bridge that the resource is associated with.
  52 */
  53static struct vme_bridge *find_bridge(struct vme_resource *resource)
  54{
  55	/* Get list to search */
  56	switch (resource->type) {
  57	case VME_MASTER:
  58		return list_entry(resource->entry, struct vme_master_resource,
  59			list)->parent;
  60		break;
  61	case VME_SLAVE:
  62		return list_entry(resource->entry, struct vme_slave_resource,
  63			list)->parent;
  64		break;
  65	case VME_DMA:
  66		return list_entry(resource->entry, struct vme_dma_resource,
  67			list)->parent;
  68		break;
  69	case VME_LM:
  70		return list_entry(resource->entry, struct vme_lm_resource,
  71			list)->parent;
  72		break;
  73	default:
  74		printk(KERN_ERR "Unknown resource type\n");
  75		return NULL;
  76		break;
  77	}
  78}
  79
  80/*
 
 
 
 
 
  81 * Allocate a contiguous block of memory for use by the driver. This is used to
  82 * create the buffers for the slave windows.
 
 
  83 */
  84void *vme_alloc_consistent(struct vme_resource *resource, size_t size,
  85	dma_addr_t *dma)
  86{
  87	struct vme_bridge *bridge;
  88
  89	if (resource == NULL) {
  90		printk(KERN_ERR "No resource\n");
  91		return NULL;
  92	}
  93
  94	bridge = find_bridge(resource);
  95	if (bridge == NULL) {
  96		printk(KERN_ERR "Can't find bridge\n");
  97		return NULL;
  98	}
  99
 100	if (bridge->parent == NULL) {
 101		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
 102		return NULL;
 103	}
 104
 105	if (bridge->alloc_consistent == NULL) {
 106		printk(KERN_ERR "alloc_consistent not supported by bridge %s\n",
 107		       bridge->name);
 108		return NULL;
 109	}
 110
 111	return bridge->alloc_consistent(bridge->parent, size, dma);
 112}
 113EXPORT_SYMBOL(vme_alloc_consistent);
 114
 115/*
 116 * Free previously allocated contiguous block of memory.
 
 
 
 
 
 
 117 */
 118void vme_free_consistent(struct vme_resource *resource, size_t size,
 119	void *vaddr, dma_addr_t dma)
 120{
 121	struct vme_bridge *bridge;
 122
 123	if (resource == NULL) {
 124		printk(KERN_ERR "No resource\n");
 125		return;
 126	}
 127
 128	bridge = find_bridge(resource);
 129	if (bridge == NULL) {
 130		printk(KERN_ERR "Can't find bridge\n");
 131		return;
 132	}
 133
 134	if (bridge->parent == NULL) {
 135		printk(KERN_ERR "Dev entry NULL for bridge %s\n", bridge->name);
 136		return;
 137	}
 138
 139	if (bridge->free_consistent == NULL) {
 140		printk(KERN_ERR "free_consistent not supported by bridge %s\n",
 141		       bridge->name);
 142		return;
 143	}
 144
 145	bridge->free_consistent(bridge->parent, size, vaddr, dma);
 146}
 147EXPORT_SYMBOL(vme_free_consistent);
 148
 
 
 
 
 
 
 
 
 
 
 149size_t vme_get_size(struct vme_resource *resource)
 150{
 151	int enabled, retval;
 152	unsigned long long base, size;
 153	dma_addr_t buf_base;
 154	u32 aspace, cycle, dwidth;
 155
 156	switch (resource->type) {
 157	case VME_MASTER:
 158		retval = vme_master_get(resource, &enabled, &base, &size,
 159			&aspace, &cycle, &dwidth);
 
 
 160
 161		return size;
 162		break;
 163	case VME_SLAVE:
 164		retval = vme_slave_get(resource, &enabled, &base, &size,
 165			&buf_base, &aspace, &cycle);
 
 
 166
 167		return size;
 168		break;
 169	case VME_DMA:
 170		return 0;
 171		break;
 172	default:
 173		printk(KERN_ERR "Unknown resource type\n");
 174		return 0;
 175		break;
 176	}
 177}
 178EXPORT_SYMBOL(vme_get_size);
 179
 180static int vme_check_window(u32 aspace, unsigned long long vme_base,
 181	unsigned long long size)
 182{
 183	int retval = 0;
 184
 
 
 
 185	switch (aspace) {
 186	case VME_A16:
 187		if (((vme_base + size) > VME_A16_MAX) ||
 188				(vme_base > VME_A16_MAX))
 189			retval = -EFAULT;
 190		break;
 191	case VME_A24:
 192		if (((vme_base + size) > VME_A24_MAX) ||
 193				(vme_base > VME_A24_MAX))
 194			retval = -EFAULT;
 195		break;
 196	case VME_A32:
 197		if (((vme_base + size) > VME_A32_MAX) ||
 198				(vme_base > VME_A32_MAX))
 199			retval = -EFAULT;
 200		break;
 201	case VME_A64:
 202		/*
 203		 * Any value held in an unsigned long long can be used as the
 204		 * base
 205		 */
 206		break;
 207	case VME_CRCSR:
 208		if (((vme_base + size) > VME_CRCSR_MAX) ||
 209				(vme_base > VME_CRCSR_MAX))
 210			retval = -EFAULT;
 211		break;
 212	case VME_USER1:
 213	case VME_USER2:
 214	case VME_USER3:
 215	case VME_USER4:
 216		/* User Defined */
 217		break;
 218	default:
 219		printk(KERN_ERR "Invalid address space\n");
 220		retval = -EINVAL;
 221		break;
 222	}
 223
 224	return retval;
 225}
 
 226
/*
 * Request a slave image with specific attributes, return some unique
 * identifier.
 *
 * Walks the parent bridge's list of slave window resources and claims the
 * first unlocked one whose address_attr/cycle_attr masks cover the
 * requested @address and @cycle bits.  Returns a freshly allocated
 * vme_resource handle on success, NULL on failure; the caller releases it
 * with vme_slave_free().
 */
struct vme_resource *vme_slave_request(struct vme_dev *vdev, u32 address,
	u32 cycle)
{
	struct vme_bridge *bridge;
	struct list_head *slave_pos = NULL;
	struct vme_slave_resource *allocated_image = NULL;
	struct vme_slave_resource *slave_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through slave resources */
	list_for_each(slave_pos, &bridge->slave_resources) {
		slave_image = list_entry(slave_pos,
			struct vme_slave_resource, list);

		if (slave_image == NULL) {
			printk(KERN_ERR "Registered NULL Slave resource\n");
			continue;
		}

		/*
		 * Find an unlocked and compatible image.  The per-image mutex
		 * is held while testing and setting `locked` so two callers
		 * cannot claim the same window.
		 */
		mutex_lock(&slave_image->mtx);
		if (((slave_image->address_attr & address) == address) &&
			((slave_image->cycle_attr & cycle) == cycle) &&
			(slave_image->locked == 0)) {

			slave_image->locked = 1;
			mutex_unlock(&slave_image->mtx);
			allocated_image = slave_image;
			break;
		}
		mutex_unlock(&slave_image->mtx);
	}

	/* No free image */
	if (allocated_image == NULL)
		goto err_image;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_SLAVE;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&slave_image->mtx);
	slave_image->locked = 0;
	mutex_unlock(&slave_image->mtx);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_slave_request);
 294
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 295int vme_slave_set(struct vme_resource *resource, int enabled,
 296	unsigned long long vme_base, unsigned long long size,
 297	dma_addr_t buf_base, u32 aspace, u32 cycle)
 298{
 299	struct vme_bridge *bridge = find_bridge(resource);
 300	struct vme_slave_resource *image;
 301	int retval;
 302
 303	if (resource->type != VME_SLAVE) {
 304		printk(KERN_ERR "Not a slave resource\n");
 305		return -EINVAL;
 306	}
 307
 308	image = list_entry(resource->entry, struct vme_slave_resource, list);
 309
 310	if (bridge->slave_set == NULL) {
 311		printk(KERN_ERR "Function not supported\n");
 312		return -ENOSYS;
 313	}
 314
 315	if (!(((image->address_attr & aspace) == aspace) &&
 316		((image->cycle_attr & cycle) == cycle))) {
 317		printk(KERN_ERR "Invalid attributes\n");
 318		return -EINVAL;
 319	}
 320
 321	retval = vme_check_window(aspace, vme_base, size);
 322	if (retval)
 323		return retval;
 324
 325	return bridge->slave_set(image, enabled, vme_base, size, buf_base,
 326		aspace, cycle);
 327}
 328EXPORT_SYMBOL(vme_slave_set);
 329
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 330int vme_slave_get(struct vme_resource *resource, int *enabled,
 331	unsigned long long *vme_base, unsigned long long *size,
 332	dma_addr_t *buf_base, u32 *aspace, u32 *cycle)
 333{
 334	struct vme_bridge *bridge = find_bridge(resource);
 335	struct vme_slave_resource *image;
 336
 337	if (resource->type != VME_SLAVE) {
 338		printk(KERN_ERR "Not a slave resource\n");
 339		return -EINVAL;
 340	}
 341
 342	image = list_entry(resource->entry, struct vme_slave_resource, list);
 343
 344	if (bridge->slave_get == NULL) {
 345		printk(KERN_ERR "vme_slave_get not supported\n");
 346		return -EINVAL;
 347	}
 348
 349	return bridge->slave_get(image, enabled, vme_base, size, buf_base,
 350		aspace, cycle);
 351}
 352EXPORT_SYMBOL(vme_slave_get);
 353
 
 
 
 
 
 
 354void vme_slave_free(struct vme_resource *resource)
 355{
 356	struct vme_slave_resource *slave_image;
 357
 358	if (resource->type != VME_SLAVE) {
 359		printk(KERN_ERR "Not a slave resource\n");
 360		return;
 361	}
 362
 363	slave_image = list_entry(resource->entry, struct vme_slave_resource,
 364		list);
 365	if (slave_image == NULL) {
 366		printk(KERN_ERR "Can't find slave resource\n");
 367		return;
 368	}
 369
 370	/* Unlock image */
 371	mutex_lock(&slave_image->mtx);
 372	if (slave_image->locked == 0)
 373		printk(KERN_ERR "Image is already free\n");
 374
 375	slave_image->locked = 0;
 376	mutex_unlock(&slave_image->mtx);
 377
 378	/* Free up resource memory */
 379	kfree(resource);
 380}
 381EXPORT_SYMBOL(vme_slave_free);
 382
/*
 * Request a master image with specific attributes, return some unique
 * identifier.
 *
 * Walks the parent bridge's list of master window resources and claims the
 * first unlocked one whose address/cycle/data-width masks cover the
 * requested bits.  Returns a freshly allocated vme_resource handle on
 * success, NULL on failure; the caller releases it with vme_master_free().
 */
struct vme_resource *vme_master_request(struct vme_dev *vdev, u32 address,
	u32 cycle, u32 dwidth)
{
	struct vme_bridge *bridge;
	struct list_head *master_pos = NULL;
	struct vme_master_resource *allocated_image = NULL;
	struct vme_master_resource *master_image = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through master resources */
	list_for_each(master_pos, &bridge->master_resources) {
		master_image = list_entry(master_pos,
			struct vme_master_resource, list);

		if (master_image == NULL) {
			printk(KERN_WARNING "Registered NULL master resource\n");
			continue;
		}

		/*
		 * Find an unlocked and compatible image.  The per-image
		 * spinlock is held while testing and setting `locked` so two
		 * callers cannot claim the same window.
		 */
		spin_lock(&master_image->lock);
		if (((master_image->address_attr & address) == address) &&
			((master_image->cycle_attr & cycle) == cycle) &&
			((master_image->width_attr & dwidth) == dwidth) &&
			(master_image->locked == 0)) {

			master_image->locked = 1;
			spin_unlock(&master_image->lock);
			allocated_image = master_image;
			break;
		}
		spin_unlock(&master_image->lock);
	}

	/* Check to see if we found a resource */
	if (allocated_image == NULL) {
		printk(KERN_ERR "Can't find a suitable resource\n");
		goto err_image;
	}

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_MASTER;
	resource->entry = &allocated_image->list;

	return resource;

err_alloc:
	/* Unlock image */
	spin_lock(&master_image->lock);
	master_image->locked = 0;
	spin_unlock(&master_image->lock);
err_image:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_master_request);
 453
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 454int vme_master_set(struct vme_resource *resource, int enabled,
 455	unsigned long long vme_base, unsigned long long size, u32 aspace,
 456	u32 cycle, u32 dwidth)
 457{
 458	struct vme_bridge *bridge = find_bridge(resource);
 459	struct vme_master_resource *image;
 460	int retval;
 461
 462	if (resource->type != VME_MASTER) {
 463		printk(KERN_ERR "Not a master resource\n");
 464		return -EINVAL;
 465	}
 466
 467	image = list_entry(resource->entry, struct vme_master_resource, list);
 468
 469	if (bridge->master_set == NULL) {
 470		printk(KERN_WARNING "vme_master_set not supported\n");
 471		return -EINVAL;
 472	}
 473
 474	if (!(((image->address_attr & aspace) == aspace) &&
 475		((image->cycle_attr & cycle) == cycle) &&
 476		((image->width_attr & dwidth) == dwidth))) {
 477		printk(KERN_WARNING "Invalid attributes\n");
 478		return -EINVAL;
 479	}
 480
 481	retval = vme_check_window(aspace, vme_base, size);
 482	if (retval)
 483		return retval;
 484
 485	return bridge->master_set(image, enabled, vme_base, size, aspace,
 486		cycle, dwidth);
 487}
 488EXPORT_SYMBOL(vme_master_set);
 489
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 490int vme_master_get(struct vme_resource *resource, int *enabled,
 491	unsigned long long *vme_base, unsigned long long *size, u32 *aspace,
 492	u32 *cycle, u32 *dwidth)
 493{
 494	struct vme_bridge *bridge = find_bridge(resource);
 495	struct vme_master_resource *image;
 496
 497	if (resource->type != VME_MASTER) {
 498		printk(KERN_ERR "Not a master resource\n");
 499		return -EINVAL;
 500	}
 501
 502	image = list_entry(resource->entry, struct vme_master_resource, list);
 503
 504	if (bridge->master_get == NULL) {
 505		printk(KERN_WARNING "vme_master_set not supported\n");
 506		return -EINVAL;
 507	}
 508
 509	return bridge->master_get(image, enabled, vme_base, size, aspace,
 510		cycle, dwidth);
 511}
 512EXPORT_SYMBOL(vme_master_get);
 513
 514/*
 515 * Read data out of VME space into a buffer.
 
 
 
 
 
 
 
 
 
 
 
 
 516 */
 517ssize_t vme_master_read(struct vme_resource *resource, void *buf, size_t count,
 518	loff_t offset)
 519{
 520	struct vme_bridge *bridge = find_bridge(resource);
 521	struct vme_master_resource *image;
 522	size_t length;
 523
 524	if (bridge->master_read == NULL) {
 525		printk(KERN_WARNING "Reading from resource not supported\n");
 526		return -EINVAL;
 527	}
 528
 529	if (resource->type != VME_MASTER) {
 530		printk(KERN_ERR "Not a master resource\n");
 531		return -EINVAL;
 532	}
 533
 534	image = list_entry(resource->entry, struct vme_master_resource, list);
 535
 536	length = vme_get_size(resource);
 537
 538	if (offset > length) {
 539		printk(KERN_WARNING "Invalid Offset\n");
 540		return -EFAULT;
 541	}
 542
 543	if ((offset + count) > length)
 544		count = length - offset;
 545
 546	return bridge->master_read(image, buf, count, offset);
 547
 548}
 549EXPORT_SYMBOL(vme_master_read);
 550
 551/*
 552 * Write data out to VME space from a buffer.
 
 
 
 
 
 
 
 
 
 
 
 
 553 */
 554ssize_t vme_master_write(struct vme_resource *resource, void *buf,
 555	size_t count, loff_t offset)
 556{
 557	struct vme_bridge *bridge = find_bridge(resource);
 558	struct vme_master_resource *image;
 559	size_t length;
 560
 561	if (bridge->master_write == NULL) {
 562		printk(KERN_WARNING "Writing to resource not supported\n");
 563		return -EINVAL;
 564	}
 565
 566	if (resource->type != VME_MASTER) {
 567		printk(KERN_ERR "Not a master resource\n");
 568		return -EINVAL;
 569	}
 570
 571	image = list_entry(resource->entry, struct vme_master_resource, list);
 572
 573	length = vme_get_size(resource);
 574
 575	if (offset > length) {
 576		printk(KERN_WARNING "Invalid Offset\n");
 577		return -EFAULT;
 578	}
 579
 580	if ((offset + count) > length)
 581		count = length - offset;
 582
 583	return bridge->master_write(image, buf, count, offset);
 584}
 585EXPORT_SYMBOL(vme_master_write);
 586
 587/*
 588 * Perform RMW cycle to provided location.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 589 */
 590unsigned int vme_master_rmw(struct vme_resource *resource, unsigned int mask,
 591	unsigned int compare, unsigned int swap, loff_t offset)
 592{
 593	struct vme_bridge *bridge = find_bridge(resource);
 594	struct vme_master_resource *image;
 595
 596	if (bridge->master_rmw == NULL) {
 597		printk(KERN_WARNING "Writing to resource not supported\n");
 598		return -EINVAL;
 599	}
 600
 601	if (resource->type != VME_MASTER) {
 602		printk(KERN_ERR "Not a master resource\n");
 603		return -EINVAL;
 604	}
 605
 606	image = list_entry(resource->entry, struct vme_master_resource, list);
 607
 608	return bridge->master_rmw(image, mask, compare, swap, offset);
 609}
 610EXPORT_SYMBOL(vme_master_rmw);
 611
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 612void vme_master_free(struct vme_resource *resource)
 613{
 614	struct vme_master_resource *master_image;
 615
 616	if (resource->type != VME_MASTER) {
 617		printk(KERN_ERR "Not a master resource\n");
 618		return;
 619	}
 620
 621	master_image = list_entry(resource->entry, struct vme_master_resource,
 622		list);
 623	if (master_image == NULL) {
 624		printk(KERN_ERR "Can't find master resource\n");
 625		return;
 626	}
 627
 628	/* Unlock image */
 629	spin_lock(&master_image->lock);
 630	if (master_image->locked == 0)
 631		printk(KERN_ERR "Image is already free\n");
 632
 633	master_image->locked = 0;
 634	spin_unlock(&master_image->lock);
 635
 636	/* Free up resource memory */
 637	kfree(resource);
 638}
 639EXPORT_SYMBOL(vme_master_free);
 640
/*
 * Request a DMA controller with specific attributes, return some unique
 * identifier.
 *
 * Walks the parent bridge's list of DMA controllers and claims the first
 * unlocked one whose route_attr mask covers the requested @route bits.
 * Returns a freshly allocated vme_resource handle on success, NULL on
 * failure; the caller releases it with vme_dma_free().
 */
struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
{
	struct vme_bridge *bridge;
	struct list_head *dma_pos = NULL;
	struct vme_dma_resource *allocated_ctrlr = NULL;
	struct vme_dma_resource *dma_ctrlr = NULL;
	struct vme_resource *resource = NULL;

	/* XXX Not checking resource attributes */
	printk(KERN_ERR "No VME resource Attribute tests done\n");

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(dma_pos, &bridge->dma_resources) {
		dma_ctrlr = list_entry(dma_pos,
			struct vme_dma_resource, list);

		if (dma_ctrlr == NULL) {
			printk(KERN_ERR "Registered NULL DMA resource\n");
			continue;
		}

		/*
		 * Find an unlocked and compatible controller.  The per-
		 * controller mutex is held while testing and setting `locked`.
		 */
		mutex_lock(&dma_ctrlr->mtx);
		if (((dma_ctrlr->route_attr & route) == route) &&
			(dma_ctrlr->locked == 0)) {

			dma_ctrlr->locked = 1;
			mutex_unlock(&dma_ctrlr->mtx);
			allocated_ctrlr = dma_ctrlr;
			break;
		}
		mutex_unlock(&dma_ctrlr->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_ctrlr == NULL)
		goto err_ctrlr;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_WARNING "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_DMA;
	resource->entry = &allocated_ctrlr->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&dma_ctrlr->mtx);
	dma_ctrlr->locked = 0;
	mutex_unlock(&dma_ctrlr->mtx);
err_ctrlr:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_dma_request);
 709
 710/*
 711 * Start new list
 
 
 
 
 
 
 
 712 */
 713struct vme_dma_list *vme_new_dma_list(struct vme_resource *resource)
 714{
 715	struct vme_dma_resource *ctrlr;
 716	struct vme_dma_list *dma_list;
 717
 718	if (resource->type != VME_DMA) {
 719		printk(KERN_ERR "Not a DMA resource\n");
 720		return NULL;
 721	}
 722
 723	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);
 
 
 724
 725	dma_list = kmalloc(sizeof(struct vme_dma_list), GFP_KERNEL);
 726	if (dma_list == NULL) {
 727		printk(KERN_ERR "Unable to allocate memory for new dma list\n");
 728		return NULL;
 729	}
 730	INIT_LIST_HEAD(&dma_list->entries);
 731	dma_list->parent = ctrlr;
 
 
 732	mutex_init(&dma_list->mtx);
 733
 734	return dma_list;
 735}
 736EXPORT_SYMBOL(vme_new_dma_list);
 737
 738/*
 739 * Create "Pattern" type attributes
 
 
 
 
 
 
 
 
 740 */
 741struct vme_dma_attr *vme_dma_pattern_attribute(u32 pattern, u32 type)
 742{
 743	struct vme_dma_attr *attributes;
 744	struct vme_dma_pattern *pattern_attr;
 745
 746	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
 747	if (attributes == NULL) {
 748		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
 749		goto err_attr;
 750	}
 751
 752	pattern_attr = kmalloc(sizeof(struct vme_dma_pattern), GFP_KERNEL);
 753	if (pattern_attr == NULL) {
 754		printk(KERN_ERR "Unable to allocate memory for pattern attributes\n");
 755		goto err_pat;
 756	}
 757
 758	attributes->type = VME_DMA_PATTERN;
 759	attributes->private = (void *)pattern_attr;
 760
 761	pattern_attr->pattern = pattern;
 762	pattern_attr->type = type;
 763
 764	return attributes;
 765
 766err_pat:
 767	kfree(attributes);
 768err_attr:
 769	return NULL;
 770}
 771EXPORT_SYMBOL(vme_dma_pattern_attribute);
 772
 773/*
 774 * Create "PCI" type attributes
 
 
 
 
 
 
 
 775 */
 776struct vme_dma_attr *vme_dma_pci_attribute(dma_addr_t address)
 777{
 778	struct vme_dma_attr *attributes;
 779	struct vme_dma_pci *pci_attr;
 780
 781	/* XXX Run some sanity checks here */
 782
 783	attributes = kmalloc(sizeof(struct vme_dma_attr), GFP_KERNEL);
 784	if (attributes == NULL) {
 785		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
 786		goto err_attr;
 787	}
 788
 789	pci_attr = kmalloc(sizeof(struct vme_dma_pci), GFP_KERNEL);
 790	if (pci_attr == NULL) {
 791		printk(KERN_ERR "Unable to allocate memory for pci attributes\n");
 792		goto err_pci;
 793	}
 794
 795
 796
 797	attributes->type = VME_DMA_PCI;
 798	attributes->private = (void *)pci_attr;
 799
 800	pci_attr->address = address;
 801
 802	return attributes;
 803
 804err_pci:
 805	kfree(attributes);
 806err_attr:
 807	return NULL;
 808}
 809EXPORT_SYMBOL(vme_dma_pci_attribute);
 810
 811/*
 812 * Create "VME" type attributes
 
 
 
 
 
 
 
 
 
 
 813 */
 814struct vme_dma_attr *vme_dma_vme_attribute(unsigned long long address,
 815	u32 aspace, u32 cycle, u32 dwidth)
 816{
 817	struct vme_dma_attr *attributes;
 818	struct vme_dma_vme *vme_attr;
 819
 820	attributes = kmalloc(
 821		sizeof(struct vme_dma_attr), GFP_KERNEL);
 822	if (attributes == NULL) {
 823		printk(KERN_ERR "Unable to allocate memory for attributes structure\n");
 824		goto err_attr;
 825	}
 826
 827	vme_attr = kmalloc(sizeof(struct vme_dma_vme), GFP_KERNEL);
 828	if (vme_attr == NULL) {
 829		printk(KERN_ERR "Unable to allocate memory for vme attributes\n");
 830		goto err_vme;
 831	}
 832
 833	attributes->type = VME_DMA_VME;
 834	attributes->private = (void *)vme_attr;
 835
 836	vme_attr->address = address;
 837	vme_attr->aspace = aspace;
 838	vme_attr->cycle = cycle;
 839	vme_attr->dwidth = dwidth;
 840
 841	return attributes;
 842
 843err_vme:
 844	kfree(attributes);
 845err_attr:
 846	return NULL;
 847}
 848EXPORT_SYMBOL(vme_dma_vme_attribute);
 849
 850/*
 851 * Free attribute
 
 
 
 
 852 */
 853void vme_dma_free_attribute(struct vme_dma_attr *attributes)
 854{
 855	kfree(attributes->private);
 856	kfree(attributes);
 857}
 858EXPORT_SYMBOL(vme_dma_free_attribute);
 859
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Append a src -> dest transfer of @count bytes to the DMA link list.
 *
 * Uses mutex_trylock so a list that has already been handed to
 * vme_dma_list_exec() (which holds list->mtx for the duration) is
 * rejected with -EINVAL rather than blocking.
 */
int vme_dma_list_add(struct vme_dma_list *list, struct vme_dma_attr *src,
	struct vme_dma_attr *dest, size_t count)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_add == NULL) {
		printk(KERN_WARNING "Link List DMA generation not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List already submitted\n");
		return -EINVAL;
	}

	/* Entry construction is driver specific */
	retval = bridge->dma_list_add(list, src, dest, count);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_add);
 883
 
 
 
 
 
 
 
 
 
 
/*
 * Submit a DMA link list for execution via the bridge driver.
 *
 * Holds list->mtx for the duration of the transfer, which is what makes
 * vme_dma_list_add()/vme_dma_list_free() fail on a busy list.
 */
int vme_dma_list_exec(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_exec == NULL) {
		printk(KERN_ERR "Link List DMA execution not supported\n");
		return -EINVAL;
	}

	mutex_lock(&list->mtx);

	retval = bridge->dma_list_exec(list);

	mutex_unlock(&list->mtx);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_exec);
 903
 
 
 
 
 
 
 
 
 
/*
 * Empty and free a DMA link list created by vme_new_dma_list().
 *
 * Fails with -EINVAL if the list is currently in use (mutex held by an
 * executing transfer), or with the driver's error if the entries cannot
 * be emptied — in which case the list is NOT freed.
 */
int vme_dma_list_free(struct vme_dma_list *list)
{
	struct vme_bridge *bridge = list->parent->parent;
	int retval;

	if (bridge->dma_list_empty == NULL) {
		printk(KERN_WARNING "Emptying of Link Lists not supported\n");
		return -EINVAL;
	}

	if (!mutex_trylock(&list->mtx)) {
		printk(KERN_ERR "Link List in use\n");
		return -EINVAL;
	}

	/*
	 * Empty out all of the entries from the dma list. We need to go to the
	 * low level driver as dma entries are driver specific.
	 */
	retval = bridge->dma_list_empty(list);
	if (retval) {
		printk(KERN_ERR "Unable to empty link-list entries\n");
		mutex_unlock(&list->mtx);
		return retval;
	}
	mutex_unlock(&list->mtx);
	kfree(list);

	return retval;
}
EXPORT_SYMBOL(vme_dma_list_free);
 935
 
 
 
 
 
 
 
 
 
/*
 * Release a DMA controller claimed via vme_dma_request().
 *
 * Fails with -EBUSY if the controller's mutex is held or if it still has
 * pending/running transfers queued.  Note: unlike the other *_free
 * helpers, this does not kfree() the vme_resource handle.
 */
int vme_dma_free(struct vme_resource *resource)
{
	struct vme_dma_resource *ctrlr;

	if (resource->type != VME_DMA) {
		printk(KERN_ERR "Not a DMA resource\n");
		return -EINVAL;
	}

	ctrlr = list_entry(resource->entry, struct vme_dma_resource, list);

	if (!mutex_trylock(&ctrlr->mtx)) {
		printk(KERN_ERR "Resource busy, can't free\n");
		return -EBUSY;
	}

	/* Refuse to release while transfers are queued or in flight */
	if (!(list_empty(&ctrlr->pending) && list_empty(&ctrlr->running))) {
		printk(KERN_WARNING "Resource still processing transfers\n");
		mutex_unlock(&ctrlr->mtx);
		return -EBUSY;
	}

	ctrlr->locked = 0;

	mutex_unlock(&ctrlr->mtx);

	return 0;
}
EXPORT_SYMBOL(vme_dma_free);
 965
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 966void vme_irq_handler(struct vme_bridge *bridge, int level, int statid)
 967{
 968	void (*call)(int, int, void *);
 969	void *priv_data;
 970
 971	call = bridge->irq[level - 1].callback[statid].func;
 972	priv_data = bridge->irq[level - 1].callback[statid].priv_data;
 973
 974	if (call != NULL)
 975		call(level, statid, priv_data);
 976	else
 977		printk(KERN_WARNING "Spurilous VME interrupt, level:%x, vector:%x\n",
 978		       level, statid);
 979}
 980EXPORT_SYMBOL(vme_irq_handler);
 981
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
/*
 * Register @callback for VME interrupts at @level (1-7) with status/ID
 * @statid, and enable that interrupt level on the bridge.
 *
 * Only one handler may be attached per level/statid pair; a second
 * request fails with -EBUSY.  The callback receives (level, statid,
 * priv_data).  Release with vme_irq_free().
 */
int vme_irq_request(struct vme_dev *vdev, int level, int statid,
	void (*callback)(int, int, void *),
	void *priv_data)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return -EINVAL;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return -EINVAL;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return -EINVAL;
	}

	/* irq_mtx serialises callback table updates with vme_irq_free() */
	mutex_lock(&bridge->irq_mtx);

	if (bridge->irq[level - 1].callback[statid].func) {
		mutex_unlock(&bridge->irq_mtx);
		printk(KERN_WARNING "VME Interrupt already taken\n");
		return -EBUSY;
	}

	bridge->irq[level - 1].count++;
	bridge->irq[level - 1].callback[statid].priv_data = priv_data;
	bridge->irq[level - 1].callback[statid].func = callback;

	/* Enable IRQ level */
	bridge->irq_set(bridge, level, 1, 1);

	mutex_unlock(&bridge->irq_mtx);

	return 0;
}
EXPORT_SYMBOL(vme_irq_request);
1024
 
 
 
 
 
 
 
 
/*
 * Unregister the handler for interrupt @level / @statid previously
 * installed with vme_irq_request(), disabling the IRQ level on the
 * bridge if it was the last handler at that level.
 */
void vme_irq_free(struct vme_dev *vdev, int level, int statid)
{
	struct vme_bridge *bridge;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		return;
	}

	if ((level < 1) || (level > 7)) {
		printk(KERN_ERR "Invalid interrupt level\n");
		return;
	}

	if (bridge->irq_set == NULL) {
		printk(KERN_ERR "Configuring interrupts not supported\n");
		return;
	}

	/* irq_mtx serialises callback table updates with vme_irq_request() */
	mutex_lock(&bridge->irq_mtx);

	bridge->irq[level - 1].count--;

	/* Disable IRQ level if no more interrupts attached at this level*/
	if (bridge->irq[level - 1].count == 0)
		bridge->irq_set(bridge, level, 0, 1);

	bridge->irq[level - 1].callback[statid].func = NULL;
	bridge->irq[level - 1].callback[statid].priv_data = NULL;

	mutex_unlock(&bridge->irq_mtx);
}
EXPORT_SYMBOL(vme_irq_free);
1059
 
 
 
 
 
 
 
 
 
 
 
 
1060int vme_irq_generate(struct vme_dev *vdev, int level, int statid)
1061{
1062	struct vme_bridge *bridge;
1063
1064	bridge = vdev->bridge;
1065	if (bridge == NULL) {
1066		printk(KERN_ERR "Can't find VME bus\n");
1067		return -EINVAL;
1068	}
1069
1070	if ((level < 1) || (level > 7)) {
1071		printk(KERN_WARNING "Invalid interrupt level\n");
1072		return -EINVAL;
1073	}
1074
1075	if (bridge->irq_generate == NULL) {
1076		printk(KERN_WARNING "Interrupt generation not supported\n");
1077		return -EINVAL;
1078	}
1079
1080	return bridge->irq_generate(bridge, level, statid);
1081}
1082EXPORT_SYMBOL(vme_irq_generate);
1083
/*
 * Request the location monitor, return resource or NULL.
 *
 * Walks the parent bridge's list of location monitor resources and claims
 * the first unlocked one.  Returns a freshly allocated vme_resource handle
 * on success; the caller releases it with vme_lm_free().
 */
struct vme_resource *vme_lm_request(struct vme_dev *vdev)
{
	struct vme_bridge *bridge;
	struct list_head *lm_pos = NULL;
	struct vme_lm_resource *allocated_lm = NULL;
	struct vme_lm_resource *lm = NULL;
	struct vme_resource *resource = NULL;

	bridge = vdev->bridge;
	if (bridge == NULL) {
		printk(KERN_ERR "Can't find VME bus\n");
		goto err_bus;
	}

	/* Loop through DMA resources */
	list_for_each(lm_pos, &bridge->lm_resources) {
		lm = list_entry(lm_pos,
			struct vme_lm_resource, list);

		if (lm == NULL) {
			printk(KERN_ERR "Registered NULL Location Monitor resource\n");
			continue;
		}

		/*
		 * Find an unlocked controller.  The per-monitor mutex is held
		 * while testing and setting `locked`.
		 */
		mutex_lock(&lm->mtx);
		if (lm->locked == 0) {
			lm->locked = 1;
			mutex_unlock(&lm->mtx);
			allocated_lm = lm;
			break;
		}
		mutex_unlock(&lm->mtx);
	}

	/* Check to see if we found a resource */
	if (allocated_lm == NULL)
		goto err_lm;

	resource = kmalloc(sizeof(struct vme_resource), GFP_KERNEL);
	if (resource == NULL) {
		printk(KERN_ERR "Unable to allocate resource structure\n");
		goto err_alloc;
	}
	resource->type = VME_LM;
	resource->entry = &allocated_lm->list;

	return resource;

err_alloc:
	/* Unlock image */
	mutex_lock(&lm->mtx);
	lm->locked = 0;
	mutex_unlock(&lm->mtx);
err_lm:
err_bus:
	return NULL;
}
EXPORT_SYMBOL(vme_lm_request);
1146
 
 
 
 
 
 
 
 
 
 
 
1147int vme_lm_count(struct vme_resource *resource)
1148{
1149	struct vme_lm_resource *lm;
1150
1151	if (resource->type != VME_LM) {
1152		printk(KERN_ERR "Not a Location Monitor resource\n");
1153		return -EINVAL;
1154	}
1155
1156	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1157
1158	return lm->monitors;
1159}
1160EXPORT_SYMBOL(vme_lm_count);
1161
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1162int vme_lm_set(struct vme_resource *resource, unsigned long long lm_base,
1163	u32 aspace, u32 cycle)
1164{
1165	struct vme_bridge *bridge = find_bridge(resource);
1166	struct vme_lm_resource *lm;
1167
1168	if (resource->type != VME_LM) {
1169		printk(KERN_ERR "Not a Location Monitor resource\n");
1170		return -EINVAL;
1171	}
1172
1173	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1174
1175	if (bridge->lm_set == NULL) {
1176		printk(KERN_ERR "vme_lm_set not supported\n");
1177		return -EINVAL;
1178	}
1179
1180	return bridge->lm_set(lm, lm_base, aspace, cycle);
1181}
1182EXPORT_SYMBOL(vme_lm_set);
1183
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1184int vme_lm_get(struct vme_resource *resource, unsigned long long *lm_base,
1185	u32 *aspace, u32 *cycle)
1186{
1187	struct vme_bridge *bridge = find_bridge(resource);
1188	struct vme_lm_resource *lm;
1189
1190	if (resource->type != VME_LM) {
1191		printk(KERN_ERR "Not a Location Monitor resource\n");
1192		return -EINVAL;
1193	}
1194
1195	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1196
1197	if (bridge->lm_get == NULL) {
1198		printk(KERN_ERR "vme_lm_get not supported\n");
1199		return -EINVAL;
1200	}
1201
1202	return bridge->lm_get(lm, lm_base, aspace, cycle);
1203}
1204EXPORT_SYMBOL(vme_lm_get);
1205
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1206int vme_lm_attach(struct vme_resource *resource, int monitor,
1207	void (*callback)(int))
1208{
1209	struct vme_bridge *bridge = find_bridge(resource);
1210	struct vme_lm_resource *lm;
1211
1212	if (resource->type != VME_LM) {
1213		printk(KERN_ERR "Not a Location Monitor resource\n");
1214		return -EINVAL;
1215	}
1216
1217	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1218
1219	if (bridge->lm_attach == NULL) {
1220		printk(KERN_ERR "vme_lm_attach not supported\n");
1221		return -EINVAL;
1222	}
1223
1224	return bridge->lm_attach(lm, monitor, callback);
1225}
1226EXPORT_SYMBOL(vme_lm_attach);
1227
 
 
 
 
 
 
 
 
 
 
 
 
1228int vme_lm_detach(struct vme_resource *resource, int monitor)
1229{
1230	struct vme_bridge *bridge = find_bridge(resource);
1231	struct vme_lm_resource *lm;
1232
1233	if (resource->type != VME_LM) {
1234		printk(KERN_ERR "Not a Location Monitor resource\n");
1235		return -EINVAL;
1236	}
1237
1238	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1239
1240	if (bridge->lm_detach == NULL) {
1241		printk(KERN_ERR "vme_lm_detach not supported\n");
1242		return -EINVAL;
1243	}
1244
1245	return bridge->lm_detach(lm, monitor);
1246}
1247EXPORT_SYMBOL(vme_lm_detach);
1248
 
 
 
 
 
 
 
 
 
 
 
 
1249void vme_lm_free(struct vme_resource *resource)
1250{
1251	struct vme_lm_resource *lm;
1252
1253	if (resource->type != VME_LM) {
1254		printk(KERN_ERR "Not a Location Monitor resource\n");
1255		return;
1256	}
1257
1258	lm = list_entry(resource->entry, struct vme_lm_resource, list);
1259
1260	mutex_lock(&lm->mtx);
1261
1262	/* XXX
1263	 * Check to see that there aren't any callbacks still attached, if
1264	 * there are we should probably be detaching them!
1265	 */
1266
1267	lm->locked = 0;
1268
1269	mutex_unlock(&lm->mtx);
1270
1271	kfree(resource);
1272}
1273EXPORT_SYMBOL(vme_lm_free);
1274
1275int vme_slot_get(struct vme_dev *vdev)
 
 
 
 
 
 
 
 
 
 
1276{
1277	struct vme_bridge *bridge;
1278
1279	bridge = vdev->bridge;
1280	if (bridge == NULL) {
1281		printk(KERN_ERR "Can't find VME bus\n");
1282		return -EINVAL;
1283	}
1284
1285	if (bridge->slot_get == NULL) {
1286		printk(KERN_WARNING "vme_slot_get not supported\n");
1287		return -EINVAL;
1288	}
1289
1290	return bridge->slot_get(bridge);
1291}
1292EXPORT_SYMBOL(vme_slot_get);
1293
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1294
1295/* - Bridge Registration --------------------------------------------------- */
1296
/* Device-core release callback: frees the containing struct vme_dev. */
static void vme_dev_release(struct device *dev)
{
	struct vme_dev *vdev = dev_to_vme_dev(dev);

	kfree(vdev);
}
1301
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1302int vme_register_bridge(struct vme_bridge *bridge)
1303{
1304	int i;
1305	int ret = -1;
1306
1307	mutex_lock(&vme_buses_lock);
1308	for (i = 0; i < sizeof(vme_bus_numbers) * 8; i++) {
1309		if ((vme_bus_numbers & (1 << i)) == 0) {
1310			vme_bus_numbers |= (1 << i);
1311			bridge->num = i;
1312			INIT_LIST_HEAD(&bridge->devices);
1313			list_add_tail(&bridge->bus_list, &vme_bus_list);
1314			ret = 0;
1315			break;
1316		}
1317	}
1318	mutex_unlock(&vme_buses_lock);
1319
1320	return ret;
1321}
1322EXPORT_SYMBOL(vme_register_bridge);
1323
1324void vme_unregister_bridge(struct vme_bridge *bridge)
1325{
1326	struct vme_dev *vdev;
1327	struct vme_dev *tmp;
1328
1329	mutex_lock(&vme_buses_lock);
1330	vme_bus_numbers &= ~(1 << bridge->num);
1331	list_for_each_entry_safe(vdev, tmp, &bridge->devices, bridge_list) {
1332		list_del(&vdev->drv_list);
1333		list_del(&vdev->bridge_list);
1334		device_unregister(&vdev->dev);
1335	}
1336	list_del(&bridge->bus_list);
1337	mutex_unlock(&vme_buses_lock);
1338}
1339EXPORT_SYMBOL(vme_unregister_bridge);
1340
1341/* - Driver Registration --------------------------------------------------- */
1342
1343static int __vme_register_driver_bus(struct vme_driver *drv,
1344	struct vme_bridge *bridge, unsigned int ndevs)
1345{
1346	int err;
1347	unsigned int i;
1348	struct vme_dev *vdev;
1349	struct vme_dev *tmp;
1350
1351	for (i = 0; i < ndevs; i++) {
1352		vdev = kzalloc(sizeof(struct vme_dev), GFP_KERNEL);
1353		if (!vdev) {
1354			err = -ENOMEM;
1355			goto err_devalloc;
1356		}
1357		vdev->num = i;
1358		vdev->bridge = bridge;
1359		vdev->dev.platform_data = drv;
1360		vdev->dev.release = vme_dev_release;
1361		vdev->dev.parent = bridge->parent;
1362		vdev->dev.bus = &vme_bus_type;
1363		dev_set_name(&vdev->dev, "%s.%u-%u", drv->name, bridge->num,
1364			vdev->num);
1365
1366		err = device_register(&vdev->dev);
1367		if (err)
1368			goto err_reg;
1369
1370		if (vdev->dev.platform_data) {
1371			list_add_tail(&vdev->drv_list, &drv->devices);
1372			list_add_tail(&vdev->bridge_list, &bridge->devices);
1373		} else
1374			device_unregister(&vdev->dev);
1375	}
1376	return 0;
1377
1378err_reg:
1379	kfree(vdev);
1380err_devalloc:
1381	list_for_each_entry_safe(vdev, tmp, &drv->devices, drv_list) {
1382		list_del(&vdev->drv_list);
1383		list_del(&vdev->bridge_list);
1384		device_unregister(&vdev->dev);
1385	}
1386	return err;
1387}
1388
1389static int __vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1390{
1391	struct vme_bridge *bridge;
1392	int err = 0;
1393
1394	mutex_lock(&vme_buses_lock);
1395	list_for_each_entry(bridge, &vme_bus_list, bus_list) {
1396		/*
1397		 * This cannot cause trouble as we already have vme_buses_lock
1398		 * and if the bridge is removed, it will have to go through
1399		 * vme_unregister_bridge() to do it (which calls remove() on
1400		 * the bridge which in turn tries to acquire vme_buses_lock and
1401		 * will have to wait).
1402		 */
1403		err = __vme_register_driver_bus(drv, bridge, ndevs);
1404		if (err)
1405			break;
1406	}
1407	mutex_unlock(&vme_buses_lock);
1408	return err;
1409}
1410
 
 
 
 
 
 
 
 
 
1411int vme_register_driver(struct vme_driver *drv, unsigned int ndevs)
1412{
1413	int err;
1414
1415	drv->driver.name = drv->name;
1416	drv->driver.bus = &vme_bus_type;
1417	INIT_LIST_HEAD(&drv->devices);
1418
1419	err = driver_register(&drv->driver);
1420	if (err)
1421		return err;
1422
1423	err = __vme_register_driver(drv, ndevs);
1424	if (err)
1425		driver_unregister(&drv->driver);
1426
1427	return err;
1428}
1429EXPORT_SYMBOL(vme_register_driver);
1430
 
 
 
 
 
 
1431void vme_unregister_driver(struct vme_driver *drv)
1432{
1433	struct vme_dev *dev, *dev_tmp;
1434
1435	mutex_lock(&vme_buses_lock);
1436	list_for_each_entry_safe(dev, dev_tmp, &drv->devices, drv_list) {
1437		list_del(&dev->drv_list);
1438		list_del(&dev->bridge_list);
1439		device_unregister(&dev->dev);
1440	}
1441	mutex_unlock(&vme_buses_lock);
1442
1443	driver_unregister(&drv->driver);
1444}
1445EXPORT_SYMBOL(vme_unregister_driver);
1446
1447/* - Bus Registration ------------------------------------------------------ */
1448
1449static int vme_bus_match(struct device *dev, struct device_driver *drv)
1450{
1451	struct vme_driver *vme_drv;
1452
1453	vme_drv = container_of(drv, struct vme_driver, driver);
1454
1455	if (dev->platform_data == vme_drv) {
1456		struct vme_dev *vdev = dev_to_vme_dev(dev);
1457
1458		if (vme_drv->match && vme_drv->match(vdev))
1459			return 1;
1460
1461		dev->platform_data = NULL;
1462	}
1463	return 0;
1464}
1465
1466static int vme_bus_probe(struct device *dev)
1467{
1468	int retval = -ENODEV;
1469	struct vme_driver *driver;
1470	struct vme_dev *vdev = dev_to_vme_dev(dev);
1471
1472	driver = dev->platform_data;
 
 
1473
1474	if (driver->probe != NULL)
1475		retval = driver->probe(vdev);
1476
1477	return retval;
1478}
1479
1480static int vme_bus_remove(struct device *dev)
1481{
1482	int retval = -ENODEV;
1483	struct vme_driver *driver;
1484	struct vme_dev *vdev = dev_to_vme_dev(dev);
1485
1486	driver = dev->platform_data;
 
 
1487
1488	if (driver->remove != NULL)
1489		retval = driver->remove(vdev);
1490
1491	return retval;
1492}
1493
/* The VME bus type registered with the driver core at module init */
struct bus_type vme_bus_type = {
	.name = "vme",
	.match = vme_bus_match,
	.probe = vme_bus_probe,
	.remove = vme_bus_remove,
};
EXPORT_SYMBOL(vme_bus_type);
1501
/* Module init: register the VME bus type with the driver core. */
static int __init vme_init(void)
{
	return bus_register(&vme_bus_type);
}
1506
/* Module exit: unregister the VME bus type. */
static void __exit vme_exit(void)
{
	bus_unregister(&vme_bus_type);
}
1511
1512MODULE_DESCRIPTION("VME bridge driver framework");
1513MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
1514MODULE_LICENSE("GPL");
1515
1516module_init(vme_init);
1517module_exit(vme_exit);