/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
 * dma_request_channel().  Once a channel is allocated, a reference is taken
 * against its corresponding driver to prevent removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
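
/*
 * Illustrative sketch (not part of the original file): a client of the
 * public channel pool brackets its use of dma_find_channel() with
 * dmaengine_get()/dmaengine_put(), for example:
 *
 *	dmaengine_get();
 *	...
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	...
 *	dmaengine_put();
 *
 * Clients that need a channel exclusively use dma_request_channel() and
 * dma_release_channel() instead; see the sketch near those routines below.
 */
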
  47
  48#include <linux/dma-mapping.h>
  49#include <linux/init.h>
  50#include <linux/module.h>
  51#include <linux/mm.h>
  52#include <linux/device.h>
  53#include <linux/dmaengine.h>
  54#include <linux/hardirq.h>
  55#include <linux/spinlock.h>
  56#include <linux/percpu.h>
  57#include <linux/rcupdate.h>
  58#include <linux/mutex.h>
  59#include <linux/jiffies.h>
  60#include <linux/rculist.h>
  61#include <linux/idr.h>
  62#include <linux/slab.h>
  63
  64static DEFINE_MUTEX(dma_list_mutex);
  65static DEFINE_IDR(dma_idr);
  66static LIST_HEAD(dma_device_list);
  67static long dmaengine_ref_count;
  68
/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev - device node
 *
 * Must be called under dma_list_mutex
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
				      char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}

static struct device_attribute dma_attrs[] = {
	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
	__ATTR_NULL
};

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_attrs	= dma_attrs,
	.dev_release	= chan_dev_release,
};
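
/*
 * Illustrative note (not part of the original file): given the class name
 * "dma" and the "dma%dchan%d" names assigned in dma_async_device_register()
 * below, the attributes above appear in sysfs roughly as
 *
 *	/sys/class/dma/dma0chan0/memcpy_count
 *	/sys/class/dma/dma0chan0/bytes_transferred
 *	/sys/class/dma/dma0chan0/in_use
 *
 * (the exact device and channel numbers depend on registration order).
 */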

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}
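
/*
 * Illustrative example (not part of the original file): the
 * bitmap_and/bitmap_equal pair above is a subset test.  A request for
 * {DMA_MEMCPY} is satisfied by a device advertising {DMA_MEMCPY, DMA_XOR},
 * whereas a request for {DMA_MEMCPY, DMA_PQ} against that same device is not.
 */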

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 *
 * balance_ref_count must be called under dma_list_mutex
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan - channel to grab
 *
 * Must be called under dma_list_mutex
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan - channel to release
 *
 * Must be called under dma_list_mutex
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			printk(KERN_ERR "dma_sync_wait_timeout!\n");
			return DMA_ERROR;
		}
	} while (status == DMA_IN_PROGRESS);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan - associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("dmaengine: initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * nth_chan - returns the nth channel of the given capability
 * @cap: capability to match
 * @n: nth channel desired
 *
 * Defaults to returning the channel with the desired capability and the
 * lowest reference count when 'n' cannot be satisfied.  Must be called
 * under dma_list_mutex.
 */
static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *ret = NULL;
	struct dma_chan *min = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min)
				min = chan;
			else if (chan->table_count < min->table_count)
				min = chan;

			if (n-- == 0) {
				ret = chan;
				break; /* done */
			}
		}
		if (ret)
			break; /* done */
	}

	if (!ret)
		ret = min;

	if (ret)
		ret->table_count++;

	return ret;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case.  Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;
	int n;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	n = 0;
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			if (num_possible_cpus() > 1)
				chan = nth_chan(cap, n++);
			else
				chan = nth_chan(cap, -1);

			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}
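
/*
 * Illustrative example (not part of the original file): the distribution
 * above is a simple round-robin.  With two online CPUs and two public
 * DMA_MEMCPY channels, successive nth_chan() calls typically hand each cpu
 * its own channel in channel_table[DMA_MEMCPY]; with more CPUs than
 * channels, nth_chan() falls back to the channel with the lowest
 * table_count, so CPUs are spread as evenly as possible over the available
 * channels.
 */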

static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback used to filter the available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 */
struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it.  We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n", __func__,
					 dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
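
/*
 * Illustrative sketch (not part of the original file): a user that needs a
 * channel exclusively typically pairs dma_request_channel() with
 * dma_release_channel(), optionally narrowing the choice with a
 * dma_filter_fn.  The filter and "my_dev" below are made up for the example:
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		...issue transfers on chan...
 *		dma_release_channel(chan);
 *	}
 */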

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_err("%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels waiting,
	 * we need to rebalance to get those channels incorporated into
	 * the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

 idr_retry:
	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
		return -ENOMEM;
	mutex_lock(&dma_list_mutex);
	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
	mutex_unlock(&dma_list_mutex);
	if (rc == -EAGAIN)
		goto idr_retry;
	else if (rc != 0)
		return rc;

	return 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel, just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
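
/*
 * Illustrative sketch (not part of the original file): a driver registering
 * a memcpy-capable device fills in its capabilities and the mandatory
 * callbacks checked above before calling dma_async_device_register().  The
 * "ddev" container field, the my_* callbacks and "pdev" below are made up
 * for the example:
 *
 *	dma_cap_zero(dev->ddev.cap_mask);
 *	dma_cap_set(DMA_MEMCPY, dev->ddev.cap_mask);
 *	dev->ddev.device_prep_dma_memcpy = my_prep_memcpy;
 *	dev->ddev.device_alloc_chan_resources = my_alloc_chan_resources;
 *	dev->ddev.device_free_chan_resources = my_free_chan_resources;
 *	dev->ddev.device_tx_status = my_tx_status;
 *	dev->ddev.device_issue_pending = my_issue_pending;
 *	dev->ddev.dev = &pdev->dev;
 *	INIT_LIST_HEAD(&dev->ddev.channels);
 *	...add each struct dma_chan to dev->ddev.channels...
 *	err = dma_async_device_register(&dev->ddev);
 */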

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it from being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
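
/*
 * Illustrative sketch (not part of the original file): a caller that wants
 * the copy above to complete synchronously can pair the returned cookie with
 * dma_sync_wait(), which issues pending operations and polls for completion:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *	if (dma_submit_error(cookie))
 *		memcpy(dst, src, len);		(fall back to a cpu copy)
 *	else if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
 *		...handle the error...
 */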

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting until a channel switch is detected; in that case
	 * we will be called again as a result of processing the interrupt
	 * from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);

static int __init dma_bus_init(void)
{
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);