v3.1
   1/*
   2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License as published by the Free
   6 * Software Foundation; either version 2 of the License, or (at your option)
   7 * any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc., 59
  16 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  17 *
  18 * The full GNU General Public License is included in this distribution in the
  19 * file called COPYING.
  20 */
  21
  22/*
  23 * This code implements the DMA subsystem. It provides a HW-neutral interface
  24 * for other kernel code to use asynchronous memory copy capabilities,
  25 * if present, and allows different HW DMA drivers to register as providing
  26 * this capability.
  27 *
   28 * Because we are accelerating what is already a relatively fast operation,
   29 * the code goes to great lengths to avoid additional overhead,
  30 * such as locking.
  31 *
  32 * LOCKING:
  33 *
   34 * The subsystem keeps a global list of dma_device structs; it is protected by
   35 * a mutex, dma_list_mutex.
  36 *
  37 * A subsystem can get access to a channel by calling dmaengine_get() followed
  38 * by dma_find_channel(), or if it has need for an exclusive channel it can call
  39 * dma_request_channel().  Once a channel is allocated a reference is taken
  40 * against its corresponding driver to disable removal.
  41 *
  42 * Each device has a channels list, which runs unlocked but is never modified
   43 * once the device is registered; it is just set up by the driver.
  44 *
  45 * See Documentation/dmaengine.txt for more details
  46 */
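/*
 * Editor's sketch (not part of the original file): a minimal client of the
 * flow described above, assuming only the API defined and exported in this
 * file plus <linux/dmaengine.h>; the function name example_mem_client is
 * hypothetical.
 */
#include <linux/dmaengine.h>

static void example_mem_client(void)
{
	struct dma_chan *chan;

	/* register interest so channels are allocated and drivers pinned */
	dmaengine_get();

	/* pick a channel able to perform memory-to-memory copies, if any */
	chan = dma_find_channel(DMA_MEMCPY);
	if (chan)
		/* flush anything already submitted on this channel */
		dma_async_issue_pending(chan);

	/* drop interest; drivers may be removed again once the count hits zero */
	dmaengine_put();
}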
  47
  48#include <linux/dma-mapping.h>
  49#include <linux/init.h>
  50#include <linux/module.h>
  51#include <linux/mm.h>
  52#include <linux/device.h>
  53#include <linux/dmaengine.h>
  54#include <linux/hardirq.h>
  55#include <linux/spinlock.h>
  56#include <linux/percpu.h>
  57#include <linux/rcupdate.h>
  58#include <linux/mutex.h>
  59#include <linux/jiffies.h>
  60#include <linux/rculist.h>
  61#include <linux/idr.h>
  62#include <linux/slab.h>
  63
  64static DEFINE_MUTEX(dma_list_mutex);
  65static DEFINE_IDR(dma_idr);
  66static LIST_HEAD(dma_device_list);
  67static long dmaengine_ref_count;
  68
  69/* --- sysfs implementation --- */
  70
  71/**
   72 * dev_to_dma_chan - convert a device pointer to its sysfs container object
  73 * @dev - device node
  74 *
  75 * Must be called under dma_list_mutex
  76 */
  77static struct dma_chan *dev_to_dma_chan(struct device *dev)
  78{
  79	struct dma_chan_dev *chan_dev;
  80
  81	chan_dev = container_of(dev, typeof(*chan_dev), device);
  82	return chan_dev->chan;
  83}
  84
  85static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
  86{
  87	struct dma_chan *chan;
  88	unsigned long count = 0;
  89	int i;
  90	int err;
  91
  92	mutex_lock(&dma_list_mutex);
  93	chan = dev_to_dma_chan(dev);
  94	if (chan) {
  95		for_each_possible_cpu(i)
  96			count += per_cpu_ptr(chan->local, i)->memcpy_count;
  97		err = sprintf(buf, "%lu\n", count);
  98	} else
  99		err = -ENODEV;
 100	mutex_unlock(&dma_list_mutex);
 101
 102	return err;
 103}
 104
 105static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
 106				      char *buf)
 107{
 108	struct dma_chan *chan;
 109	unsigned long count = 0;
 110	int i;
 111	int err;
 112
 113	mutex_lock(&dma_list_mutex);
 114	chan = dev_to_dma_chan(dev);
 115	if (chan) {
 116		for_each_possible_cpu(i)
 117			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
 118		err = sprintf(buf, "%lu\n", count);
 119	} else
 120		err = -ENODEV;
 121	mutex_unlock(&dma_list_mutex);
 122
 123	return err;
 124}
 125
 126static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 127{
 128	struct dma_chan *chan;
 129	int err;
 130
 131	mutex_lock(&dma_list_mutex);
 132	chan = dev_to_dma_chan(dev);
 133	if (chan)
 134		err = sprintf(buf, "%d\n", chan->client_count);
 135	else
 136		err = -ENODEV;
 137	mutex_unlock(&dma_list_mutex);
 138
 139	return err;
 140}
 141
 142static struct device_attribute dma_attrs[] = {
 143	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
 144	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
 145	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
 146	__ATTR_NULL
 147};
 148
 149static void chan_dev_release(struct device *dev)
 150{
 151	struct dma_chan_dev *chan_dev;
 152
 153	chan_dev = container_of(dev, typeof(*chan_dev), device);
 154	if (atomic_dec_and_test(chan_dev->idr_ref)) {
 155		mutex_lock(&dma_list_mutex);
 156		idr_remove(&dma_idr, chan_dev->dev_id);
 157		mutex_unlock(&dma_list_mutex);
 158		kfree(chan_dev->idr_ref);
 159	}
 160	kfree(chan_dev);
 161}
 162
 163static struct class dma_devclass = {
 164	.name		= "dma",
 165	.dev_attrs	= dma_attrs,
 166	.dev_release	= chan_dev_release,
 167};
 168
 169/* --- client and device registration --- */
 170
 171#define dma_device_satisfies_mask(device, mask) \
 172	__dma_device_satisfies_mask((device), &(mask))
 173static int
 174__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
 175{
 176	dma_cap_mask_t has;
 177
 178	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 179		DMA_TX_TYPE_END);
 180	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 181}
 182
 183static struct module *dma_chan_to_owner(struct dma_chan *chan)
 184{
 185	return chan->device->dev->driver->owner;
 186}
 187
 188/**
 189 * balance_ref_count - catch up the channel reference count
 190 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 191 *
 192 * balance_ref_count must be called under dma_list_mutex
 193 */
 194static void balance_ref_count(struct dma_chan *chan)
 195{
 196	struct module *owner = dma_chan_to_owner(chan);
 197
 198	while (chan->client_count < dmaengine_ref_count) {
 199		__module_get(owner);
 200		chan->client_count++;
 201	}
 202}
 203
 204/**
 205 * dma_chan_get - try to grab a dma channel's parent driver module
 206 * @chan - channel to grab
 207 *
 208 * Must be called under dma_list_mutex
 209 */
 210static int dma_chan_get(struct dma_chan *chan)
 211{
 212	int err = -ENODEV;
 213	struct module *owner = dma_chan_to_owner(chan);
 214
 215	if (chan->client_count) {
 216		__module_get(owner);
 217		err = 0;
 218	} else if (try_module_get(owner))
 219		err = 0;
 220
 221	if (err == 0)
 222		chan->client_count++;
 223
 224	/* allocate upon first client reference */
 225	if (chan->client_count == 1 && err == 0) {
 226		int desc_cnt = chan->device->device_alloc_chan_resources(chan);
 227
 228		if (desc_cnt < 0) {
 229			err = desc_cnt;
 230			chan->client_count = 0;
 231			module_put(owner);
 232		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 233			balance_ref_count(chan);
 234	}
 235
 236	return err;
 237}
 238
 239/**
 240 * dma_chan_put - drop a reference to a dma channel's parent driver module
 241 * @chan - channel to release
 242 *
 243 * Must be called under dma_list_mutex
 244 */
 245static void dma_chan_put(struct dma_chan *chan)
 246{
 247	if (!chan->client_count)
 248		return; /* this channel failed alloc_chan_resources */
 249	chan->client_count--;
 250	module_put(dma_chan_to_owner(chan));
 251	if (chan->client_count == 0)
 252		chan->device->device_free_chan_resources(chan);
 253}
 254
 255enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 256{
 257	enum dma_status status;
 258	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 259
 260	dma_async_issue_pending(chan);
 261	do {
 262		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 263		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 264			printk(KERN_ERR "dma_sync_wait_timeout!\n");
 265			return DMA_ERROR;
 266		}
 267	} while (status == DMA_IN_PROGRESS);
 268
 269	return status;
 270}
 271EXPORT_SYMBOL(dma_sync_wait);
 272
 273/**
 274 * dma_cap_mask_all - enable iteration over all operation types
 275 */
 276static dma_cap_mask_t dma_cap_mask_all;
 277
 278/**
 279 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 280 * @chan - associated channel for this entry
 281 */
 282struct dma_chan_tbl_ent {
 283	struct dma_chan *chan;
 284};
 285
 286/**
 287 * channel_table - percpu lookup table for memory-to-memory offload providers
 288 */
 289static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 290
 291static int __init dma_channel_table_init(void)
 292{
 293	enum dma_transaction_type cap;
 294	int err = 0;
 295
 296	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 297
 298	/* 'interrupt', 'private', and 'slave' are channel capabilities,
 299	 * but are not associated with an operation so they do not need
 300	 * an entry in the channel_table
 301	 */
 302	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
 303	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 304	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 305
 306	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
 307		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
 308		if (!channel_table[cap]) {
 309			err = -ENOMEM;
 310			break;
 311		}
 312	}
 313
 314	if (err) {
 315		pr_err("dmaengine: initialization failure\n");
 316		for_each_dma_cap_mask(cap, dma_cap_mask_all)
 317			if (channel_table[cap])
 318				free_percpu(channel_table[cap]);
 319	}
 320
 321	return err;
 322}
 323arch_initcall(dma_channel_table_init);
 324
 325/**
 326 * dma_find_channel - find a channel to carry out the operation
 327 * @tx_type: transaction type
 328 */
 329struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 330{
 331	return this_cpu_read(channel_table[tx_type]->chan);
 332}
 333EXPORT_SYMBOL(dma_find_channel);
 334
 335/**
 336 * dma_issue_pending_all - flush all pending operations across all channels
 337 */
 338void dma_issue_pending_all(void)
 339{
 340	struct dma_device *device;
 341	struct dma_chan *chan;
 342
 343	rcu_read_lock();
 344	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 345		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 346			continue;
 347		list_for_each_entry(chan, &device->channels, device_node)
 348			if (chan->client_count)
 349				device->device_issue_pending(chan);
 350	}
 351	rcu_read_unlock();
 352}
 353EXPORT_SYMBOL(dma_issue_pending_all);
 354
 355/**
 356 * nth_chan - returns the nth channel of the given capability
 357 * @cap: capability to match
 358 * @n: nth channel desired
 359 *
 360 * Defaults to returning the channel with the desired capability and the
 361 * lowest reference count when 'n' cannot be satisfied.  Must be called
 362 * under dma_list_mutex.
 363 */
 364static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
 365{
 366	struct dma_device *device;
 367	struct dma_chan *chan;
 368	struct dma_chan *ret = NULL;
 369	struct dma_chan *min = NULL;
 370
 371	list_for_each_entry(device, &dma_device_list, global_node) {
 372		if (!dma_has_cap(cap, device->cap_mask) ||
 373		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 374			continue;
 375		list_for_each_entry(chan, &device->channels, device_node) {
 376			if (!chan->client_count)
 377				continue;
 378			if (!min)
 379				min = chan;
 380			else if (chan->table_count < min->table_count)
 381				min = chan;
 382
 383			if (n-- == 0) {
 384				ret = chan;
 385				break; /* done */
 386			}
 387		}
 388		if (ret)
 389			break; /* done */
 390	}
 391
 392	if (!ret)
 393		ret = min;
 394
 395	if (ret)
 396		ret->table_count++;
 397
 398	return ret;
 399}
 400
 401/**
 402 * dma_channel_rebalance - redistribute the available channels
 403 *
 404 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 405 * operation type) in the SMP case,  and operation isolation (avoid
 406 * multi-tasking channels) in the non-SMP case.  Must be called under
 407 * dma_list_mutex.
 408 */
 409static void dma_channel_rebalance(void)
 410{
 411	struct dma_chan *chan;
 412	struct dma_device *device;
 413	int cpu;
 414	int cap;
 415	int n;
 416
 417	/* undo the last distribution */
 418	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 419		for_each_possible_cpu(cpu)
 420			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 421
 422	list_for_each_entry(device, &dma_device_list, global_node) {
 423		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 424			continue;
 425		list_for_each_entry(chan, &device->channels, device_node)
 426			chan->table_count = 0;
 427	}
 428
 429	/* don't populate the channel_table if no clients are available */
 430	if (!dmaengine_ref_count)
 431		return;
 432
 433	/* redistribute available channels */
 434	n = 0;
 435	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 436		for_each_online_cpu(cpu) {
 437			if (num_possible_cpus() > 1)
 438				chan = nth_chan(cap, n++);
 439			else
 440				chan = nth_chan(cap, -1);
 441
 442			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 443		}
 444}
 445
 446static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
 447					  dma_filter_fn fn, void *fn_param)
 448{
 449	struct dma_chan *chan;
 450
 451	if (!__dma_device_satisfies_mask(dev, mask)) {
 452		pr_debug("%s: wrong capabilities\n", __func__);
 453		return NULL;
 454	}
 455	/* devices with multiple channels need special handling as we need to
 456	 * ensure that all channels are either private or public.
 457	 */
 458	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
 459		list_for_each_entry(chan, &dev->channels, device_node) {
 460			/* some channels are already publicly allocated */
 461			if (chan->client_count)
 462				return NULL;
 463		}
 464
 465	list_for_each_entry(chan, &dev->channels, device_node) {
 466		if (chan->client_count) {
 467			pr_debug("%s: %s busy\n",
 468				 __func__, dma_chan_name(chan));
 469			continue;
 470		}
 471		if (fn && !fn(chan, fn_param)) {
 472			pr_debug("%s: %s filter said false\n",
 473				 __func__, dma_chan_name(chan));
 474			continue;
 475		}
 476		return chan;
 477	}
 478
 479	return NULL;
 480}
 481
 482/**
 483 * dma_request_channel - try to allocate an exclusive channel
 484 * @mask: capabilities that the channel must satisfy
 485 * @fn: optional callback to disposition available channels
 486 * @fn_param: opaque parameter to pass to dma_filter_fn
 487 */
 488struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
 489{
 490	struct dma_device *device, *_d;
 491	struct dma_chan *chan = NULL;
 492	int err;
 493
 494	/* Find a channel */
 495	mutex_lock(&dma_list_mutex);
 496	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 497		chan = private_candidate(mask, device, fn, fn_param);
 498		if (chan) {
 499			/* Found a suitable channel, try to grab, prep, and
 500			 * return it.  We first set DMA_PRIVATE to disable
 501			 * balance_ref_count as this channel will not be
 502			 * published in the general-purpose allocator
 503			 */
 504			dma_cap_set(DMA_PRIVATE, device->cap_mask);
 505			device->privatecnt++;
 506			err = dma_chan_get(chan);
 507
 508			if (err == -ENODEV) {
 509				pr_debug("%s: %s module removed\n", __func__,
 510					 dma_chan_name(chan));
 511				list_del_rcu(&device->global_node);
 512			} else if (err)
 513				pr_debug("dmaengine: failed to get %s: (%d)\n",
 514					 dma_chan_name(chan), err);
 515			else
 516				break;
 517			if (--device->privatecnt == 0)
 518				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 519			chan = NULL;
 520		}
 521	}
 522	mutex_unlock(&dma_list_mutex);
 523
 524	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
 525		 chan ? dma_chan_name(chan) : NULL);
 526
 527	return chan;
 528}
 529EXPORT_SYMBOL_GPL(__dma_request_channel);
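/*
 * Editor's sketch (not part of the original file): requesting an exclusive
 * channel with a filter callback, using only calls defined or exported
 * above; my_filter, my_match_data and example_grab_private_chan are
 * hypothetical names.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static bool my_filter(struct dma_chan *chan, void *param)
{
	/* accept only channels belonging to the device the caller cares about */
	return chan->device->dev == param;
}

static int example_grab_private_chan(void *my_match_data)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* NULL if no free channel satisfies both the mask and the filter */
	chan = __dma_request_channel(&mask, my_filter, my_match_data);
	if (!chan)
		return -ENODEV;

	/* ... issue transfers on the exclusively owned channel ... */

	/* hand it back so the channel (and its driver) can be released */
	dma_release_channel(chan);
	return 0;
}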
 530
 531void dma_release_channel(struct dma_chan *chan)
 532{
 533	mutex_lock(&dma_list_mutex);
 534	WARN_ONCE(chan->client_count != 1,
 535		  "chan reference count %d != 1\n", chan->client_count);
 536	dma_chan_put(chan);
 537	/* drop PRIVATE cap enabled by __dma_request_channel() */
 538	if (--chan->device->privatecnt == 0)
 539		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 540	mutex_unlock(&dma_list_mutex);
 541}
 542EXPORT_SYMBOL_GPL(dma_release_channel);
 543
 544/**
 545 * dmaengine_get - register interest in dma_channels
 546 */
 547void dmaengine_get(void)
 548{
 549	struct dma_device *device, *_d;
 550	struct dma_chan *chan;
 551	int err;
 552
 553	mutex_lock(&dma_list_mutex);
 554	dmaengine_ref_count++;
 555
 556	/* try to grab channels */
 557	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 558		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 559			continue;
 560		list_for_each_entry(chan, &device->channels, device_node) {
 561			err = dma_chan_get(chan);
 562			if (err == -ENODEV) {
 563				/* module removed before we could use it */
 564				list_del_rcu(&device->global_node);
 565				break;
 566			} else if (err)
 567				pr_err("dmaengine: failed to get %s: (%d)\n",
 568				       dma_chan_name(chan), err);
 569		}
 570	}
 571
 572	/* if this is the first reference and there were channels
 573	 * waiting we need to rebalance to get those channels
 574	 * incorporated into the channel table
 575	 */
 576	if (dmaengine_ref_count == 1)
 577		dma_channel_rebalance();
 578	mutex_unlock(&dma_list_mutex);
 579}
 580EXPORT_SYMBOL(dmaengine_get);
 581
 582/**
 583 * dmaengine_put - let dma drivers be removed when ref_count == 0
 584 */
 585void dmaengine_put(void)
 586{
 587	struct dma_device *device;
 588	struct dma_chan *chan;
 589
 590	mutex_lock(&dma_list_mutex);
 591	dmaengine_ref_count--;
 592	BUG_ON(dmaengine_ref_count < 0);
 593	/* drop channel references */
 594	list_for_each_entry(device, &dma_device_list, global_node) {
 595		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 596			continue;
 597		list_for_each_entry(chan, &device->channels, device_node)
 598			dma_chan_put(chan);
 599	}
 600	mutex_unlock(&dma_list_mutex);
 601}
 602EXPORT_SYMBOL(dmaengine_put);
 603
 604static bool device_has_all_tx_types(struct dma_device *device)
 605{
 606	/* A device that satisfies this test has channels that will never cause
 607	 * an async_tx channel switch event as all possible operation types can
 608	 * be handled.
 609	 */
 610	#ifdef CONFIG_ASYNC_TX_DMA
 611	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 612		return false;
 613	#endif
 614
 615	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
 616	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 617		return false;
 618	#endif
 619
 620	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
 621	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
 622		return false;
 623	#endif
 624
 625	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 626	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 627		return false;
 628
 629	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
 630	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
 631		return false;
 632	#endif
 633	#endif
 634
 635	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
 636	if (!dma_has_cap(DMA_PQ, device->cap_mask))
 637		return false;
 638
 639	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
 640	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
 641		return false;
 642	#endif
 643	#endif
 644
 645	return true;
 646}
 647
 648static int get_dma_id(struct dma_device *device)
 649{
 650	int rc;
 651
 652 idr_retry:
 653	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
 654		return -ENOMEM;
 655	mutex_lock(&dma_list_mutex);
 656	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
 657	mutex_unlock(&dma_list_mutex);
 658	if (rc == -EAGAIN)
 659		goto idr_retry;
 660	else if (rc != 0)
 661		return rc;
 662
 663	return 0;
 664}
 665
 666/**
 667 * dma_async_device_register - registers DMA devices found
 668 * @device: &dma_device
 669 */
 670int dma_async_device_register(struct dma_device *device)
 671{
 672	int chancnt = 0, rc;
 673	struct dma_chan* chan;
 674	atomic_t *idr_ref;
 675
 676	if (!device)
 677		return -ENODEV;
 678
 679	/* validate device routines */
 680	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
 681		!device->device_prep_dma_memcpy);
 682	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
 683		!device->device_prep_dma_xor);
 684	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
 685		!device->device_prep_dma_xor_val);
 686	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
 687		!device->device_prep_dma_pq);
 688	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
 689		!device->device_prep_dma_pq_val);
 690	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 691		!device->device_prep_dma_memset);
 692	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 693		!device->device_prep_dma_interrupt);
 694	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
 695		!device->device_prep_dma_sg);
 696	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 697		!device->device_prep_slave_sg);
 698	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 699		!device->device_prep_dma_cyclic);
 700	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 701		!device->device_control);
 702
 703	BUG_ON(!device->device_alloc_chan_resources);
 704	BUG_ON(!device->device_free_chan_resources);
 705	BUG_ON(!device->device_tx_status);
 706	BUG_ON(!device->device_issue_pending);
 707	BUG_ON(!device->dev);
 708
 709	/* note: this only matters in the
 710	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 711	 */
 712	if (device_has_all_tx_types(device))
 713		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 714
 715	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 716	if (!idr_ref)
 717		return -ENOMEM;
 718	rc = get_dma_id(device);
 719	if (rc != 0) {
 720		kfree(idr_ref);
 721		return rc;
 722	}
 723
 724	atomic_set(idr_ref, 0);
 725
 726	/* represent channels in sysfs. Probably want devs too */
 727	list_for_each_entry(chan, &device->channels, device_node) {
 728		rc = -ENOMEM;
 729		chan->local = alloc_percpu(typeof(*chan->local));
 730		if (chan->local == NULL)
 731			goto err_out;
 732		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 733		if (chan->dev == NULL) {
 734			free_percpu(chan->local);
 735			chan->local = NULL;
 736			goto err_out;
 737		}
 738
 739		chan->chan_id = chancnt++;
 740		chan->dev->device.class = &dma_devclass;
 741		chan->dev->device.parent = device->dev;
 742		chan->dev->chan = chan;
 743		chan->dev->idr_ref = idr_ref;
 744		chan->dev->dev_id = device->dev_id;
 745		atomic_inc(idr_ref);
 746		dev_set_name(&chan->dev->device, "dma%dchan%d",
 747			     device->dev_id, chan->chan_id);
 748
 749		rc = device_register(&chan->dev->device);
 750		if (rc) {
 751			free_percpu(chan->local);
 752			chan->local = NULL;
 753			kfree(chan->dev);
 754			atomic_dec(idr_ref);
 755			goto err_out;
 756		}
 757		chan->client_count = 0;
 758	}
 759	device->chancnt = chancnt;
 760
 761	mutex_lock(&dma_list_mutex);
 762	/* take references on public channels */
 763	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
 764		list_for_each_entry(chan, &device->channels, device_node) {
 765			/* if clients are already waiting for channels we need
 766			 * to take references on their behalf
 767			 */
 768			if (dma_chan_get(chan) == -ENODEV) {
 769				/* note we can only get here for the first
 770				 * channel as the remaining channels are
 771				 * guaranteed to get a reference
 772				 */
 773				rc = -ENODEV;
 774				mutex_unlock(&dma_list_mutex);
 775				goto err_out;
 776			}
 777		}
 778	list_add_tail_rcu(&device->global_node, &dma_device_list);
 779	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 780		device->privatecnt++;	/* Always private */
 781	dma_channel_rebalance();
 782	mutex_unlock(&dma_list_mutex);
 783
 784	return 0;
 785
 786err_out:
 787	/* if we never registered a channel just release the idr */
 788	if (atomic_read(idr_ref) == 0) {
 789		mutex_lock(&dma_list_mutex);
 790		idr_remove(&dma_idr, device->dev_id);
 791		mutex_unlock(&dma_list_mutex);
 792		kfree(idr_ref);
 793		return rc;
 794	}
 795
 796	list_for_each_entry(chan, &device->channels, device_node) {
 797		if (chan->local == NULL)
 798			continue;
 799		mutex_lock(&dma_list_mutex);
 800		chan->dev->chan = NULL;
 801		mutex_unlock(&dma_list_mutex);
 802		device_unregister(&chan->dev->device);
 803		free_percpu(chan->local);
 804	}
 805	return rc;
 806}
 807EXPORT_SYMBOL(dma_async_device_register);
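/*
 * Editor's sketch (not part of the original file): the minimum a provider
 * driver typically wires up before calling dma_async_device_register();
 * all my_* names and example_register_provider are hypothetical, and a
 * real engine would implement the callbacks properly and set capability
 * bits matching its prep routines.
 */
#include <linux/dmaengine.h>

static int my_alloc(struct dma_chan *chan)
{
	return 1;		/* a real driver allocates descriptors here */
}

static void my_free(struct dma_chan *chan)
{
}

static enum dma_status my_status(struct dma_chan *chan, dma_cookie_t cookie,
				 struct dma_tx_state *state)
{
	return DMA_SUCCESS;	/* a real driver reports actual progress */
}

static void my_issue(struct dma_chan *chan)
{
}

static struct my_dma {
	struct dma_device dma;
	struct dma_chan chan;
} my_dma;

static int example_register_provider(struct device *parent)
{
	struct dma_device *dd = &my_dma.dma;
	struct dma_chan *chan = &my_dma.chan;

	/* the mandatory callbacks checked by the BUG_ON()s above */
	dd->device_alloc_chan_resources = my_alloc;
	dd->device_free_chan_resources = my_free;
	dd->device_tx_status = my_status;
	dd->device_issue_pending = my_issue;
	dd->dev = parent;

	/* hang the channel(s) off the device before registering */
	INIT_LIST_HEAD(&dd->channels);
	chan->device = dd;
	list_add_tail(&chan->device_node, &dd->channels);

	return dma_async_device_register(dd);
}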
 808
 809/**
 810 * dma_async_device_unregister - unregister a DMA device
 811 * @device: &dma_device
 812 *
  813 * This routine is called by dma driver exit routines; dmaengine holds module
  814 * references to prevent it from being called while channels are in use.
 815 */
 816void dma_async_device_unregister(struct dma_device *device)
 817{
 818	struct dma_chan *chan;
 819
 820	mutex_lock(&dma_list_mutex);
 821	list_del_rcu(&device->global_node);
 822	dma_channel_rebalance();
 823	mutex_unlock(&dma_list_mutex);
 824
 825	list_for_each_entry(chan, &device->channels, device_node) {
 826		WARN_ONCE(chan->client_count,
 827			  "%s called while %d clients hold a reference\n",
 828			  __func__, chan->client_count);
 829		mutex_lock(&dma_list_mutex);
 830		chan->dev->chan = NULL;
 831		mutex_unlock(&dma_list_mutex);
 832		device_unregister(&chan->dev->device);
 833		free_percpu(chan->local);
 834	}
 835}
 836EXPORT_SYMBOL(dma_async_device_unregister);
 837
 838/**
 839 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 840 * @chan: DMA channel to offload copy to
 841 * @dest: destination address (virtual)
 842 * @src: source address (virtual)
 843 * @len: length
 844 *
 845 * Both @dest and @src must be mappable to a bus address according to the
 846 * DMA mapping API rules for streaming mappings.
 847 * Both @dest and @src must stay memory resident (kernel memory or locked
 848 * user space pages).
 849 */
 850dma_cookie_t
 851dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 852			void *src, size_t len)
 853{
 854	struct dma_device *dev = chan->device;
 855	struct dma_async_tx_descriptor *tx;
 856	dma_addr_t dma_dest, dma_src;
 857	dma_cookie_t cookie;
 858	unsigned long flags;
 859
 860	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
 861	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
 862	flags = DMA_CTRL_ACK |
 863		DMA_COMPL_SRC_UNMAP_SINGLE |
 864		DMA_COMPL_DEST_UNMAP_SINGLE;
 865	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 866
 867	if (!tx) {
 868		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
 869		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 870		return -ENOMEM;
 871	}
 872
 873	tx->callback = NULL;
 874	cookie = tx->tx_submit(tx);
 875
 876	preempt_disable();
 877	__this_cpu_add(chan->local->bytes_transferred, len);
 878	__this_cpu_inc(chan->local->memcpy_count);
 879	preempt_enable();
 880
 881	return cookie;
 882}
 883EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
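/*
 * Editor's sketch (not part of the original file): pairing the helper above
 * with dma_sync_wait(); example_copy is a hypothetical name and the caller
 * is assumed to sit inside a dmaengine_get()/dmaengine_put() pair.
 */
#include <linux/dmaengine.h>
#include <linux/string.h>

static void example_copy(void *dst, void *src, size_t len)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
	dma_cookie_t cookie;

	if (!chan) {
		memcpy(dst, src, len);		/* no offload available */
		return;
	}

	cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
	if (cookie < 0) {
		memcpy(dst, src, len);		/* descriptor allocation failed */
		return;
	}

	/* poll for completion; fall back to the CPU on error or timeout */
	if (dma_sync_wait(chan, cookie) != DMA_SUCCESS)
		memcpy(dst, src, len);
}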
 884
 885/**
 886 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 887 * @chan: DMA channel to offload copy to
 888 * @page: destination page
 889 * @offset: offset in page to copy to
 890 * @kdata: source address (virtual)
 891 * @len: length
 892 *
 893 * Both @page/@offset and @kdata must be mappable to a bus address according
 894 * to the DMA mapping API rules for streaming mappings.
 895 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 896 * locked user space pages)
 897 */
 898dma_cookie_t
 899dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 900			unsigned int offset, void *kdata, size_t len)
 901{
 902	struct dma_device *dev = chan->device;
 903	struct dma_async_tx_descriptor *tx;
 904	dma_addr_t dma_dest, dma_src;
 905	dma_cookie_t cookie;
 906	unsigned long flags;
 907
 908	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
 909	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
 910	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
 911	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 912
 913	if (!tx) {
 914		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
 915		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 916		return -ENOMEM;
 917	}
 918
 919	tx->callback = NULL;
 920	cookie = tx->tx_submit(tx);
 921
 922	preempt_disable();
 923	__this_cpu_add(chan->local->bytes_transferred, len);
 924	__this_cpu_inc(chan->local->memcpy_count);
 925	preempt_enable();
 926
 927	return cookie;
 928}
 929EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
 930
 931/**
 932 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 933 * @chan: DMA channel to offload copy to
 934 * @dest_pg: destination page
 935 * @dest_off: offset in page to copy to
 936 * @src_pg: source page
 937 * @src_off: offset in page to copy from
 938 * @len: length
 939 *
 940 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 941 * address according to the DMA mapping API rules for streaming mappings.
 942 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 943 * (kernel memory or locked user space pages).
 944 */
 945dma_cookie_t
 946dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 947	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
 948	size_t len)
 949{
 950	struct dma_device *dev = chan->device;
 951	struct dma_async_tx_descriptor *tx;
 952	dma_addr_t dma_dest, dma_src;
 953	dma_cookie_t cookie;
 954	unsigned long flags;
 955
 956	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
 957	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
 958				DMA_FROM_DEVICE);
 959	flags = DMA_CTRL_ACK;
 960	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 961
 962	if (!tx) {
 963		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
 964		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 965		return -ENOMEM;
 966	}
 967
 968	tx->callback = NULL;
 969	cookie = tx->tx_submit(tx);
 970
 971	preempt_disable();
 972	__this_cpu_add(chan->local->bytes_transferred, len);
 973	__this_cpu_inc(chan->local->memcpy_count);
 974	preempt_enable();
 975
 976	return cookie;
 977}
 978EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
 979
 980void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 981	struct dma_chan *chan)
 982{
 983	tx->chan = chan;
 984	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 985	spin_lock_init(&tx->lock);
 986	#endif
 987}
 988EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 989
 990/* dma_wait_for_async_tx - spin wait for a transaction to complete
 991 * @tx: in-flight transaction to wait on
 992 */
 993enum dma_status
 994dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 995{
 996	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 997
 998	if (!tx)
 999		return DMA_SUCCESS;
1000
1001	while (tx->cookie == -EBUSY) {
1002		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1003			pr_err("%s timeout waiting for descriptor submission\n",
1004				__func__);
1005			return DMA_ERROR;
1006		}
1007		cpu_relax();
1008	}
1009	return dma_sync_wait(tx->chan, tx->cookie);
1010}
1011EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1012
1013/* dma_run_dependencies - helper routine for dma drivers to process
1014 *	(start) dependent operations on their target channel
1015 * @tx: transaction with dependencies
1016 */
1017void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1018{
1019	struct dma_async_tx_descriptor *dep = txd_next(tx);
1020	struct dma_async_tx_descriptor *dep_next;
1021	struct dma_chan *chan;
1022
1023	if (!dep)
1024		return;
1025
1026	/* we'll submit tx->next now, so clear the link */
1027	txd_clear_next(tx);
1028	chan = dep->chan;
1029
1030	/* keep submitting up until a channel switch is detected
1031	 * in that case we will be called again as a result of
1032	 * processing the interrupt from async_tx_channel_switch
1033	 */
1034	for (; dep; dep = dep_next) {
1035		txd_lock(dep);
1036		txd_clear_parent(dep);
1037		dep_next = txd_next(dep);
1038		if (dep_next && dep_next->chan == chan)
1039			txd_clear_next(dep); /* ->next will be submitted */
1040		else
1041			dep_next = NULL; /* submit current dep and terminate */
1042		txd_unlock(dep);
1043
1044		dep->tx_submit(dep);
1045	}
1046
1047	chan->device->device_issue_pending(chan);
1048}
1049EXPORT_SYMBOL_GPL(dma_run_dependencies);
1050
1051static int __init dma_bus_init(void)
1052{
1053	return class_register(&dma_devclass);
1054}
1055arch_initcall(dma_bus_init);
1056
1057
v3.15
   1/*
   2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License as published by the Free
   6 * Software Foundation; either version 2 of the License, or (at your option)
   7 * any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc., 59
  16 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  17 *
  18 * The full GNU General Public License is included in this distribution in the
  19 * file called COPYING.
  20 */
  21
  22/*
  23 * This code implements the DMA subsystem. It provides a HW-neutral interface
  24 * for other kernel code to use asynchronous memory copy capabilities,
  25 * if present, and allows different HW DMA drivers to register as providing
  26 * this capability.
  27 *
   28 * Because we are accelerating what is already a relatively fast operation,
   29 * the code goes to great lengths to avoid additional overhead,
  30 * such as locking.
  31 *
  32 * LOCKING:
  33 *
   34 * The subsystem keeps a global list of dma_device structs; it is protected by
   35 * a mutex, dma_list_mutex.
  36 *
  37 * A subsystem can get access to a channel by calling dmaengine_get() followed
  38 * by dma_find_channel(), or if it has need for an exclusive channel it can call
  39 * dma_request_channel().  Once a channel is allocated a reference is taken
  40 * against its corresponding driver to disable removal.
  41 *
  42 * Each device has a channels list, which runs unlocked but is never modified
   43 * once the device is registered; it is just set up by the driver.
  44 *
  45 * See Documentation/dmaengine.txt for more details
  46 */
  47
  48#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  49
  50#include <linux/dma-mapping.h>
  51#include <linux/init.h>
  52#include <linux/module.h>
  53#include <linux/mm.h>
  54#include <linux/device.h>
  55#include <linux/dmaengine.h>
  56#include <linux/hardirq.h>
  57#include <linux/spinlock.h>
  58#include <linux/percpu.h>
  59#include <linux/rcupdate.h>
  60#include <linux/mutex.h>
  61#include <linux/jiffies.h>
  62#include <linux/rculist.h>
  63#include <linux/idr.h>
  64#include <linux/slab.h>
  65#include <linux/acpi.h>
  66#include <linux/acpi_dma.h>
  67#include <linux/of_dma.h>
  68#include <linux/mempool.h>
  69
  70static DEFINE_MUTEX(dma_list_mutex);
  71static DEFINE_IDR(dma_idr);
  72static LIST_HEAD(dma_device_list);
  73static long dmaengine_ref_count;
  74
  75/* --- sysfs implementation --- */
  76
  77/**
   78 * dev_to_dma_chan - convert a device pointer to its sysfs container object
  79 * @dev - device node
  80 *
  81 * Must be called under dma_list_mutex
  82 */
  83static struct dma_chan *dev_to_dma_chan(struct device *dev)
  84{
  85	struct dma_chan_dev *chan_dev;
  86
  87	chan_dev = container_of(dev, typeof(*chan_dev), device);
  88	return chan_dev->chan;
  89}
  90
  91static ssize_t memcpy_count_show(struct device *dev,
  92				 struct device_attribute *attr, char *buf)
  93{
  94	struct dma_chan *chan;
  95	unsigned long count = 0;
  96	int i;
  97	int err;
  98
  99	mutex_lock(&dma_list_mutex);
 100	chan = dev_to_dma_chan(dev);
 101	if (chan) {
 102		for_each_possible_cpu(i)
 103			count += per_cpu_ptr(chan->local, i)->memcpy_count;
 104		err = sprintf(buf, "%lu\n", count);
 105	} else
 106		err = -ENODEV;
 107	mutex_unlock(&dma_list_mutex);
 108
 109	return err;
 110}
 111static DEVICE_ATTR_RO(memcpy_count);
 112
 113static ssize_t bytes_transferred_show(struct device *dev,
 114				      struct device_attribute *attr, char *buf)
 115{
 116	struct dma_chan *chan;
 117	unsigned long count = 0;
 118	int i;
 119	int err;
 120
 121	mutex_lock(&dma_list_mutex);
 122	chan = dev_to_dma_chan(dev);
 123	if (chan) {
 124		for_each_possible_cpu(i)
 125			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
 126		err = sprintf(buf, "%lu\n", count);
 127	} else
 128		err = -ENODEV;
 129	mutex_unlock(&dma_list_mutex);
 130
 131	return err;
 132}
 133static DEVICE_ATTR_RO(bytes_transferred);
 134
 135static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
 136			   char *buf)
 137{
 138	struct dma_chan *chan;
 139	int err;
 140
 141	mutex_lock(&dma_list_mutex);
 142	chan = dev_to_dma_chan(dev);
 143	if (chan)
 144		err = sprintf(buf, "%d\n", chan->client_count);
 145	else
 146		err = -ENODEV;
 147	mutex_unlock(&dma_list_mutex);
 148
 149	return err;
 150}
 151static DEVICE_ATTR_RO(in_use);
 152
 153static struct attribute *dma_dev_attrs[] = {
 154	&dev_attr_memcpy_count.attr,
 155	&dev_attr_bytes_transferred.attr,
 156	&dev_attr_in_use.attr,
 157	NULL,
 158};
 159ATTRIBUTE_GROUPS(dma_dev);
 160
 161static void chan_dev_release(struct device *dev)
 162{
 163	struct dma_chan_dev *chan_dev;
 164
 165	chan_dev = container_of(dev, typeof(*chan_dev), device);
 166	if (atomic_dec_and_test(chan_dev->idr_ref)) {
 167		mutex_lock(&dma_list_mutex);
 168		idr_remove(&dma_idr, chan_dev->dev_id);
 169		mutex_unlock(&dma_list_mutex);
 170		kfree(chan_dev->idr_ref);
 171	}
 172	kfree(chan_dev);
 173}
 174
 175static struct class dma_devclass = {
 176	.name		= "dma",
 177	.dev_groups	= dma_dev_groups,
 178	.dev_release	= chan_dev_release,
 179};
 180
 181/* --- client and device registration --- */
 182
 183#define dma_device_satisfies_mask(device, mask) \
 184	__dma_device_satisfies_mask((device), &(mask))
 185static int
 186__dma_device_satisfies_mask(struct dma_device *device,
 187			    const dma_cap_mask_t *want)
 188{
 189	dma_cap_mask_t has;
 190
 191	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 192		DMA_TX_TYPE_END);
 193	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 194}
 195
 196static struct module *dma_chan_to_owner(struct dma_chan *chan)
 197{
 198	return chan->device->dev->driver->owner;
 199}
 200
 201/**
 202 * balance_ref_count - catch up the channel reference count
 203 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 204 *
 205 * balance_ref_count must be called under dma_list_mutex
 206 */
 207static void balance_ref_count(struct dma_chan *chan)
 208{
 209	struct module *owner = dma_chan_to_owner(chan);
 210
 211	while (chan->client_count < dmaengine_ref_count) {
 212		__module_get(owner);
 213		chan->client_count++;
 214	}
 215}
 216
 217/**
 218 * dma_chan_get - try to grab a dma channel's parent driver module
 219 * @chan - channel to grab
 220 *
 221 * Must be called under dma_list_mutex
 222 */
 223static int dma_chan_get(struct dma_chan *chan)
 224{
 225	int err = -ENODEV;
 226	struct module *owner = dma_chan_to_owner(chan);
 227
 228	if (chan->client_count) {
 229		__module_get(owner);
 230		err = 0;
 231	} else if (try_module_get(owner))
 232		err = 0;
 233
 234	if (err == 0)
 235		chan->client_count++;
 236
 237	/* allocate upon first client reference */
 238	if (chan->client_count == 1 && err == 0) {
 239		int desc_cnt = chan->device->device_alloc_chan_resources(chan);
 240
 241		if (desc_cnt < 0) {
 242			err = desc_cnt;
 243			chan->client_count = 0;
 244			module_put(owner);
 245		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 246			balance_ref_count(chan);
 247	}
 248
 249	return err;
 250}
 251
 252/**
 253 * dma_chan_put - drop a reference to a dma channel's parent driver module
 254 * @chan - channel to release
 255 *
 256 * Must be called under dma_list_mutex
 257 */
 258static void dma_chan_put(struct dma_chan *chan)
 259{
 260	if (!chan->client_count)
 261		return; /* this channel failed alloc_chan_resources */
 262	chan->client_count--;
 263	module_put(dma_chan_to_owner(chan));
 264	if (chan->client_count == 0)
 265		chan->device->device_free_chan_resources(chan);
 266}
 267
 268enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 269{
 270	enum dma_status status;
 271	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 272
 273	dma_async_issue_pending(chan);
 274	do {
 275		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 276		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 277			pr_err("%s: timeout!\n", __func__);
 278			return DMA_ERROR;
 279		}
 280		if (status != DMA_IN_PROGRESS)
 281			break;
 282		cpu_relax();
 283	} while (1);
 284
 285	return status;
 286}
 287EXPORT_SYMBOL(dma_sync_wait);
 288
 289/**
 290 * dma_cap_mask_all - enable iteration over all operation types
 291 */
 292static dma_cap_mask_t dma_cap_mask_all;
 293
 294/**
 295 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 296 * @chan - associated channel for this entry
 297 */
 298struct dma_chan_tbl_ent {
 299	struct dma_chan *chan;
 300};
 301
 302/**
 303 * channel_table - percpu lookup table for memory-to-memory offload providers
 304 */
 305static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 306
 307static int __init dma_channel_table_init(void)
 308{
 309	enum dma_transaction_type cap;
 310	int err = 0;
 311
 312	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 313
 314	/* 'interrupt', 'private', and 'slave' are channel capabilities,
 315	 * but are not associated with an operation so they do not need
 316	 * an entry in the channel_table
 317	 */
 318	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
 319	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 320	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 321
 322	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
 323		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
 324		if (!channel_table[cap]) {
 325			err = -ENOMEM;
 326			break;
 327		}
 328	}
 329
 330	if (err) {
 331		pr_err("initialization failure\n");
 332		for_each_dma_cap_mask(cap, dma_cap_mask_all)
 333			if (channel_table[cap])
 334				free_percpu(channel_table[cap]);
 335	}
 336
 337	return err;
 338}
 339arch_initcall(dma_channel_table_init);
 340
 341/**
 342 * dma_find_channel - find a channel to carry out the operation
 343 * @tx_type: transaction type
 344 */
 345struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 346{
 347	return this_cpu_read(channel_table[tx_type]->chan);
 348}
 349EXPORT_SYMBOL(dma_find_channel);
 350
 351/*
 352 * net_dma_find_channel - find a channel for net_dma
 353 * net_dma has alignment requirements
 354 */
 355struct dma_chan *net_dma_find_channel(void)
 356{
 357	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
 358	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
 359		return NULL;
 360
 361	return chan;
 362}
 363EXPORT_SYMBOL(net_dma_find_channel);
 364
 365/**
 366 * dma_issue_pending_all - flush all pending operations across all channels
 367 */
 368void dma_issue_pending_all(void)
 369{
 370	struct dma_device *device;
 371	struct dma_chan *chan;
 372
 373	rcu_read_lock();
 374	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 375		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 376			continue;
 377		list_for_each_entry(chan, &device->channels, device_node)
 378			if (chan->client_count)
 379				device->device_issue_pending(chan);
 380	}
 381	rcu_read_unlock();
 382}
 383EXPORT_SYMBOL(dma_issue_pending_all);
 384
 385/**
 386 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 387 */
 388static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 389{
 390	int node = dev_to_node(chan->device->dev);
 391	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
 392}
 393
 394/**
 395 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 396 * @cap: capability to match
 397 * @cpu: cpu index which the channel should be close to
 398 *
 399 * If some channels are close to the given cpu, the one with the lowest
 400 * reference count is returned. Otherwise, cpu is ignored and only the
 401 * reference count is taken into account.
 402 * Must be called under dma_list_mutex.
 403 */
 404static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 405{
 406	struct dma_device *device;
 407	struct dma_chan *chan;
 408	struct dma_chan *min = NULL;
 409	struct dma_chan *localmin = NULL;
 410
 411	list_for_each_entry(device, &dma_device_list, global_node) {
 412		if (!dma_has_cap(cap, device->cap_mask) ||
 413		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 414			continue;
 415		list_for_each_entry(chan, &device->channels, device_node) {
 416			if (!chan->client_count)
 417				continue;
 418			if (!min || chan->table_count < min->table_count)
 419				min = chan;
 420
 421			if (dma_chan_is_local(chan, cpu))
 422				if (!localmin ||
 423				    chan->table_count < localmin->table_count)
 424					localmin = chan;
 425		}
 426	}
 427
 428	chan = localmin ? localmin : min;
 429
 430	if (chan)
 431		chan->table_count++;
 432
 433	return chan;
 434}
 435
 436/**
 437 * dma_channel_rebalance - redistribute the available channels
 438 *
 439 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 440 * operation type) in the SMP case,  and operation isolation (avoid
 441 * multi-tasking channels) in the non-SMP case.  Must be called under
 442 * dma_list_mutex.
 443 */
 444static void dma_channel_rebalance(void)
 445{
 446	struct dma_chan *chan;
 447	struct dma_device *device;
 448	int cpu;
 449	int cap;
 450
 451	/* undo the last distribution */
 452	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 453		for_each_possible_cpu(cpu)
 454			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 455
 456	list_for_each_entry(device, &dma_device_list, global_node) {
 457		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 458			continue;
 459		list_for_each_entry(chan, &device->channels, device_node)
 460			chan->table_count = 0;
 461	}
 462
 463	/* don't populate the channel_table if no clients are available */
 464	if (!dmaengine_ref_count)
 465		return;
 466
 467	/* redistribute available channels */
 468	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 469		for_each_online_cpu(cpu) {
 470			chan = min_chan(cap, cpu);
 471			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 472		}
 473}
 474
 475static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 476					  struct dma_device *dev,
 477					  dma_filter_fn fn, void *fn_param)
 478{
 479	struct dma_chan *chan;
 480
 481	if (!__dma_device_satisfies_mask(dev, mask)) {
 482		pr_debug("%s: wrong capabilities\n", __func__);
 483		return NULL;
 484	}
 485	/* devices with multiple channels need special handling as we need to
 486	 * ensure that all channels are either private or public.
 487	 */
 488	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
 489		list_for_each_entry(chan, &dev->channels, device_node) {
 490			/* some channels are already publicly allocated */
 491			if (chan->client_count)
 492				return NULL;
 493		}
 494
 495	list_for_each_entry(chan, &dev->channels, device_node) {
 496		if (chan->client_count) {
 497			pr_debug("%s: %s busy\n",
 498				 __func__, dma_chan_name(chan));
 499			continue;
 500		}
 501		if (fn && !fn(chan, fn_param)) {
 502			pr_debug("%s: %s filter said false\n",
 503				 __func__, dma_chan_name(chan));
 504			continue;
 505		}
 506		return chan;
 507	}
 508
 509	return NULL;
 510}
 511
 512/**
  513 * dma_get_slave_channel - try to get a specific channel exclusively
 514 * @chan: target channel
 515 */
 516struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 517{
 518	int err = -EBUSY;
 519
 520	/* lock against __dma_request_channel */
 521	mutex_lock(&dma_list_mutex);
 522
 523	if (chan->client_count == 0) {
 524		err = dma_chan_get(chan);
 525		if (err)
 526			pr_debug("%s: failed to get %s: (%d)\n",
 527				__func__, dma_chan_name(chan), err);
 528	} else
 529		chan = NULL;
 530
 531	mutex_unlock(&dma_list_mutex);
 532
 533
 534	return chan;
 535}
 536EXPORT_SYMBOL_GPL(dma_get_slave_channel);
 537
 538struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 539{
 540	dma_cap_mask_t mask;
 541	struct dma_chan *chan;
 542	int err;
 543
 544	dma_cap_zero(mask);
 545	dma_cap_set(DMA_SLAVE, mask);
 546
 547	/* lock against __dma_request_channel */
 548	mutex_lock(&dma_list_mutex);
 549
 550	chan = private_candidate(&mask, device, NULL, NULL);
 551	if (chan) {
 552		err = dma_chan_get(chan);
 553		if (err) {
 554			pr_debug("%s: failed to get %s: (%d)\n",
 555				__func__, dma_chan_name(chan), err);
 556			chan = NULL;
 557		}
 558	}
 559
 560	mutex_unlock(&dma_list_mutex);
 561
 562	return chan;
 563}
 564EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 565
 566/**
 567 * __dma_request_channel - try to allocate an exclusive channel
 568 * @mask: capabilities that the channel must satisfy
 569 * @fn: optional callback to disposition available channels
 570 * @fn_param: opaque parameter to pass to dma_filter_fn
 571 *
 572 * Returns pointer to appropriate DMA channel on success or NULL.
 573 */
 574struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 575				       dma_filter_fn fn, void *fn_param)
 576{
 577	struct dma_device *device, *_d;
 578	struct dma_chan *chan = NULL;
 579	int err;
 580
 581	/* Find a channel */
 582	mutex_lock(&dma_list_mutex);
 583	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 584		chan = private_candidate(mask, device, fn, fn_param);
 585		if (chan) {
 586			/* Found a suitable channel, try to grab, prep, and
 587			 * return it.  We first set DMA_PRIVATE to disable
 588			 * balance_ref_count as this channel will not be
 589			 * published in the general-purpose allocator
 590			 */
 591			dma_cap_set(DMA_PRIVATE, device->cap_mask);
 592			device->privatecnt++;
 593			err = dma_chan_get(chan);
 594
 595			if (err == -ENODEV) {
 596				pr_debug("%s: %s module removed\n",
 597					 __func__, dma_chan_name(chan));
 598				list_del_rcu(&device->global_node);
 599			} else if (err)
 600				pr_debug("%s: failed to get %s: (%d)\n",
 601					 __func__, dma_chan_name(chan), err);
 602			else
 603				break;
 604			if (--device->privatecnt == 0)
 605				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 606			chan = NULL;
 607		}
 608	}
 609	mutex_unlock(&dma_list_mutex);
 610
 611	pr_debug("%s: %s (%s)\n",
 612		 __func__,
 613		 chan ? "success" : "fail",
 614		 chan ? dma_chan_name(chan) : NULL);
 615
 616	return chan;
 617}
 618EXPORT_SYMBOL_GPL(__dma_request_channel);
 619
 620/**
  621 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 622 * @dev:	pointer to client device structure
 623 * @name:	slave channel name
 624 *
 625 * Returns pointer to appropriate DMA channel on success or an error pointer.
 626 */
 627struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
 628						  const char *name)
 629{
 630	/* If device-tree is present get slave info from here */
 631	if (dev->of_node)
 632		return of_dma_request_slave_channel(dev->of_node, name);
 633
 634	/* If device was enumerated by ACPI get slave info from here */
 635	if (ACPI_HANDLE(dev))
 636		return acpi_dma_request_slave_chan_by_name(dev, name);
 637
 638	return ERR_PTR(-ENODEV);
 639}
 640EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);
 641
 642/**
 643 * dma_request_slave_channel - try to allocate an exclusive slave channel
 644 * @dev:	pointer to client device structure
 645 * @name:	slave channel name
 646 *
 647 * Returns pointer to appropriate DMA channel on success or NULL.
 648 */
 649struct dma_chan *dma_request_slave_channel(struct device *dev,
 650					   const char *name)
 651{
 652	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);
 653	if (IS_ERR(ch))
 654		return NULL;
 655	return ch;
 656}
 657EXPORT_SYMBOL_GPL(dma_request_slave_channel);
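/*
 * Editor's sketch (not part of the original file): a slave (peripheral)
 * driver probing its named channel; example_probe and the channel name
 * "tx" are hypothetical, the calls are the ones exported above.
 */
#include <linux/dmaengine.h>
#include <linux/errno.h>

static int example_probe(struct device *dev)
{
	struct dma_chan *chan;

	/* resolved via DT or ACPI as described above; NULL when nothing matches */
	chan = dma_request_slave_channel(dev, "tx");
	if (!chan)
		return -EPROBE_DEFER;	/* or fall back to PIO */

	/* ... configure and use the channel with the usual slave API ... */

	dma_release_channel(chan);
	return 0;
}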
 658
 659void dma_release_channel(struct dma_chan *chan)
 660{
 661	mutex_lock(&dma_list_mutex);
 662	WARN_ONCE(chan->client_count != 1,
 663		  "chan reference count %d != 1\n", chan->client_count);
 664	dma_chan_put(chan);
 665	/* drop PRIVATE cap enabled by __dma_request_channel() */
 666	if (--chan->device->privatecnt == 0)
 667		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 668	mutex_unlock(&dma_list_mutex);
 669}
 670EXPORT_SYMBOL_GPL(dma_release_channel);
 671
 672/**
 673 * dmaengine_get - register interest in dma_channels
 674 */
 675void dmaengine_get(void)
 676{
 677	struct dma_device *device, *_d;
 678	struct dma_chan *chan;
 679	int err;
 680
 681	mutex_lock(&dma_list_mutex);
 682	dmaengine_ref_count++;
 683
 684	/* try to grab channels */
 685	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 686		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 687			continue;
 688		list_for_each_entry(chan, &device->channels, device_node) {
 689			err = dma_chan_get(chan);
 690			if (err == -ENODEV) {
 691				/* module removed before we could use it */
 692				list_del_rcu(&device->global_node);
 693				break;
 694			} else if (err)
 695				pr_debug("%s: failed to get %s: (%d)\n",
 696				       __func__, dma_chan_name(chan), err);
 697		}
 698	}
 699
 700	/* if this is the first reference and there were channels
 701	 * waiting we need to rebalance to get those channels
 702	 * incorporated into the channel table
 703	 */
 704	if (dmaengine_ref_count == 1)
 705		dma_channel_rebalance();
 706	mutex_unlock(&dma_list_mutex);
 707}
 708EXPORT_SYMBOL(dmaengine_get);
 709
 710/**
 711 * dmaengine_put - let dma drivers be removed when ref_count == 0
 712 */
 713void dmaengine_put(void)
 714{
 715	struct dma_device *device;
 716	struct dma_chan *chan;
 717
 718	mutex_lock(&dma_list_mutex);
 719	dmaengine_ref_count--;
 720	BUG_ON(dmaengine_ref_count < 0);
 721	/* drop channel references */
 722	list_for_each_entry(device, &dma_device_list, global_node) {
 723		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 724			continue;
 725		list_for_each_entry(chan, &device->channels, device_node)
 726			dma_chan_put(chan);
 727	}
 728	mutex_unlock(&dma_list_mutex);
 729}
 730EXPORT_SYMBOL(dmaengine_put);
 731
 732static bool device_has_all_tx_types(struct dma_device *device)
 733{
 734	/* A device that satisfies this test has channels that will never cause
 735	 * an async_tx channel switch event as all possible operation types can
 736	 * be handled.
 737	 */
 738	#ifdef CONFIG_ASYNC_TX_DMA
 739	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 740		return false;
 741	#endif
 742
 743	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
 744	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 745		return false;
 746	#endif
 747
 748	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 749	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 750		return false;
 751
 752	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
 753	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
 754		return false;
 755	#endif
 756	#endif
 757
 758	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
 759	if (!dma_has_cap(DMA_PQ, device->cap_mask))
 760		return false;
 761
 762	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
 763	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
 764		return false;
 765	#endif
 766	#endif
 767
 768	return true;
 769}
 770
 771static int get_dma_id(struct dma_device *device)
 772{
 773	int rc;
 774
 775	mutex_lock(&dma_list_mutex);
 776
 777	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
 778	if (rc >= 0)
 779		device->dev_id = rc;
 780
 781	mutex_unlock(&dma_list_mutex);
 782	return rc < 0 ? rc : 0;
 783}
 784
 785/**
 786 * dma_async_device_register - register a DMA device and its channels
 787 * @device: &dma_device
 788 */
 789int dma_async_device_register(struct dma_device *device)
 790{
 791	int chancnt = 0, rc;
 792	struct dma_chan *chan;
 793	atomic_t *idr_ref;
 794
 795	if (!device)
 796		return -ENODEV;
 797
 798	/* validate device routines */
 799	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
 800		!device->device_prep_dma_memcpy);
 801	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
 802		!device->device_prep_dma_xor);
 803	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
 804		!device->device_prep_dma_xor_val);
 805	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
 806		!device->device_prep_dma_pq);
 807	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
 808		!device->device_prep_dma_pq_val);
 809	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 810		!device->device_prep_dma_interrupt);
 811	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
 812		!device->device_prep_dma_sg);
 813	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 814		!device->device_prep_dma_cyclic);
 815	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 816		!device->device_control);
 817	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
 818		!device->device_prep_interleaved_dma);
 819
 820	BUG_ON(!device->device_alloc_chan_resources);
 821	BUG_ON(!device->device_free_chan_resources);
 822	BUG_ON(!device->device_tx_status);
 823	BUG_ON(!device->device_issue_pending);
 824	BUG_ON(!device->dev);
 825
 826	/* note: this only matters in the
 827	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 828	 */
 829	if (device_has_all_tx_types(device))
 830		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 831
 832	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 833	if (!idr_ref)
 834		return -ENOMEM;
 835	rc = get_dma_id(device);
 836	if (rc != 0) {
 837		kfree(idr_ref);
 838		return rc;
 839	}
 840
 841	atomic_set(idr_ref, 0);
 842
 843	/* represent channels in sysfs. Probably want devs too */
 844	list_for_each_entry(chan, &device->channels, device_node) {
 845		rc = -ENOMEM;
 846		chan->local = alloc_percpu(typeof(*chan->local));
 847		if (chan->local == NULL)
 848			goto err_out;
 849		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 850		if (chan->dev == NULL) {
 851			free_percpu(chan->local);
 852			chan->local = NULL;
 853			goto err_out;
 854		}
 855
 856		chan->chan_id = chancnt++;
 857		chan->dev->device.class = &dma_devclass;
 858		chan->dev->device.parent = device->dev;
 859		chan->dev->chan = chan;
 860		chan->dev->idr_ref = idr_ref;
 861		chan->dev->dev_id = device->dev_id;
 862		atomic_inc(idr_ref);
 863		dev_set_name(&chan->dev->device, "dma%dchan%d",
 864			     device->dev_id, chan->chan_id);
 865
 866		rc = device_register(&chan->dev->device);
 867		if (rc) {
 868			free_percpu(chan->local);
 869			chan->local = NULL;
 870			kfree(chan->dev);
 871			atomic_dec(idr_ref);
 872			goto err_out;
 873		}
 874		chan->client_count = 0;
 875	}
 876	device->chancnt = chancnt;
 877
 878	mutex_lock(&dma_list_mutex);
 879	/* take references on public channels */
 880	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
 881		list_for_each_entry(chan, &device->channels, device_node) {
 882			/* if clients are already waiting for channels we need
 883			 * to take references on their behalf
 884			 */
 885			if (dma_chan_get(chan) == -ENODEV) {
 886				/* note we can only get here for the first
 887				 * channel as the remaining channels are
 888				 * guaranteed to get a reference
 889				 */
 890				rc = -ENODEV;
 891				mutex_unlock(&dma_list_mutex);
 892				goto err_out;
 893			}
 894		}
 895	list_add_tail_rcu(&device->global_node, &dma_device_list);
 896	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 897		device->privatecnt++;	/* Always private */
 898	dma_channel_rebalance();
 899	mutex_unlock(&dma_list_mutex);
 900
 901	return 0;
 902
 903err_out:
 904	/* if we never registered a channel just release the idr */
 905	if (atomic_read(idr_ref) == 0) {
 906		mutex_lock(&dma_list_mutex);
 907		idr_remove(&dma_idr, device->dev_id);
 908		mutex_unlock(&dma_list_mutex);
 909		kfree(idr_ref);
 910		return rc;
 911	}
 912
 913	list_for_each_entry(chan, &device->channels, device_node) {
 914		if (chan->local == NULL)
 915			continue;
 916		mutex_lock(&dma_list_mutex);
 917		chan->dev->chan = NULL;
 918		mutex_unlock(&dma_list_mutex);
 919		device_unregister(&chan->dev->device);
 920		free_percpu(chan->local);
 921	}
 922	return rc;
 923}
 924EXPORT_SYMBOL(dma_async_device_register);
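
/*
 * Provider-side sketch (illustrative; example_* names are hypothetical): the
 * minimum a driver does before calling dma_async_device_register() is set a
 * capability mask, fill in the callbacks the BUG_ON()s above check, and hang
 * its channels off device->channels.
 */
static int __maybe_unused example_register_provider(struct dma_device *dd,
						    struct dma_chan *chan)
{
	/* assumed already set by the caller: device_alloc_chan_resources,
	 * device_free_chan_resources, device_tx_status, device_issue_pending
	 * and a prep routine for each capability set below
	 */
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);

	INIT_LIST_HEAD(&dd->channels);
	chan->device = dd;
	list_add_tail(&chan->device_node, &dd->channels);

	return dma_async_device_register(dd);
}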
 925
 926/**
 927 * dma_async_device_unregister - unregister a DMA device
 928 * @device: &dma_device
 929 *
 930 * This routine is called by DMA driver exit routines; dmaengine holds module
 931 * references to prevent it from being called while channels are in use.
 932 */
 933void dma_async_device_unregister(struct dma_device *device)
 934{
 935	struct dma_chan *chan;
 936
 937	mutex_lock(&dma_list_mutex);
 938	list_del_rcu(&device->global_node);
 939	dma_channel_rebalance();
 940	mutex_unlock(&dma_list_mutex);
 941
 942	list_for_each_entry(chan, &device->channels, device_node) {
 943		WARN_ONCE(chan->client_count,
 944			  "%s called while %d clients hold a reference\n",
 945			  __func__, chan->client_count);
 946		mutex_lock(&dma_list_mutex);
 947		chan->dev->chan = NULL;
 948		mutex_unlock(&dma_list_mutex);
 949		device_unregister(&chan->dev->device);
 950		free_percpu(chan->local);
 951	}
 952}
 953EXPORT_SYMBOL(dma_async_device_unregister);
 954
 955struct dmaengine_unmap_pool {
 956	struct kmem_cache *cache;
 957	const char *name;
 958	mempool_t *pool;
 959	size_t size;
 960};
 961
 962#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
 963static struct dmaengine_unmap_pool unmap_pool[] = {
 964	__UNMAP_POOL(2),
 965	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
 966	__UNMAP_POOL(16),
 967	__UNMAP_POOL(128),
 968	__UNMAP_POOL(256),
 969	#endif
 970};
 971
 972static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
 973{
 974	int order = get_count_order(nr);
 975
 976	switch (order) {
 977	case 0 ... 1:
 978		return &unmap_pool[0];
 979	case 2 ... 4:
 980		return &unmap_pool[1];
 981	case 5 ... 7:
 982		return &unmap_pool[2];
 983	case 8:
 984		return &unmap_pool[3];
 985	default:
 986		BUG();
 987		return NULL;
 988	}
 989}
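
/*
 * Worked examples of the selection above (assuming CONFIG_DMA_ENGINE_RAID so
 * all four pools exist): nr = 2 gives get_count_order(2) = 1 -> unmap_pool[0]
 * (2 addresses); nr = 5 gives order 3 -> unmap_pool[1] (16); nr = 200 gives
 * order 8 -> unmap_pool[3] (256).  Without CONFIG_DMA_ENGINE_RAID only
 * unmap_pool[0] exists, so callers must not need more than two addresses.
 */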
 990
 991static void dmaengine_unmap(struct kref *kref)
 992{
 993	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
 994	struct device *dev = unmap->dev;
 995	int cnt, i;
 996
 997	cnt = unmap->to_cnt;
 998	for (i = 0; i < cnt; i++)
 999		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1000			       DMA_TO_DEVICE);
1001	cnt += unmap->from_cnt;
1002	for (; i < cnt; i++)
1003		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1004			       DMA_FROM_DEVICE);
1005	cnt += unmap->bidi_cnt;
1006	for (; i < cnt; i++) {
1007		if (unmap->addr[i] == 0)
1008			continue;
1009		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1010			       DMA_BIDIRECTIONAL);
1011	}
1012	cnt = unmap->map_cnt;
1013	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1014}
1015
1016void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1017{
1018	if (unmap)
1019		kref_put(&unmap->kref, dmaengine_unmap);
1020}
1021EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1022
1023static void dmaengine_destroy_unmap_pool(void)
1024{
1025	int i;
1026
1027	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1028		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1029
1030		if (p->pool)
1031			mempool_destroy(p->pool);
1032		p->pool = NULL;
1033		if (p->cache)
1034			kmem_cache_destroy(p->cache);
1035		p->cache = NULL;
1036	}
1037}
1038
1039static int __init dmaengine_init_unmap_pool(void)
1040{
1041	int i;
1042
1043	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1044		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1045		size_t size;
1046
1047		size = sizeof(struct dmaengine_unmap_data) +
1048		       sizeof(dma_addr_t) * p->size;
1049
1050		p->cache = kmem_cache_create(p->name, size, 0,
1051					     SLAB_HWCACHE_ALIGN, NULL);
1052		if (!p->cache)
1053			break;
1054		p->pool = mempool_create_slab_pool(1, p->cache);
1055		if (!p->pool)
1056			break;
1057	}
1058
1059	if (i == ARRAY_SIZE(unmap_pool))
1060		return 0;
1061
1062	dmaengine_destroy_unmap_pool();
1063	return -ENOMEM;
1064}
1065
1066struct dmaengine_unmap_data *
1067dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1068{
1069	struct dmaengine_unmap_data *unmap;
1070
1071	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1072	if (!unmap)
1073		return NULL;
1074
1075	memset(unmap, 0, sizeof(*unmap));
1076	kref_init(&unmap->kref);
1077	unmap->dev = dev;
1078	unmap->map_cnt = nr;
1079
1080	return unmap;
1081}
1082EXPORT_SYMBOL(dmaengine_get_unmap_data);
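
/*
 * Illustrative sketch (hypothetical names): the life cycle of an unmap
 * descriptor as used by the memcpy helpers below -- allocate it, record the
 * mappings, attach it to a descriptor with dma_set_unmap(), and drop the
 * caller's reference with dmaengine_unmap_put() after submission.
 */
static __maybe_unused struct dmaengine_unmap_data *
example_map_single_src(struct device *dev, struct page *pg, size_t len)
{
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 1, GFP_NOWAIT);
	if (!unmap)
		return NULL;

	unmap->to_cnt = 1;
	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev, pg, 0, len, DMA_TO_DEVICE);

	return unmap;	/* pair with dma_set_unmap() + dmaengine_unmap_put() */
}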
1083
1084/**
1085 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
1086 * @chan: DMA channel to offload copy to
1087 * @dest_pg: destination page
1088 * @dest_off: offset in page to copy to
1089 * @src_pg: source page
1090 * @src_off: offset in page to copy from
1091 * @len: length
1092 *
1093 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
1094 * address according to the DMA mapping API rules for streaming mappings.
1095 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
1096 * (kernel memory or locked user space pages).
1097 */
1098dma_cookie_t
1099dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
1100	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
1101	size_t len)
1102{
1103	struct dma_device *dev = chan->device;
1104	struct dma_async_tx_descriptor *tx;
1105	struct dmaengine_unmap_data *unmap;
1106	dma_cookie_t cookie;
1107	unsigned long flags;
1108
1109	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
1110	if (!unmap)
1111		return -ENOMEM;
1112
1113	unmap->to_cnt = 1;
1114	unmap->from_cnt = 1;
1115	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
1116				      DMA_TO_DEVICE);
1117	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
1118				      DMA_FROM_DEVICE);
1119	unmap->len = len;
1120	flags = DMA_CTRL_ACK;
1121	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
1122					 len, flags);
1123
1124	if (!tx) {
1125		dmaengine_unmap_put(unmap);
1126		return -ENOMEM;
1127	}
1128
1129	dma_set_unmap(tx, unmap);
1130	cookie = tx->tx_submit(tx);
1131	dmaengine_unmap_put(unmap);
1132
1133	preempt_disable();
1134	__this_cpu_add(chan->local->bytes_transferred, len);
1135	__this_cpu_inc(chan->local->memcpy_count);
1136	preempt_enable();
1137
1138	return cookie;
1139}
1140EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
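
/*
 * Illustrative sketch (hypothetical names): a synchronous single-page copy
 * built on the helper above.  A real caller obtains @chan via dmaengine_get()
 * plus dma_find_channel(DMA_MEMCPY), or via dma_request_channel().
 */
static int __maybe_unused example_copy_one_page(struct dma_chan *chan,
						struct page *dst,
						struct page *src)
{
	dma_cookie_t cookie;

	cookie = dma_async_memcpy_pg_to_pg(chan, dst, 0, src, 0, PAGE_SIZE);
	if (dma_submit_error(cookie))
		return -ENOMEM;

	dma_async_issue_pending(chan);
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}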
1141
1142/**
1143 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
1144 * @chan: DMA channel to offload copy to
1145 * @dest: destination address (virtual)
1146 * @src: source address (virtual)
1147 * @len: length
1148 *
1149 * Both @dest and @src must be mappable to a bus address according to the
1150 * DMA mapping API rules for streaming mappings.
1151 * Both @dest and @src must stay memory resident (kernel memory or locked
1152 * user space pages).
1153 */
1154dma_cookie_t
1155dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
1156			    void *src, size_t len)
1157{
1158	return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
1159					 (unsigned long) dest & ~PAGE_MASK,
1160					 virt_to_page(src),
1161					 (unsigned long) src & ~PAGE_MASK, len);
1162}
1163EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
1164
1165/**
1166 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
1167 * @chan: DMA channel to offload copy to
1168 * @page: destination page
1169 * @offset: offset in page to copy to
1170 * @kdata: source address (virtual)
1171 * @len: length
1172 *
1173 * Both @page/@offset and @kdata must be mappable to a bus address according
1174 * to the DMA mapping API rules for streaming mappings.
1175 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
1176 * locked user space pages).
1177 */
1178dma_cookie_t
1179dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
1180			   unsigned int offset, void *kdata, size_t len)
1181{
1182	return dma_async_memcpy_pg_to_pg(chan, page, offset,
1183					 virt_to_page(kdata),
1184					 (unsigned long) kdata & ~PAGE_MASK, len);
1185}
1186EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
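
/*
 * Illustrative sketch: filling a page from a kmalloc()'d buffer via the
 * wrapper above.  Because these wrappers resolve addresses with
 * virt_to_page(), @kbuf is assumed to live in the kernel linear mapping
 * (not vmalloc space).
 */
static dma_cookie_t __maybe_unused example_fill_page(struct dma_chan *chan,
						     struct page *dst,
						     void *kbuf, size_t len)
{
	return dma_async_memcpy_buf_to_pg(chan, dst, 0, kbuf, len);
}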
1187
1188void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1189	struct dma_chan *chan)
1190{
1191	tx->chan = chan;
1192	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1193	spin_lock_init(&tx->lock);
1194	#endif
1195}
1196EXPORT_SYMBOL(dma_async_tx_descriptor_init);
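
/*
 * Provider-side sketch (hypothetical struct and names): drivers embed a
 * struct dma_async_tx_descriptor in their own software descriptor and
 * initialize it with dma_async_tx_descriptor_init(), typically from their
 * device_alloc_chan_resources() implementation.
 */
struct example_sw_desc {
	struct dma_async_tx_descriptor txd;
	/* hardware descriptor / driver bookkeeping would follow */
};

static void __maybe_unused example_init_sw_desc(struct example_sw_desc *d,
						struct dma_chan *chan)
{
	dma_async_tx_descriptor_init(&d->txd, chan);
	/* a real driver also sets d->txd.tx_submit to its submit hook here */
}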
1197
1198/* dma_wait_for_async_tx - spin wait for a transaction to complete
1199 * @tx: in-flight transaction to wait on
1200 */
1201enum dma_status
1202dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1203{
1204	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1205
1206	if (!tx)
1207		return DMA_COMPLETE;
1208
1209	while (tx->cookie == -EBUSY) {
1210		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1211			pr_err("%s timeout waiting for descriptor submission\n",
1212			       __func__);
1213			return DMA_ERROR;
1214		}
1215		cpu_relax();
1216	}
1217	return dma_sync_wait(tx->chan, tx->cookie);
1218}
1219EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
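
/*
 * Illustrative sketch (hypothetical name): waiting on a descriptor that may
 * still be held back by an unsubmitted dependency (cookie == -EBUSY).  @tx
 * would come from one of the device_prep_dma_*() callbacks; the helper spins
 * until the dependency chain submits it, then polls via dma_sync_wait().
 */
static int __maybe_unused example_wait_on_tx(struct dma_async_tx_descriptor *tx)
{
	return dma_wait_for_async_tx(tx) == DMA_COMPLETE ? 0 : -EIO;
}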
1220
1221/* dma_run_dependencies - helper routine for dma drivers to process
1222 *	(start) dependent operations on their target channel
1223 * @tx: transaction with dependencies
1224 */
1225void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1226{
1227	struct dma_async_tx_descriptor *dep = txd_next(tx);
1228	struct dma_async_tx_descriptor *dep_next;
1229	struct dma_chan *chan;
1230
1231	if (!dep)
1232		return;
1233
1234	/* we'll submit tx->next now, so clear the link */
1235	txd_clear_next(tx);
1236	chan = dep->chan;
1237
1238	/* keep submitting until a channel switch is detected;
1239	 * in that case we will be called again as a result of
1240	 * processing the interrupt from async_tx_channel_switch
1241	 */
1242	for (; dep; dep = dep_next) {
1243		txd_lock(dep);
1244		txd_clear_parent(dep);
1245		dep_next = txd_next(dep);
1246		if (dep_next && dep_next->chan == chan)
1247			txd_clear_next(dep); /* ->next will be submitted */
1248		else
1249			dep_next = NULL; /* submit current dep and terminate */
1250		txd_unlock(dep);
1251
1252		dep->tx_submit(dep);
1253	}
1254
1255	chan->device->device_issue_pending(chan);
1256}
1257EXPORT_SYMBOL_GPL(dma_run_dependencies);
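
/*
 * Provider-side sketch (hypothetical name, ordering based on common driver
 * practice): a descriptor-completion path typically unmaps, invokes the
 * client callback, then kicks any operations async_tx queued behind the
 * finished descriptor.
 */
static void __maybe_unused example_complete_txd(struct dma_async_tx_descriptor *txd)
{
	dma_descriptor_unmap(txd);
	if (txd->callback)
		txd->callback(txd->callback_param);
	dma_run_dependencies(txd);
}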
1258
1259static int __init dma_bus_init(void)
1260{
1261	int err = dmaengine_init_unmap_pool();
1262
1263	if (err)
1264		return err;
1265	return class_register(&dma_devclass);
1266}
1267arch_initcall(dma_bus_init);
1268
1269