v3.1: drivers/dma/dmaengine.c
   1/*
   2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License as published by the Free
   6 * Software Foundation; either version 2 of the License, or (at your option)
   7 * any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * You should have received a copy of the GNU General Public License along with
  15 * this program; if not, write to the Free Software Foundation, Inc., 59
  16 * Temple Place - Suite 330, Boston, MA  02111-1307, USA.
  17 *
  18 * The full GNU General Public License is included in this distribution in the
  19 * file called COPYING.
  20 */
  21
  22/*
  23 * This code implements the DMA subsystem. It provides a HW-neutral interface
  24 * for other kernel code to use asynchronous memory copy capabilities,
  25 * if present, and allows different HW DMA drivers to register as providing
  26 * this capability.
  27 *
   28 * Because we are accelerating what is already a relatively fast operation,
   29 * the code goes to great lengths to avoid additional overhead, such as
   30 * locking.
  31 *
  32 * LOCKING:
  33 *
   34 * The subsystem keeps a global list of dma_device structs, which is
   35 * protected by a mutex, dma_list_mutex.
  36 *
   37 * A subsystem can get access to a channel by calling dmaengine_get() followed
   38 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
   39 * dma_request_channel().  Once a channel is allocated, a reference is taken
   40 * against its corresponding driver to prevent removal.
  41 *
   42 * Each device has a channels list, which runs unlocked but is never modified
   43 * once the device is registered; it is simply set up by the driver.
  44 *
  45 * See Documentation/dmaengine.txt for more details
  46 */
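/*
 * Illustrative sketch of the two access models described above.  The
 * dmaengine calls are real interfaces (most are implemented below); chan,
 * cookie, mask, dest, src, len, my_filter() and my_param are hypothetical
 * placeholders for client code.
 *
 *	// opportunistic use of a shared memcpy channel
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *		dma_sync_wait(chan, cookie);
 *	} else {
 *		memcpy(dest, src, len);		// fall back to the CPU
 *	}
 *	dmaengine_put();
 *
 *	// exclusive use of a channel matching a capability mask
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_param);
 *	if (chan) {
 *		// ... the channel is reserved for this client ...
 *		dma_release_channel(chan);
 *	}
 */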
  47
  48#include <linux/dma-mapping.h>
  49#include <linux/init.h>
  50#include <linux/module.h>
  51#include <linux/mm.h>
  52#include <linux/device.h>
  53#include <linux/dmaengine.h>
  54#include <linux/hardirq.h>
  55#include <linux/spinlock.h>
  56#include <linux/percpu.h>
  57#include <linux/rcupdate.h>
  58#include <linux/mutex.h>
  59#include <linux/jiffies.h>
  60#include <linux/rculist.h>
  61#include <linux/idr.h>
  62#include <linux/slab.h>
  63
  64static DEFINE_MUTEX(dma_list_mutex);
  65static DEFINE_IDR(dma_idr);
  66static LIST_HEAD(dma_device_list);
  67static long dmaengine_ref_count;
  68
  69/* --- sysfs implementation --- */
  70
  71/**
   72 * dev_to_dma_chan - convert a device pointer to its sysfs container object
  73 * @dev - device node
  74 *
  75 * Must be called under dma_list_mutex
  76 */
  77static struct dma_chan *dev_to_dma_chan(struct device *dev)
  78{
  79	struct dma_chan_dev *chan_dev;
  80
  81	chan_dev = container_of(dev, typeof(*chan_dev), device);
  82	return chan_dev->chan;
  83}
  84
  85static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
  86{
  87	struct dma_chan *chan;
  88	unsigned long count = 0;
  89	int i;
  90	int err;
  91
  92	mutex_lock(&dma_list_mutex);
  93	chan = dev_to_dma_chan(dev);
  94	if (chan) {
  95		for_each_possible_cpu(i)
  96			count += per_cpu_ptr(chan->local, i)->memcpy_count;
  97		err = sprintf(buf, "%lu\n", count);
  98	} else
  99		err = -ENODEV;
 100	mutex_unlock(&dma_list_mutex);
 101
 102	return err;
 103}
 104
 105static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
 106				      char *buf)
 107{
 108	struct dma_chan *chan;
 109	unsigned long count = 0;
 110	int i;
 111	int err;
 112
 113	mutex_lock(&dma_list_mutex);
 114	chan = dev_to_dma_chan(dev);
 115	if (chan) {
 116		for_each_possible_cpu(i)
 117			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
 118		err = sprintf(buf, "%lu\n", count);
 119	} else
 120		err = -ENODEV;
 121	mutex_unlock(&dma_list_mutex);
 122
 123	return err;
 124}
 125
 126static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
 127{
 128	struct dma_chan *chan;
 129	int err;
 130
 131	mutex_lock(&dma_list_mutex);
 132	chan = dev_to_dma_chan(dev);
 133	if (chan)
 134		err = sprintf(buf, "%d\n", chan->client_count);
 135	else
 136		err = -ENODEV;
 137	mutex_unlock(&dma_list_mutex);
 138
 139	return err;
 140}
 141
 142static struct device_attribute dma_attrs[] = {
 143	__ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
 144	__ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
 145	__ATTR(in_use, S_IRUGO, show_in_use, NULL),
 146	__ATTR_NULL
 147};
 148
 149static void chan_dev_release(struct device *dev)
 150{
 151	struct dma_chan_dev *chan_dev;
 152
 153	chan_dev = container_of(dev, typeof(*chan_dev), device);
 154	if (atomic_dec_and_test(chan_dev->idr_ref)) {
 155		mutex_lock(&dma_list_mutex);
 156		idr_remove(&dma_idr, chan_dev->dev_id);
 157		mutex_unlock(&dma_list_mutex);
 158		kfree(chan_dev->idr_ref);
 159	}
 160	kfree(chan_dev);
 161}
 162
 163static struct class dma_devclass = {
 164	.name		= "dma",
 165	.dev_attrs	= dma_attrs,
 166	.dev_release	= chan_dev_release,
 167};
 168
 169/* --- client and device registration --- */
 170
 171#define dma_device_satisfies_mask(device, mask) \
 172	__dma_device_satisfies_mask((device), &(mask))
 173static int
 174__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
 175{
 176	dma_cap_mask_t has;
 177
 178	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 179		DMA_TX_TYPE_END);
 180	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 181}
 182
 183static struct module *dma_chan_to_owner(struct dma_chan *chan)
 184{
 185	return chan->device->dev->driver->owner;
 186}
 187
 188/**
 189 * balance_ref_count - catch up the channel reference count
 190 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 191 *
 192 * balance_ref_count must be called under dma_list_mutex
 193 */
 194static void balance_ref_count(struct dma_chan *chan)
 195{
 196	struct module *owner = dma_chan_to_owner(chan);
 197
 198	while (chan->client_count < dmaengine_ref_count) {
 199		__module_get(owner);
 200		chan->client_count++;
 201	}
 202}
 203
 204/**
 205 * dma_chan_get - try to grab a dma channel's parent driver module
 206 * @chan - channel to grab
 207 *
 208 * Must be called under dma_list_mutex
 209 */
 210static int dma_chan_get(struct dma_chan *chan)
 211{
 212	int err = -ENODEV;
 213	struct module *owner = dma_chan_to_owner(chan);
 214
 215	if (chan->client_count) {
 216		__module_get(owner);
 217		err = 0;
 218	} else if (try_module_get(owner))
 219		err = 0;
 220
 221	if (err == 0)
 222		chan->client_count++;
 223
 224	/* allocate upon first client reference */
 225	if (chan->client_count == 1 && err == 0) {
 226		int desc_cnt = chan->device->device_alloc_chan_resources(chan);
 227
 228		if (desc_cnt < 0) {
 229			err = desc_cnt;
 230			chan->client_count = 0;
 231			module_put(owner);
 232		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 233			balance_ref_count(chan);
 234	}
 235
 236	return err;
 237}
 238
 239/**
 240 * dma_chan_put - drop a reference to a dma channel's parent driver module
 241 * @chan - channel to release
 242 *
 243 * Must be called under dma_list_mutex
 244 */
 245static void dma_chan_put(struct dma_chan *chan)
 246{
 247	if (!chan->client_count)
 248		return; /* this channel failed alloc_chan_resources */
 249	chan->client_count--;
 250	module_put(dma_chan_to_owner(chan));
 251	if (chan->client_count == 0)
 252		chan->device->device_free_chan_resources(chan);
 253}
 254
 255enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 256{
 257	enum dma_status status;
 258	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 259
 260	dma_async_issue_pending(chan);
 261	do {
 262		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 263		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 264			printk(KERN_ERR "dma_sync_wait_timeout!\n");
 265			return DMA_ERROR;
 266		}
 267	} while (status == DMA_IN_PROGRESS);
 268
 269	return status;
 270}
 271EXPORT_SYMBOL(dma_sync_wait);
 272
 273/**
 274 * dma_cap_mask_all - enable iteration over all operation types
 275 */
 276static dma_cap_mask_t dma_cap_mask_all;
 277
 278/**
 279 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 280 * @chan - associated channel for this entry
 281 */
 282struct dma_chan_tbl_ent {
 283	struct dma_chan *chan;
 284};
 285
 286/**
 287 * channel_table - percpu lookup table for memory-to-memory offload providers
 288 */
 289static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 290
 291static int __init dma_channel_table_init(void)
 292{
 293	enum dma_transaction_type cap;
 294	int err = 0;
 295
 296	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 297
 298	/* 'interrupt', 'private', and 'slave' are channel capabilities,
 299	 * but are not associated with an operation so they do not need
 300	 * an entry in the channel_table
 301	 */
 302	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
 303	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 304	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 305
 306	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
 307		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
 308		if (!channel_table[cap]) {
 309			err = -ENOMEM;
 310			break;
 311		}
 312	}
 313
 314	if (err) {
 315		pr_err("dmaengine: initialization failure\n");
 316		for_each_dma_cap_mask(cap, dma_cap_mask_all)
 317			if (channel_table[cap])
 318				free_percpu(channel_table[cap]);
 319	}
 320
 321	return err;
 322}
 323arch_initcall(dma_channel_table_init);
 324
 325/**
 326 * dma_find_channel - find a channel to carry out the operation
 327 * @tx_type: transaction type
 328 */
 329struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 330{
 331	return this_cpu_read(channel_table[tx_type]->chan);
 332}
 333EXPORT_SYMBOL(dma_find_channel);
 334
 335/**
 336 * dma_issue_pending_all - flush all pending operations across all channels
 337 */
 338void dma_issue_pending_all(void)
 339{
 340	struct dma_device *device;
 341	struct dma_chan *chan;
 342
 343	rcu_read_lock();
 344	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 345		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 346			continue;
 347		list_for_each_entry(chan, &device->channels, device_node)
 348			if (chan->client_count)
 349				device->device_issue_pending(chan);
 350	}
 351	rcu_read_unlock();
 352}
 353EXPORT_SYMBOL(dma_issue_pending_all);
 354
 355/**
 356 * nth_chan - returns the nth channel of the given capability
 357 * @cap: capability to match
 358 * @n: nth channel desired
 359 *
 360 * Defaults to returning the channel with the desired capability and the
 361 * lowest reference count when 'n' cannot be satisfied.  Must be called
 362 * under dma_list_mutex.
 363 */
 364static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
 365{
 366	struct dma_device *device;
 367	struct dma_chan *chan;
 368	struct dma_chan *ret = NULL;
 369	struct dma_chan *min = NULL;
 370
 371	list_for_each_entry(device, &dma_device_list, global_node) {
 372		if (!dma_has_cap(cap, device->cap_mask) ||
 373		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 374			continue;
 375		list_for_each_entry(chan, &device->channels, device_node) {
 376			if (!chan->client_count)
 377				continue;
 378			if (!min)
 379				min = chan;
 380			else if (chan->table_count < min->table_count)
 381				min = chan;
 382
 383			if (n-- == 0) {
 384				ret = chan;
 385				break; /* done */
 386			}
 387		}
 388		if (ret)
 389			break; /* done */
 390	}
 391
 392	if (!ret)
 393		ret = min;
 394
 395	if (ret)
 396		ret->table_count++;
 397
 398	return ret;
 399}
 400
 401/**
 402 * dma_channel_rebalance - redistribute the available channels
 403 *
 404 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 405 * operation type) in the SMP case,  and operation isolation (avoid
 406 * multi-tasking channels) in the non-SMP case.  Must be called under
 407 * dma_list_mutex.
 408 */
 409static void dma_channel_rebalance(void)
 410{
 411	struct dma_chan *chan;
 412	struct dma_device *device;
 413	int cpu;
 414	int cap;
 415	int n;
 416
 417	/* undo the last distribution */
 418	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 419		for_each_possible_cpu(cpu)
 420			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 421
 422	list_for_each_entry(device, &dma_device_list, global_node) {
 423		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 424			continue;
 425		list_for_each_entry(chan, &device->channels, device_node)
 426			chan->table_count = 0;
 427	}
 428
 429	/* don't populate the channel_table if no clients are available */
 430	if (!dmaengine_ref_count)
 431		return;
 432
 433	/* redistribute available channels */
 434	n = 0;
 435	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 436		for_each_online_cpu(cpu) {
 437			if (num_possible_cpus() > 1)
 438				chan = nth_chan(cap, n++);
 439			else
 440				chan = nth_chan(cap, -1);
 441
 442			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 443		}
 444}
 445
 446static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
 447					  dma_filter_fn fn, void *fn_param)
 448{
 449	struct dma_chan *chan;
 450
 451	if (!__dma_device_satisfies_mask(dev, mask)) {
 452		pr_debug("%s: wrong capabilities\n", __func__);
 453		return NULL;
 454	}
 455	/* devices with multiple channels need special handling as we need to
 456	 * ensure that all channels are either private or public.
 457	 */
 458	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
 459		list_for_each_entry(chan, &dev->channels, device_node) {
 460			/* some channels are already publicly allocated */
 461			if (chan->client_count)
 462				return NULL;
 463		}
 464
 465	list_for_each_entry(chan, &dev->channels, device_node) {
 466		if (chan->client_count) {
 467			pr_debug("%s: %s busy\n",
 468				 __func__, dma_chan_name(chan));
 469			continue;
 470		}
 471		if (fn && !fn(chan, fn_param)) {
 472			pr_debug("%s: %s filter said false\n",
 473				 __func__, dma_chan_name(chan));
 474			continue;
 475		}
 476		return chan;
 477	}
 478
 479	return NULL;
 480}
 481
 482/**
 483 * dma_request_channel - try to allocate an exclusive channel
 484 * @mask: capabilities that the channel must satisfy
 485 * @fn: optional callback to disposition available channels
 486 * @fn_param: opaque parameter to pass to dma_filter_fn
 487 */
 488struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
 489{
 490	struct dma_device *device, *_d;
 491	struct dma_chan *chan = NULL;
 492	int err;
 493
 494	/* Find a channel */
 495	mutex_lock(&dma_list_mutex);
 496	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 497		chan = private_candidate(mask, device, fn, fn_param);
 498		if (chan) {
 499			/* Found a suitable channel, try to grab, prep, and
 500			 * return it.  We first set DMA_PRIVATE to disable
 501			 * balance_ref_count as this channel will not be
 502			 * published in the general-purpose allocator
 503			 */
 504			dma_cap_set(DMA_PRIVATE, device->cap_mask);
 505			device->privatecnt++;
 506			err = dma_chan_get(chan);
 507
 508			if (err == -ENODEV) {
 509				pr_debug("%s: %s module removed\n", __func__,
 510					 dma_chan_name(chan));
 511				list_del_rcu(&device->global_node);
 512			} else if (err)
 513				pr_debug("dmaengine: failed to get %s: (%d)\n",
 514					 dma_chan_name(chan), err);
 515			else
 516				break;
 517			if (--device->privatecnt == 0)
 518				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 519			chan = NULL;
 520		}
 521	}
 522	mutex_unlock(&dma_list_mutex);
 523
 524	pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
 525		 chan ? dma_chan_name(chan) : NULL);
 526
 527	return chan;
 528}
 529EXPORT_SYMBOL_GPL(__dma_request_channel);
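/*
 * Illustrative sketch of the filter-based request path above; my_filter(),
 * my_match_data and the surrounding locals are hypothetical placeholders
 * for driver-specific matching logic.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		// e.g. accept only channels of a device we already know
 *		return chan->device->dev == (struct device *)param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_match_data);
 *	if (!chan)
 *		return -ENODEV;		// nothing suitable was available
 */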
 530
 531void dma_release_channel(struct dma_chan *chan)
 532{
 533	mutex_lock(&dma_list_mutex);
 534	WARN_ONCE(chan->client_count != 1,
 535		  "chan reference count %d != 1\n", chan->client_count);
 536	dma_chan_put(chan);
 537	/* drop PRIVATE cap enabled by __dma_request_channel() */
 538	if (--chan->device->privatecnt == 0)
 539		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 540	mutex_unlock(&dma_list_mutex);
 541}
 542EXPORT_SYMBOL_GPL(dma_release_channel);
 543
 544/**
 545 * dmaengine_get - register interest in dma_channels
 546 */
 547void dmaengine_get(void)
 548{
 549	struct dma_device *device, *_d;
 550	struct dma_chan *chan;
 551	int err;
 552
 553	mutex_lock(&dma_list_mutex);
 554	dmaengine_ref_count++;
 555
 556	/* try to grab channels */
 557	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 558		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 559			continue;
 560		list_for_each_entry(chan, &device->channels, device_node) {
 561			err = dma_chan_get(chan);
 562			if (err == -ENODEV) {
 563				/* module removed before we could use it */
 564				list_del_rcu(&device->global_node);
 565				break;
 566			} else if (err)
 567				pr_err("dmaengine: failed to get %s: (%d)\n",
 568				       dma_chan_name(chan), err);
 569		}
 570	}
 571
 572	/* if this is the first reference and there were channels
 573	 * waiting we need to rebalance to get those channels
 574	 * incorporated into the channel table
 575	 */
 576	if (dmaengine_ref_count == 1)
 577		dma_channel_rebalance();
 578	mutex_unlock(&dma_list_mutex);
 579}
 580EXPORT_SYMBOL(dmaengine_get);
 581
 582/**
 583 * dmaengine_put - let dma drivers be removed when ref_count == 0
 584 */
 585void dmaengine_put(void)
 586{
 587	struct dma_device *device;
 588	struct dma_chan *chan;
 589
 590	mutex_lock(&dma_list_mutex);
 591	dmaengine_ref_count--;
 592	BUG_ON(dmaengine_ref_count < 0);
 593	/* drop channel references */
 594	list_for_each_entry(device, &dma_device_list, global_node) {
 595		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 596			continue;
 597		list_for_each_entry(chan, &device->channels, device_node)
 598			dma_chan_put(chan);
 599	}
 600	mutex_unlock(&dma_list_mutex);
 601}
 602EXPORT_SYMBOL(dmaengine_put);
 603
 604static bool device_has_all_tx_types(struct dma_device *device)
 605{
 606	/* A device that satisfies this test has channels that will never cause
 607	 * an async_tx channel switch event as all possible operation types can
 608	 * be handled.
 609	 */
 610	#ifdef CONFIG_ASYNC_TX_DMA
 611	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 612		return false;
 613	#endif
 614
 615	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
 616	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 617		return false;
 618	#endif
 619
 620	#if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
 621	if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
 622		return false;
 623	#endif
 624
 625	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 626	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 627		return false;
 628
 629	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
 630	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
 631		return false;
 632	#endif
 633	#endif
 634
 635	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
 636	if (!dma_has_cap(DMA_PQ, device->cap_mask))
 637		return false;
 638
 639	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
 640	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
 641		return false;
 642	#endif
 643	#endif
 644
 645	return true;
 646}
 647
 648static int get_dma_id(struct dma_device *device)
 649{
 650	int rc;
 651
 652 idr_retry:
 653	if (!idr_pre_get(&dma_idr, GFP_KERNEL))
 654		return -ENOMEM;
 655	mutex_lock(&dma_list_mutex);
 656	rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
 657	mutex_unlock(&dma_list_mutex);
 658	if (rc == -EAGAIN)
 659		goto idr_retry;
 660	else if (rc != 0)
 661		return rc;
 662
 663	return 0;
 664}
 665
 666/**
 667 * dma_async_device_register - registers DMA devices found
 668 * @device: &dma_device
 669 */
 670int dma_async_device_register(struct dma_device *device)
 671{
 672	int chancnt = 0, rc;
 673	struct dma_chan* chan;
 674	atomic_t *idr_ref;
 675
 676	if (!device)
 677		return -ENODEV;
 678
 679	/* validate device routines */
 680	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
 681		!device->device_prep_dma_memcpy);
 682	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
 683		!device->device_prep_dma_xor);
 684	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
 685		!device->device_prep_dma_xor_val);
 686	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
 687		!device->device_prep_dma_pq);
 688	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
 689		!device->device_prep_dma_pq_val);
 690	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 691		!device->device_prep_dma_memset);
 692	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 693		!device->device_prep_dma_interrupt);
 694	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
 695		!device->device_prep_dma_sg);
 696	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 697		!device->device_prep_slave_sg);
 698	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 699		!device->device_prep_dma_cyclic);
 700	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
 701		!device->device_control);
 702
 703	BUG_ON(!device->device_alloc_chan_resources);
 704	BUG_ON(!device->device_free_chan_resources);
 705	BUG_ON(!device->device_tx_status);
 706	BUG_ON(!device->device_issue_pending);
 707	BUG_ON(!device->dev);
 708
 709	/* note: this only matters in the
 710	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 711	 */
 712	if (device_has_all_tx_types(device))
 713		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 714
 715	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 716	if (!idr_ref)
 717		return -ENOMEM;
 718	rc = get_dma_id(device);
 719	if (rc != 0) {
 720		kfree(idr_ref);
 721		return rc;
 722	}
 723
 724	atomic_set(idr_ref, 0);
 725
 726	/* represent channels in sysfs. Probably want devs too */
 727	list_for_each_entry(chan, &device->channels, device_node) {
 728		rc = -ENOMEM;
 729		chan->local = alloc_percpu(typeof(*chan->local));
 730		if (chan->local == NULL)
 731			goto err_out;
 732		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 733		if (chan->dev == NULL) {
 734			free_percpu(chan->local);
 735			chan->local = NULL;
 736			goto err_out;
 737		}
 738
 739		chan->chan_id = chancnt++;
 740		chan->dev->device.class = &dma_devclass;
 741		chan->dev->device.parent = device->dev;
 742		chan->dev->chan = chan;
 743		chan->dev->idr_ref = idr_ref;
 744		chan->dev->dev_id = device->dev_id;
 745		atomic_inc(idr_ref);
 746		dev_set_name(&chan->dev->device, "dma%dchan%d",
 747			     device->dev_id, chan->chan_id);
 748
 749		rc = device_register(&chan->dev->device);
 750		if (rc) {
 751			free_percpu(chan->local);
 752			chan->local = NULL;
 753			kfree(chan->dev);
 754			atomic_dec(idr_ref);
 755			goto err_out;
 756		}
 757		chan->client_count = 0;
 758	}
 759	device->chancnt = chancnt;
 760
 761	mutex_lock(&dma_list_mutex);
 762	/* take references on public channels */
 763	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
 764		list_for_each_entry(chan, &device->channels, device_node) {
 765			/* if clients are already waiting for channels we need
 766			 * to take references on their behalf
 767			 */
 768			if (dma_chan_get(chan) == -ENODEV) {
 769				/* note we can only get here for the first
 770				 * channel as the remaining channels are
 771				 * guaranteed to get a reference
 772				 */
 773				rc = -ENODEV;
 774				mutex_unlock(&dma_list_mutex);
 775				goto err_out;
 776			}
 777		}
 778	list_add_tail_rcu(&device->global_node, &dma_device_list);
 779	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 780		device->privatecnt++;	/* Always private */
 781	dma_channel_rebalance();
 782	mutex_unlock(&dma_list_mutex);
 783
 784	return 0;
 785
 786err_out:
 787	/* if we never registered a channel just release the idr */
 788	if (atomic_read(idr_ref) == 0) {
 789		mutex_lock(&dma_list_mutex);
 790		idr_remove(&dma_idr, device->dev_id);
 791		mutex_unlock(&dma_list_mutex);
 792		kfree(idr_ref);
 793		return rc;
 794	}
 795
 796	list_for_each_entry(chan, &device->channels, device_node) {
 797		if (chan->local == NULL)
 798			continue;
 799		mutex_lock(&dma_list_mutex);
 800		chan->dev->chan = NULL;
 801		mutex_unlock(&dma_list_mutex);
 802		device_unregister(&chan->dev->device);
 803		free_percpu(chan->local);
 804	}
 805	return rc;
 806}
 807EXPORT_SYMBOL(dma_async_device_register);
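/*
 * Illustrative sketch of the minimum a provider fills in before calling
 * dma_async_device_register(); the foo_* names and the probe() context are
 * hypothetical.
 *
 *	dma_cap_set(DMA_MEMCPY, foo->dma.cap_mask);
 *	foo->dma.device_alloc_chan_resources = foo_alloc_chan_resources;
 *	foo->dma.device_free_chan_resources = foo_free_chan_resources;
 *	foo->dma.device_prep_dma_memcpy = foo_prep_memcpy;
 *	foo->dma.device_tx_status = foo_tx_status;
 *	foo->dma.device_issue_pending = foo_issue_pending;
 *	foo->dma.dev = &pdev->dev;
 *	INIT_LIST_HEAD(&foo->dma.channels);
 *	list_add_tail(&foo_chan->chan.device_node, &foo->dma.channels);
 *	err = dma_async_device_register(&foo->dma);
 */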
 808
 809/**
 810 * dma_async_device_unregister - unregister a DMA device
 811 * @device: &dma_device
 812 *
  813 * This routine is called by dma driver exit routines; dmaengine holds module
  814 * references to prevent it from being called while channels are in use.
 815 */
 816void dma_async_device_unregister(struct dma_device *device)
 817{
 818	struct dma_chan *chan;
 819
 820	mutex_lock(&dma_list_mutex);
 821	list_del_rcu(&device->global_node);
 822	dma_channel_rebalance();
 823	mutex_unlock(&dma_list_mutex);
 824
 825	list_for_each_entry(chan, &device->channels, device_node) {
 826		WARN_ONCE(chan->client_count,
 827			  "%s called while %d clients hold a reference\n",
 828			  __func__, chan->client_count);
 829		mutex_lock(&dma_list_mutex);
 830		chan->dev->chan = NULL;
 831		mutex_unlock(&dma_list_mutex);
 832		device_unregister(&chan->dev->device);
 833		free_percpu(chan->local);
 834	}
 835}
 836EXPORT_SYMBOL(dma_async_device_unregister);
 837
 838/**
 839 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 840 * @chan: DMA channel to offload copy to
 841 * @dest: destination address (virtual)
 842 * @src: source address (virtual)
 843 * @len: length
 844 *
 845 * Both @dest and @src must be mappable to a bus address according to the
 846 * DMA mapping API rules for streaming mappings.
 847 * Both @dest and @src must stay memory resident (kernel memory or locked
 848 * user space pages).
 849 */
 850dma_cookie_t
 851dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
 852			void *src, size_t len)
 853{
 854	struct dma_device *dev = chan->device;
 855	struct dma_async_tx_descriptor *tx;
 856	dma_addr_t dma_dest, dma_src;
 857	dma_cookie_t cookie;
 858	unsigned long flags;
 859
 860	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
 861	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
 862	flags = DMA_CTRL_ACK |
 863		DMA_COMPL_SRC_UNMAP_SINGLE |
 864		DMA_COMPL_DEST_UNMAP_SINGLE;
 865	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 866
 867	if (!tx) {
 868		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
 869		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 870		return -ENOMEM;
 871	}
 872
 873	tx->callback = NULL;
 874	cookie = tx->tx_submit(tx);
 875
 876	preempt_disable();
 877	__this_cpu_add(chan->local->bytes_transferred, len);
 878	__this_cpu_inc(chan->local->memcpy_count);
 879	preempt_enable();
 880
 881	return cookie;
 882}
 883EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
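/*
 * Illustrative sketch of a typical completion pattern for the offloaded
 * copies in this file: submit, kick the hardware, then poll the cookie.
 * chan, dest, src and len are hypothetical.
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (cookie < 0)
 *		return cookie;		// descriptor allocation failed
 *	dma_async_issue_pending(chan);
 *	while (dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
 *	       DMA_IN_PROGRESS)
 *		cpu_relax();
 */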
 884
 885/**
 886 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 887 * @chan: DMA channel to offload copy to
 888 * @page: destination page
 889 * @offset: offset in page to copy to
 890 * @kdata: source address (virtual)
 891 * @len: length
 892 *
 893 * Both @page/@offset and @kdata must be mappable to a bus address according
 894 * to the DMA mapping API rules for streaming mappings.
 895 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 896 * locked user space pages)
 897 */
 898dma_cookie_t
 899dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
 900			unsigned int offset, void *kdata, size_t len)
 901{
 902	struct dma_device *dev = chan->device;
 903	struct dma_async_tx_descriptor *tx;
 904	dma_addr_t dma_dest, dma_src;
 905	dma_cookie_t cookie;
 906	unsigned long flags;
 907
 908	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
 909	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
 910	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
 911	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 912
 913	if (!tx) {
 914		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
 915		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 916		return -ENOMEM;
 917	}
 918
 919	tx->callback = NULL;
 920	cookie = tx->tx_submit(tx);
 921
 922	preempt_disable();
 923	__this_cpu_add(chan->local->bytes_transferred, len);
 924	__this_cpu_inc(chan->local->memcpy_count);
 925	preempt_enable();
 926
 927	return cookie;
 928}
 929EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);
 930
 931/**
 932 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 933 * @chan: DMA channel to offload copy to
 934 * @dest_pg: destination page
 935 * @dest_off: offset in page to copy to
 936 * @src_pg: source page
 937 * @src_off: offset in page to copy from
 938 * @len: length
 939 *
 940 * Both @dest_page/@dest_off and @src_page/@src_off must be mappable to a bus
 941 * address according to the DMA mapping API rules for streaming mappings.
 942 * Both @dest_page/@dest_off and @src_page/@src_off must stay memory resident
 943 * (kernel memory or locked user space pages).
 944 */
 945dma_cookie_t
 946dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
 947	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
 948	size_t len)
 949{
 950	struct dma_device *dev = chan->device;
 951	struct dma_async_tx_descriptor *tx;
 952	dma_addr_t dma_dest, dma_src;
 953	dma_cookie_t cookie;
 954	unsigned long flags;
 955
 956	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
 957	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
 958				DMA_FROM_DEVICE);
 959	flags = DMA_CTRL_ACK;
 960	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);
 961
 962	if (!tx) {
 963		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
 964		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
 965		return -ENOMEM;
 966	}
 967
 968	tx->callback = NULL;
 969	cookie = tx->tx_submit(tx);
 970
 971	preempt_disable();
 972	__this_cpu_add(chan->local->bytes_transferred, len);
 973	__this_cpu_inc(chan->local->memcpy_count);
 974	preempt_enable();
 975
 976	return cookie;
 977}
 978EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);
 979
 980void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
 981	struct dma_chan *chan)
 982{
 983	tx->chan = chan;
 984	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
 985	spin_lock_init(&tx->lock);
 986	#endif
 987}
 988EXPORT_SYMBOL(dma_async_tx_descriptor_init);
 989
 990/* dma_wait_for_async_tx - spin wait for a transaction to complete
 991 * @tx: in-flight transaction to wait on
 992 */
 993enum dma_status
 994dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
 995{
 996	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 997
 998	if (!tx)
 999		return DMA_SUCCESS;
1000
1001	while (tx->cookie == -EBUSY) {
1002		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1003			pr_err("%s timeout waiting for descriptor submission\n",
1004				__func__);
1005			return DMA_ERROR;
1006		}
1007		cpu_relax();
1008	}
1009	return dma_sync_wait(tx->chan, tx->cookie);
1010}
1011EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1012
1013/* dma_run_dependencies - helper routine for dma drivers to process
1014 *	(start) dependent operations on their target channel
1015 * @tx: transaction with dependencies
1016 */
1017void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1018{
1019	struct dma_async_tx_descriptor *dep = txd_next(tx);
1020	struct dma_async_tx_descriptor *dep_next;
1021	struct dma_chan *chan;
1022
1023	if (!dep)
1024		return;
1025
1026	/* we'll submit tx->next now, so clear the link */
1027	txd_clear_next(tx);
1028	chan = dep->chan;
1029
1030	/* keep submitting up until a channel switch is detected
1031	 * in that case we will be called again as a result of
1032	 * processing the interrupt from async_tx_channel_switch
1033	 */
1034	for (; dep; dep = dep_next) {
1035		txd_lock(dep);
1036		txd_clear_parent(dep);
1037		dep_next = txd_next(dep);
1038		if (dep_next && dep_next->chan == chan)
1039			txd_clear_next(dep); /* ->next will be submitted */
1040		else
1041			dep_next = NULL; /* submit current dep and terminate */
1042		txd_unlock(dep);
1043
1044		dep->tx_submit(dep);
1045	}
1046
1047	chan->device->device_issue_pending(chan);
1048}
1049EXPORT_SYMBOL_GPL(dma_run_dependencies);
1050
1051static int __init dma_bus_init(void)
1052{
1053	return class_register(&dma_devclass);
1054}
1055arch_initcall(dma_bus_init);
1056
1057
v4.17: drivers/dma/dmaengine.c
   1/*
   2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License as published by the Free
   6 * Software Foundation; either version 2 of the License, or (at your option)
   7 * any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * The full GNU General Public License is included in this distribution in the
  15 * file called COPYING.
  16 */
  17
  18/*
  19 * This code implements the DMA subsystem. It provides a HW-neutral interface
  20 * for other kernel code to use asynchronous memory copy capabilities,
  21 * if present, and allows different HW DMA drivers to register as providing
  22 * this capability.
  23 *
   24 * Because we are accelerating what is already a relatively fast operation,
   25 * the code goes to great lengths to avoid additional overhead, such as
   26 * locking.
  27 *
  28 * LOCKING:
  29 *
   30 * The subsystem keeps a global list of dma_device structs, which is
   31 * protected by a mutex, dma_list_mutex.
  32 *
   33 * A subsystem can get access to a channel by calling dmaengine_get() followed
   34 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
   35 * dma_request_channel().  Once a channel is allocated, a reference is taken
   36 * against its corresponding driver to prevent removal.
  37 *
   38 * Each device has a channels list, which runs unlocked but is never modified
   39 * once the device is registered; it is simply set up by the driver.
  40 *
  41 * See Documentation/dmaengine.txt for more details
  42 */
  43
  44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  45
  46#include <linux/platform_device.h>
  47#include <linux/dma-mapping.h>
  48#include <linux/init.h>
  49#include <linux/module.h>
  50#include <linux/mm.h>
  51#include <linux/device.h>
  52#include <linux/dmaengine.h>
  53#include <linux/hardirq.h>
  54#include <linux/spinlock.h>
  55#include <linux/percpu.h>
  56#include <linux/rcupdate.h>
  57#include <linux/mutex.h>
  58#include <linux/jiffies.h>
  59#include <linux/rculist.h>
  60#include <linux/idr.h>
  61#include <linux/slab.h>
  62#include <linux/acpi.h>
  63#include <linux/acpi_dma.h>
  64#include <linux/of_dma.h>
  65#include <linux/mempool.h>
  66
  67static DEFINE_MUTEX(dma_list_mutex);
  68static DEFINE_IDA(dma_ida);
  69static LIST_HEAD(dma_device_list);
  70static long dmaengine_ref_count;
  71
  72/* --- sysfs implementation --- */
  73
  74/**
   75 * dev_to_dma_chan - convert a device pointer to its sysfs container object
  76 * @dev - device node
  77 *
  78 * Must be called under dma_list_mutex
  79 */
  80static struct dma_chan *dev_to_dma_chan(struct device *dev)
  81{
  82	struct dma_chan_dev *chan_dev;
  83
  84	chan_dev = container_of(dev, typeof(*chan_dev), device);
  85	return chan_dev->chan;
  86}
  87
  88static ssize_t memcpy_count_show(struct device *dev,
  89				 struct device_attribute *attr, char *buf)
  90{
  91	struct dma_chan *chan;
  92	unsigned long count = 0;
  93	int i;
  94	int err;
  95
  96	mutex_lock(&dma_list_mutex);
  97	chan = dev_to_dma_chan(dev);
  98	if (chan) {
  99		for_each_possible_cpu(i)
 100			count += per_cpu_ptr(chan->local, i)->memcpy_count;
 101		err = sprintf(buf, "%lu\n", count);
 102	} else
 103		err = -ENODEV;
 104	mutex_unlock(&dma_list_mutex);
 105
 106	return err;
 107}
 108static DEVICE_ATTR_RO(memcpy_count);
 109
 110static ssize_t bytes_transferred_show(struct device *dev,
 111				      struct device_attribute *attr, char *buf)
 112{
 113	struct dma_chan *chan;
 114	unsigned long count = 0;
 115	int i;
 116	int err;
 117
 118	mutex_lock(&dma_list_mutex);
 119	chan = dev_to_dma_chan(dev);
 120	if (chan) {
 121		for_each_possible_cpu(i)
 122			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
 123		err = sprintf(buf, "%lu\n", count);
 124	} else
 125		err = -ENODEV;
 126	mutex_unlock(&dma_list_mutex);
 127
 128	return err;
 129}
 130static DEVICE_ATTR_RO(bytes_transferred);
 131
 132static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
 133			   char *buf)
 134{
 135	struct dma_chan *chan;
 136	int err;
 137
 138	mutex_lock(&dma_list_mutex);
 139	chan = dev_to_dma_chan(dev);
 140	if (chan)
 141		err = sprintf(buf, "%d\n", chan->client_count);
 142	else
 143		err = -ENODEV;
 144	mutex_unlock(&dma_list_mutex);
 145
 146	return err;
 147}
 148static DEVICE_ATTR_RO(in_use);
 149
 150static struct attribute *dma_dev_attrs[] = {
 151	&dev_attr_memcpy_count.attr,
 152	&dev_attr_bytes_transferred.attr,
 153	&dev_attr_in_use.attr,
 154	NULL,
 155};
 156ATTRIBUTE_GROUPS(dma_dev);
 157
 158static void chan_dev_release(struct device *dev)
 159{
 160	struct dma_chan_dev *chan_dev;
 161
 162	chan_dev = container_of(dev, typeof(*chan_dev), device);
 163	if (atomic_dec_and_test(chan_dev->idr_ref)) {
 164		mutex_lock(&dma_list_mutex);
 165		ida_remove(&dma_ida, chan_dev->dev_id);
 166		mutex_unlock(&dma_list_mutex);
 167		kfree(chan_dev->idr_ref);
 168	}
 169	kfree(chan_dev);
 170}
 171
 172static struct class dma_devclass = {
 173	.name		= "dma",
 174	.dev_groups	= dma_dev_groups,
 175	.dev_release	= chan_dev_release,
 176};
 177
 178/* --- client and device registration --- */
 179
 180#define dma_device_satisfies_mask(device, mask) \
 181	__dma_device_satisfies_mask((device), &(mask))
 182static int
 183__dma_device_satisfies_mask(struct dma_device *device,
 184			    const dma_cap_mask_t *want)
 185{
 186	dma_cap_mask_t has;
 187
 188	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 189		DMA_TX_TYPE_END);
 190	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 191}
 192
 193static struct module *dma_chan_to_owner(struct dma_chan *chan)
 194{
 195	return chan->device->dev->driver->owner;
 196}
 197
 198/**
 199 * balance_ref_count - catch up the channel reference count
 200 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 201 *
 202 * balance_ref_count must be called under dma_list_mutex
 203 */
 204static void balance_ref_count(struct dma_chan *chan)
 205{
 206	struct module *owner = dma_chan_to_owner(chan);
 207
 208	while (chan->client_count < dmaengine_ref_count) {
 209		__module_get(owner);
 210		chan->client_count++;
 211	}
 212}
 213
 214/**
 215 * dma_chan_get - try to grab a dma channel's parent driver module
 216 * @chan - channel to grab
 217 *
 218 * Must be called under dma_list_mutex
 219 */
 220static int dma_chan_get(struct dma_chan *chan)
 221{
 222	struct module *owner = dma_chan_to_owner(chan);
 223	int ret;
 224
 225	/* The channel is already in use, update client count */
 226	if (chan->client_count) {
 227		__module_get(owner);
 228		goto out;
 229	}
 230
 231	if (!try_module_get(owner))
 232		return -ENODEV;
 233
 234	/* allocate upon first client reference */
 235	if (chan->device->device_alloc_chan_resources) {
 236		ret = chan->device->device_alloc_chan_resources(chan);
 237		if (ret < 0)
 238			goto err_out;
 239	}
 240
 241	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 242		balance_ref_count(chan);
 243
 244out:
 245	chan->client_count++;
 246	return 0;
 247
 248err_out:
 249	module_put(owner);
 250	return ret;
 251}
 252
 253/**
 254 * dma_chan_put - drop a reference to a dma channel's parent driver module
 255 * @chan - channel to release
 256 *
 257 * Must be called under dma_list_mutex
 258 */
 259static void dma_chan_put(struct dma_chan *chan)
 260{
 261	/* This channel is not in use, bail out */
 262	if (!chan->client_count)
 263		return;
 264
 265	chan->client_count--;
 266	module_put(dma_chan_to_owner(chan));
 267
 268	/* This channel is not in use anymore, free it */
 269	if (!chan->client_count && chan->device->device_free_chan_resources) {
 270		/* Make sure all operations have completed */
 271		dmaengine_synchronize(chan);
 272		chan->device->device_free_chan_resources(chan);
 273	}
 274
 275	/* If the channel is used via a DMA request router, free the mapping */
 276	if (chan->router && chan->router->route_free) {
 277		chan->router->route_free(chan->router->dev, chan->route_data);
 278		chan->router = NULL;
 279		chan->route_data = NULL;
 280	}
 281}
 282
 283enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 284{
 285	enum dma_status status;
 286	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 287
 288	dma_async_issue_pending(chan);
 289	do {
 290		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 291		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 292			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
 293			return DMA_ERROR;
 294		}
 295		if (status != DMA_IN_PROGRESS)
 296			break;
 297		cpu_relax();
 298	} while (1);
 299
 300	return status;
 301}
 302EXPORT_SYMBOL(dma_sync_wait);
 303
 304/**
 305 * dma_cap_mask_all - enable iteration over all operation types
 306 */
 307static dma_cap_mask_t dma_cap_mask_all;
 308
 309/**
 310 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 311 * @chan - associated channel for this entry
 312 */
 313struct dma_chan_tbl_ent {
 314	struct dma_chan *chan;
 315};
 316
 317/**
 318 * channel_table - percpu lookup table for memory-to-memory offload providers
 319 */
 320static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 321
 322static int __init dma_channel_table_init(void)
 323{
 324	enum dma_transaction_type cap;
 325	int err = 0;
 326
 327	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 328
 329	/* 'interrupt', 'private', and 'slave' are channel capabilities,
 330	 * but are not associated with an operation so they do not need
 331	 * an entry in the channel_table
 332	 */
 333	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
 334	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 335	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 336
 337	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
 338		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
 339		if (!channel_table[cap]) {
 340			err = -ENOMEM;
 341			break;
 342		}
 343	}
 344
 345	if (err) {
 346		pr_err("initialization failure\n");
 347		for_each_dma_cap_mask(cap, dma_cap_mask_all)
 348			free_percpu(channel_table[cap]);
 349	}
 350
 351	return err;
 352}
 353arch_initcall(dma_channel_table_init);
 354
 355/**
 356 * dma_find_channel - find a channel to carry out the operation
 357 * @tx_type: transaction type
 358 */
 359struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 360{
 361	return this_cpu_read(channel_table[tx_type]->chan);
 362}
 363EXPORT_SYMBOL(dma_find_channel);
 364
 365/**
 366 * dma_issue_pending_all - flush all pending operations across all channels
 367 */
 368void dma_issue_pending_all(void)
 369{
 370	struct dma_device *device;
 371	struct dma_chan *chan;
 372
 373	rcu_read_lock();
 374	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 375		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 376			continue;
 377		list_for_each_entry(chan, &device->channels, device_node)
 378			if (chan->client_count)
 379				device->device_issue_pending(chan);
 380	}
 381	rcu_read_unlock();
 382}
 383EXPORT_SYMBOL(dma_issue_pending_all);
 384
 385/**
 386 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 387 */
 388static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 389{
 390	int node = dev_to_node(chan->device->dev);
 391	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
 392}
 393
 394/**
 395 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 396 * @cap: capability to match
 397 * @cpu: cpu index which the channel should be close to
 398 *
 399 * If some channels are close to the given cpu, the one with the lowest
 400 * reference count is returned. Otherwise, cpu is ignored and only the
 401 * reference count is taken into account.
 402 * Must be called under dma_list_mutex.
 403 */
 404static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 405{
 406	struct dma_device *device;
 407	struct dma_chan *chan;
 408	struct dma_chan *min = NULL;
 409	struct dma_chan *localmin = NULL;
 410
 411	list_for_each_entry(device, &dma_device_list, global_node) {
 412		if (!dma_has_cap(cap, device->cap_mask) ||
 413		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 414			continue;
 415		list_for_each_entry(chan, &device->channels, device_node) {
 416			if (!chan->client_count)
 417				continue;
 418			if (!min || chan->table_count < min->table_count)
 419				min = chan;
 420
 421			if (dma_chan_is_local(chan, cpu))
 422				if (!localmin ||
 423				    chan->table_count < localmin->table_count)
 424					localmin = chan;
 425		}
 426	}
 427
 428	chan = localmin ? localmin : min;
 429
 430	if (chan)
 431		chan->table_count++;
 432
 433	return chan;
 434}
 435
 436/**
 437 * dma_channel_rebalance - redistribute the available channels
 438 *
 439 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 440 * operation type) in the SMP case,  and operation isolation (avoid
 441 * multi-tasking channels) in the non-SMP case.  Must be called under
 442 * dma_list_mutex.
 443 */
 444static void dma_channel_rebalance(void)
 445{
 446	struct dma_chan *chan;
 447	struct dma_device *device;
 448	int cpu;
 449	int cap;
 450
 451	/* undo the last distribution */
 452	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 453		for_each_possible_cpu(cpu)
 454			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 455
 456	list_for_each_entry(device, &dma_device_list, global_node) {
 457		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 458			continue;
 459		list_for_each_entry(chan, &device->channels, device_node)
 460			chan->table_count = 0;
 461	}
 462
 463	/* don't populate the channel_table if no clients are available */
 464	if (!dmaengine_ref_count)
 465		return;
 466
 467	/* redistribute available channels */
 468	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 469		for_each_online_cpu(cpu) {
 470			chan = min_chan(cap, cpu);
 471			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 472		}
 473}
 474
 475int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 476{
 477	struct dma_device *device;
 478
 479	if (!chan || !caps)
 480		return -EINVAL;
 481
 482	device = chan->device;
 483
 484	/* check if the channel supports slave transactions */
 485	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
 486	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
 487		return -ENXIO;
 488
 489	/*
 490	 * Check whether it reports it uses the generic slave
 491	 * capabilities, if not, that means it doesn't support any
 492	 * kind of slave capabilities reporting.
 493	 */
 494	if (!device->directions)
 495		return -ENXIO;
 496
 497	caps->src_addr_widths = device->src_addr_widths;
 498	caps->dst_addr_widths = device->dst_addr_widths;
 499	caps->directions = device->directions;
 500	caps->max_burst = device->max_burst;
 501	caps->residue_granularity = device->residue_granularity;
 502	caps->descriptor_reuse = device->descriptor_reuse;
 503
 504	/*
 505	 * Some devices implement only pause (e.g. to get residuum) but no
 506	 * resume. However cmd_pause is advertised as pause AND resume.
 507	 */
 508	caps->cmd_pause = !!(device->device_pause && device->device_resume);
 509	caps->cmd_terminate = !!device->device_terminate_all;
 510
 511	return 0;
 512}
 513EXPORT_SYMBOL_GPL(dma_get_slave_caps);
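/*
 * Illustrative sketch of a client consulting the reported slave
 * capabilities before configuring a channel; chan, caps and cfg are
 * hypothetical locals.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 */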
 514
 515static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 516					  struct dma_device *dev,
 517					  dma_filter_fn fn, void *fn_param)
 518{
 519	struct dma_chan *chan;
 520
 521	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
 522		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 523		return NULL;
 524	}
 525	/* devices with multiple channels need special handling as we need to
 526	 * ensure that all channels are either private or public.
 527	 */
 528	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
 529		list_for_each_entry(chan, &dev->channels, device_node) {
 530			/* some channels are already publicly allocated */
 531			if (chan->client_count)
 532				return NULL;
 533		}
 534
 535	list_for_each_entry(chan, &dev->channels, device_node) {
 536		if (chan->client_count) {
 537			dev_dbg(dev->dev, "%s: %s busy\n",
 538				 __func__, dma_chan_name(chan));
 539			continue;
 540		}
 541		if (fn && !fn(chan, fn_param)) {
 542			dev_dbg(dev->dev, "%s: %s filter said false\n",
 543				 __func__, dma_chan_name(chan));
 544			continue;
 545		}
 546		return chan;
 547	}
 548
 549	return NULL;
 550}
 551
 552static struct dma_chan *find_candidate(struct dma_device *device,
 553				       const dma_cap_mask_t *mask,
 554				       dma_filter_fn fn, void *fn_param)
 555{
 556	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
 557	int err;
 558
 559	if (chan) {
 560		/* Found a suitable channel, try to grab, prep, and return it.
 561		 * We first set DMA_PRIVATE to disable balance_ref_count as this
 562		 * channel will not be published in the general-purpose
 563		 * allocator
 564		 */
 565		dma_cap_set(DMA_PRIVATE, device->cap_mask);
 566		device->privatecnt++;
 567		err = dma_chan_get(chan);
 568
 569		if (err) {
 570			if (err == -ENODEV) {
 571				dev_dbg(device->dev, "%s: %s module removed\n",
 572					__func__, dma_chan_name(chan));
 573				list_del_rcu(&device->global_node);
 574			} else
 575				dev_dbg(device->dev,
 576					"%s: failed to get %s: (%d)\n",
 577					 __func__, dma_chan_name(chan), err);
 578
 579			if (--device->privatecnt == 0)
 580				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 581
 582			chan = ERR_PTR(err);
 583		}
 584	}
 585
 586	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 587}
 588
 589/**
 590 * dma_get_slave_channel - try to get specific channel exclusively
 591 * @chan: target channel
 592 */
 593struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 594{
 595	int err = -EBUSY;
 596
 597	/* lock against __dma_request_channel */
 598	mutex_lock(&dma_list_mutex);
 599
 600	if (chan->client_count == 0) {
 601		struct dma_device *device = chan->device;
 602
 603		dma_cap_set(DMA_PRIVATE, device->cap_mask);
 604		device->privatecnt++;
 605		err = dma_chan_get(chan);
 606		if (err) {
 607			dev_dbg(chan->device->dev,
 608				"%s: failed to get %s: (%d)\n",
 609				__func__, dma_chan_name(chan), err);
 610			chan = NULL;
 611			if (--device->privatecnt == 0)
 612				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 613		}
 614	} else
 615		chan = NULL;
 616
 617	mutex_unlock(&dma_list_mutex);
 618
 619
 620	return chan;
 621}
 622EXPORT_SYMBOL_GPL(dma_get_slave_channel);
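/*
 * Illustrative sketch of a typical caller of dma_get_slave_channel(): a
 * device-tree translation helper that picks one specific channel and
 * reserves it.  foo_dma_xlate(), struct foo_dma and its members are
 * hypothetical.
 *
 *	static struct dma_chan *foo_dma_xlate(struct of_phandle_args *spec,
 *					      struct of_dma *ofdma)
 *	{
 *		struct foo_dma *fd = ofdma->of_dma_data;
 *
 *		if (spec->args[0] >= fd->nr_channels)
 *			return NULL;
 *		return dma_get_slave_channel(&fd->chans[spec->args[0]]);
 *	}
 */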
 623
 624struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 625{
 626	dma_cap_mask_t mask;
 627	struct dma_chan *chan;
 628
 629	dma_cap_zero(mask);
 630	dma_cap_set(DMA_SLAVE, mask);
 631
 632	/* lock against __dma_request_channel */
 633	mutex_lock(&dma_list_mutex);
 634
 635	chan = find_candidate(device, &mask, NULL, NULL);
 636
 637	mutex_unlock(&dma_list_mutex);
 638
 639	return IS_ERR(chan) ? NULL : chan;
 640}
 641EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 642
 643/**
 644 * __dma_request_channel - try to allocate an exclusive channel
 645 * @mask: capabilities that the channel must satisfy
 646 * @fn: optional callback to disposition available channels
 647 * @fn_param: opaque parameter to pass to dma_filter_fn
 648 *
 649 * Returns pointer to appropriate DMA channel on success or NULL.
 650 */
 651struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 652				       dma_filter_fn fn, void *fn_param)
 653{
 654	struct dma_device *device, *_d;
 655	struct dma_chan *chan = NULL;
 656
 657	/* Find a channel */
 658	mutex_lock(&dma_list_mutex);
 659	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 660		chan = find_candidate(device, mask, fn, fn_param);
 661		if (!IS_ERR(chan))
 662			break;
 663
 664		chan = NULL;
 665	}
 666	mutex_unlock(&dma_list_mutex);
 667
 668	pr_debug("%s: %s (%s)\n",
 669		 __func__,
 670		 chan ? "success" : "fail",
 671		 chan ? dma_chan_name(chan) : NULL);
 672
 673	return chan;
 674}
 675EXPORT_SYMBOL_GPL(__dma_request_channel);
 676
 677static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
 678						    const char *name,
 679						    struct device *dev)
 680{
 681	int i;
 682
 683	if (!device->filter.mapcnt)
 684		return NULL;
 685
 686	for (i = 0; i < device->filter.mapcnt; i++) {
 687		const struct dma_slave_map *map = &device->filter.map[i];
 688
 689		if (!strcmp(map->devname, dev_name(dev)) &&
 690		    !strcmp(map->slave, name))
 691			return map;
 692	}
 693
 694	return NULL;
 695}
 696
 697/**
 698 * dma_request_chan - try to allocate an exclusive slave channel
 699 * @dev:	pointer to client device structure
 700 * @name:	slave channel name
 701 *
 702 * Returns pointer to appropriate DMA channel on success or an error pointer.
 703 */
 704struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 705{
 706	struct dma_device *d, *_d;
 707	struct dma_chan *chan = NULL;
 708
 709	/* If device-tree is present get slave info from here */
 710	if (dev->of_node)
 711		chan = of_dma_request_slave_channel(dev->of_node, name);
 712
 713	/* If device was enumerated by ACPI get slave info from here */
 714	if (has_acpi_companion(dev) && !chan)
 715		chan = acpi_dma_request_slave_chan_by_name(dev, name);
 716
 717	if (chan) {
  718		/* Valid channel found or requester needs to be deferred */
 719		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
 720			return chan;
 721	}
 722
 723	/* Try to find the channel via the DMA filter map(s) */
 724	mutex_lock(&dma_list_mutex);
 725	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
 726		dma_cap_mask_t mask;
 727		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
 728
 729		if (!map)
 730			continue;
 731
 732		dma_cap_zero(mask);
 733		dma_cap_set(DMA_SLAVE, mask);
 734
 735		chan = find_candidate(d, &mask, d->filter.fn, map->param);
 736		if (!IS_ERR(chan))
 737			break;
 738	}
 739	mutex_unlock(&dma_list_mutex);
 740
 741	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 742}
 743EXPORT_SYMBOL_GPL(dma_request_chan);
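/*
 * Illustrative sketch of the usual consumer pattern for dma_request_chan()
 * from a slave driver's probe(); the "tx" channel name and pdev are
 * hypothetical.
 *
 *	chan = dma_request_chan(&pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 *
 *	// ... configure and use the channel ...
 *	dma_release_channel(chan);
 */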
 744
 745/**
 746 * dma_request_slave_channel - try to allocate an exclusive slave channel
 747 * @dev:	pointer to client device structure
 748 * @name:	slave channel name
 749 *
 750 * Returns pointer to appropriate DMA channel on success or NULL.
 751 */
 752struct dma_chan *dma_request_slave_channel(struct device *dev,
 753					   const char *name)
 754{
 755	struct dma_chan *ch = dma_request_chan(dev, name);
 756	if (IS_ERR(ch))
 757		return NULL;
 758
 759	return ch;
 760}
 761EXPORT_SYMBOL_GPL(dma_request_slave_channel);
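
/*
 * Note (editorial): because the error code, including -EPROBE_DEFER, is
 * dropped here, callers of dma_request_slave_channel() can only do a NULL
 * check and, for example, fall back to PIO. New users that care about
 * deferred probing are better served by dma_request_chan() above.
 */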
 762
 763/**
 764 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 765 * @mask: capabilities that the channel must satisfy
 766 *
 767 * Returns pointer to appropriate DMA channel on success or an error pointer.
 768 */
 769struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
 770{
 771	struct dma_chan *chan;
 772
 773	if (!mask)
 774		return ERR_PTR(-ENODEV);
 775
 776	chan = __dma_request_channel(mask, NULL, NULL);
 777	if (!chan)
 778		chan = ERR_PTR(-ENODEV);
 779
 780	return chan;
 781}
 782EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
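
/*
 * Example (illustrative sketch, hypothetical example_* name): requesting
 * any channel that can perform memcpy and releasing it again with
 * dma_release_channel() below.
 */
static int example_use_any_memcpy_chan(void)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_chan_by_mask(&mask);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* ... prepare, submit and wait for descriptors on @chan ... */

	dma_release_channel(chan);
	return 0;
}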
 783
 784void dma_release_channel(struct dma_chan *chan)
 785{
 786	mutex_lock(&dma_list_mutex);
 787	WARN_ONCE(chan->client_count != 1,
 788		  "chan reference count %d != 1\n", chan->client_count);
 789	dma_chan_put(chan);
 790	/* drop PRIVATE cap enabled by __dma_request_channel() */
 791	if (--chan->device->privatecnt == 0)
 792		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 793	mutex_unlock(&dma_list_mutex);
 794}
 795EXPORT_SYMBOL_GPL(dma_release_channel);
 796
 797/**
 798 * dmaengine_get - register interest in dma_channels
 799 */
 800void dmaengine_get(void)
 801{
 802	struct dma_device *device, *_d;
 803	struct dma_chan *chan;
 804	int err;
 805
 806	mutex_lock(&dma_list_mutex);
 807	dmaengine_ref_count++;
 808
 809	/* try to grab channels */
 810	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 811		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 812			continue;
 813		list_for_each_entry(chan, &device->channels, device_node) {
 814			err = dma_chan_get(chan);
 815			if (err == -ENODEV) {
 816				/* module removed before we could use it */
 817				list_del_rcu(&device->global_node);
 818				break;
 819			} else if (err)
 820				dev_dbg(chan->device->dev,
 821					"%s: failed to get %s: (%d)\n",
 822					__func__, dma_chan_name(chan), err);
 823		}
 824	}
 825
 826	/* if this is the first reference and there were channels
 827	 * waiting we need to rebalance to get those channels
 828	 * incorporated into the channel table
 829	 */
 830	if (dmaengine_ref_count == 1)
 831		dma_channel_rebalance();
 832	mutex_unlock(&dma_list_mutex);
 833}
 834EXPORT_SYMBOL(dmaengine_get);
 835
 836/**
 837 * dmaengine_put - let dma drivers be removed when ref_count == 0
 838 */
 839void dmaengine_put(void)
 840{
 841	struct dma_device *device;
 842	struct dma_chan *chan;
 843
 844	mutex_lock(&dma_list_mutex);
 845	dmaengine_ref_count--;
 846	BUG_ON(dmaengine_ref_count < 0);
 847	/* drop channel references */
 848	list_for_each_entry(device, &dma_device_list, global_node) {
 849		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 850			continue;
 851		list_for_each_entry(chan, &device->channels, device_node)
 852			dma_chan_put(chan);
 853	}
 854	mutex_unlock(&dma_list_mutex);
 855}
 856EXPORT_SYMBOL(dmaengine_put);
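
/*
 * Example (illustrative sketch, hypothetical example_* name): an
 * opportunistic client such as async_tx brackets its use of public channels
 * with dmaengine_get()/dmaengine_put() and picks a channel per operation
 * with dma_find_channel(), falling back to the CPU when none is available.
 */
static void example_opportunistic_memcpy(void)
{
	struct dma_chan *chan;

	dmaengine_get();			/* pin public channels */

	chan = dma_find_channel(DMA_MEMCPY);
	if (chan) {
		/* ... issue memcpy descriptors on @chan ... */
	} else {
		/* ... perform the copy with memcpy() instead ... */
	}

	dmaengine_put();			/* let providers unload again */
}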
 857
 858static bool device_has_all_tx_types(struct dma_device *device)
 859{
 860	/* A device that satisfies this test has channels that will never cause
 861	 * an async_tx channel switch event as all possible operation types can
 862	 * be handled.
 863	 */
 864	#ifdef CONFIG_ASYNC_TX_DMA
 865	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 866		return false;
 867	#endif
 868
 869	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
 870	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 871		return false;
 872	#endif
 873
 874	#if IS_ENABLED(CONFIG_ASYNC_XOR)
 875	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 876		return false;
 877
 878	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
 879	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
 880		return false;
 881	#endif
 882	#endif
 883
 884	#if IS_ENABLED(CONFIG_ASYNC_PQ)
 885	if (!dma_has_cap(DMA_PQ, device->cap_mask))
 886		return false;
 887
 888	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
 889	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
 890		return false;
 891	#endif
 892	#endif
 893
 894	return true;
 895}
 896
 897static int get_dma_id(struct dma_device *device)
 898{
 899	int rc;
 900
 901	do {
 902		if (!ida_pre_get(&dma_ida, GFP_KERNEL))
 903			return -ENOMEM;
 904		mutex_lock(&dma_list_mutex);
 905		rc = ida_get_new(&dma_ida, &device->dev_id);
 906		mutex_unlock(&dma_list_mutex);
 907	} while (rc == -EAGAIN);
 908
 909	return rc;
 910}
 911
 912/**
 913 * dma_async_device_register - register a DMA device with the subsystem
 914 * @device: &dma_device
 915 */
 916int dma_async_device_register(struct dma_device *device)
 917{
 918	int chancnt = 0, rc;
 919	struct dma_chan* chan;
 920	atomic_t *idr_ref;
 921
 922	if (!device)
 923		return -ENODEV;
 924
 925	/* validate device routines */
 926	if (!device->dev) {
 927		pr_err("DMA device must have dev\n");
 928		return -EIO;
 929	}
 930
 931	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
 932		dev_err(device->dev,
 933			"Device claims capability %s, but op is not defined\n",
 934			"DMA_MEMCPY");
 935		return -EIO;
 936	}
 937
 938	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
 939		dev_err(device->dev,
 940			"Device claims capability %s, but op is not defined\n",
 941			"DMA_XOR");
 942		return -EIO;
 943	}
 944
 945	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
 946		dev_err(device->dev,
 947			"Device claims capability %s, but op is not defined\n",
 948			"DMA_XOR_VAL");
 949		return -EIO;
 950	}
 951
 952	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
 953		dev_err(device->dev,
 954			"Device claims capability %s, but op is not defined\n",
 955			"DMA_PQ");
 956		return -EIO;
 957	}
 958
 959	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
 960		dev_err(device->dev,
 961			"Device claims capability %s, but op is not defined\n",
 962			"DMA_PQ_VAL");
 963		return -EIO;
 964	}
 965
 966	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
 967		dev_err(device->dev,
 968			"Device claims capability %s, but op is not defined\n",
 969			"DMA_MEMSET");
 970		return -EIO;
 971	}
 972
 973	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
 974		dev_err(device->dev,
 975			"Device claims capability %s, but op is not defined\n",
 976			"DMA_INTERRUPT");
 977		return -EIO;
 978	}
 979
 980	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
 981		dev_err(device->dev,
 982			"Device claims capability %s, but op is not defined\n",
 983			"DMA_CYCLIC");
 984		return -EIO;
 985	}
 986
 987	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
 988		dev_err(device->dev,
 989			"Device claims capability %s, but op is not defined\n",
 990			"DMA_INTERLEAVE");
 991		return -EIO;
 992	}
 993
 994
 995	if (!device->device_tx_status) {
 996		dev_err(device->dev, "Device tx_status is not defined\n");
 997		return -EIO;
 998	}
 999
1000
1001	if (!device->device_issue_pending) {
1002		dev_err(device->dev, "Device issue_pending is not defined\n");
1003		return -EIO;
1004	}
1005
1006	/* note: this only matters in the
1007	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
1008	 */
1009	if (device_has_all_tx_types(device))
1010		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1011
1012	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
1013	if (!idr_ref)
1014		return -ENOMEM;
1015	rc = get_dma_id(device);
1016	if (rc != 0) {
1017		kfree(idr_ref);
1018		return rc;
1019	}
1020
1021	atomic_set(idr_ref, 0);
1022
1023	/* represent channels in sysfs. Probably want devs too */
1024	list_for_each_entry(chan, &device->channels, device_node) {
1025		rc = -ENOMEM;
1026		chan->local = alloc_percpu(typeof(*chan->local));
1027		if (chan->local == NULL)
1028			goto err_out;
1029		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
1030		if (chan->dev == NULL) {
1031			free_percpu(chan->local);
1032			chan->local = NULL;
1033			goto err_out;
1034		}
1035
1036		chan->chan_id = chancnt++;
1037		chan->dev->device.class = &dma_devclass;
1038		chan->dev->device.parent = device->dev;
1039		chan->dev->chan = chan;
1040		chan->dev->idr_ref = idr_ref;
1041		chan->dev->dev_id = device->dev_id;
1042		atomic_inc(idr_ref);
1043		dev_set_name(&chan->dev->device, "dma%dchan%d",
1044			     device->dev_id, chan->chan_id);
1045
1046		rc = device_register(&chan->dev->device);
1047		if (rc) {
1048			free_percpu(chan->local);
1049			chan->local = NULL;
1050			kfree(chan->dev);
1051			atomic_dec(idr_ref);
1052			goto err_out;
1053		}
1054		chan->client_count = 0;
1055	}
1056
1057	if (!chancnt) {
1058		dev_err(device->dev, "%s: device has no channels!\n", __func__);
1059		rc = -ENODEV;
1060		goto err_out;
1061	}
1062
1063	device->chancnt = chancnt;
1064
1065	mutex_lock(&dma_list_mutex);
1066	/* take references on public channels */
1067	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1068		list_for_each_entry(chan, &device->channels, device_node) {
1069			/* if clients are already waiting for channels we need
1070			 * to take references on their behalf
1071			 */
1072			if (dma_chan_get(chan) == -ENODEV) {
1073				/* note we can only get here for the first
1074				 * channel as the remaining channels are
1075				 * guaranteed to get a reference
1076				 */
1077				rc = -ENODEV;
1078				mutex_unlock(&dma_list_mutex);
1079				goto err_out;
1080			}
1081		}
1082	list_add_tail_rcu(&device->global_node, &dma_device_list);
1083	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1084		device->privatecnt++;	/* Always private */
1085	dma_channel_rebalance();
1086	mutex_unlock(&dma_list_mutex);
1087
1088	return 0;
1089
1090err_out:
1091	/* if we never registered a channel just release the ida */
1092	if (atomic_read(idr_ref) == 0) {
1093		mutex_lock(&dma_list_mutex);
1094		ida_remove(&dma_ida, device->dev_id);
1095		mutex_unlock(&dma_list_mutex);
1096		kfree(idr_ref);
1097		return rc;
1098	}
1099
1100	list_for_each_entry(chan, &device->channels, device_node) {
1101		if (chan->local == NULL)
1102			continue;
1103		mutex_lock(&dma_list_mutex);
1104		chan->dev->chan = NULL;
1105		mutex_unlock(&dma_list_mutex);
1106		device_unregister(&chan->dev->device);
1107		free_percpu(chan->local);
1108	}
1109	return rc;
1110}
1111EXPORT_SYMBOL(dma_async_device_register);
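
/*
 * Example (illustrative sketch, hypothetical example_* names): the minimum a
 * provider has to supply before calling dma_async_device_register() is
 * ->dev, a capability mask whose prep callbacks exist (as validated above),
 * ->device_tx_status, ->device_issue_pending, and at least one channel on
 * ->channels. The stub callbacks below only show the expected signatures.
 */
static struct dma_async_tx_descriptor *
example_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	return NULL;	/* a real driver builds and returns a descriptor */
}

static enum dma_status example_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	return DMA_COMPLETE;	/* a real driver reports actual progress */
}

static void example_issue_pending(struct dma_chan *chan)
{
	/* a real driver kicks its hardware queue here */
}

static int example_provider_register(struct device *dev,
				     struct dma_device *dd,
				     struct dma_chan *chan)
{
	dd->dev = dev;

	dma_cap_zero(dd->cap_mask);
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
	dd->device_prep_dma_memcpy = example_prep_memcpy;
	dd->device_tx_status = example_tx_status;
	dd->device_issue_pending = example_issue_pending;

	INIT_LIST_HEAD(&dd->channels);
	chan->device = dd;
	list_add_tail(&chan->device_node, &dd->channels);

	return dma_async_device_register(dd);
}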
1112
1113/**
1114 * dma_async_device_unregister - unregister a DMA device
1115 * @device: &dma_device
1116 *
1117 * This routine is called by dma driver exit routines; dmaengine holds module
1118 * references to prevent it from being called while channels are in use.
1119 */
1120void dma_async_device_unregister(struct dma_device *device)
1121{
1122	struct dma_chan *chan;
1123
1124	mutex_lock(&dma_list_mutex);
1125	list_del_rcu(&device->global_node);
1126	dma_channel_rebalance();
1127	mutex_unlock(&dma_list_mutex);
1128
1129	list_for_each_entry(chan, &device->channels, device_node) {
1130		WARN_ONCE(chan->client_count,
1131			  "%s called while %d clients hold a reference\n",
1132			  __func__, chan->client_count);
1133		mutex_lock(&dma_list_mutex);
1134		chan->dev->chan = NULL;
1135		mutex_unlock(&dma_list_mutex);
1136		device_unregister(&chan->dev->device);
1137		free_percpu(chan->local);
1138	}
1139}
1140EXPORT_SYMBOL(dma_async_device_unregister);
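
/*
 * Example (illustrative, hypothetical example_* name): a provider's remove
 * path mirrors registration. As the comment above notes, the module
 * references held on behalf of clients guarantee this is not reached while
 * a channel is still in use.
 */
static void example_provider_remove(struct dma_device *dd)
{
	dma_async_device_unregister(dd);
	/* ... quiesce the hardware and free driver-private channel state ... */
}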
1141
1142struct dmaengine_unmap_pool {
1143	struct kmem_cache *cache;
1144	const char *name;
1145	mempool_t *pool;
1146	size_t size;
1147};
1148
1149#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1150static struct dmaengine_unmap_pool unmap_pool[] = {
1151	__UNMAP_POOL(2),
1152	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1153	__UNMAP_POOL(16),
1154	__UNMAP_POOL(128),
1155	__UNMAP_POOL(256),
1156	#endif
1157};
1158
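/*
 * Map a request for @nr unmap addresses to the smallest pool that can hold
 * it: get_count_order() rounds @nr up to the next power of two, so orders
 * 0-1 (up to 2 addresses) use unmap_pool[0], orders 2-4 (up to 16) use
 * unmap_pool[1], orders 5-7 (up to 128) use unmap_pool[2] and order 8
 * (up to 256) uses unmap_pool[3]; the larger pools only exist when
 * CONFIG_DMA_ENGINE_RAID is enabled, and anything bigger is a caller bug.
 */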
1159static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1160{
1161	int order = get_count_order(nr);
1162
1163	switch (order) {
1164	case 0 ... 1:
1165		return &unmap_pool[0];
1166#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1167	case 2 ... 4:
1168		return &unmap_pool[1];
1169	case 5 ... 7:
1170		return &unmap_pool[2];
1171	case 8:
1172		return &unmap_pool[3];
1173#endif
1174	default:
1175		BUG();
1176		return NULL;
1177	}
1178}
1179
1180static void dmaengine_unmap(struct kref *kref)
1181{
1182	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1183	struct device *dev = unmap->dev;
1184	int cnt, i;
1185
1186	cnt = unmap->to_cnt;
1187	for (i = 0; i < cnt; i++)
1188		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1189			       DMA_TO_DEVICE);
1190	cnt += unmap->from_cnt;
1191	for (; i < cnt; i++)
1192		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1193			       DMA_FROM_DEVICE);
1194	cnt += unmap->bidi_cnt;
1195	for (; i < cnt; i++) {
1196		if (unmap->addr[i] == 0)
1197			continue;
1198		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1199			       DMA_BIDIRECTIONAL);
1200	}
1201	cnt = unmap->map_cnt;
1202	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1203}
1204
1205void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1206{
1207	if (unmap)
1208		kref_put(&unmap->kref, dmaengine_unmap);
1209}
1210EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1211
1212static void dmaengine_destroy_unmap_pool(void)
1213{
1214	int i;
1215
1216	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1217		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1218
1219		mempool_destroy(p->pool);
1220		p->pool = NULL;
1221		kmem_cache_destroy(p->cache);
1222		p->cache = NULL;
1223	}
1224}
1225
1226static int __init dmaengine_init_unmap_pool(void)
1227{
1228	int i;
1229
1230	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1231		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1232		size_t size;
1233
1234		size = sizeof(struct dmaengine_unmap_data) +
1235		       sizeof(dma_addr_t) * p->size;
1236
1237		p->cache = kmem_cache_create(p->name, size, 0,
1238					     SLAB_HWCACHE_ALIGN, NULL);
1239		if (!p->cache)
1240			break;
1241		p->pool = mempool_create_slab_pool(1, p->cache);
1242		if (!p->pool)
1243			break;
1244	}
1245
1246	if (i == ARRAY_SIZE(unmap_pool))
1247		return 0;
1248
1249	dmaengine_destroy_unmap_pool();
1250	return -ENOMEM;
1251}
1252
1253struct dmaengine_unmap_data *
1254dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1255{
1256	struct dmaengine_unmap_data *unmap;
1257
1258	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1259	if (!unmap)
1260		return NULL;
1261
1262	memset(unmap, 0, sizeof(*unmap));
1263	kref_init(&unmap->kref);
1264	unmap->dev = dev;
1265	unmap->map_cnt = nr;
1266
1267	return unmap;
1268}
1269EXPORT_SYMBOL(dmaengine_get_unmap_data);
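
/*
 * Example (illustrative sketch, hypothetical example_* name): a user of the
 * unmap pool fills in the DMA_TO_DEVICE addresses first and the
 * DMA_FROM_DEVICE ones after them, matching the order dmaengine_unmap()
 * walks the array above; dma_mapping_error() checks are omitted for
 * brevity. The final dmaengine_unmap_put() drops the reference and undoes
 * the mappings.
 */
static struct dmaengine_unmap_data *
example_map_for_copy(struct dma_chan *chan, struct page *dst,
		     struct page *src, size_t len)
{
	struct device *dev = chan->device->dev;
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
	if (!unmap)
		return NULL;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;

	/* ... attach to a descriptor with dma_set_unmap() and submit ... */

	return unmap;	/* caller drops it with dmaengine_unmap_put() */
}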
1270
1271void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1272	struct dma_chan *chan)
1273{
1274	tx->chan = chan;
1275	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1276	spin_lock_init(&tx->lock);
1277	#endif
1278}
1279EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1280
1281/* dma_wait_for_async_tx - spin wait for a transaction to complete
1282 * @tx: in-flight transaction to wait on
1283 */
1284enum dma_status
1285dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1286{
1287	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1288
1289	if (!tx)
1290		return DMA_COMPLETE;
1291
1292	while (tx->cookie == -EBUSY) {
1293		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1294			dev_err(tx->chan->device->dev,
1295				"%s timeout waiting for descriptor submission\n",
1296				__func__);
1297			return DMA_ERROR;
1298		}
1299		cpu_relax();
1300	}
1301	return dma_sync_wait(tx->chan, tx->cookie);
1302}
1303EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1304
1305/* dma_run_dependencies - helper routine for dma drivers to process
1306 *	(start) dependent operations on their target channel
1307 * @tx: transaction with dependencies
1308 */
1309void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1310{
1311	struct dma_async_tx_descriptor *dep = txd_next(tx);
1312	struct dma_async_tx_descriptor *dep_next;
1313	struct dma_chan *chan;
1314
1315	if (!dep)
1316		return;
1317
1318	/* we'll submit tx->next now, so clear the link */
1319	txd_clear_next(tx);
1320	chan = dep->chan;
1321
1322	/* keep submitting until a channel switch is detected;
1323	 * in that case we will be called again as a result of
1324	 * processing the interrupt from async_tx_channel_switch
1325	 */
1326	for (; dep; dep = dep_next) {
1327		txd_lock(dep);
1328		txd_clear_parent(dep);
1329		dep_next = txd_next(dep);
1330		if (dep_next && dep_next->chan == chan)
1331			txd_clear_next(dep); /* ->next will be submitted */
1332		else
1333			dep_next = NULL; /* submit current dep and terminate */
1334		txd_unlock(dep);
1335
1336		dep->tx_submit(dep);
1337	}
1338
1339	chan->device->device_issue_pending(chan);
1340}
1341EXPORT_SYMBOL_GPL(dma_run_dependencies);
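
/*
 * Example (illustrative sketch, hypothetical example_* name): drivers
 * typically call dma_run_dependencies() from their completion path, after
 * completing the cookie and invoking the client callback, so that any
 * async_tx operations chained behind the descriptor get submitted.
 */
static void example_complete_descriptor(struct dma_async_tx_descriptor *txd)
{
	/* cookie accounting (dma_cookie_complete() in the driver) goes here */
	if (txd->callback)
		txd->callback(txd->callback_param);

	dma_run_dependencies(txd);
}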
1342
1343static int __init dma_bus_init(void)
1344{
1345	int err = dmaengine_init_unmap_pool();
1346
1347	if (err)
1348		return err;
1349	return class_register(&dma_devclass);
1350}
1351arch_initcall(dma_bus_init);
1352
1353