v4.6
   1/*
   2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   3 *
   4 * This program is free software; you can redistribute it and/or modify it
   5 * under the terms of the GNU General Public License as published by the Free
   6 * Software Foundation; either version 2 of the License, or (at your option)
   7 * any later version.
   8 *
   9 * This program is distributed in the hope that it will be useful, but WITHOUT
  10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  12 * more details.
  13 *
  14 * The full GNU General Public License is included in this distribution in the
  15 * file called COPYING.
  16 */
  17
  18/*
  19 * This code implements the DMA subsystem. It provides a HW-neutral interface
  20 * for other kernel code to use asynchronous memory copy capabilities,
  21 * if present, and allows different HW DMA drivers to register as providing
  22 * this capability.
  23 *
  24 * Due to the fact we are accelerating what is already a relatively fast
  25 * operation, the code goes to great lengths to avoid additional overhead,
  26 * such as locking.
  27 *
  28 * LOCKING:
  29 *
   30 * The subsystem keeps a global list of dma_device structs; it is protected by
   31 * a mutex, dma_list_mutex.
  32 *
  33 * A subsystem can get access to a channel by calling dmaengine_get() followed
  34 * by dma_find_channel(), or if it has need for an exclusive channel it can call
  35 * dma_request_channel().  Once a channel is allocated a reference is taken
  36 * against its corresponding driver to disable removal.
  37 *
  38 * Each device has a channels list, which runs unlocked but is never modified
   39 * once the device is registered; it's just set up by the driver.
  40 *
  41 * See Documentation/dmaengine.txt for more details
  42 */
  43
  44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  45
  46#include <linux/platform_device.h>
  47#include <linux/dma-mapping.h>
  48#include <linux/init.h>
  49#include <linux/module.h>
  50#include <linux/mm.h>
  51#include <linux/device.h>
  52#include <linux/dmaengine.h>
  53#include <linux/hardirq.h>
  54#include <linux/spinlock.h>
  55#include <linux/percpu.h>
  56#include <linux/rcupdate.h>
  57#include <linux/mutex.h>
  58#include <linux/jiffies.h>
  59#include <linux/rculist.h>
  60#include <linux/idr.h>
  61#include <linux/slab.h>
  62#include <linux/acpi.h>
  63#include <linux/acpi_dma.h>
  64#include <linux/of_dma.h>
  65#include <linux/mempool.h>
  66
  67static DEFINE_MUTEX(dma_list_mutex);
  68static DEFINE_IDR(dma_idr);
  69static LIST_HEAD(dma_device_list);
  70static long dmaengine_ref_count;
  71
  72/* --- sysfs implementation --- */
  73
  74/**
   75 * dev_to_dma_chan - convert a device pointer to its sysfs container object
  76 * @dev - device node
  77 *
  78 * Must be called under dma_list_mutex
  79 */
  80static struct dma_chan *dev_to_dma_chan(struct device *dev)
  81{
  82	struct dma_chan_dev *chan_dev;
  83
  84	chan_dev = container_of(dev, typeof(*chan_dev), device);
  85	return chan_dev->chan;
  86}
  87
  88static ssize_t memcpy_count_show(struct device *dev,
  89				 struct device_attribute *attr, char *buf)
  90{
  91	struct dma_chan *chan;
  92	unsigned long count = 0;
  93	int i;
  94	int err;
  95
  96	mutex_lock(&dma_list_mutex);
  97	chan = dev_to_dma_chan(dev);
  98	if (chan) {
  99		for_each_possible_cpu(i)
 100			count += per_cpu_ptr(chan->local, i)->memcpy_count;
 101		err = sprintf(buf, "%lu\n", count);
 102	} else
 103		err = -ENODEV;
 104	mutex_unlock(&dma_list_mutex);
 105
 106	return err;
 107}
 108static DEVICE_ATTR_RO(memcpy_count);
 109
 110static ssize_t bytes_transferred_show(struct device *dev,
 111				      struct device_attribute *attr, char *buf)
 112{
 113	struct dma_chan *chan;
 114	unsigned long count = 0;
 115	int i;
 116	int err;
 117
 118	mutex_lock(&dma_list_mutex);
 119	chan = dev_to_dma_chan(dev);
 120	if (chan) {
 121		for_each_possible_cpu(i)
 122			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
 123		err = sprintf(buf, "%lu\n", count);
 124	} else
 125		err = -ENODEV;
 126	mutex_unlock(&dma_list_mutex);
 127
 128	return err;
 129}
 130static DEVICE_ATTR_RO(bytes_transferred);
 131
 132static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
 133			   char *buf)
 134{
 135	struct dma_chan *chan;
 136	int err;
 137
 138	mutex_lock(&dma_list_mutex);
 139	chan = dev_to_dma_chan(dev);
 140	if (chan)
 141		err = sprintf(buf, "%d\n", chan->client_count);
 142	else
 143		err = -ENODEV;
 144	mutex_unlock(&dma_list_mutex);
 145
 146	return err;
 147}
 148static DEVICE_ATTR_RO(in_use);
 149
 150static struct attribute *dma_dev_attrs[] = {
 151	&dev_attr_memcpy_count.attr,
 152	&dev_attr_bytes_transferred.attr,
 153	&dev_attr_in_use.attr,
 154	NULL,
 155};
 156ATTRIBUTE_GROUPS(dma_dev);
 157
 158static void chan_dev_release(struct device *dev)
 159{
 160	struct dma_chan_dev *chan_dev;
 161
 162	chan_dev = container_of(dev, typeof(*chan_dev), device);
 163	if (atomic_dec_and_test(chan_dev->idr_ref)) {
 164		mutex_lock(&dma_list_mutex);
 165		idr_remove(&dma_idr, chan_dev->dev_id);
 166		mutex_unlock(&dma_list_mutex);
 167		kfree(chan_dev->idr_ref);
 168	}
 169	kfree(chan_dev);
 170}
 171
 172static struct class dma_devclass = {
 173	.name		= "dma",
 174	.dev_groups	= dma_dev_groups,
 175	.dev_release	= chan_dev_release,
 176};
 177
 178/* --- client and device registration --- */
 179
 180#define dma_device_satisfies_mask(device, mask) \
 181	__dma_device_satisfies_mask((device), &(mask))
 182static int
 183__dma_device_satisfies_mask(struct dma_device *device,
 184			    const dma_cap_mask_t *want)
 185{
 186	dma_cap_mask_t has;
 187
 188	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 189		DMA_TX_TYPE_END);
 190	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 191}
 192
 193static struct module *dma_chan_to_owner(struct dma_chan *chan)
 194{
 195	return chan->device->dev->driver->owner;
 196}
 197
 198/**
 199 * balance_ref_count - catch up the channel reference count
 200 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 201 *
 202 * balance_ref_count must be called under dma_list_mutex
 203 */
 204static void balance_ref_count(struct dma_chan *chan)
 205{
 206	struct module *owner = dma_chan_to_owner(chan);
 207
 208	while (chan->client_count < dmaengine_ref_count) {
 209		__module_get(owner);
 210		chan->client_count++;
 211	}
 212}
 213
 214/**
 215 * dma_chan_get - try to grab a dma channel's parent driver module
 216 * @chan - channel to grab
 217 *
 218 * Must be called under dma_list_mutex
 219 */
 220static int dma_chan_get(struct dma_chan *chan)
 221{
 222	struct module *owner = dma_chan_to_owner(chan);
 223	int ret;
 224
 225	/* The channel is already in use, update client count */
 226	if (chan->client_count) {
 227		__module_get(owner);
 228		goto out;
 229	}
 230
 231	if (!try_module_get(owner))
 232		return -ENODEV;
 233
 234	/* allocate upon first client reference */
 235	if (chan->device->device_alloc_chan_resources) {
 236		ret = chan->device->device_alloc_chan_resources(chan);
 237		if (ret < 0)
 238			goto err_out;
 239	}
 240
 241	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 242		balance_ref_count(chan);
 243
 244out:
 245	chan->client_count++;
 246	return 0;
 247
 248err_out:
 249	module_put(owner);
 250	return ret;
 251}
 252
 253/**
 254 * dma_chan_put - drop a reference to a dma channel's parent driver module
 255 * @chan - channel to release
 256 *
 257 * Must be called under dma_list_mutex
 258 */
 259static void dma_chan_put(struct dma_chan *chan)
 260{
 261	/* This channel is not in use, bail out */
 262	if (!chan->client_count)
 263		return;
 264
 265	chan->client_count--;
 266	module_put(dma_chan_to_owner(chan));
 267
 268	/* This channel is not in use anymore, free it */
 269	if (!chan->client_count && chan->device->device_free_chan_resources) {
 270		/* Make sure all operations have completed */
 271		dmaengine_synchronize(chan);
 272		chan->device->device_free_chan_resources(chan);
 273	}
 274
 275	/* If the channel is used via a DMA request router, free the mapping */
 276	if (chan->router && chan->router->route_free) {
 277		chan->router->route_free(chan->router->dev, chan->route_data);
 278		chan->router = NULL;
 279		chan->route_data = NULL;
 280	}
 281}
 282
 283enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 284{
 285	enum dma_status status;
 286	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 287
 288	dma_async_issue_pending(chan);
 289	do {
 290		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 291		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 292			pr_err("%s: timeout!\n", __func__);
 293			return DMA_ERROR;
 294		}
 295		if (status != DMA_IN_PROGRESS)
 296			break;
 297		cpu_relax();
 298	} while (1);
 299
 300	return status;
 301}
 302EXPORT_SYMBOL(dma_sync_wait);
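/*
 * Illustrative sketch, not part of the original file: polling a submitted
 * descriptor to completion with dma_sync_wait().  "chan" and "desc" are
 * assumed to come from dma_request_chan()/dmaengine_prep_*() elsewhere;
 * my_copy_sync() is a hypothetical name.
 */
static int my_copy_sync(struct dma_chan *chan,
			struct dma_async_tx_descriptor *desc)
{
	dma_cookie_t cookie;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/*
	 * dma_sync_wait() issues pending work and busy-waits (cpu_relax)
	 * for up to five seconds, so it is only suitable for slow paths.
	 */
	return dma_sync_wait(chan, cookie) == DMA_COMPLETE ? 0 : -EIO;
}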
 303
 304/**
 305 * dma_cap_mask_all - enable iteration over all operation types
 306 */
 307static dma_cap_mask_t dma_cap_mask_all;
 308
 309/**
 310 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 311 * @chan - associated channel for this entry
 312 */
 313struct dma_chan_tbl_ent {
 314	struct dma_chan *chan;
 315};
 316
 317/**
 318 * channel_table - percpu lookup table for memory-to-memory offload providers
 319 */
 320static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 321
 322static int __init dma_channel_table_init(void)
 323{
 324	enum dma_transaction_type cap;
 325	int err = 0;
 326
 327	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 328
 329	/* 'interrupt', 'private', and 'slave' are channel capabilities,
 330	 * but are not associated with an operation so they do not need
 331	 * an entry in the channel_table
 332	 */
 333	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
 334	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 335	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 336
 337	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
 338		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
 339		if (!channel_table[cap]) {
 340			err = -ENOMEM;
 341			break;
 342		}
 343	}
 344
 345	if (err) {
 346		pr_err("initialization failure\n");
 347		for_each_dma_cap_mask(cap, dma_cap_mask_all)
 348			free_percpu(channel_table[cap]);
 349	}
 350
 351	return err;
 352}
 353arch_initcall(dma_channel_table_init);
 354
 355/**
 356 * dma_find_channel - find a channel to carry out the operation
 357 * @tx_type: transaction type
 358 */
 359struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 360{
 361	return this_cpu_read(channel_table[tx_type]->chan);
 362}
 363EXPORT_SYMBOL(dma_find_channel);
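/*
 * Illustrative sketch, not part of the original file: the shared-channel
 * path from the header comment.  dmaengine_get() pins providers and fills
 * the per-cpu table; dma_find_channel() then returns a (possibly NULL)
 * channel for the requested operation type.  my_memcpy_offload_possible()
 * is a hypothetical name.
 */
static bool my_memcpy_offload_possible(void)
{
	struct dma_chan *chan;
	bool available;

	dmaengine_get();
	chan = dma_find_channel(DMA_MEMCPY);
	available = chan != NULL;
	/* ... prepare and submit descriptors on chan while holding the reference ... */
	dmaengine_put();

	return available;
}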
 364
 365/**
 366 * dma_issue_pending_all - flush all pending operations across all channels
 367 */
 368void dma_issue_pending_all(void)
 369{
 370	struct dma_device *device;
 371	struct dma_chan *chan;
 372
 373	rcu_read_lock();
 374	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 375		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 376			continue;
 377		list_for_each_entry(chan, &device->channels, device_node)
 378			if (chan->client_count)
 379				device->device_issue_pending(chan);
 380	}
 381	rcu_read_unlock();
 382}
 383EXPORT_SYMBOL(dma_issue_pending_all);
 384
 385/**
 386 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 387 */
 388static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 389{
 390	int node = dev_to_node(chan->device->dev);
 391	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
 392}
 393
 394/**
 395 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 396 * @cap: capability to match
 397 * @cpu: cpu index which the channel should be close to
 398 *
 399 * If some channels are close to the given cpu, the one with the lowest
 400 * reference count is returned. Otherwise, cpu is ignored and only the
 401 * reference count is taken into account.
 402 * Must be called under dma_list_mutex.
 403 */
 404static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 405{
 406	struct dma_device *device;
 407	struct dma_chan *chan;
 408	struct dma_chan *min = NULL;
 409	struct dma_chan *localmin = NULL;
 410
 411	list_for_each_entry(device, &dma_device_list, global_node) {
 412		if (!dma_has_cap(cap, device->cap_mask) ||
 413		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 414			continue;
 415		list_for_each_entry(chan, &device->channels, device_node) {
 416			if (!chan->client_count)
 417				continue;
 418			if (!min || chan->table_count < min->table_count)
 419				min = chan;
 420
 421			if (dma_chan_is_local(chan, cpu))
 422				if (!localmin ||
 423				    chan->table_count < localmin->table_count)
 424					localmin = chan;
 425		}
 426	}
 427
 428	chan = localmin ? localmin : min;
 429
 430	if (chan)
 431		chan->table_count++;
 432
 433	return chan;
 434}
 435
 436/**
 437 * dma_channel_rebalance - redistribute the available channels
 438 *
 439 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 440 * operation type) in the SMP case,  and operation isolation (avoid
 441 * multi-tasking channels) in the non-SMP case.  Must be called under
 442 * dma_list_mutex.
 443 */
 444static void dma_channel_rebalance(void)
 445{
 446	struct dma_chan *chan;
 447	struct dma_device *device;
 448	int cpu;
 449	int cap;
 450
 451	/* undo the last distribution */
 452	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 453		for_each_possible_cpu(cpu)
 454			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 455
 456	list_for_each_entry(device, &dma_device_list, global_node) {
 457		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 458			continue;
 459		list_for_each_entry(chan, &device->channels, device_node)
 460			chan->table_count = 0;
 461	}
 462
 463	/* don't populate the channel_table if no clients are available */
 464	if (!dmaengine_ref_count)
 465		return;
 466
 467	/* redistribute available channels */
 468	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 469		for_each_online_cpu(cpu) {
 470			chan = min_chan(cap, cpu);
 471			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 472		}
 473}
 474
 475int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 476{
 477	struct dma_device *device;
 478
 479	if (!chan || !caps)
 480		return -EINVAL;
 481
 482	device = chan->device;
 483
 484	/* check if the channel supports slave transactions */
 485	if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
 486		return -ENXIO;
 487
 488	/*
 489	 * Check whether it reports it uses the generic slave
 490	 * capabilities, if not, that means it doesn't support any
 491	 * kind of slave capabilities reporting.
 492	 */
 493	if (!device->directions)
 494		return -ENXIO;
 495
 496	caps->src_addr_widths = device->src_addr_widths;
 497	caps->dst_addr_widths = device->dst_addr_widths;
 498	caps->directions = device->directions;
 499	caps->max_burst = device->max_burst;
 500	caps->residue_granularity = device->residue_granularity;
 501	caps->descriptor_reuse = device->descriptor_reuse;
 502
 503	/*
 504	 * Some devices implement only pause (e.g. to get residuum) but no
 505	 * resume. However cmd_pause is advertised as pause AND resume.
 506	 */
 507	caps->cmd_pause = !!(device->device_pause && device->device_resume);
 508	caps->cmd_terminate = !!device->device_terminate_all;
 509
 510	return 0;
 511}
 512EXPORT_SYMBOL_GPL(dma_get_slave_caps);
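/*
 * Illustrative sketch, not part of the original file: a client checking the
 * reported slave capabilities before configuring a channel.  my_check_chan()
 * is a hypothetical name; the required direction and bus width are examples.
 */
static int my_check_chan(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;		/* provider does not report caps */

	if (!(caps.directions & BIT(DMA_MEM_TO_DEV)))
		return -EINVAL;		/* cannot do memory-to-device */

	if (!(caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
		return -EINVAL;		/* 32-bit register writes unsupported */

	return 0;
}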
 513
 514static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 515					  struct dma_device *dev,
 516					  dma_filter_fn fn, void *fn_param)
 517{
 518	struct dma_chan *chan;
 519
 520	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
 521		pr_debug("%s: wrong capabilities\n", __func__);
 522		return NULL;
 523	}
 524	/* devices with multiple channels need special handling as we need to
 525	 * ensure that all channels are either private or public.
 526	 */
 527	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
 528		list_for_each_entry(chan, &dev->channels, device_node) {
 529			/* some channels are already publicly allocated */
 530			if (chan->client_count)
 531				return NULL;
 532		}
 533
 534	list_for_each_entry(chan, &dev->channels, device_node) {
 535		if (chan->client_count) {
 536			pr_debug("%s: %s busy\n",
 537				 __func__, dma_chan_name(chan));
 538			continue;
 539		}
 540		if (fn && !fn(chan, fn_param)) {
 541			pr_debug("%s: %s filter said false\n",
 542				 __func__, dma_chan_name(chan));
 543			continue;
 544		}
 545		return chan;
 546	}
 547
 548	return NULL;
 549}
 550
 551static struct dma_chan *find_candidate(struct dma_device *device,
 552				       const dma_cap_mask_t *mask,
 553				       dma_filter_fn fn, void *fn_param)
 554{
 555	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
 556	int err;
 557
 558	if (chan) {
 559		/* Found a suitable channel, try to grab, prep, and return it.
 560		 * We first set DMA_PRIVATE to disable balance_ref_count as this
 561		 * channel will not be published in the general-purpose
 562		 * allocator
 563		 */
 564		dma_cap_set(DMA_PRIVATE, device->cap_mask);
 565		device->privatecnt++;
 566		err = dma_chan_get(chan);
 567
 568		if (err) {
 569			if (err == -ENODEV) {
 570				pr_debug("%s: %s module removed\n", __func__,
 571					 dma_chan_name(chan));
 572				list_del_rcu(&device->global_node);
 573			} else
 574				pr_debug("%s: failed to get %s: (%d)\n",
 575					 __func__, dma_chan_name(chan), err);
 576
 577			if (--device->privatecnt == 0)
 578				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 579
 580			chan = ERR_PTR(err);
 581		}
 582	}
 583
 584	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 585}
 586
 587/**
 588 * dma_get_slave_channel - try to get specific channel exclusively
 589 * @chan: target channel
 590 */
 591struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 592{
 593	int err = -EBUSY;
 594
 595	/* lock against __dma_request_channel */
 596	mutex_lock(&dma_list_mutex);
 597
 598	if (chan->client_count == 0) {
 599		struct dma_device *device = chan->device;
 600
 601		dma_cap_set(DMA_PRIVATE, device->cap_mask);
 602		device->privatecnt++;
 603		err = dma_chan_get(chan);
 604		if (err) {
 605			pr_debug("%s: failed to get %s: (%d)\n",
 606				__func__, dma_chan_name(chan), err);
 607			chan = NULL;
 608			if (--device->privatecnt == 0)
 609				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 610		}
 611	} else
 612		chan = NULL;
 613
 614	mutex_unlock(&dma_list_mutex);
 615
 616
 617	return chan;
 618}
 619EXPORT_SYMBOL_GPL(dma_get_slave_channel);
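/*
 * Illustrative sketch, not part of the original file: dma_get_slave_channel()
 * is typically called from a provider's of_dma translation callback to hand
 * out one specific channel.  struct my_dmachan/my_dmadev and my_of_xlate()
 * are hypothetical.
 */
struct my_dmachan {
	struct dma_chan common;
};

struct my_dmadev {
	unsigned int nr_channels;
	struct my_dmachan *chans;
};

static struct dma_chan *my_of_xlate(struct of_phandle_args *dma_spec,
				    struct of_dma *ofdma)
{
	struct my_dmadev *md = ofdma->of_dma_data;
	unsigned int request;

	if (dma_spec->args_count != 1)
		return NULL;

	request = dma_spec->args[0];
	if (request >= md->nr_channels)
		return NULL;

	/* returns the channel on success, NULL if it is already in use */
	return dma_get_slave_channel(&md->chans[request].common);
}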
 620
 621struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 622{
 623	dma_cap_mask_t mask;
 624	struct dma_chan *chan;
 625
 626	dma_cap_zero(mask);
 627	dma_cap_set(DMA_SLAVE, mask);
 628
 629	/* lock against __dma_request_channel */
 630	mutex_lock(&dma_list_mutex);
 631
 632	chan = find_candidate(device, &mask, NULL, NULL);
 633
 634	mutex_unlock(&dma_list_mutex);
 635
 636	return IS_ERR(chan) ? NULL : chan;
 637}
 638EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 639
 640/**
 641 * __dma_request_channel - try to allocate an exclusive channel
 642 * @mask: capabilities that the channel must satisfy
 643 * @fn: optional callback to disposition available channels
 644 * @fn_param: opaque parameter to pass to dma_filter_fn
 645 *
 646 * Returns pointer to appropriate DMA channel on success or NULL.
 647 */
 648struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 649				       dma_filter_fn fn, void *fn_param)
 650{
 651	struct dma_device *device, *_d;
 652	struct dma_chan *chan = NULL;
 653
 654	/* Find a channel */
 655	mutex_lock(&dma_list_mutex);
 656	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 657		chan = find_candidate(device, mask, fn, fn_param);
 658		if (!IS_ERR(chan))
 659			break;
 660
 661		chan = NULL;
 662	}
 663	mutex_unlock(&dma_list_mutex);
 664
 665	pr_debug("%s: %s (%s)\n",
 666		 __func__,
 667		 chan ? "success" : "fail",
 668		 chan ? dma_chan_name(chan) : NULL);
 669
 670	return chan;
 671}
 672EXPORT_SYMBOL_GPL(__dma_request_channel);
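/*
 * Illustrative sketch, not part of the original file: allocating an exclusive
 * channel through the dma_request_channel() wrapper (which, in this kernel
 * version, expands to __dma_request_channel(&mask, fn, fn_param)), with an
 * optional filter callback.  my_filter() and my_request_memcpy_chan() are
 * hypothetical names.
 */
static bool my_filter(struct dma_chan *chan, void *param)
{
	/* accept only channels belonging to the given controller device */
	return chan->device->dev == param;
}

static struct dma_chan *my_request_memcpy_chan(struct device *ctrl)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* NULL is returned when no free, matching channel exists */
	return dma_request_channel(mask, my_filter, ctrl);
}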
 673
 674static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
 675						    const char *name,
 676						    struct device *dev)
 677{
 678	int i;
 679
 680	if (!device->filter.mapcnt)
 681		return NULL;
 682
 683	for (i = 0; i < device->filter.mapcnt; i++) {
 684		const struct dma_slave_map *map = &device->filter.map[i];
 685
 686		if (!strcmp(map->devname, dev_name(dev)) &&
 687		    !strcmp(map->slave, name))
 688			return map;
 689	}
 690
 691	return NULL;
 692}
 693
 694/**
 695 * dma_request_chan - try to allocate an exclusive slave channel
 696 * @dev:	pointer to client device structure
 697 * @name:	slave channel name
 698 *
 699 * Returns pointer to appropriate DMA channel on success or an error pointer.
 700 */
 701struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 702{
 703	struct dma_device *d, *_d;
 704	struct dma_chan *chan = NULL;
 705
 706	/* If device-tree is present get slave info from here */
 707	if (dev->of_node)
 708		chan = of_dma_request_slave_channel(dev->of_node, name);
 709
 710	/* If device was enumerated by ACPI get slave info from here */
 711	if (has_acpi_companion(dev) && !chan)
 712		chan = acpi_dma_request_slave_chan_by_name(dev, name);
 713
 714	if (chan) {
  715		/* Valid channel found or requester needs to be deferred */
 716		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
 717			return chan;
 718	}
 719
 720	/* Try to find the channel via the DMA filter map(s) */
 721	mutex_lock(&dma_list_mutex);
 722	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
 723		dma_cap_mask_t mask;
 724		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
 725
 726		if (!map)
 727			continue;
 728
 729		dma_cap_zero(mask);
 730		dma_cap_set(DMA_SLAVE, mask);
 731
 732		chan = find_candidate(d, &mask, d->filter.fn, map->param);
 733		if (!IS_ERR(chan))
 734			break;
 735	}
 736	mutex_unlock(&dma_list_mutex);
 737
 738	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 739}
 740EXPORT_SYMBOL_GPL(dma_request_chan);
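/*
 * Illustrative sketch, not part of the original file: a typical slave-DMA
 * client built on dma_request_chan().  The channel name "rx" is assumed to
 * exist in the client's DT/ACPI/filter-map description; my_start_rx() is a
 * hypothetical name, and dmaengine_slave_config()/buffer mapping are omitted.
 * A real driver would keep "chan" in its private state and release it with
 * dma_release_channel() on teardown.
 */
static int my_start_rx(struct device *dev, dma_addr_t buf, size_t len,
		       dma_async_tx_callback done, void *done_arg)
{
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);		/* may be -EPROBE_DEFER */

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	desc->callback = done;
	desc->callback_param = done_arg;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie)) {
		dma_release_channel(chan);
		return -EIO;
	}

	dma_async_issue_pending(chan);		/* actually start the transfer */
	return 0;
}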
 741
 742/**
 743 * dma_request_slave_channel - try to allocate an exclusive slave channel
 744 * @dev:	pointer to client device structure
 745 * @name:	slave channel name
 746 *
 747 * Returns pointer to appropriate DMA channel on success or NULL.
 748 */
 749struct dma_chan *dma_request_slave_channel(struct device *dev,
 750					   const char *name)
 751{
 752	struct dma_chan *ch = dma_request_chan(dev, name);
 753	if (IS_ERR(ch))
 754		return NULL;
 755
 756	return ch;
 757}
 758EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 759
 760/**
 761 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 762 * @mask: capabilities that the channel must satisfy
 763 *
 764 * Returns pointer to appropriate DMA channel on success or an error pointer.
 765 */
 766struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
 767{
 768	struct dma_chan *chan;
 769
 770	if (!mask)
 771		return ERR_PTR(-ENODEV);
 772
 773	chan = __dma_request_channel(mask, NULL, NULL);
 774	if (!chan)
 775		chan = ERR_PTR(-ENODEV);
 776
 777	return chan;
 778}
 779EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
 780
 781void dma_release_channel(struct dma_chan *chan)
 782{
 783	mutex_lock(&dma_list_mutex);
 784	WARN_ONCE(chan->client_count != 1,
 785		  "chan reference count %d != 1\n", chan->client_count);
 786	dma_chan_put(chan);
 787	/* drop PRIVATE cap enabled by __dma_request_channel() */
 788	if (--chan->device->privatecnt == 0)
 789		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 790	mutex_unlock(&dma_list_mutex);
 791}
 792EXPORT_SYMBOL_GPL(dma_release_channel);
 793
 794/**
 795 * dmaengine_get - register interest in dma_channels
 796 */
 797void dmaengine_get(void)
 798{
 799	struct dma_device *device, *_d;
 800	struct dma_chan *chan;
 801	int err;
 802
 803	mutex_lock(&dma_list_mutex);
 804	dmaengine_ref_count++;
 805
 806	/* try to grab channels */
 807	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 808		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 809			continue;
 810		list_for_each_entry(chan, &device->channels, device_node) {
 811			err = dma_chan_get(chan);
 812			if (err == -ENODEV) {
 813				/* module removed before we could use it */
 814				list_del_rcu(&device->global_node);
 815				break;
 816			} else if (err)
 817				pr_debug("%s: failed to get %s: (%d)\n",
 818				       __func__, dma_chan_name(chan), err);
 819		}
 820	}
 821
 822	/* if this is the first reference and there were channels
 823	 * waiting we need to rebalance to get those channels
 824	 * incorporated into the channel table
 825	 */
 826	if (dmaengine_ref_count == 1)
 827		dma_channel_rebalance();
 828	mutex_unlock(&dma_list_mutex);
 829}
 830EXPORT_SYMBOL(dmaengine_get);
 831
 832/**
 833 * dmaengine_put - let dma drivers be removed when ref_count == 0
 834 */
 835void dmaengine_put(void)
 836{
 837	struct dma_device *device;
 838	struct dma_chan *chan;
 839
 840	mutex_lock(&dma_list_mutex);
 841	dmaengine_ref_count--;
 842	BUG_ON(dmaengine_ref_count < 0);
 843	/* drop channel references */
 844	list_for_each_entry(device, &dma_device_list, global_node) {
 845		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 846			continue;
 847		list_for_each_entry(chan, &device->channels, device_node)
 848			dma_chan_put(chan);
 849	}
 850	mutex_unlock(&dma_list_mutex);
 851}
 852EXPORT_SYMBOL(dmaengine_put);
 853
 854static bool device_has_all_tx_types(struct dma_device *device)
 855{
 856	/* A device that satisfies this test has channels that will never cause
 857	 * an async_tx channel switch event as all possible operation types can
 858	 * be handled.
 859	 */
 860	#ifdef CONFIG_ASYNC_TX_DMA
 861	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 862		return false;
 863	#endif
 864
 865	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
 866	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 867		return false;
 868	#endif
 869
 870	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
 871	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 872		return false;
 873
 874	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
 875	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
 876		return false;
 877	#endif
 878	#endif
 879
 880	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
 881	if (!dma_has_cap(DMA_PQ, device->cap_mask))
 882		return false;
 883
 884	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
 885	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
 886		return false;
 887	#endif
 888	#endif
 889
 890	return true;
 891}
 892
 893static int get_dma_id(struct dma_device *device)
 894{
 895	int rc;
 896
 897	mutex_lock(&dma_list_mutex);
 898
 899	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
 900	if (rc >= 0)
 901		device->dev_id = rc;
 902
 903	mutex_unlock(&dma_list_mutex);
 904	return rc < 0 ? rc : 0;
 905}
 906
 907/**
 908 * dma_async_device_register - registers DMA devices found
 909 * @device: &dma_device
 910 */
 911int dma_async_device_register(struct dma_device *device)
 912{
 913	int chancnt = 0, rc;
 914	struct dma_chan* chan;
 915	atomic_t *idr_ref;
 916
 917	if (!device)
 918		return -ENODEV;
 919
 920	/* validate device routines */
 921	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
 922		!device->device_prep_dma_memcpy);
 923	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
 924		!device->device_prep_dma_xor);
 925	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
 926		!device->device_prep_dma_xor_val);
 927	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
 928		!device->device_prep_dma_pq);
 929	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
 930		!device->device_prep_dma_pq_val);
 931	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
 932		!device->device_prep_dma_memset);
 933	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
 934		!device->device_prep_dma_interrupt);
 935	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
 936		!device->device_prep_dma_sg);
 937	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
 938		!device->device_prep_dma_cyclic);
 939	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
 940		!device->device_prep_interleaved_dma);
 941
 942	BUG_ON(!device->device_tx_status);
 943	BUG_ON(!device->device_issue_pending);
 944	BUG_ON(!device->dev);
 945
 946	/* note: this only matters in the
 947	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 948	 */
 949	if (device_has_all_tx_types(device))
 950		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
 951
 952	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
 953	if (!idr_ref)
 954		return -ENOMEM;
 955	rc = get_dma_id(device);
 956	if (rc != 0) {
 957		kfree(idr_ref);
 958		return rc;
 959	}
 960
 961	atomic_set(idr_ref, 0);
 962
 963	/* represent channels in sysfs. Probably want devs too */
 964	list_for_each_entry(chan, &device->channels, device_node) {
 965		rc = -ENOMEM;
 966		chan->local = alloc_percpu(typeof(*chan->local));
 967		if (chan->local == NULL)
 968			goto err_out;
 969		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
 970		if (chan->dev == NULL) {
 971			free_percpu(chan->local);
 972			chan->local = NULL;
 973			goto err_out;
 974		}
 975
 976		chan->chan_id = chancnt++;
 977		chan->dev->device.class = &dma_devclass;
 978		chan->dev->device.parent = device->dev;
 979		chan->dev->chan = chan;
 980		chan->dev->idr_ref = idr_ref;
 981		chan->dev->dev_id = device->dev_id;
 982		atomic_inc(idr_ref);
 983		dev_set_name(&chan->dev->device, "dma%dchan%d",
 984			     device->dev_id, chan->chan_id);
 985
 986		rc = device_register(&chan->dev->device);
 987		if (rc) {
 988			free_percpu(chan->local);
 989			chan->local = NULL;
 990			kfree(chan->dev);
 991			atomic_dec(idr_ref);
 992			goto err_out;
 993		}
 994		chan->client_count = 0;
 995	}
 996	device->chancnt = chancnt;
 997
 998	mutex_lock(&dma_list_mutex);
 999	/* take references on public channels */
1000	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1001		list_for_each_entry(chan, &device->channels, device_node) {
1002			/* if clients are already waiting for channels we need
1003			 * to take references on their behalf
1004			 */
1005			if (dma_chan_get(chan) == -ENODEV) {
1006				/* note we can only get here for the first
1007				 * channel as the remaining channels are
1008				 * guaranteed to get a reference
1009				 */
1010				rc = -ENODEV;
1011				mutex_unlock(&dma_list_mutex);
1012				goto err_out;
1013			}
1014		}
1015	list_add_tail_rcu(&device->global_node, &dma_device_list);
1016	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1017		device->privatecnt++;	/* Always private */
1018	dma_channel_rebalance();
1019	mutex_unlock(&dma_list_mutex);
1020
1021	return 0;
1022
1023err_out:
1024	/* if we never registered a channel just release the idr */
1025	if (atomic_read(idr_ref) == 0) {
1026		mutex_lock(&dma_list_mutex);
1027		idr_remove(&dma_idr, device->dev_id);
1028		mutex_unlock(&dma_list_mutex);
1029		kfree(idr_ref);
1030		return rc;
1031	}
1032
1033	list_for_each_entry(chan, &device->channels, device_node) {
1034		if (chan->local == NULL)
1035			continue;
1036		mutex_lock(&dma_list_mutex);
1037		chan->dev->chan = NULL;
1038		mutex_unlock(&dma_list_mutex);
1039		device_unregister(&chan->dev->device);
1040		free_percpu(chan->local);
1041	}
1042	return rc;
1043}
1044EXPORT_SYMBOL(dma_async_device_register);
1045
1046/**
1047 * dma_async_device_unregister - unregister a DMA device
1048 * @device: &dma_device
1049 *
1050 * This routine is called by dma driver exit routines, dmaengine holds module
1051 * references to prevent it being called while channels are in use.
1052 */
1053void dma_async_device_unregister(struct dma_device *device)
1054{
1055	struct dma_chan *chan;
1056
1057	mutex_lock(&dma_list_mutex);
1058	list_del_rcu(&device->global_node);
1059	dma_channel_rebalance();
1060	mutex_unlock(&dma_list_mutex);
1061
1062	list_for_each_entry(chan, &device->channels, device_node) {
1063		WARN_ONCE(chan->client_count,
1064			  "%s called while %d clients hold a reference\n",
1065			  __func__, chan->client_count);
1066		mutex_lock(&dma_list_mutex);
1067		chan->dev->chan = NULL;
1068		mutex_unlock(&dma_list_mutex);
1069		device_unregister(&chan->dev->device);
1070		free_percpu(chan->local);
1071	}
1072}
1073EXPORT_SYMBOL(dma_async_device_unregister);
1074
1075struct dmaengine_unmap_pool {
1076	struct kmem_cache *cache;
1077	const char *name;
1078	mempool_t *pool;
1079	size_t size;
1080};
1081
1082#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1083static struct dmaengine_unmap_pool unmap_pool[] = {
1084	__UNMAP_POOL(2),
1085	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1086	__UNMAP_POOL(16),
1087	__UNMAP_POOL(128),
1088	__UNMAP_POOL(256),
1089	#endif
1090};
1091
1092static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1093{
1094	int order = get_count_order(nr);
1095
1096	switch (order) {
1097	case 0 ... 1:
1098		return &unmap_pool[0];
1099	case 2 ... 4:
1100		return &unmap_pool[1];
1101	case 5 ... 7:
1102		return &unmap_pool[2];
1103	case 8:
1104		return &unmap_pool[3];
1105	default:
1106		BUG();
1107		return NULL;
1108	}
1109}
1110
1111static void dmaengine_unmap(struct kref *kref)
1112{
1113	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1114	struct device *dev = unmap->dev;
1115	int cnt, i;
1116
1117	cnt = unmap->to_cnt;
1118	for (i = 0; i < cnt; i++)
1119		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1120			       DMA_TO_DEVICE);
1121	cnt += unmap->from_cnt;
1122	for (; i < cnt; i++)
1123		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1124			       DMA_FROM_DEVICE);
1125	cnt += unmap->bidi_cnt;
1126	for (; i < cnt; i++) {
1127		if (unmap->addr[i] == 0)
1128			continue;
1129		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1130			       DMA_BIDIRECTIONAL);
1131	}
1132	cnt = unmap->map_cnt;
1133	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1134}
1135
1136void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1137{
1138	if (unmap)
1139		kref_put(&unmap->kref, dmaengine_unmap);
1140}
1141EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1142
1143static void dmaengine_destroy_unmap_pool(void)
1144{
1145	int i;
1146
1147	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1148		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1149
1150		mempool_destroy(p->pool);
1151		p->pool = NULL;
1152		kmem_cache_destroy(p->cache);
1153		p->cache = NULL;
1154	}
1155}
1156
1157static int __init dmaengine_init_unmap_pool(void)
1158{
1159	int i;
1160
1161	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1162		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1163		size_t size;
1164
1165		size = sizeof(struct dmaengine_unmap_data) +
1166		       sizeof(dma_addr_t) * p->size;
1167
1168		p->cache = kmem_cache_create(p->name, size, 0,
1169					     SLAB_HWCACHE_ALIGN, NULL);
1170		if (!p->cache)
1171			break;
1172		p->pool = mempool_create_slab_pool(1, p->cache);
1173		if (!p->pool)
1174			break;
1175	}
1176
1177	if (i == ARRAY_SIZE(unmap_pool))
1178		return 0;
1179
1180	dmaengine_destroy_unmap_pool();
1181	return -ENOMEM;
1182}
1183
1184struct dmaengine_unmap_data *
1185dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1186{
1187	struct dmaengine_unmap_data *unmap;
1188
1189	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1190	if (!unmap)
1191		return NULL;
1192
1193	memset(unmap, 0, sizeof(*unmap));
1194	kref_init(&unmap->kref);
1195	unmap->dev = dev;
1196	unmap->map_cnt = nr;
1197
1198	return unmap;
1199}
1200EXPORT_SYMBOL(dmaengine_get_unmap_data);
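/*
 * Illustrative sketch, not part of the original file: how a user of the unmap
 * pools (async_tx-style offload code) records its mappings so that a later
 * dmaengine_unmap_put() can undo them.  Field usage follows struct
 * dmaengine_unmap_data; my_map_copy() and the src/dst pages are hypothetical,
 * and dma_mapping_error() checks are omitted for brevity.
 */
static struct dmaengine_unmap_data *
my_map_copy(struct device *dev, struct page *dst, struct page *src, size_t len)
{
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
	if (!unmap)
		return NULL;

	unmap->len = len;
	/* DMA_TO_DEVICE mappings come first (to_cnt entries) */
	unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	/* followed by DMA_FROM_DEVICE mappings (from_cnt entries) */
	unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;

	/* when the transfer completes (or fails), dmaengine_unmap_put(unmap)
	 * drops the reference and unmaps everything recorded above.
	 */
	return unmap;
}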
1201
1202void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1203	struct dma_chan *chan)
1204{
1205	tx->chan = chan;
1206	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1207	spin_lock_init(&tx->lock);
1208	#endif
1209}
1210EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1211
1212/* dma_wait_for_async_tx - spin wait for a transaction to complete
1213 * @tx: in-flight transaction to wait on
1214 */
1215enum dma_status
1216dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1217{
1218	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1219
1220	if (!tx)
1221		return DMA_COMPLETE;
1222
1223	while (tx->cookie == -EBUSY) {
1224		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1225			pr_err("%s timeout waiting for descriptor submission\n",
1226			       __func__);
1227			return DMA_ERROR;
1228		}
1229		cpu_relax();
1230	}
1231	return dma_sync_wait(tx->chan, tx->cookie);
1232}
1233EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1234
1235/* dma_run_dependencies - helper routine for dma drivers to process
1236 *	(start) dependent operations on their target channel
1237 * @tx: transaction with dependencies
1238 */
1239void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1240{
1241	struct dma_async_tx_descriptor *dep = txd_next(tx);
1242	struct dma_async_tx_descriptor *dep_next;
1243	struct dma_chan *chan;
1244
1245	if (!dep)
1246		return;
1247
1248	/* we'll submit tx->next now, so clear the link */
1249	txd_clear_next(tx);
1250	chan = dep->chan;
1251
1252	/* keep submitting up until a channel switch is detected
1253	 * in that case we will be called again as a result of
1254	 * processing the interrupt from async_tx_channel_switch
1255	 */
1256	for (; dep; dep = dep_next) {
1257		txd_lock(dep);
1258		txd_clear_parent(dep);
1259		dep_next = txd_next(dep);
1260		if (dep_next && dep_next->chan == chan)
1261			txd_clear_next(dep); /* ->next will be submitted */
1262		else
1263			dep_next = NULL; /* submit current dep and terminate */
1264		txd_unlock(dep);
1265
1266		dep->tx_submit(dep);
1267	}
1268
1269	chan->device->device_issue_pending(chan);
1270}
1271EXPORT_SYMBOL_GPL(dma_run_dependencies);
1272
1273static int __init dma_bus_init(void)
1274{
1275	int err = dmaengine_init_unmap_pool();
1276
1277	if (err)
1278		return err;
1279	return class_register(&dma_devclass);
1280}
1281arch_initcall(dma_bus_init);
1282
1283
v5.4
   1// SPDX-License-Identifier: GPL-2.0-or-later
   2/*
   3 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
   4 */
   5
   6/*
   7 * This code implements the DMA subsystem. It provides a HW-neutral interface
   8 * for other kernel code to use asynchronous memory copy capabilities,
   9 * if present, and allows different HW DMA drivers to register as providing
  10 * this capability.
  11 *
  12 * Due to the fact we are accelerating what is already a relatively fast
  13 * operation, the code goes to great lengths to avoid additional overhead,
  14 * such as locking.
  15 *
  16 * LOCKING:
  17 *
   18 * The subsystem keeps a global list of dma_device structs; it is protected by
   19 * a mutex, dma_list_mutex.
  20 *
  21 * A subsystem can get access to a channel by calling dmaengine_get() followed
  22 * by dma_find_channel(), or if it has need for an exclusive channel it can call
  23 * dma_request_channel().  Once a channel is allocated a reference is taken
  24 * against its corresponding driver to disable removal.
  25 *
  26 * Each device has a channels list, which runs unlocked but is never modified
   27 * once the device is registered; it's just set up by the driver.
  28 *
  29 * See Documentation/driver-api/dmaengine for more details
  30 */
  31
  32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  33
  34#include <linux/platform_device.h>
  35#include <linux/dma-mapping.h>
  36#include <linux/init.h>
  37#include <linux/module.h>
  38#include <linux/mm.h>
  39#include <linux/device.h>
  40#include <linux/dmaengine.h>
  41#include <linux/hardirq.h>
  42#include <linux/spinlock.h>
  43#include <linux/percpu.h>
  44#include <linux/rcupdate.h>
  45#include <linux/mutex.h>
  46#include <linux/jiffies.h>
  47#include <linux/rculist.h>
  48#include <linux/idr.h>
  49#include <linux/slab.h>
  50#include <linux/acpi.h>
  51#include <linux/acpi_dma.h>
  52#include <linux/of_dma.h>
  53#include <linux/mempool.h>
  54#include <linux/numa.h>
  55
  56static DEFINE_MUTEX(dma_list_mutex);
  57static DEFINE_IDA(dma_ida);
  58static LIST_HEAD(dma_device_list);
  59static long dmaengine_ref_count;
  60
  61/* --- sysfs implementation --- */
  62
  63/**
  64 * dev_to_dma_chan - convert a device pointer to its sysfs container object
  65 * @dev - device node
  66 *
  67 * Must be called under dma_list_mutex
  68 */
  69static struct dma_chan *dev_to_dma_chan(struct device *dev)
  70{
  71	struct dma_chan_dev *chan_dev;
  72
  73	chan_dev = container_of(dev, typeof(*chan_dev), device);
  74	return chan_dev->chan;
  75}
  76
  77static ssize_t memcpy_count_show(struct device *dev,
  78				 struct device_attribute *attr, char *buf)
  79{
  80	struct dma_chan *chan;
  81	unsigned long count = 0;
  82	int i;
  83	int err;
  84
  85	mutex_lock(&dma_list_mutex);
  86	chan = dev_to_dma_chan(dev);
  87	if (chan) {
  88		for_each_possible_cpu(i)
  89			count += per_cpu_ptr(chan->local, i)->memcpy_count;
  90		err = sprintf(buf, "%lu\n", count);
  91	} else
  92		err = -ENODEV;
  93	mutex_unlock(&dma_list_mutex);
  94
  95	return err;
  96}
  97static DEVICE_ATTR_RO(memcpy_count);
  98
  99static ssize_t bytes_transferred_show(struct device *dev,
 100				      struct device_attribute *attr, char *buf)
 101{
 102	struct dma_chan *chan;
 103	unsigned long count = 0;
 104	int i;
 105	int err;
 106
 107	mutex_lock(&dma_list_mutex);
 108	chan = dev_to_dma_chan(dev);
 109	if (chan) {
 110		for_each_possible_cpu(i)
 111			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
 112		err = sprintf(buf, "%lu\n", count);
 113	} else
 114		err = -ENODEV;
 115	mutex_unlock(&dma_list_mutex);
 116
 117	return err;
 118}
 119static DEVICE_ATTR_RO(bytes_transferred);
 120
 121static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
 122			   char *buf)
 123{
 124	struct dma_chan *chan;
 125	int err;
 126
 127	mutex_lock(&dma_list_mutex);
 128	chan = dev_to_dma_chan(dev);
 129	if (chan)
 130		err = sprintf(buf, "%d\n", chan->client_count);
 131	else
 132		err = -ENODEV;
 133	mutex_unlock(&dma_list_mutex);
 134
 135	return err;
 136}
 137static DEVICE_ATTR_RO(in_use);
 138
 139static struct attribute *dma_dev_attrs[] = {
 140	&dev_attr_memcpy_count.attr,
 141	&dev_attr_bytes_transferred.attr,
 142	&dev_attr_in_use.attr,
 143	NULL,
 144};
 145ATTRIBUTE_GROUPS(dma_dev);
 146
 147static void chan_dev_release(struct device *dev)
 148{
 149	struct dma_chan_dev *chan_dev;
 150
 151	chan_dev = container_of(dev, typeof(*chan_dev), device);
 152	if (atomic_dec_and_test(chan_dev->idr_ref)) {
 153		ida_free(&dma_ida, chan_dev->dev_id);
 154		kfree(chan_dev->idr_ref);
 155	}
 156	kfree(chan_dev);
 157}
 158
 159static struct class dma_devclass = {
 160	.name		= "dma",
 161	.dev_groups	= dma_dev_groups,
 162	.dev_release	= chan_dev_release,
 163};
 164
 165/* --- client and device registration --- */
 166
 167#define dma_device_satisfies_mask(device, mask) \
 168	__dma_device_satisfies_mask((device), &(mask))
 169static int
 170__dma_device_satisfies_mask(struct dma_device *device,
 171			    const dma_cap_mask_t *want)
 172{
 173	dma_cap_mask_t has;
 174
 175	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
 176		DMA_TX_TYPE_END);
 177	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
 178}
 179
 180static struct module *dma_chan_to_owner(struct dma_chan *chan)
 181{
 182	return chan->device->dev->driver->owner;
 183}
 184
 185/**
 186 * balance_ref_count - catch up the channel reference count
 187 * @chan - channel to balance ->client_count versus dmaengine_ref_count
 188 *
 189 * balance_ref_count must be called under dma_list_mutex
 190 */
 191static void balance_ref_count(struct dma_chan *chan)
 192{
 193	struct module *owner = dma_chan_to_owner(chan);
 194
 195	while (chan->client_count < dmaengine_ref_count) {
 196		__module_get(owner);
 197		chan->client_count++;
 198	}
 199}
 200
 201/**
 202 * dma_chan_get - try to grab a dma channel's parent driver module
 203 * @chan - channel to grab
 204 *
 205 * Must be called under dma_list_mutex
 206 */
 207static int dma_chan_get(struct dma_chan *chan)
 208{
 209	struct module *owner = dma_chan_to_owner(chan);
 210	int ret;
 211
 212	/* The channel is already in use, update client count */
 213	if (chan->client_count) {
 214		__module_get(owner);
 215		goto out;
 216	}
 217
 218	if (!try_module_get(owner))
 219		return -ENODEV;
 220
 221	/* allocate upon first client reference */
 222	if (chan->device->device_alloc_chan_resources) {
 223		ret = chan->device->device_alloc_chan_resources(chan);
 224		if (ret < 0)
 225			goto err_out;
 226	}
 227
 228	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
 229		balance_ref_count(chan);
 230
 231out:
 232	chan->client_count++;
 233	return 0;
 234
 235err_out:
 236	module_put(owner);
 237	return ret;
 238}
 239
 240/**
 241 * dma_chan_put - drop a reference to a dma channel's parent driver module
 242 * @chan - channel to release
 243 *
 244 * Must be called under dma_list_mutex
 245 */
 246static void dma_chan_put(struct dma_chan *chan)
 247{
 248	/* This channel is not in use, bail out */
 249	if (!chan->client_count)
 250		return;
 251
 252	chan->client_count--;
 253	module_put(dma_chan_to_owner(chan));
 254
 255	/* This channel is not in use anymore, free it */
 256	if (!chan->client_count && chan->device->device_free_chan_resources) {
 257		/* Make sure all operations have completed */
 258		dmaengine_synchronize(chan);
 259		chan->device->device_free_chan_resources(chan);
 260	}
 261
 262	/* If the channel is used via a DMA request router, free the mapping */
 263	if (chan->router && chan->router->route_free) {
 264		chan->router->route_free(chan->router->dev, chan->route_data);
 265		chan->router = NULL;
 266		chan->route_data = NULL;
 267	}
 268}
 269
 270enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
 271{
 272	enum dma_status status;
 273	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
 274
 275	dma_async_issue_pending(chan);
 276	do {
 277		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
 278		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
 279			dev_err(chan->device->dev, "%s: timeout!\n", __func__);
 280			return DMA_ERROR;
 281		}
 282		if (status != DMA_IN_PROGRESS)
 283			break;
 284		cpu_relax();
 285	} while (1);
 286
 287	return status;
 288}
 289EXPORT_SYMBOL(dma_sync_wait);
 290
 291/**
 292 * dma_cap_mask_all - enable iteration over all operation types
 293 */
 294static dma_cap_mask_t dma_cap_mask_all;
 295
 296/**
 297 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 298 * @chan - associated channel for this entry
 299 */
 300struct dma_chan_tbl_ent {
 301	struct dma_chan *chan;
 302};
 303
 304/**
 305 * channel_table - percpu lookup table for memory-to-memory offload providers
 306 */
 307static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
 308
 309static int __init dma_channel_table_init(void)
 310{
 311	enum dma_transaction_type cap;
 312	int err = 0;
 313
 314	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
 315
 316	/* 'interrupt', 'private', and 'slave' are channel capabilities,
 317	 * but are not associated with an operation so they do not need
 318	 * an entry in the channel_table
 319	 */
 320	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
 321	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
 322	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
 323
 324	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
 325		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
 326		if (!channel_table[cap]) {
 327			err = -ENOMEM;
 328			break;
 329		}
 330	}
 331
 332	if (err) {
 333		pr_err("initialization failure\n");
 334		for_each_dma_cap_mask(cap, dma_cap_mask_all)
 335			free_percpu(channel_table[cap]);
 336	}
 337
 338	return err;
 339}
 340arch_initcall(dma_channel_table_init);
 341
 342/**
 343 * dma_find_channel - find a channel to carry out the operation
 344 * @tx_type: transaction type
 345 */
 346struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
 347{
 348	return this_cpu_read(channel_table[tx_type]->chan);
 349}
 350EXPORT_SYMBOL(dma_find_channel);
 351
 352/**
 353 * dma_issue_pending_all - flush all pending operations across all channels
 354 */
 355void dma_issue_pending_all(void)
 356{
 357	struct dma_device *device;
 358	struct dma_chan *chan;
 359
 360	rcu_read_lock();
 361	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
 362		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 363			continue;
 364		list_for_each_entry(chan, &device->channels, device_node)
 365			if (chan->client_count)
 366				device->device_issue_pending(chan);
 367	}
 368	rcu_read_unlock();
 369}
 370EXPORT_SYMBOL(dma_issue_pending_all);
 371
 372/**
 373 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 374 */
 375static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
 376{
 377	int node = dev_to_node(chan->device->dev);
 378	return node == NUMA_NO_NODE ||
 379		cpumask_test_cpu(cpu, cpumask_of_node(node));
 380}
 381
 382/**
 383 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 384 * @cap: capability to match
 385 * @cpu: cpu index which the channel should be close to
 386 *
 387 * If some channels are close to the given cpu, the one with the lowest
 388 * reference count is returned. Otherwise, cpu is ignored and only the
 389 * reference count is taken into account.
 390 * Must be called under dma_list_mutex.
 391 */
 392static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
 393{
 394	struct dma_device *device;
 395	struct dma_chan *chan;
 396	struct dma_chan *min = NULL;
 397	struct dma_chan *localmin = NULL;
 398
 399	list_for_each_entry(device, &dma_device_list, global_node) {
 400		if (!dma_has_cap(cap, device->cap_mask) ||
 401		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
 402			continue;
 403		list_for_each_entry(chan, &device->channels, device_node) {
 404			if (!chan->client_count)
 405				continue;
 406			if (!min || chan->table_count < min->table_count)
 407				min = chan;
 408
 409			if (dma_chan_is_local(chan, cpu))
 410				if (!localmin ||
 411				    chan->table_count < localmin->table_count)
 412					localmin = chan;
 413		}
 414	}
 415
 416	chan = localmin ? localmin : min;
 417
 418	if (chan)
 419		chan->table_count++;
 420
 421	return chan;
 422}
 423
 424/**
 425 * dma_channel_rebalance - redistribute the available channels
 426 *
 427 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 428 * operation type) in the SMP case,  and operation isolation (avoid
 429 * multi-tasking channels) in the non-SMP case.  Must be called under
 430 * dma_list_mutex.
 431 */
 432static void dma_channel_rebalance(void)
 433{
 434	struct dma_chan *chan;
 435	struct dma_device *device;
 436	int cpu;
 437	int cap;
 438
 439	/* undo the last distribution */
 440	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 441		for_each_possible_cpu(cpu)
 442			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
 443
 444	list_for_each_entry(device, &dma_device_list, global_node) {
 445		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 446			continue;
 447		list_for_each_entry(chan, &device->channels, device_node)
 448			chan->table_count = 0;
 449	}
 450
 451	/* don't populate the channel_table if no clients are available */
 452	if (!dmaengine_ref_count)
 453		return;
 454
 455	/* redistribute available channels */
 456	for_each_dma_cap_mask(cap, dma_cap_mask_all)
 457		for_each_online_cpu(cpu) {
 458			chan = min_chan(cap, cpu);
 459			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
 460		}
 461}
 462
 463int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
 464{
 465	struct dma_device *device;
 466
 467	if (!chan || !caps)
 468		return -EINVAL;
 469
 470	device = chan->device;
 471
 472	/* check if the channel supports slave transactions */
 473	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
 474	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
 475		return -ENXIO;
 476
 477	/*
 478	 * Check whether it reports it uses the generic slave
 479	 * capabilities, if not, that means it doesn't support any
 480	 * kind of slave capabilities reporting.
 481	 */
 482	if (!device->directions)
 483		return -ENXIO;
 484
 485	caps->src_addr_widths = device->src_addr_widths;
 486	caps->dst_addr_widths = device->dst_addr_widths;
 487	caps->directions = device->directions;
 488	caps->max_burst = device->max_burst;
 489	caps->residue_granularity = device->residue_granularity;
 490	caps->descriptor_reuse = device->descriptor_reuse;
 491	caps->cmd_pause = !!device->device_pause;
 492	caps->cmd_resume = !!device->device_resume;
 493	caps->cmd_terminate = !!device->device_terminate_all;
 494
 495	return 0;
 496}
 497EXPORT_SYMBOL_GPL(dma_get_slave_caps);
 498
 499static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
 500					  struct dma_device *dev,
 501					  dma_filter_fn fn, void *fn_param)
 502{
 503	struct dma_chan *chan;
 504
 505	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
 506		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
 507		return NULL;
 508	}
 509	/* devices with multiple channels need special handling as we need to
 510	 * ensure that all channels are either private or public.
 511	 */
 512	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
 513		list_for_each_entry(chan, &dev->channels, device_node) {
 514			/* some channels are already publicly allocated */
 515			if (chan->client_count)
 516				return NULL;
 517		}
 518
 519	list_for_each_entry(chan, &dev->channels, device_node) {
 520		if (chan->client_count) {
 521			dev_dbg(dev->dev, "%s: %s busy\n",
 522				 __func__, dma_chan_name(chan));
 523			continue;
 524		}
 525		if (fn && !fn(chan, fn_param)) {
 526			dev_dbg(dev->dev, "%s: %s filter said false\n",
 527				 __func__, dma_chan_name(chan));
 528			continue;
 529		}
 530		return chan;
 531	}
 532
 533	return NULL;
 534}
 535
 536static struct dma_chan *find_candidate(struct dma_device *device,
 537				       const dma_cap_mask_t *mask,
 538				       dma_filter_fn fn, void *fn_param)
 539{
 540	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
 541	int err;
 542
 543	if (chan) {
 544		/* Found a suitable channel, try to grab, prep, and return it.
 545		 * We first set DMA_PRIVATE to disable balance_ref_count as this
 546		 * channel will not be published in the general-purpose
 547		 * allocator
 548		 */
 549		dma_cap_set(DMA_PRIVATE, device->cap_mask);
 550		device->privatecnt++;
 551		err = dma_chan_get(chan);
 552
 553		if (err) {
 554			if (err == -ENODEV) {
 555				dev_dbg(device->dev, "%s: %s module removed\n",
 556					__func__, dma_chan_name(chan));
 557				list_del_rcu(&device->global_node);
 558			} else
 559				dev_dbg(device->dev,
 560					"%s: failed to get %s: (%d)\n",
 561					 __func__, dma_chan_name(chan), err);
 562
 563			if (--device->privatecnt == 0)
 564				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 565
 566			chan = ERR_PTR(err);
 567		}
 568	}
 569
 570	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 571}
 572
 573/**
 574 * dma_get_slave_channel - try to get specific channel exclusively
 575 * @chan: target channel
 576 */
 577struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
 578{
 579	int err = -EBUSY;
 580
 581	/* lock against __dma_request_channel */
 582	mutex_lock(&dma_list_mutex);
 583
 584	if (chan->client_count == 0) {
 585		struct dma_device *device = chan->device;
 586
 587		dma_cap_set(DMA_PRIVATE, device->cap_mask);
 588		device->privatecnt++;
 589		err = dma_chan_get(chan);
 590		if (err) {
 591			dev_dbg(chan->device->dev,
 592				"%s: failed to get %s: (%d)\n",
 593				__func__, dma_chan_name(chan), err);
 594			chan = NULL;
 595			if (--device->privatecnt == 0)
 596				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
 597		}
 598	} else
 599		chan = NULL;
 600
 601	mutex_unlock(&dma_list_mutex);
 602
 603
 604	return chan;
 605}
 606EXPORT_SYMBOL_GPL(dma_get_slave_channel);
 607
 608struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
 609{
 610	dma_cap_mask_t mask;
 611	struct dma_chan *chan;
 612
 613	dma_cap_zero(mask);
 614	dma_cap_set(DMA_SLAVE, mask);
 615
 616	/* lock against __dma_request_channel */
 617	mutex_lock(&dma_list_mutex);
 618
 619	chan = find_candidate(device, &mask, NULL, NULL);
 620
 621	mutex_unlock(&dma_list_mutex);
 622
 623	return IS_ERR(chan) ? NULL : chan;
 624}
 625EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
 626
 627/**
 628 * __dma_request_channel - try to allocate an exclusive channel
 629 * @mask: capabilities that the channel must satisfy
  630 * @fn: optional callback used to filter the available channels
 631 * @fn_param: opaque parameter to pass to dma_filter_fn
 632 * @np: device node to look for DMA channels
 633 *
 634 * Returns pointer to appropriate DMA channel on success or NULL.
 635 */
 636struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
 637				       dma_filter_fn fn, void *fn_param,
 638				       struct device_node *np)
 639{
 640	struct dma_device *device, *_d;
 641	struct dma_chan *chan = NULL;
 642
 643	/* Find a channel */
 644	mutex_lock(&dma_list_mutex);
 645	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 646		/* Finds a DMA controller with matching device node */
 647		if (np && device->dev->of_node && np != device->dev->of_node)
 648			continue;
 649
 650		chan = find_candidate(device, mask, fn, fn_param);
 651		if (!IS_ERR(chan))
 652			break;
 653
 654		chan = NULL;
 655	}
 656	mutex_unlock(&dma_list_mutex);
 657
 658	pr_debug("%s: %s (%s)\n",
 659		 __func__,
 660		 chan ? "success" : "fail",
 661		 chan ? dma_chan_name(chan) : NULL);
 662
 663	return chan;
 664}
 665EXPORT_SYMBOL_GPL(__dma_request_channel);
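
/*
 * Illustrative sketch, not part of this file: a client that must pick a
 * channel by its own criteria passes a filter callback along with the
 * capability mask.  foo_filter() and the chan_id test are hypothetical.
 *
 *	static bool foo_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->chan_id == *(int *)param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	int want = 0;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = __dma_request_channel(&mask, foo_filter, &want, NULL);
 *	if (!chan)
 *		return -ENODEV;
 */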
 666
 667static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
 668						    const char *name,
 669						    struct device *dev)
 670{
 671	int i;
 672
 673	if (!device->filter.mapcnt)
 674		return NULL;
 675
 676	for (i = 0; i < device->filter.mapcnt; i++) {
 677		const struct dma_slave_map *map = &device->filter.map[i];
 678
 679		if (!strcmp(map->devname, dev_name(dev)) &&
 680		    !strcmp(map->slave, name))
 681			return map;
 682	}
 683
 684	return NULL;
 685}
 686
 687/**
 688 * dma_request_chan - try to allocate an exclusive slave channel
 689 * @dev:	pointer to client device structure
 690 * @name:	slave channel name
 691 *
 692 * Returns pointer to appropriate DMA channel on success or an error pointer.
 693 */
 694struct dma_chan *dma_request_chan(struct device *dev, const char *name)
 695{
 696	struct dma_device *d, *_d;
 697	struct dma_chan *chan = NULL;
 698
  699	/* If a device-tree node is present, get the slave info from it */
 700	if (dev->of_node)
 701		chan = of_dma_request_slave_channel(dev->of_node, name);
 702
  703	/* If the device was enumerated by ACPI, get the slave info from it */
 704	if (has_acpi_companion(dev) && !chan)
 705		chan = acpi_dma_request_slave_chan_by_name(dev, name);
 706
 707	if (chan) {
 708		/* Valid channel found or requester needs to be deferred */
 709		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
 710			return chan;
 711	}
 712
 713	/* Try to find the channel via the DMA filter map(s) */
 714	mutex_lock(&dma_list_mutex);
 715	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
 716		dma_cap_mask_t mask;
 717		const struct dma_slave_map *map = dma_filter_match(d, name, dev);
 718
 719		if (!map)
 720			continue;
 721
 722		dma_cap_zero(mask);
 723		dma_cap_set(DMA_SLAVE, mask);
 724
 725		chan = find_candidate(d, &mask, d->filter.fn, map->param);
 726		if (!IS_ERR(chan))
 727			break;
 728	}
 729	mutex_unlock(&dma_list_mutex);
 730
 731	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
 732}
 733EXPORT_SYMBOL_GPL(dma_request_chan);
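
/*
 * Illustrative sketch, not part of this file: typical slave client usage.
 * "rx" must match a DT dma-names entry, an ACPI FixedDMA resource, or a
 * dma_slave_map entry.  Note the return value may be ERR_PTR(-EPROBE_DEFER);
 * error handling is abbreviated.
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	ret = dmaengine_slave_config(chan, &cfg);
 *	...prepare descriptors, submit, issue pending...
 *	dma_release_channel(chan);
 */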
 734
 735/**
 736 * dma_request_slave_channel - try to allocate an exclusive slave channel
 737 * @dev:	pointer to client device structure
 738 * @name:	slave channel name
 739 *
 740 * Returns pointer to appropriate DMA channel on success or NULL.
 741 */
 742struct dma_chan *dma_request_slave_channel(struct device *dev,
 743					   const char *name)
 744{
 745	struct dma_chan *ch = dma_request_chan(dev, name);
 746	if (IS_ERR(ch))
 747		return NULL;
 748
 749	return ch;
 750}
 751EXPORT_SYMBOL_GPL(dma_request_slave_channel);
 752
 753/**
 754 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 755 * @mask: capabilities that the channel must satisfy
 756 *
 757 * Returns pointer to appropriate DMA channel on success or an error pointer.
 758 */
 759struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
 760{
 761	struct dma_chan *chan;
 762
 763	if (!mask)
 764		return ERR_PTR(-ENODEV);
 765
 766	chan = __dma_request_channel(mask, NULL, NULL, NULL);
 767	if (!chan) {
 768		mutex_lock(&dma_list_mutex);
 769		if (list_empty(&dma_device_list))
 770			chan = ERR_PTR(-EPROBE_DEFER);
 771		else
 772			chan = ERR_PTR(-ENODEV);
 773		mutex_unlock(&dma_list_mutex);
 774	}
 775
 776	return chan;
 777}
 778EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
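
/*
 * Illustrative sketch, not part of this file: grabbing any channel that can
 * do memcpy for a driver-private offload path.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */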
 779
 780void dma_release_channel(struct dma_chan *chan)
 781{
 782	mutex_lock(&dma_list_mutex);
 783	WARN_ONCE(chan->client_count != 1,
 784		  "chan reference count %d != 1\n", chan->client_count);
 785	dma_chan_put(chan);
 786	/* drop PRIVATE cap enabled by __dma_request_channel() */
 787	if (--chan->device->privatecnt == 0)
 788		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
 789	mutex_unlock(&dma_list_mutex);
 790}
 791EXPORT_SYMBOL_GPL(dma_release_channel);
 792
 793/**
 794 * dmaengine_get - register interest in dma_channels
 795 */
 796void dmaengine_get(void)
 797{
 798	struct dma_device *device, *_d;
 799	struct dma_chan *chan;
 800	int err;
 801
 802	mutex_lock(&dma_list_mutex);
 803	dmaengine_ref_count++;
 804
 805	/* try to grab channels */
 806	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
 807		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 808			continue;
 809		list_for_each_entry(chan, &device->channels, device_node) {
 810			err = dma_chan_get(chan);
 811			if (err == -ENODEV) {
 812				/* module removed before we could use it */
 813				list_del_rcu(&device->global_node);
 814				break;
 815			} else if (err)
 816				dev_dbg(chan->device->dev,
 817					"%s: failed to get %s: (%d)\n",
 818					__func__, dma_chan_name(chan), err);
 819		}
 820	}
 821
 822	/* if this is the first reference and there were channels
 823	 * waiting we need to rebalance to get those channels
 824	 * incorporated into the channel table
 825	 */
 826	if (dmaengine_ref_count == 1)
 827		dma_channel_rebalance();
 828	mutex_unlock(&dma_list_mutex);
 829}
 830EXPORT_SYMBOL(dmaengine_get);
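
/*
 * Illustrative sketch, not part of this file: opportunistic (shared) users
 * bracket their use of the channel table with dmaengine_get()/dmaengine_put()
 * and pick a channel per operation with dma_find_channel().
 *
 *	dmaengine_get();
 *	...
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		...issue copies on chan...
 *	...
 *	dmaengine_put();
 */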
 831
 832/**
 833 * dmaengine_put - let dma drivers be removed when ref_count == 0
 834 */
 835void dmaengine_put(void)
 836{
 837	struct dma_device *device;
 838	struct dma_chan *chan;
 839
 840	mutex_lock(&dma_list_mutex);
 841	dmaengine_ref_count--;
 842	BUG_ON(dmaengine_ref_count < 0);
 843	/* drop channel references */
 844	list_for_each_entry(device, &dma_device_list, global_node) {
 845		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
 846			continue;
 847		list_for_each_entry(chan, &device->channels, device_node)
 848			dma_chan_put(chan);
 849	}
 850	mutex_unlock(&dma_list_mutex);
 851}
 852EXPORT_SYMBOL(dmaengine_put);
 853
 854static bool device_has_all_tx_types(struct dma_device *device)
 855{
 856	/* A device that satisfies this test has channels that will never cause
 857	 * an async_tx channel switch event as all possible operation types can
 858	 * be handled.
 859	 */
 860	#ifdef CONFIG_ASYNC_TX_DMA
 861	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
 862		return false;
 863	#endif
 864
 865	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
 866	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
 867		return false;
 868	#endif
 869
 870	#if IS_ENABLED(CONFIG_ASYNC_XOR)
 871	if (!dma_has_cap(DMA_XOR, device->cap_mask))
 872		return false;
 873
 874	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
 875	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
 876		return false;
 877	#endif
 878	#endif
 879
 880	#if IS_ENABLED(CONFIG_ASYNC_PQ)
 881	if (!dma_has_cap(DMA_PQ, device->cap_mask))
 882		return false;
 883
 884	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
 885	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
 886		return false;
 887	#endif
 888	#endif
 889
 890	return true;
 891}
 892
 893static int get_dma_id(struct dma_device *device)
 894{
 895	int rc = ida_alloc(&dma_ida, GFP_KERNEL);
 896
 897	if (rc < 0)
 898		return rc;
 899	device->dev_id = rc;
 900	return 0;
 901}
 902
 903/**
  904 * dma_async_device_register - register a DMA device and its channels
  905 * @device: pointer to &struct dma_device
 906 */
 907int dma_async_device_register(struct dma_device *device)
 908{
 909	int chancnt = 0, rc;
  910	struct dma_chan *chan;
 911	atomic_t *idr_ref;
 912
 913	if (!device)
 914		return -ENODEV;
 915
 916	/* validate device routines */
 917	if (!device->dev) {
 918		pr_err("DMAdevice must have dev\n");
 919		return -EIO;
 920	}
 921
 922	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
 923		dev_err(device->dev,
 924			"Device claims capability %s, but op is not defined\n",
 925			"DMA_MEMCPY");
 926		return -EIO;
 927	}
 928
 929	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
 930		dev_err(device->dev,
 931			"Device claims capability %s, but op is not defined\n",
 932			"DMA_XOR");
 933		return -EIO;
 934	}
 935
 936	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
 937		dev_err(device->dev,
 938			"Device claims capability %s, but op is not defined\n",
 939			"DMA_XOR_VAL");
 940		return -EIO;
 941	}
 942
 943	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
 944		dev_err(device->dev,
 945			"Device claims capability %s, but op is not defined\n",
 946			"DMA_PQ");
 947		return -EIO;
 948	}
 949
 950	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
 951		dev_err(device->dev,
 952			"Device claims capability %s, but op is not defined\n",
 953			"DMA_PQ_VAL");
 954		return -EIO;
 955	}
 956
 957	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
 958		dev_err(device->dev,
 959			"Device claims capability %s, but op is not defined\n",
 960			"DMA_MEMSET");
 961		return -EIO;
 962	}
 963
 964	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
 965		dev_err(device->dev,
 966			"Device claims capability %s, but op is not defined\n",
 967			"DMA_INTERRUPT");
 968		return -EIO;
 969	}
 970
 971	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
 972		dev_err(device->dev,
 973			"Device claims capability %s, but op is not defined\n",
 974			"DMA_CYCLIC");
 975		return -EIO;
 976	}
 977
 978	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
 979		dev_err(device->dev,
 980			"Device claims capability %s, but op is not defined\n",
 981			"DMA_INTERLEAVE");
 982		return -EIO;
 983	}
 984
 985
 986	if (!device->device_tx_status) {
 987		dev_err(device->dev, "Device tx_status is not defined\n");
 988		return -EIO;
 989	}
 990
 991
 992	if (!device->device_issue_pending) {
 993		dev_err(device->dev, "Device issue_pending is not defined\n");
 994		return -EIO;
 995	}
 996
 997	/* note: this only matters in the
 998	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
 999	 */
1000	if (device_has_all_tx_types(device))
1001		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1002
1003	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
1004	if (!idr_ref)
1005		return -ENOMEM;
1006	rc = get_dma_id(device);
1007	if (rc != 0) {
1008		kfree(idr_ref);
1009		return rc;
1010	}
1011
1012	atomic_set(idr_ref, 0);
1013
1014	/* represent channels in sysfs. Probably want devs too */
1015	list_for_each_entry(chan, &device->channels, device_node) {
1016		rc = -ENOMEM;
1017		chan->local = alloc_percpu(typeof(*chan->local));
1018		if (chan->local == NULL)
1019			goto err_out;
1020		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
1021		if (chan->dev == NULL) {
1022			free_percpu(chan->local);
1023			chan->local = NULL;
1024			goto err_out;
1025		}
1026
1027		chan->chan_id = chancnt++;
1028		chan->dev->device.class = &dma_devclass;
1029		chan->dev->device.parent = device->dev;
1030		chan->dev->chan = chan;
1031		chan->dev->idr_ref = idr_ref;
1032		chan->dev->dev_id = device->dev_id;
1033		atomic_inc(idr_ref);
1034		dev_set_name(&chan->dev->device, "dma%dchan%d",
1035			     device->dev_id, chan->chan_id);
1036
1037		rc = device_register(&chan->dev->device);
1038		if (rc) {
1039			free_percpu(chan->local);
1040			chan->local = NULL;
1041			kfree(chan->dev);
1042			atomic_dec(idr_ref);
1043			goto err_out;
1044		}
1045		chan->client_count = 0;
1046	}
1047
1048	if (!chancnt) {
1049		dev_err(device->dev, "%s: device has no channels!\n", __func__);
1050		rc = -ENODEV;
1051		goto err_out;
1052	}
1053
1054	device->chancnt = chancnt;
1055
1056	mutex_lock(&dma_list_mutex);
1057	/* take references on public channels */
1058	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1059		list_for_each_entry(chan, &device->channels, device_node) {
1060			/* if clients are already waiting for channels we need
1061			 * to take references on their behalf
1062			 */
1063			if (dma_chan_get(chan) == -ENODEV) {
1064				/* note we can only get here for the first
1065				 * channel as the remaining channels are
1066				 * guaranteed to get a reference
1067				 */
1068				rc = -ENODEV;
1069				mutex_unlock(&dma_list_mutex);
1070				goto err_out;
1071			}
1072		}
1073	list_add_tail_rcu(&device->global_node, &dma_device_list);
1074	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1075		device->privatecnt++;	/* Always private */
1076	dma_channel_rebalance();
1077	mutex_unlock(&dma_list_mutex);
1078
1079	return 0;
1080
1081err_out:
1082	/* if we never registered a channel just release the idr */
1083	if (atomic_read(idr_ref) == 0) {
1084		ida_free(&dma_ida, device->dev_id);
1085		kfree(idr_ref);
1086		return rc;
1087	}
1088
1089	list_for_each_entry(chan, &device->channels, device_node) {
1090		if (chan->local == NULL)
1091			continue;
1092		mutex_lock(&dma_list_mutex);
1093		chan->dev->chan = NULL;
1094		mutex_unlock(&dma_list_mutex);
1095		device_unregister(&chan->dev->device);
1096		free_percpu(chan->local);
1097	}
1098	return rc;
1099}
1100EXPORT_SYMBOL(dma_async_device_register);
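
/*
 * Illustrative sketch, not part of this file: the provider-side skeleton a
 * controller driver's probe() builds before registering.  The foo_* names
 * are hypothetical and per-channel/descriptor setup is omitted.
 *
 *	dma_cap_set(DMA_SLAVE, fd->ddev.cap_mask);
 *	fd->ddev.dev = &pdev->dev;
 *	fd->ddev.device_prep_slave_sg = foo_prep_slave_sg;
 *	fd->ddev.device_tx_status = foo_tx_status;
 *	fd->ddev.device_issue_pending = foo_issue_pending;
 *	INIT_LIST_HEAD(&fd->ddev.channels);
 *	fd->chan.device = &fd->ddev;
 *	list_add_tail(&fd->chan.device_node, &fd->ddev.channels);
 *
 *	ret = dma_async_device_register(&fd->ddev);
 *	if (ret)
 *		return ret;
 */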
1101
1102/**
1103 * dma_async_device_unregister - unregister a DMA device
1104 * @device: &dma_device
1105 *
 1106 * This routine is called by DMA driver exit routines; dmaengine holds module
 1107 * references to prevent it from being called while channels are in use.
1108 */
1109void dma_async_device_unregister(struct dma_device *device)
1110{
1111	struct dma_chan *chan;
1112
1113	mutex_lock(&dma_list_mutex);
1114	list_del_rcu(&device->global_node);
1115	dma_channel_rebalance();
1116	mutex_unlock(&dma_list_mutex);
1117
1118	list_for_each_entry(chan, &device->channels, device_node) {
1119		WARN_ONCE(chan->client_count,
1120			  "%s called while %d clients hold a reference\n",
1121			  __func__, chan->client_count);
1122		mutex_lock(&dma_list_mutex);
1123		chan->dev->chan = NULL;
1124		mutex_unlock(&dma_list_mutex);
1125		device_unregister(&chan->dev->device);
1126		free_percpu(chan->local);
1127	}
1128}
1129EXPORT_SYMBOL(dma_async_device_unregister);
1130
1131static void dmam_device_release(struct device *dev, void *res)
1132{
1133	struct dma_device *device;
1134
1135	device = *(struct dma_device **)res;
1136	dma_async_device_unregister(device);
1137}
1138
1139/**
 1140 * dmaenginem_async_device_register - register a DMA device and its channels (managed)
 1141 * @device: pointer to &struct dma_device
1142 *
1143 * The operation is managed and will be undone on driver detach.
1144 */
1145int dmaenginem_async_device_register(struct dma_device *device)
1146{
1147	void *p;
1148	int ret;
1149
1150	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
1151	if (!p)
1152		return -ENOMEM;
1153
1154	ret = dma_async_device_register(device);
1155	if (!ret) {
1156		*(struct dma_device **)p = device;
1157		devres_add(device->dev, p);
1158	} else {
1159		devres_free(p);
1160	}
1161
1162	return ret;
1163}
1164EXPORT_SYMBOL(dmaenginem_async_device_register);
1165
1166struct dmaengine_unmap_pool {
1167	struct kmem_cache *cache;
1168	const char *name;
1169	mempool_t *pool;
1170	size_t size;
1171};
1172
1173#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1174static struct dmaengine_unmap_pool unmap_pool[] = {
1175	__UNMAP_POOL(2),
1176	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1177	__UNMAP_POOL(16),
1178	__UNMAP_POOL(128),
1179	__UNMAP_POOL(256),
1180	#endif
1181};
1182
1183static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1184{
1185	int order = get_count_order(nr);
1186
1187	switch (order) {
1188	case 0 ... 1:
1189		return &unmap_pool[0];
1190#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1191	case 2 ... 4:
1192		return &unmap_pool[1];
1193	case 5 ... 7:
1194		return &unmap_pool[2];
1195	case 8:
1196		return &unmap_pool[3];
1197#endif
1198	default:
1199		BUG();
1200		return NULL;
1201	}
1202}
1203
1204static void dmaengine_unmap(struct kref *kref)
1205{
1206	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1207	struct device *dev = unmap->dev;
1208	int cnt, i;
1209
1210	cnt = unmap->to_cnt;
1211	for (i = 0; i < cnt; i++)
1212		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1213			       DMA_TO_DEVICE);
1214	cnt += unmap->from_cnt;
1215	for (; i < cnt; i++)
1216		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1217			       DMA_FROM_DEVICE);
1218	cnt += unmap->bidi_cnt;
1219	for (; i < cnt; i++) {
1220		if (unmap->addr[i] == 0)
1221			continue;
1222		dma_unmap_page(dev, unmap->addr[i], unmap->len,
1223			       DMA_BIDIRECTIONAL);
1224	}
1225	cnt = unmap->map_cnt;
1226	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1227}
1228
1229void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1230{
1231	if (unmap)
1232		kref_put(&unmap->kref, dmaengine_unmap);
1233}
1234EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1235
1236static void dmaengine_destroy_unmap_pool(void)
1237{
1238	int i;
1239
1240	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1241		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1242
1243		mempool_destroy(p->pool);
1244		p->pool = NULL;
1245		kmem_cache_destroy(p->cache);
1246		p->cache = NULL;
1247	}
1248}
1249
1250static int __init dmaengine_init_unmap_pool(void)
1251{
1252	int i;
1253
1254	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1255		struct dmaengine_unmap_pool *p = &unmap_pool[i];
1256		size_t size;
1257
1258		size = sizeof(struct dmaengine_unmap_data) +
1259		       sizeof(dma_addr_t) * p->size;
1260
1261		p->cache = kmem_cache_create(p->name, size, 0,
1262					     SLAB_HWCACHE_ALIGN, NULL);
1263		if (!p->cache)
1264			break;
1265		p->pool = mempool_create_slab_pool(1, p->cache);
1266		if (!p->pool)
1267			break;
1268	}
1269
1270	if (i == ARRAY_SIZE(unmap_pool))
1271		return 0;
1272
1273	dmaengine_destroy_unmap_pool();
1274	return -ENOMEM;
1275}
1276
1277struct dmaengine_unmap_data *
1278dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1279{
1280	struct dmaengine_unmap_data *unmap;
1281
1282	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1283	if (!unmap)
1284		return NULL;
1285
1286	memset(unmap, 0, sizeof(*unmap));
1287	kref_init(&unmap->kref);
1288	unmap->dev = dev;
1289	unmap->map_cnt = nr;
1290
1291	return unmap;
1292}
1293EXPORT_SYMBOL(dmaengine_get_unmap_data);
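
/*
 * Illustrative sketch, not part of this file: a provider mapping one source
 * and one destination page records them in the unmap descriptor, then drops
 * its reference once the descriptor owns it.  dma_mapping_error() checks are
 * omitted for brevity.
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return NULL;
 *	unmap->len = len;
 *	unmap->to_cnt = 1;
 *	unmap->addr[0] = dma_map_page(dev, src, src_off, len, DMA_TO_DEVICE);
 *	unmap->from_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst, dst_off, len, DMA_FROM_DEVICE);
 *	...attach unmap to the tx descriptor and submit...
 *	dmaengine_unmap_put(unmap);
 */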
1294
1295void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1296	struct dma_chan *chan)
1297{
1298	tx->chan = chan;
1299	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1300	spin_lock_init(&tx->lock);
1301	#endif
1302}
1303EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1304
1305/* dma_wait_for_async_tx - spin wait for a transaction to complete
1306 * @tx: in-flight transaction to wait on
1307 */
1308enum dma_status
1309dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1310{
1311	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1312
1313	if (!tx)
1314		return DMA_COMPLETE;
1315
1316	while (tx->cookie == -EBUSY) {
1317		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1318			dev_err(tx->chan->device->dev,
1319				"%s timeout waiting for descriptor submission\n",
1320				__func__);
1321			return DMA_ERROR;
1322		}
1323		cpu_relax();
1324	}
1325	return dma_sync_wait(tx->chan, tx->cookie);
1326}
1327EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1328
1329/* dma_run_dependencies - helper routine for dma drivers to process
1330 *	(start) dependent operations on their target channel
1331 * @tx: transaction with dependencies
1332 */
1333void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1334{
1335	struct dma_async_tx_descriptor *dep = txd_next(tx);
1336	struct dma_async_tx_descriptor *dep_next;
1337	struct dma_chan *chan;
1338
1339	if (!dep)
1340		return;
1341
1342	/* we'll submit tx->next now, so clear the link */
1343	txd_clear_next(tx);
1344	chan = dep->chan;
1345
1346	/* keep submitting up until a channel switch is detected
1347	 * in that case we will be called again as a result of
1348	 * processing the interrupt from async_tx_channel_switch
1349	 */
1350	for (; dep; dep = dep_next) {
1351		txd_lock(dep);
1352		txd_clear_parent(dep);
1353		dep_next = txd_next(dep);
1354		if (dep_next && dep_next->chan == chan)
1355			txd_clear_next(dep); /* ->next will be submitted */
1356		else
1357			dep_next = NULL; /* submit current dep and terminate */
1358		txd_unlock(dep);
1359
1360		dep->tx_submit(dep);
1361	}
1362
1363	chan->device->device_issue_pending(chan);
1364}
1365EXPORT_SYMBOL_GPL(dma_run_dependencies);
1366
1367static int __init dma_bus_init(void)
1368{
1369	int err = dmaengine_init_unmap_pool();
1370
1371	if (err)
1372		return err;
1373	return class_register(&dma_devclass);
1374}
1375arch_initcall(dma_bus_init);
1376
1377