1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
4 */
5
6/*
7 * This code implements the DMA subsystem. It provides a HW-neutral interface
8 * for other kernel code to use asynchronous memory copy capabilities,
9 * if present, and allows different HW DMA drivers to register as providing
10 * this capability.
11 *
 12 * Because we are accelerating what is already a relatively fast
13 * operation, the code goes to great lengths to avoid additional overhead,
14 * such as locking.
15 *
16 * LOCKING:
17 *
 18 * The subsystem keeps a global list of dma_device structs; it is protected
 19 * by a mutex, dma_list_mutex.
20 *
21 * A subsystem can get access to a channel by calling dmaengine_get() followed
22 * by dma_find_channel(), or if it has need for an exclusive channel it can call
 23 * dma_request_channel(). Once a channel is allocated, a reference is taken
 24 * against its corresponding driver to prevent its removal.
25 *
26 * Each device has a channels list, which runs unlocked but is never modified
 27 * once the device is registered; it is simply set up by the driver.
28 *
29 * See Documentation/driver-api/dmaengine for more details
30 */
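
/*
 * Illustrative sketch (not part of the original file): a client of the
 * opportunistic mem-to-mem interface described above might look roughly
 * like this. Error handling is abbreviated, and dst/src are assumed to be
 * already DMA-mapped addresses (dma_addr_t) of length len:
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
 *		cookie = dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *		status = dma_sync_wait(chan, cookie);
 *	}
 *	dmaengine_put();
 *
 * A client that needs a channel exclusively would instead use
 * dma_request_channel()/dma_request_chan() and dma_release_channel().
 */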
31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34#include <linux/platform_device.h>
35#include <linux/dma-mapping.h>
36#include <linux/init.h>
37#include <linux/module.h>
38#include <linux/mm.h>
39#include <linux/device.h>
40#include <linux/dmaengine.h>
41#include <linux/hardirq.h>
42#include <linux/spinlock.h>
43#include <linux/percpu.h>
44#include <linux/rcupdate.h>
45#include <linux/mutex.h>
46#include <linux/jiffies.h>
47#include <linux/rculist.h>
48#include <linux/idr.h>
49#include <linux/slab.h>
50#include <linux/acpi.h>
51#include <linux/acpi_dma.h>
52#include <linux/of_dma.h>
53#include <linux/mempool.h>
54#include <linux/numa.h>
55
56#include "dmaengine.h"
57
58static DEFINE_MUTEX(dma_list_mutex);
59static DEFINE_IDA(dma_ida);
60static LIST_HEAD(dma_device_list);
61static long dmaengine_ref_count;
62
63/* --- debugfs implementation --- */
64#ifdef CONFIG_DEBUG_FS
65#include <linux/debugfs.h>
66
67static struct dentry *rootdir;
68
69static void dmaengine_debug_register(struct dma_device *dma_dev)
70{
71 dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
72 rootdir);
73 if (IS_ERR(dma_dev->dbg_dev_root))
74 dma_dev->dbg_dev_root = NULL;
75}
76
77static void dmaengine_debug_unregister(struct dma_device *dma_dev)
78{
79 debugfs_remove_recursive(dma_dev->dbg_dev_root);
80 dma_dev->dbg_dev_root = NULL;
81}
82
83static void dmaengine_dbg_summary_show(struct seq_file *s,
84 struct dma_device *dma_dev)
85{
86 struct dma_chan *chan;
87
88 list_for_each_entry(chan, &dma_dev->channels, device_node) {
89 if (chan->client_count) {
90 seq_printf(s, " %-13s| %s", dma_chan_name(chan),
91 chan->dbg_client_name ?: "in-use");
92
93 if (chan->router)
94 seq_printf(s, " (via router: %s)\n",
95 dev_name(chan->router->dev));
96 else
97 seq_puts(s, "\n");
98 }
99 }
100}
101
102static int dmaengine_summary_show(struct seq_file *s, void *data)
103{
104 struct dma_device *dma_dev = NULL;
105
106 mutex_lock(&dma_list_mutex);
107 list_for_each_entry(dma_dev, &dma_device_list, global_node) {
108 seq_printf(s, "dma%d (%s): number of channels: %u\n",
109 dma_dev->dev_id, dev_name(dma_dev->dev),
110 dma_dev->chancnt);
111
112 if (dma_dev->dbg_summary_show)
113 dma_dev->dbg_summary_show(s, dma_dev);
114 else
115 dmaengine_dbg_summary_show(s, dma_dev);
116
117 if (!list_is_last(&dma_dev->global_node, &dma_device_list))
118 seq_puts(s, "\n");
119 }
120 mutex_unlock(&dma_list_mutex);
121
122 return 0;
123}
124DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
125
126static void __init dmaengine_debugfs_init(void)
127{
128 rootdir = debugfs_create_dir("dmaengine", NULL);
129
130 /* /sys/kernel/debug/dmaengine/summary */
131 debugfs_create_file("summary", 0444, rootdir, NULL,
132 &dmaengine_summary_fops);
133}
134#else
135static inline void dmaengine_debugfs_init(void) { }
136static inline int dmaengine_debug_register(struct dma_device *dma_dev)
137{
138 return 0;
139}
140
141static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
142#endif /* CONFIG_DEBUG_FS */
143
144/* --- sysfs implementation --- */
145
146#define DMA_SLAVE_NAME "slave"
147
148/**
149 * dev_to_dma_chan - convert a device pointer to its sysfs container object
150 * @dev: device node
151 *
152 * Must be called under dma_list_mutex.
153 */
154static struct dma_chan *dev_to_dma_chan(struct device *dev)
155{
156 struct dma_chan_dev *chan_dev;
157
158 chan_dev = container_of(dev, typeof(*chan_dev), device);
159 return chan_dev->chan;
160}
161
162static ssize_t memcpy_count_show(struct device *dev,
163 struct device_attribute *attr, char *buf)
164{
165 struct dma_chan *chan;
166 unsigned long count = 0;
167 int i;
168 int err;
169
170 mutex_lock(&dma_list_mutex);
171 chan = dev_to_dma_chan(dev);
172 if (chan) {
173 for_each_possible_cpu(i)
174 count += per_cpu_ptr(chan->local, i)->memcpy_count;
175 err = sysfs_emit(buf, "%lu\n", count);
176 } else
177 err = -ENODEV;
178 mutex_unlock(&dma_list_mutex);
179
180 return err;
181}
182static DEVICE_ATTR_RO(memcpy_count);
183
184static ssize_t bytes_transferred_show(struct device *dev,
185 struct device_attribute *attr, char *buf)
186{
187 struct dma_chan *chan;
188 unsigned long count = 0;
189 int i;
190 int err;
191
192 mutex_lock(&dma_list_mutex);
193 chan = dev_to_dma_chan(dev);
194 if (chan) {
195 for_each_possible_cpu(i)
196 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
197 err = sysfs_emit(buf, "%lu\n", count);
198 } else
199 err = -ENODEV;
200 mutex_unlock(&dma_list_mutex);
201
202 return err;
203}
204static DEVICE_ATTR_RO(bytes_transferred);
205
206static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
207 char *buf)
208{
209 struct dma_chan *chan;
210 int err;
211
212 mutex_lock(&dma_list_mutex);
213 chan = dev_to_dma_chan(dev);
214 if (chan)
215 err = sysfs_emit(buf, "%d\n", chan->client_count);
216 else
217 err = -ENODEV;
218 mutex_unlock(&dma_list_mutex);
219
220 return err;
221}
222static DEVICE_ATTR_RO(in_use);
223
224static struct attribute *dma_dev_attrs[] = {
225 &dev_attr_memcpy_count.attr,
226 &dev_attr_bytes_transferred.attr,
227 &dev_attr_in_use.attr,
228 NULL,
229};
230ATTRIBUTE_GROUPS(dma_dev);
231
232static void chan_dev_release(struct device *dev)
233{
234 struct dma_chan_dev *chan_dev;
235
236 chan_dev = container_of(dev, typeof(*chan_dev), device);
237 kfree(chan_dev);
238}
239
240static struct class dma_devclass = {
241 .name = "dma",
242 .dev_groups = dma_dev_groups,
243 .dev_release = chan_dev_release,
244};
245
246/* --- client and device registration --- */
247
248/* enable iteration over all operation types */
249static dma_cap_mask_t dma_cap_mask_all;
250
251/**
252 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
253 * @chan: associated channel for this entry
254 */
255struct dma_chan_tbl_ent {
256 struct dma_chan *chan;
257};
258
259/* percpu lookup table for memory-to-memory offload providers */
260static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
261
262static int __init dma_channel_table_init(void)
263{
264 enum dma_transaction_type cap;
265 int err = 0;
266
267 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
268
269 /* 'interrupt', 'private', and 'slave' are channel capabilities,
270 * but are not associated with an operation so they do not need
271 * an entry in the channel_table
272 */
273 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
274 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
275 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
276
277 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
278 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
279 if (!channel_table[cap]) {
280 err = -ENOMEM;
281 break;
282 }
283 }
284
285 if (err) {
286 pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
287 for_each_dma_cap_mask(cap, dma_cap_mask_all)
288 free_percpu(channel_table[cap]);
289 }
290
291 return err;
292}
293arch_initcall(dma_channel_table_init);
294
295/**
296 * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
297 * @chan: DMA channel to test
298 * @cpu: CPU index which the channel should be close to
299 *
300 * Returns true if the channel is in the same NUMA-node as the CPU.
301 */
302static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
303{
304 int node = dev_to_node(chan->device->dev);
305 return node == NUMA_NO_NODE ||
306 cpumask_test_cpu(cpu, cpumask_of_node(node));
307}
308
309/**
310 * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
311 * @cap: capability to match
312 * @cpu: CPU index which the channel should be close to
313 *
314 * If some channels are close to the given CPU, the one with the lowest
315 * reference count is returned. Otherwise, CPU is ignored and only the
316 * reference count is taken into account.
317 *
318 * Must be called under dma_list_mutex.
319 */
320static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
321{
322 struct dma_device *device;
323 struct dma_chan *chan;
324 struct dma_chan *min = NULL;
325 struct dma_chan *localmin = NULL;
326
327 list_for_each_entry(device, &dma_device_list, global_node) {
328 if (!dma_has_cap(cap, device->cap_mask) ||
329 dma_has_cap(DMA_PRIVATE, device->cap_mask))
330 continue;
331 list_for_each_entry(chan, &device->channels, device_node) {
332 if (!chan->client_count)
333 continue;
334 if (!min || chan->table_count < min->table_count)
335 min = chan;
336
337 if (dma_chan_is_local(chan, cpu))
338 if (!localmin ||
339 chan->table_count < localmin->table_count)
340 localmin = chan;
341 }
342 }
343
344 chan = localmin ? localmin : min;
345
346 if (chan)
347 chan->table_count++;
348
349 return chan;
350}
351
352/**
353 * dma_channel_rebalance - redistribute the available channels
354 *
355 * Optimize for CPU isolation (each CPU gets a dedicated channel for an
356 * operation type) in the SMP case, and operation isolation (avoid
357 * multi-tasking channels) in the non-SMP case.
358 *
359 * Must be called under dma_list_mutex.
360 */
361static void dma_channel_rebalance(void)
362{
363 struct dma_chan *chan;
364 struct dma_device *device;
365 int cpu;
366 int cap;
367
368 /* undo the last distribution */
369 for_each_dma_cap_mask(cap, dma_cap_mask_all)
370 for_each_possible_cpu(cpu)
371 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
372
373 list_for_each_entry(device, &dma_device_list, global_node) {
374 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
375 continue;
376 list_for_each_entry(chan, &device->channels, device_node)
377 chan->table_count = 0;
378 }
379
380 /* don't populate the channel_table if no clients are available */
381 if (!dmaengine_ref_count)
382 return;
383
384 /* redistribute available channels */
385 for_each_dma_cap_mask(cap, dma_cap_mask_all)
386 for_each_online_cpu(cpu) {
387 chan = min_chan(cap, cpu);
388 per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
389 }
390}
391
392static int dma_device_satisfies_mask(struct dma_device *device,
393 const dma_cap_mask_t *want)
394{
395 dma_cap_mask_t has;
396
397 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
398 DMA_TX_TYPE_END);
399 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
400}
401
402static struct module *dma_chan_to_owner(struct dma_chan *chan)
403{
404 return chan->device->owner;
405}
406
407/**
408 * balance_ref_count - catch up the channel reference count
409 * @chan: channel to balance ->client_count versus dmaengine_ref_count
410 *
411 * Must be called under dma_list_mutex.
412 */
413static void balance_ref_count(struct dma_chan *chan)
414{
415 struct module *owner = dma_chan_to_owner(chan);
416
417 while (chan->client_count < dmaengine_ref_count) {
418 __module_get(owner);
419 chan->client_count++;
420 }
421}
422
423static void dma_device_release(struct kref *ref)
424{
425 struct dma_device *device = container_of(ref, struct dma_device, ref);
426
427 list_del_rcu(&device->global_node);
428 dma_channel_rebalance();
429
430 if (device->device_release)
431 device->device_release(device);
432}
433
434static void dma_device_put(struct dma_device *device)
435{
436 lockdep_assert_held(&dma_list_mutex);
437 kref_put(&device->ref, dma_device_release);
438}
439
440/**
441 * dma_chan_get - try to grab a DMA channel's parent driver module
442 * @chan: channel to grab
443 *
444 * Must be called under dma_list_mutex.
445 */
446static int dma_chan_get(struct dma_chan *chan)
447{
448 struct module *owner = dma_chan_to_owner(chan);
449 int ret;
450
451 /* The channel is already in use, update client count */
452 if (chan->client_count) {
453 __module_get(owner);
454 chan->client_count++;
455 return 0;
456 }
457
458 if (!try_module_get(owner))
459 return -ENODEV;
460
461 ret = kref_get_unless_zero(&chan->device->ref);
462 if (!ret) {
463 ret = -ENODEV;
464 goto module_put_out;
465 }
466
467 /* allocate upon first client reference */
468 if (chan->device->device_alloc_chan_resources) {
469 ret = chan->device->device_alloc_chan_resources(chan);
470 if (ret < 0)
471 goto err_out;
472 }
473
474 chan->client_count++;
475
476 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
477 balance_ref_count(chan);
478
479 return 0;
480
481err_out:
482 dma_device_put(chan->device);
483module_put_out:
484 module_put(owner);
485 return ret;
486}
487
488/**
489 * dma_chan_put - drop a reference to a DMA channel's parent driver module
490 * @chan: channel to release
491 *
492 * Must be called under dma_list_mutex.
493 */
494static void dma_chan_put(struct dma_chan *chan)
495{
496 /* This channel is not in use, bail out */
497 if (!chan->client_count)
498 return;
499
500 chan->client_count--;
501
502 /* This channel is not in use anymore, free it */
503 if (!chan->client_count && chan->device->device_free_chan_resources) {
504 /* Make sure all operations have completed */
505 dmaengine_synchronize(chan);
506 chan->device->device_free_chan_resources(chan);
507 }
508
509 /* If the channel is used via a DMA request router, free the mapping */
510 if (chan->router && chan->router->route_free) {
511 chan->router->route_free(chan->router->dev, chan->route_data);
512 chan->router = NULL;
513 chan->route_data = NULL;
514 }
515
516 dma_device_put(chan->device);
517 module_put(dma_chan_to_owner(chan));
518}
519
520enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
521{
522 enum dma_status status;
523 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
524
525 dma_async_issue_pending(chan);
526 do {
527 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
528 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
529 dev_err(chan->device->dev, "%s: timeout!\n", __func__);
530 return DMA_ERROR;
531 }
532 if (status != DMA_IN_PROGRESS)
533 break;
534 cpu_relax();
535 } while (1);
536
537 return status;
538}
539EXPORT_SYMBOL(dma_sync_wait);
540
541/**
542 * dma_find_channel - find a channel to carry out the operation
543 * @tx_type: transaction type
544 */
545struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
546{
547 return this_cpu_read(channel_table[tx_type]->chan);
548}
549EXPORT_SYMBOL(dma_find_channel);
550
551/**
552 * dma_issue_pending_all - flush all pending operations across all channels
553 */
554void dma_issue_pending_all(void)
555{
556 struct dma_device *device;
557 struct dma_chan *chan;
558
559 rcu_read_lock();
560 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
561 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
562 continue;
563 list_for_each_entry(chan, &device->channels, device_node)
564 if (chan->client_count)
565 device->device_issue_pending(chan);
566 }
567 rcu_read_unlock();
568}
569EXPORT_SYMBOL(dma_issue_pending_all);
570
571int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
572{
573 struct dma_device *device;
574
575 if (!chan || !caps)
576 return -EINVAL;
577
578 device = chan->device;
579
580 /* check if the channel supports slave transactions */
581 if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
582 test_bit(DMA_CYCLIC, device->cap_mask.bits)))
583 return -ENXIO;
584
585 /*
 586	 * Check whether the device reports the generic slave
 587	 * capabilities; if not, it does not support any kind of
 588	 * slave capabilities reporting.
589 */
590 if (!device->directions)
591 return -ENXIO;
592
593 caps->src_addr_widths = device->src_addr_widths;
594 caps->dst_addr_widths = device->dst_addr_widths;
595 caps->directions = device->directions;
596 caps->min_burst = device->min_burst;
597 caps->max_burst = device->max_burst;
598 caps->max_sg_burst = device->max_sg_burst;
599 caps->residue_granularity = device->residue_granularity;
600 caps->descriptor_reuse = device->descriptor_reuse;
601 caps->cmd_pause = !!device->device_pause;
602 caps->cmd_resume = !!device->device_resume;
603 caps->cmd_terminate = !!device->device_terminate_all;
604
605 /*
 606	 * A DMA engine device might be configured with non-uniformly
 607	 * distributed slave capabilities across its channels. In this
608 * case the corresponding driver may provide the device_caps
609 * callback to override the generic capabilities with
610 * channel-specific ones.
611 */
612 if (device->device_caps)
613 device->device_caps(chan, caps);
614
615 return 0;
616}
617EXPORT_SYMBOL_GPL(dma_get_slave_caps);
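
/*
 * Illustrative sketch (not part of the original file): a slave client might
 * query the channel capabilities before filling in its dma_slave_config
 * (cfg below), e.g. to confirm that a 4-byte register width is supported:
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 */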
618
619static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
620 struct dma_device *dev,
621 dma_filter_fn fn, void *fn_param)
622{
623 struct dma_chan *chan;
624
625 if (mask && !dma_device_satisfies_mask(dev, mask)) {
626 dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
627 return NULL;
628 }
629 /* devices with multiple channels need special handling as we need to
630 * ensure that all channels are either private or public.
631 */
632 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
633 list_for_each_entry(chan, &dev->channels, device_node) {
634 /* some channels are already publicly allocated */
635 if (chan->client_count)
636 return NULL;
637 }
638
639 list_for_each_entry(chan, &dev->channels, device_node) {
640 if (chan->client_count) {
641 dev_dbg(dev->dev, "%s: %s busy\n",
642 __func__, dma_chan_name(chan));
643 continue;
644 }
645 if (fn && !fn(chan, fn_param)) {
646 dev_dbg(dev->dev, "%s: %s filter said false\n",
647 __func__, dma_chan_name(chan));
648 continue;
649 }
650 return chan;
651 }
652
653 return NULL;
654}
655
656static struct dma_chan *find_candidate(struct dma_device *device,
657 const dma_cap_mask_t *mask,
658 dma_filter_fn fn, void *fn_param)
659{
660 struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
661 int err;
662
663 if (chan) {
664 /* Found a suitable channel, try to grab, prep, and return it.
665 * We first set DMA_PRIVATE to disable balance_ref_count as this
666 * channel will not be published in the general-purpose
667 * allocator
668 */
669 dma_cap_set(DMA_PRIVATE, device->cap_mask);
670 device->privatecnt++;
671 err = dma_chan_get(chan);
672
673 if (err) {
674 if (err == -ENODEV) {
675 dev_dbg(device->dev, "%s: %s module removed\n",
676 __func__, dma_chan_name(chan));
677 list_del_rcu(&device->global_node);
678 } else
679 dev_dbg(device->dev,
680 "%s: failed to get %s: (%d)\n",
681 __func__, dma_chan_name(chan), err);
682
683 if (--device->privatecnt == 0)
684 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
685
686 chan = ERR_PTR(err);
687 }
688 }
689
690 return chan ? chan : ERR_PTR(-EPROBE_DEFER);
691}
692
693/**
694 * dma_get_slave_channel - try to get specific channel exclusively
695 * @chan: target channel
696 */
697struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
698{
699 /* lock against __dma_request_channel */
700 mutex_lock(&dma_list_mutex);
701
702 if (chan->client_count == 0) {
703 struct dma_device *device = chan->device;
704 int err;
705
706 dma_cap_set(DMA_PRIVATE, device->cap_mask);
707 device->privatecnt++;
708 err = dma_chan_get(chan);
709 if (err) {
710 dev_dbg(chan->device->dev,
711 "%s: failed to get %s: (%d)\n",
712 __func__, dma_chan_name(chan), err);
713 chan = NULL;
714 if (--device->privatecnt == 0)
715 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
716 }
717 } else
718 chan = NULL;
719
720 mutex_unlock(&dma_list_mutex);
721
722
723 return chan;
724}
725EXPORT_SYMBOL_GPL(dma_get_slave_channel);
726
727struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
728{
729 dma_cap_mask_t mask;
730 struct dma_chan *chan;
731
732 dma_cap_zero(mask);
733 dma_cap_set(DMA_SLAVE, mask);
734
735 /* lock against __dma_request_channel */
736 mutex_lock(&dma_list_mutex);
737
738 chan = find_candidate(device, &mask, NULL, NULL);
739
740 mutex_unlock(&dma_list_mutex);
741
742 return IS_ERR(chan) ? NULL : chan;
743}
744EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
745
746/**
747 * __dma_request_channel - try to allocate an exclusive channel
748 * @mask: capabilities that the channel must satisfy
 749 * @fn: optional callback to filter the available channels
750 * @fn_param: opaque parameter to pass to dma_filter_fn()
751 * @np: device node to look for DMA channels
752 *
753 * Returns pointer to appropriate DMA channel on success or NULL.
754 */
755struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
756 dma_filter_fn fn, void *fn_param,
757 struct device_node *np)
758{
759 struct dma_device *device, *_d;
760 struct dma_chan *chan = NULL;
761
762 /* Find a channel */
763 mutex_lock(&dma_list_mutex);
764 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
765 /* Finds a DMA controller with matching device node */
766 if (np && device->dev->of_node && np != device->dev->of_node)
767 continue;
768
769 chan = find_candidate(device, mask, fn, fn_param);
770 if (!IS_ERR(chan))
771 break;
772
773 chan = NULL;
774 }
775 mutex_unlock(&dma_list_mutex);
776
777 pr_debug("%s: %s (%s)\n",
778 __func__,
779 chan ? "success" : "fail",
780 chan ? dma_chan_name(chan) : NULL);
781
782 return chan;
783}
784EXPORT_SYMBOL_GPL(__dma_request_channel);
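
/*
 * Illustrative sketch (not part of the original file): __dma_request_channel()
 * is normally reached through the dma_request_channel() macro, optionally with
 * a filter function to pick a specific channel. my_filter and wanted_id below
 * are hypothetical:
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->chan_id == *(int *)param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	int wanted_id = 0;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, &wanted_id);
 *	if (!chan)
 *		return -ENODEV;
 */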
785
786static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
787 const char *name,
788 struct device *dev)
789{
790 int i;
791
792 if (!device->filter.mapcnt)
793 return NULL;
794
795 for (i = 0; i < device->filter.mapcnt; i++) {
796 const struct dma_slave_map *map = &device->filter.map[i];
797
798 if (!strcmp(map->devname, dev_name(dev)) &&
799 !strcmp(map->slave, name))
800 return map;
801 }
802
803 return NULL;
804}
805
806/**
807 * dma_request_chan - try to allocate an exclusive slave channel
808 * @dev: pointer to client device structure
809 * @name: slave channel name
810 *
811 * Returns pointer to appropriate DMA channel on success or an error pointer.
812 */
813struct dma_chan *dma_request_chan(struct device *dev, const char *name)
814{
815 struct dma_device *d, *_d;
816 struct dma_chan *chan = NULL;
817
818 /* If device-tree is present get slave info from here */
819 if (dev->of_node)
820 chan = of_dma_request_slave_channel(dev->of_node, name);
821
822 /* If device was enumerated by ACPI get slave info from here */
823 if (has_acpi_companion(dev) && !chan)
824 chan = acpi_dma_request_slave_chan_by_name(dev, name);
825
826 if (PTR_ERR(chan) == -EPROBE_DEFER)
827 return chan;
828
829 if (!IS_ERR_OR_NULL(chan))
830 goto found;
831
832 /* Try to find the channel via the DMA filter map(s) */
833 mutex_lock(&dma_list_mutex);
834 list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
835 dma_cap_mask_t mask;
836 const struct dma_slave_map *map = dma_filter_match(d, name, dev);
837
838 if (!map)
839 continue;
840
841 dma_cap_zero(mask);
842 dma_cap_set(DMA_SLAVE, mask);
843
844 chan = find_candidate(d, &mask, d->filter.fn, map->param);
845 if (!IS_ERR(chan))
846 break;
847 }
848 mutex_unlock(&dma_list_mutex);
849
850 if (IS_ERR(chan))
851 return chan;
852 if (!chan)
853 return ERR_PTR(-EPROBE_DEFER);
854
855found:
856#ifdef CONFIG_DEBUG_FS
857 chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
858 name);
859#endif
860
861 chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
862 if (!chan->name)
863 return chan;
864 chan->slave = dev;
865
866 if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
867 DMA_SLAVE_NAME))
868 dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
869 if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
870 dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);
871
872 return chan;
873}
874EXPORT_SYMBOL_GPL(dma_request_chan);
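
/*
 * Illustrative sketch (not part of the original file): a typical slave driver
 * requests its channel by name at probe time and releases it on remove. The
 * name "rx" is a hypothetical channel name from the client's DT/ACPI data:
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
 *				     "no rx DMA channel\n");
 *	...
 *	dma_release_channel(chan);
 */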
875
876/**
877 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
878 * @mask: capabilities that the channel must satisfy
879 *
880 * Returns pointer to appropriate DMA channel on success or an error pointer.
881 */
882struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
883{
884 struct dma_chan *chan;
885
886 if (!mask)
887 return ERR_PTR(-ENODEV);
888
889 chan = __dma_request_channel(mask, NULL, NULL, NULL);
890 if (!chan) {
891 mutex_lock(&dma_list_mutex);
892 if (list_empty(&dma_device_list))
893 chan = ERR_PTR(-EPROBE_DEFER);
894 else
895 chan = ERR_PTR(-ENODEV);
896 mutex_unlock(&dma_list_mutex);
897 }
898
899 return chan;
900}
901EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
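
/*
 * Illustrative sketch (not part of the original file): a client that only
 * needs some channel with a given capability, rather than one wired to a
 * specific peripheral, might do:
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */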
902
903void dma_release_channel(struct dma_chan *chan)
904{
905 mutex_lock(&dma_list_mutex);
906 WARN_ONCE(chan->client_count != 1,
907 "chan reference count %d != 1\n", chan->client_count);
908 dma_chan_put(chan);
909 /* drop PRIVATE cap enabled by __dma_request_channel() */
910 if (--chan->device->privatecnt == 0)
911 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
912
913 if (chan->slave) {
914 sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
915 sysfs_remove_link(&chan->slave->kobj, chan->name);
916 kfree(chan->name);
917 chan->name = NULL;
918 chan->slave = NULL;
919 }
920
921#ifdef CONFIG_DEBUG_FS
922 kfree(chan->dbg_client_name);
923 chan->dbg_client_name = NULL;
924#endif
925 mutex_unlock(&dma_list_mutex);
926}
927EXPORT_SYMBOL_GPL(dma_release_channel);
928
929/**
930 * dmaengine_get - register interest in dma_channels
931 */
932void dmaengine_get(void)
933{
934 struct dma_device *device, *_d;
935 struct dma_chan *chan;
936 int err;
937
938 mutex_lock(&dma_list_mutex);
939 dmaengine_ref_count++;
940
941 /* try to grab channels */
942 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
943 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
944 continue;
945 list_for_each_entry(chan, &device->channels, device_node) {
946 err = dma_chan_get(chan);
947 if (err == -ENODEV) {
948 /* module removed before we could use it */
949 list_del_rcu(&device->global_node);
950 break;
951 } else if (err)
952 dev_dbg(chan->device->dev,
953 "%s: failed to get %s: (%d)\n",
954 __func__, dma_chan_name(chan), err);
955 }
956 }
957
958 /* if this is the first reference and there were channels
959 * waiting we need to rebalance to get those channels
960 * incorporated into the channel table
961 */
962 if (dmaengine_ref_count == 1)
963 dma_channel_rebalance();
964 mutex_unlock(&dma_list_mutex);
965}
966EXPORT_SYMBOL(dmaengine_get);
967
968/**
969 * dmaengine_put - let DMA drivers be removed when ref_count == 0
970 */
971void dmaengine_put(void)
972{
973 struct dma_device *device, *_d;
974 struct dma_chan *chan;
975
976 mutex_lock(&dma_list_mutex);
977 dmaengine_ref_count--;
978 BUG_ON(dmaengine_ref_count < 0);
979 /* drop channel references */
980 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
981 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
982 continue;
983 list_for_each_entry(chan, &device->channels, device_node)
984 dma_chan_put(chan);
985 }
986 mutex_unlock(&dma_list_mutex);
987}
988EXPORT_SYMBOL(dmaengine_put);
989
990static bool device_has_all_tx_types(struct dma_device *device)
991{
992 /* A device that satisfies this test has channels that will never cause
993 * an async_tx channel switch event as all possible operation types can
994 * be handled.
995 */
996 #ifdef CONFIG_ASYNC_TX_DMA
997 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
998 return false;
999 #endif
1000
1001 #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
1002 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
1003 return false;
1004 #endif
1005
1006 #if IS_ENABLED(CONFIG_ASYNC_XOR)
1007 if (!dma_has_cap(DMA_XOR, device->cap_mask))
1008 return false;
1009
1010 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
1011 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
1012 return false;
1013 #endif
1014 #endif
1015
1016 #if IS_ENABLED(CONFIG_ASYNC_PQ)
1017 if (!dma_has_cap(DMA_PQ, device->cap_mask))
1018 return false;
1019
1020 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
1021 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
1022 return false;
1023 #endif
1024 #endif
1025
1026 return true;
1027}
1028
1029static int get_dma_id(struct dma_device *device)
1030{
1031 int rc = ida_alloc(&dma_ida, GFP_KERNEL);
1032
1033 if (rc < 0)
1034 return rc;
1035 device->dev_id = rc;
1036 return 0;
1037}
1038
1039static int __dma_async_device_channel_register(struct dma_device *device,
1040 struct dma_chan *chan)
1041{
1042 int rc;
1043
1044 chan->local = alloc_percpu(typeof(*chan->local));
1045 if (!chan->local)
1046 return -ENOMEM;
1047 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
1048 if (!chan->dev) {
1049 rc = -ENOMEM;
1050 goto err_free_local;
1051 }
1052
1053 /*
1054 * When the chan_id is a negative value, we are dynamically adding
 1055	 * the channel. Otherwise we are statically enumerating.
1056 */
1057 chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
1058 if (chan->chan_id < 0) {
1059 pr_err("%s: unable to alloc ida for chan: %d\n",
1060 __func__, chan->chan_id);
1061 rc = chan->chan_id;
1062 goto err_free_dev;
1063 }
1064
1065 chan->dev->device.class = &dma_devclass;
1066 chan->dev->device.parent = device->dev;
1067 chan->dev->chan = chan;
1068 chan->dev->dev_id = device->dev_id;
1069 dev_set_name(&chan->dev->device, "dma%dchan%d",
1070 device->dev_id, chan->chan_id);
1071 rc = device_register(&chan->dev->device);
1072 if (rc)
1073 goto err_out_ida;
1074 chan->client_count = 0;
1075 device->chancnt++;
1076
1077 return 0;
1078
1079 err_out_ida:
1080 ida_free(&device->chan_ida, chan->chan_id);
1081 err_free_dev:
1082 kfree(chan->dev);
1083 err_free_local:
1084 free_percpu(chan->local);
1085 chan->local = NULL;
1086 return rc;
1087}
1088
1089int dma_async_device_channel_register(struct dma_device *device,
1090 struct dma_chan *chan)
1091{
1092 int rc;
1093
1094 rc = __dma_async_device_channel_register(device, chan);
1095 if (rc < 0)
1096 return rc;
1097
1098 dma_channel_rebalance();
1099 return 0;
1100}
1101EXPORT_SYMBOL_GPL(dma_async_device_channel_register);
1102
1103static void __dma_async_device_channel_unregister(struct dma_device *device,
1104 struct dma_chan *chan)
1105{
1106 if (chan->local == NULL)
1107 return;
1108
1109 WARN_ONCE(!device->device_release && chan->client_count,
1110 "%s called while %d clients hold a reference\n",
1111 __func__, chan->client_count);
1112 mutex_lock(&dma_list_mutex);
1113 device->chancnt--;
1114 chan->dev->chan = NULL;
1115 mutex_unlock(&dma_list_mutex);
1116 ida_free(&device->chan_ida, chan->chan_id);
1117 device_unregister(&chan->dev->device);
1118 free_percpu(chan->local);
1119}
1120
1121void dma_async_device_channel_unregister(struct dma_device *device,
1122 struct dma_chan *chan)
1123{
1124 __dma_async_device_channel_unregister(device, chan);
1125 dma_channel_rebalance();
1126}
1127EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);
1128
1129/**
1130 * dma_async_device_register - registers DMA devices found
1131 * @device: pointer to &struct dma_device
1132 *
1133 * After calling this routine the structure should not be freed except in the
1134 * device_release() callback which will be called after
1135 * dma_async_device_unregister() is called and no further references are taken.
1136 */
1137int dma_async_device_register(struct dma_device *device)
1138{
1139 int rc;
1140	struct dma_chan *chan;
1141
1142 if (!device)
1143 return -ENODEV;
1144
1145 /* validate device routines */
1146 if (!device->dev) {
1147		pr_err("DMA device must have dev\n");
1148 return -EIO;
1149 }
1150
1151 device->owner = device->dev->driver->owner;
1152
1153#define CHECK_CAP(_name, _type) \
1154{ \
1155 if (dma_has_cap(_type, device->cap_mask) && !device->device_prep_##_name) { \
1156 dev_err(device->dev, \
1157 "Device claims capability %s, but op is not defined\n", \
1158 __stringify(_type)); \
1159 return -EIO; \
1160 } \
1161}
1162
1163 CHECK_CAP(dma_memcpy, DMA_MEMCPY);
1164 CHECK_CAP(dma_xor, DMA_XOR);
1165 CHECK_CAP(dma_xor_val, DMA_XOR_VAL);
1166 CHECK_CAP(dma_pq, DMA_PQ);
1167 CHECK_CAP(dma_pq_val, DMA_PQ_VAL);
1168 CHECK_CAP(dma_memset, DMA_MEMSET);
1169 CHECK_CAP(dma_interrupt, DMA_INTERRUPT);
1170 CHECK_CAP(dma_cyclic, DMA_CYCLIC);
1171 CHECK_CAP(interleaved_dma, DMA_INTERLEAVE);
1172
1173#undef CHECK_CAP
1174
1175 if (!device->device_tx_status) {
1176 dev_err(device->dev, "Device tx_status is not defined\n");
1177 return -EIO;
1178 }
1179
1180
1181 if (!device->device_issue_pending) {
1182 dev_err(device->dev, "Device issue_pending is not defined\n");
1183 return -EIO;
1184 }
1185
1186 if (!device->device_release)
1187 dev_dbg(device->dev,
1188 "WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");
1189
1190 kref_init(&device->ref);
1191
1192 /* note: this only matters in the
1193 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
1194 */
1195 if (device_has_all_tx_types(device))
1196 dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1197
1198 rc = get_dma_id(device);
1199 if (rc != 0)
1200 return rc;
1201
1202 ida_init(&device->chan_ida);
1203
1204 /* represent channels in sysfs. Probably want devs too */
1205 list_for_each_entry(chan, &device->channels, device_node) {
1206 rc = __dma_async_device_channel_register(device, chan);
1207 if (rc < 0)
1208 goto err_out;
1209 }
1210
1211 mutex_lock(&dma_list_mutex);
1212 /* take references on public channels */
1213 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1214 list_for_each_entry(chan, &device->channels, device_node) {
1215 /* if clients are already waiting for channels we need
1216 * to take references on their behalf
1217 */
1218 if (dma_chan_get(chan) == -ENODEV) {
1219 /* note we can only get here for the first
1220 * channel as the remaining channels are
1221 * guaranteed to get a reference
1222 */
1223 rc = -ENODEV;
1224 mutex_unlock(&dma_list_mutex);
1225 goto err_out;
1226 }
1227 }
1228 list_add_tail_rcu(&device->global_node, &dma_device_list);
1229 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1230 device->privatecnt++; /* Always private */
1231 dma_channel_rebalance();
1232 mutex_unlock(&dma_list_mutex);
1233
1234 dmaengine_debug_register(device);
1235
1236 return 0;
1237
1238err_out:
1239	/* if we never registered a channel just release the ida */
1240 if (!device->chancnt) {
1241 ida_free(&dma_ida, device->dev_id);
1242 return rc;
1243 }
1244
1245 list_for_each_entry(chan, &device->channels, device_node) {
1246 if (chan->local == NULL)
1247 continue;
1248 mutex_lock(&dma_list_mutex);
1249 chan->dev->chan = NULL;
1250 mutex_unlock(&dma_list_mutex);
1251 device_unregister(&chan->dev->device);
1252 free_percpu(chan->local);
1253 }
1254 return rc;
1255}
1256EXPORT_SYMBOL(dma_async_device_register);
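
/*
 * Illustrative sketch (not part of the original file): a minimal memcpy
 * provider fills in its dma_device, adds its channels to the ->channels
 * list and registers. The my_* callbacks are placeholders for the driver's
 * own implementations:
 *
 *	dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *	dd->dev = &pdev->dev;
 *	dd->device_prep_dma_memcpy = my_prep_memcpy;
 *	dd->device_tx_status = my_tx_status;
 *	dd->device_issue_pending = my_issue_pending;
 *	dd->device_release = my_release;
 *	INIT_LIST_HEAD(&dd->channels);
 *	list_add_tail(&mychan->device_node, &dd->channels);
 *	ret = dma_async_device_register(dd);
 */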
1257
1258/**
1259 * dma_async_device_unregister - unregister a DMA device
1260 * @device: pointer to &struct dma_device
1261 *
 1262 * This routine is called by DMA driver exit routines; dmaengine holds module
 1263 * references to prevent it from being called while channels are in use.
1264 */
1265void dma_async_device_unregister(struct dma_device *device)
1266{
1267 struct dma_chan *chan, *n;
1268
1269 dmaengine_debug_unregister(device);
1270
1271 list_for_each_entry_safe(chan, n, &device->channels, device_node)
1272 __dma_async_device_channel_unregister(device, chan);
1273
1274 mutex_lock(&dma_list_mutex);
1275 /*
1276 * setting DMA_PRIVATE ensures the device being torn down will not
1277 * be used in the channel_table
1278 */
1279 dma_cap_set(DMA_PRIVATE, device->cap_mask);
1280 dma_channel_rebalance();
1281 ida_free(&dma_ida, device->dev_id);
1282 dma_device_put(device);
1283 mutex_unlock(&dma_list_mutex);
1284}
1285EXPORT_SYMBOL(dma_async_device_unregister);
1286
1287static void dmaenginem_async_device_unregister(void *device)
1288{
1289 dma_async_device_unregister(device);
1290}
1291
1292/**
1293 * dmaenginem_async_device_register - registers DMA devices found
1294 * @device: pointer to &struct dma_device
1295 *
1296 * The operation is managed and will be undone on driver detach.
1297 */
1298int dmaenginem_async_device_register(struct dma_device *device)
1299{
1300 int ret;
1301
1302 ret = dma_async_device_register(device);
1303 if (ret)
1304 return ret;
1305
1306 return devm_add_action_or_reset(device->dev, dmaenginem_async_device_unregister, device);
1307}
1308EXPORT_SYMBOL(dmaenginem_async_device_register);
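
/*
 * Illustrative sketch (not part of the original file): with the managed
 * variant the unregister step is tied to driver detach, so a probe routine
 * can register a device set up as in the sketch above and simply return:
 *
 *	ret = dmaenginem_async_device_register(dd);
 *	if (ret)
 *		return ret;
 */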
1309
1310struct dmaengine_unmap_pool {
1311 struct kmem_cache *cache;
1312 const char *name;
1313 mempool_t *pool;
1314 size_t size;
1315};
1316
1317#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1318static struct dmaengine_unmap_pool unmap_pool[] = {
1319 __UNMAP_POOL(2),
1320 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1321 __UNMAP_POOL(16),
1322 __UNMAP_POOL(128),
1323 __UNMAP_POOL(256),
1324 #endif
1325};
1326
1327static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1328{
1329 int order = get_count_order(nr);
1330
1331 switch (order) {
1332 case 0 ... 1:
1333 return &unmap_pool[0];
1334#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1335 case 2 ... 4:
1336 return &unmap_pool[1];
1337 case 5 ... 7:
1338 return &unmap_pool[2];
1339 case 8:
1340 return &unmap_pool[3];
1341#endif
1342 default:
1343 BUG();
1344 return NULL;
1345 }
1346}
1347
1348static void dmaengine_unmap(struct kref *kref)
1349{
1350 struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1351 struct device *dev = unmap->dev;
1352 int cnt, i;
1353
1354 cnt = unmap->to_cnt;
1355 for (i = 0; i < cnt; i++)
1356 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1357 DMA_TO_DEVICE);
1358 cnt += unmap->from_cnt;
1359 for (; i < cnt; i++)
1360 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1361 DMA_FROM_DEVICE);
1362 cnt += unmap->bidi_cnt;
1363 for (; i < cnt; i++) {
1364 if (unmap->addr[i] == 0)
1365 continue;
1366 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1367 DMA_BIDIRECTIONAL);
1368 }
1369 cnt = unmap->map_cnt;
1370 mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1371}
1372
1373void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1374{
1375 if (unmap)
1376 kref_put(&unmap->kref, dmaengine_unmap);
1377}
1378EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1379
1380static void dmaengine_destroy_unmap_pool(void)
1381{
1382 int i;
1383
1384 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1385 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1386
1387 mempool_destroy(p->pool);
1388 p->pool = NULL;
1389 kmem_cache_destroy(p->cache);
1390 p->cache = NULL;
1391 }
1392}
1393
1394static int __init dmaengine_init_unmap_pool(void)
1395{
1396 int i;
1397
1398 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1399 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1400 size_t size;
1401
1402 size = sizeof(struct dmaengine_unmap_data) +
1403 sizeof(dma_addr_t) * p->size;
1404
1405 p->cache = kmem_cache_create(p->name, size, 0,
1406 SLAB_HWCACHE_ALIGN, NULL);
1407 if (!p->cache)
1408 break;
1409 p->pool = mempool_create_slab_pool(1, p->cache);
1410 if (!p->pool)
1411 break;
1412 }
1413
1414 if (i == ARRAY_SIZE(unmap_pool))
1415 return 0;
1416
1417 dmaengine_destroy_unmap_pool();
1418 return -ENOMEM;
1419}
1420
1421struct dmaengine_unmap_data *
1422dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1423{
1424 struct dmaengine_unmap_data *unmap;
1425
1426 unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1427 if (!unmap)
1428 return NULL;
1429
1430 memset(unmap, 0, sizeof(*unmap));
1431 kref_init(&unmap->kref);
1432 unmap->dev = dev;
1433 unmap->map_cnt = nr;
1434
1435 return unmap;
1436}
1437EXPORT_SYMBOL(dmaengine_get_unmap_data);
1438
1439void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1440 struct dma_chan *chan)
1441{
1442 tx->chan = chan;
1443 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1444 spin_lock_init(&tx->lock);
1445 #endif
1446}
1447EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1448
1449static inline int desc_check_and_set_metadata_mode(
1450 struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
1451{
1452 /* Make sure that the metadata mode is not mixed */
1453 if (!desc->desc_metadata_mode) {
1454 if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
1455 desc->desc_metadata_mode = mode;
1456 else
1457 return -ENOTSUPP;
1458 } else if (desc->desc_metadata_mode != mode) {
1459 return -EINVAL;
1460 }
1461
1462 return 0;
1463}
1464
1465int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
1466 void *data, size_t len)
1467{
1468 int ret;
1469
1470 if (!desc)
1471 return -EINVAL;
1472
1473 ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
1474 if (ret)
1475 return ret;
1476
1477 if (!desc->metadata_ops || !desc->metadata_ops->attach)
1478 return -ENOTSUPP;
1479
1480 return desc->metadata_ops->attach(desc, data, len);
1481}
1482EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
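
/*
 * Illustrative sketch (not part of the original file): in DESC_METADATA_CLIENT
 * mode a client attaches its metadata buffer to a prepared descriptor before
 * submitting it. buf is an already mapped dma_addr_t and md/md_len are
 * hypothetical client-owned metadata:
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV, 0);
 *	if (!desc)
 *		return -ENOMEM;
 *	if (!dmaengine_desc_attach_metadata(desc, md, md_len))
 *		dmaengine_submit(desc);
 */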
1483
1484void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
1485 size_t *payload_len, size_t *max_len)
1486{
1487 int ret;
1488
1489 if (!desc)
1490 return ERR_PTR(-EINVAL);
1491
1492 ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
1493 if (ret)
1494 return ERR_PTR(ret);
1495
1496 if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
1497 return ERR_PTR(-ENOTSUPP);
1498
1499 return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
1500}
1501EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);
1502
1503int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
1504 size_t payload_len)
1505{
1506 int ret;
1507
1508 if (!desc)
1509 return -EINVAL;
1510
1511 ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
1512 if (ret)
1513 return ret;
1514
1515 if (!desc->metadata_ops || !desc->metadata_ops->set_len)
1516 return -ENOTSUPP;
1517
1518 return desc->metadata_ops->set_len(desc, payload_len);
1519}
1520EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);
1521
1522/**
1523 * dma_wait_for_async_tx - spin wait for a transaction to complete
1524 * @tx: in-flight transaction to wait on
1525 */
1526enum dma_status
1527dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1528{
1529 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1530
1531 if (!tx)
1532 return DMA_COMPLETE;
1533
1534 while (tx->cookie == -EBUSY) {
1535 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1536 dev_err(tx->chan->device->dev,
1537 "%s timeout waiting for descriptor submission\n",
1538 __func__);
1539 return DMA_ERROR;
1540 }
1541 cpu_relax();
1542 }
1543 return dma_sync_wait(tx->chan, tx->cookie);
1544}
1545EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1546
1547/**
1548 * dma_run_dependencies - process dependent operations on the target channel
1549 * @tx: transaction with dependencies
1550 *
1551 * Helper routine for DMA drivers to process (start) dependent operations
1552 * on their target channel.
1553 */
1554void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1555{
1556 struct dma_async_tx_descriptor *dep = txd_next(tx);
1557 struct dma_async_tx_descriptor *dep_next;
1558 struct dma_chan *chan;
1559
1560 if (!dep)
1561 return;
1562
1563 /* we'll submit tx->next now, so clear the link */
1564 txd_clear_next(tx);
1565 chan = dep->chan;
1566
1567 /* keep submitting up until a channel switch is detected
1568 * in that case we will be called again as a result of
1569 * processing the interrupt from async_tx_channel_switch
1570 */
1571 for (; dep; dep = dep_next) {
1572 txd_lock(dep);
1573 txd_clear_parent(dep);
1574 dep_next = txd_next(dep);
1575 if (dep_next && dep_next->chan == chan)
1576 txd_clear_next(dep); /* ->next will be submitted */
1577 else
1578 dep_next = NULL; /* submit current dep and terminate */
1579 txd_unlock(dep);
1580
1581 dep->tx_submit(dep);
1582 }
1583
1584 chan->device->device_issue_pending(chan);
1585}
1586EXPORT_SYMBOL_GPL(dma_run_dependencies);
1587
1588static int __init dma_bus_init(void)
1589{
1590 int err = dmaengine_init_unmap_pool();
1591
1592 if (err)
1593 return err;
1594
1595 err = class_register(&dma_devclass);
1596 if (!err)
1597 dmaengine_debugfs_init();
1598
1599 return err;
1600}
1601arch_initcall(dma_bus_init);
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
4 */
5
6/*
7 * This code implements the DMA subsystem. It provides a HW-neutral interface
8 * for other kernel code to use asynchronous memory copy capabilities,
9 * if present, and allows different HW DMA drivers to register as providing
10 * this capability.
11 *
12 * Due to the fact we are accelerating what is already a relatively fast
13 * operation, the code goes to great lengths to avoid additional overhead,
14 * such as locking.
15 *
16 * LOCKING:
17 *
18 * The subsystem keeps a global list of dma_device structs it is protected by a
19 * mutex, dma_list_mutex.
20 *
21 * A subsystem can get access to a channel by calling dmaengine_get() followed
22 * by dma_find_channel(), or if it has need for an exclusive channel it can call
23 * dma_request_channel(). Once a channel is allocated a reference is taken
24 * against its corresponding driver to disable removal.
25 *
26 * Each device has a channels list, which runs unlocked but is never modified
27 * once the device is registered, it's just setup by the driver.
28 *
29 * See Documentation/driver-api/dmaengine for more details
30 */
31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34#include <linux/platform_device.h>
35#include <linux/dma-mapping.h>
36#include <linux/init.h>
37#include <linux/module.h>
38#include <linux/mm.h>
39#include <linux/device.h>
40#include <linux/dmaengine.h>
41#include <linux/hardirq.h>
42#include <linux/spinlock.h>
43#include <linux/percpu.h>
44#include <linux/rcupdate.h>
45#include <linux/mutex.h>
46#include <linux/jiffies.h>
47#include <linux/rculist.h>
48#include <linux/idr.h>
49#include <linux/slab.h>
50#include <linux/acpi.h>
51#include <linux/acpi_dma.h>
52#include <linux/of_dma.h>
53#include <linux/mempool.h>
54#include <linux/numa.h>
55
56static DEFINE_MUTEX(dma_list_mutex);
57static DEFINE_IDA(dma_ida);
58static LIST_HEAD(dma_device_list);
59static long dmaengine_ref_count;
60
61/* --- sysfs implementation --- */
62
63/**
64 * dev_to_dma_chan - convert a device pointer to its sysfs container object
65 * @dev - device node
66 *
67 * Must be called under dma_list_mutex
68 */
69static struct dma_chan *dev_to_dma_chan(struct device *dev)
70{
71 struct dma_chan_dev *chan_dev;
72
73 chan_dev = container_of(dev, typeof(*chan_dev), device);
74 return chan_dev->chan;
75}
76
77static ssize_t memcpy_count_show(struct device *dev,
78 struct device_attribute *attr, char *buf)
79{
80 struct dma_chan *chan;
81 unsigned long count = 0;
82 int i;
83 int err;
84
85 mutex_lock(&dma_list_mutex);
86 chan = dev_to_dma_chan(dev);
87 if (chan) {
88 for_each_possible_cpu(i)
89 count += per_cpu_ptr(chan->local, i)->memcpy_count;
90 err = sprintf(buf, "%lu\n", count);
91 } else
92 err = -ENODEV;
93 mutex_unlock(&dma_list_mutex);
94
95 return err;
96}
97static DEVICE_ATTR_RO(memcpy_count);
98
99static ssize_t bytes_transferred_show(struct device *dev,
100 struct device_attribute *attr, char *buf)
101{
102 struct dma_chan *chan;
103 unsigned long count = 0;
104 int i;
105 int err;
106
107 mutex_lock(&dma_list_mutex);
108 chan = dev_to_dma_chan(dev);
109 if (chan) {
110 for_each_possible_cpu(i)
111 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
112 err = sprintf(buf, "%lu\n", count);
113 } else
114 err = -ENODEV;
115 mutex_unlock(&dma_list_mutex);
116
117 return err;
118}
119static DEVICE_ATTR_RO(bytes_transferred);
120
121static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
122 char *buf)
123{
124 struct dma_chan *chan;
125 int err;
126
127 mutex_lock(&dma_list_mutex);
128 chan = dev_to_dma_chan(dev);
129 if (chan)
130 err = sprintf(buf, "%d\n", chan->client_count);
131 else
132 err = -ENODEV;
133 mutex_unlock(&dma_list_mutex);
134
135 return err;
136}
137static DEVICE_ATTR_RO(in_use);
138
139static struct attribute *dma_dev_attrs[] = {
140 &dev_attr_memcpy_count.attr,
141 &dev_attr_bytes_transferred.attr,
142 &dev_attr_in_use.attr,
143 NULL,
144};
145ATTRIBUTE_GROUPS(dma_dev);
146
147static void chan_dev_release(struct device *dev)
148{
149 struct dma_chan_dev *chan_dev;
150
151 chan_dev = container_of(dev, typeof(*chan_dev), device);
152 if (atomic_dec_and_test(chan_dev->idr_ref)) {
153 ida_free(&dma_ida, chan_dev->dev_id);
154 kfree(chan_dev->idr_ref);
155 }
156 kfree(chan_dev);
157}
158
159static struct class dma_devclass = {
160 .name = "dma",
161 .dev_groups = dma_dev_groups,
162 .dev_release = chan_dev_release,
163};
164
165/* --- client and device registration --- */
166
167#define dma_device_satisfies_mask(device, mask) \
168 __dma_device_satisfies_mask((device), &(mask))
169static int
170__dma_device_satisfies_mask(struct dma_device *device,
171 const dma_cap_mask_t *want)
172{
173 dma_cap_mask_t has;
174
175 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
176 DMA_TX_TYPE_END);
177 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
178}
179
180static struct module *dma_chan_to_owner(struct dma_chan *chan)
181{
182 return chan->device->dev->driver->owner;
183}
184
185/**
186 * balance_ref_count - catch up the channel reference count
187 * @chan - channel to balance ->client_count versus dmaengine_ref_count
188 *
189 * balance_ref_count must be called under dma_list_mutex
190 */
191static void balance_ref_count(struct dma_chan *chan)
192{
193 struct module *owner = dma_chan_to_owner(chan);
194
195 while (chan->client_count < dmaengine_ref_count) {
196 __module_get(owner);
197 chan->client_count++;
198 }
199}
200
201/**
202 * dma_chan_get - try to grab a dma channel's parent driver module
203 * @chan - channel to grab
204 *
205 * Must be called under dma_list_mutex
206 */
207static int dma_chan_get(struct dma_chan *chan)
208{
209 struct module *owner = dma_chan_to_owner(chan);
210 int ret;
211
212 /* The channel is already in use, update client count */
213 if (chan->client_count) {
214 __module_get(owner);
215 goto out;
216 }
217
218 if (!try_module_get(owner))
219 return -ENODEV;
220
221 /* allocate upon first client reference */
222 if (chan->device->device_alloc_chan_resources) {
223 ret = chan->device->device_alloc_chan_resources(chan);
224 if (ret < 0)
225 goto err_out;
226 }
227
228 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
229 balance_ref_count(chan);
230
231out:
232 chan->client_count++;
233 return 0;
234
235err_out:
236 module_put(owner);
237 return ret;
238}
239
240/**
241 * dma_chan_put - drop a reference to a dma channel's parent driver module
242 * @chan - channel to release
243 *
244 * Must be called under dma_list_mutex
245 */
246static void dma_chan_put(struct dma_chan *chan)
247{
248 /* This channel is not in use, bail out */
249 if (!chan->client_count)
250 return;
251
252 chan->client_count--;
253 module_put(dma_chan_to_owner(chan));
254
255 /* This channel is not in use anymore, free it */
256 if (!chan->client_count && chan->device->device_free_chan_resources) {
257 /* Make sure all operations have completed */
258 dmaengine_synchronize(chan);
259 chan->device->device_free_chan_resources(chan);
260 }
261
262 /* If the channel is used via a DMA request router, free the mapping */
263 if (chan->router && chan->router->route_free) {
264 chan->router->route_free(chan->router->dev, chan->route_data);
265 chan->router = NULL;
266 chan->route_data = NULL;
267 }
268}
269
270enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
271{
272 enum dma_status status;
273 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
274
275 dma_async_issue_pending(chan);
276 do {
277 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
278 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
279 dev_err(chan->device->dev, "%s: timeout!\n", __func__);
280 return DMA_ERROR;
281 }
282 if (status != DMA_IN_PROGRESS)
283 break;
284 cpu_relax();
285 } while (1);
286
287 return status;
288}
289EXPORT_SYMBOL(dma_sync_wait);
290
291/**
292 * dma_cap_mask_all - enable iteration over all operation types
293 */
294static dma_cap_mask_t dma_cap_mask_all;
295
296/**
297 * dma_chan_tbl_ent - tracks channel allocations per core/operation
298 * @chan - associated channel for this entry
299 */
300struct dma_chan_tbl_ent {
301 struct dma_chan *chan;
302};
303
304/**
305 * channel_table - percpu lookup table for memory-to-memory offload providers
306 */
307static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
308
309static int __init dma_channel_table_init(void)
310{
311 enum dma_transaction_type cap;
312 int err = 0;
313
314 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
315
316 /* 'interrupt', 'private', and 'slave' are channel capabilities,
317 * but are not associated with an operation so they do not need
318 * an entry in the channel_table
319 */
320 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
321 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
322 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
323
324 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
325 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
326 if (!channel_table[cap]) {
327 err = -ENOMEM;
328 break;
329 }
330 }
331
332 if (err) {
333 pr_err("initialization failure\n");
334 for_each_dma_cap_mask(cap, dma_cap_mask_all)
335 free_percpu(channel_table[cap]);
336 }
337
338 return err;
339}
340arch_initcall(dma_channel_table_init);
341
342/**
343 * dma_find_channel - find a channel to carry out the operation
344 * @tx_type: transaction type
345 */
346struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
347{
348 return this_cpu_read(channel_table[tx_type]->chan);
349}
350EXPORT_SYMBOL(dma_find_channel);
351
352/**
353 * dma_issue_pending_all - flush all pending operations across all channels
354 */
355void dma_issue_pending_all(void)
356{
357 struct dma_device *device;
358 struct dma_chan *chan;
359
360 rcu_read_lock();
361 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
362 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
363 continue;
364 list_for_each_entry(chan, &device->channels, device_node)
365 if (chan->client_count)
366 device->device_issue_pending(chan);
367 }
368 rcu_read_unlock();
369}
370EXPORT_SYMBOL(dma_issue_pending_all);
371
372/**
373 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
374 */
375static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
376{
377 int node = dev_to_node(chan->device->dev);
378 return node == NUMA_NO_NODE ||
379 cpumask_test_cpu(cpu, cpumask_of_node(node));
380}

/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. Otherwise, cpu is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
{
	struct dma_device *device;

	if (!chan || !caps)
		return -EINVAL;

	device = chan->device;

	/* check if the channel supports slave transactions */
	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
		return -ENXIO;

	/*
	 * Check whether the device uses the generic slave capability
	 * reporting; if it does not, it does not support any kind of
	 * slave capability reporting and we cannot fill in @caps.
	 */
	if (!device->directions)
		return -ENXIO;

	caps->src_addr_widths = device->src_addr_widths;
	caps->dst_addr_widths = device->dst_addr_widths;
	caps->directions = device->directions;
	caps->max_burst = device->max_burst;
	caps->residue_granularity = device->residue_granularity;
	caps->descriptor_reuse = device->descriptor_reuse;
	caps->cmd_pause = !!device->device_pause;
	caps->cmd_resume = !!device->device_resume;
	caps->cmd_terminate = !!device->device_terminate_all;

	return 0;
}
EXPORT_SYMBOL_GPL(dma_get_slave_caps);
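
/*
 * Illustrative sketch: a slave client can query these capabilities before
 * building its dma_slave_config, e.g. to pick a bus width the controller
 * actually supports. The surrounding client context (chan, cfg) is assumed.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
 */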

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !__dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback that decides whether an available channel is suitable
 * @fn_param: opaque parameter to pass to dma_filter_fn()
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
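
/*
 * Illustrative sketch: a client without DT/ACPI bindings can request an
 * exclusive channel by capability mask plus an optional filter callback.
 * The filter function and the my_dev pointer below are hypothetical.
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev->parent == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = __dma_request_channel(&mask, my_filter, my_dev, NULL);
 */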

static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present, get the slave info from it */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If the device was enumerated by ACPI, get the slave info from ACPI */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (chan) {
		/* Valid channel found or requester needs to be deferred */
		if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
			return chan;
	}

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_GPL(dma_request_chan);
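
/*
 * Illustrative sketch: a typical slave driver probe() requests its channel by
 * name and propagates the error code, which may be -EPROBE_DEFER so that
 * probing is retried once the DMA provider appears. "rx" is a hypothetical
 * channel name.
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);	// may be -EPROBE_DEFER
 */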

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_chan(dev, name);

	if (IS_ERR(ch))
		return NULL;

	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);
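
/*
 * Illustrative sketch: channels handed out by the request helpers above are
 * exclusive and must be returned with dma_release_channel(), typically from
 * the client's remove() or error path. Error handling is elided.
 *
 *	dma_cap_mask_t mask;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (!IS_ERR(chan)) {
 *		... use chan exclusively ...
 *		dma_release_channel(chan);
 *	}
 */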

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting, we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}

/**
 * dma_async_device_register - register a DMA device with the framework
 * @device: pointer to &struct dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMA device must have dev\n");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}

	if (!chancnt) {
		dev_err(device->dev, "%s: device has no channels!\n", __func__);
		rc = -ENODEV;
		goto err_out;
	}

	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		ida_free(&dma_ida, device->dev_id);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
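
/*
 * Illustrative provider-side sketch: before registering, a DMA driver fills
 * in the capabilities it claims, the matching prep/control callbacks and its
 * channel list. Everything prefixed with foo_ is hypothetical driver code.
 *
 *	INIT_LIST_HEAD(&foo->dma_dev.channels);
 *	list_add_tail(&foo->chan.device_node, &foo->dma_dev.channels);
 *	dma_cap_set(DMA_MEMCPY, foo->dma_dev.cap_mask);
 *	foo->dma_dev.device_prep_dma_memcpy = foo_prep_memcpy;
 *	foo->dma_dev.device_tx_status = foo_tx_status;
 *	foo->dma_dev.device_issue_pending = foo_issue_pending;
 *	foo->dma_dev.dev = &pdev->dev;
 *	ret = dma_async_device_register(&foo->dma_dev);
 */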

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: pointer to &struct dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it from being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - managed version of dma_async_device_register()
 * @device: pointer to &struct dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
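
/*
 * Illustrative sketch: with the managed variant a provider's probe() needs no
 * matching unregister call in its remove() path; devres undoes the
 * registration on driver detach. foo_ names are hypothetical.
 *
 *	ret = dmaenginem_async_device_register(&foo->dma_dev);
 *	if (ret)
 *		return ret;
 *	// no dma_async_device_unregister() needed in foo_remove()
 */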

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
#endif
	default:
		BUG();
		return NULL;
	}
}

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
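
/*
 * Illustrative sketch: a caller that maps pages for a copy attaches them to
 * an unmap descriptor so they are unmapped exactly once, when the last
 * reference is dropped. Error handling and the src_pg/dst_pg/len variables
 * are assumed from the surrounding context.
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	unmap->len = len;
 *	unmap->to_cnt = 1;
 *	unmap->addr[0] = dma_map_page(dev, src_pg, 0, len, DMA_TO_DEVICE);
 *	unmap->from_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_pg, 0, len, DMA_FROM_DEVICE);
 *	... hand unmap to the descriptor, then ...
 *	dmaengine_unmap_put(unmap);	// drop the initial reference
 */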

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/**
 * dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
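
/*
 * Illustrative sketch: a provider's descriptor-completion path (for example
 * its tasklet) lets dependent async_tx operations make progress by running
 * this helper on every completed descriptor. desc is a hypothetical driver
 * descriptor wrapping a dma_async_tx_descriptor called txd.
 *
 *	dma_cookie_complete(&desc->txd);
 *	dma_descriptor_unmap(&desc->txd);
 *	dmaengine_desc_get_callback_invoke(&desc->txd, NULL);
 *	dma_run_dependencies(&desc->txd);
 */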

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);