/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59
 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this capability.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
 *
 * LOCKING:
 *
 * The subsystem keeps a global list of dma_device structs; it is protected
 * by a mutex, dma_list_mutex.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it has need for an exclusive channel it can
 * call dma_request_channel(). Once a channel is allocated a reference is
 * taken against its corresponding driver to disable removal.
 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
 *
 * See Documentation/dmaengine.txt for more details
 */
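
/*
 * Example (illustrative, not part of the original file): an opportunistic
 * client of the public channel pool typically looks like the sketch below.
 * dmaengine_get()/dmaengine_put() bracket the client's interest in the
 * subsystem, and dma_find_channel() returns this cpu's channel for the
 * requested operation type, or NULL if none is available:
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	...
 *	dmaengine_put();
 */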

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/dma-mapping.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/hardirq.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/jiffies.h>
#include <linux/rculist.h>
#include <linux/idr.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/acpi_dma.h>
#include <linux/of_dma.h>
#include <linux/mempool.h>

static DEFINE_MUTEX(dma_list_mutex);
static DEFINE_IDR(dma_idr);
static LIST_HEAD(dma_device_list);
static long dmaengine_ref_count;

/* --- sysfs implementation --- */

/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 *
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	return chan_dev->chan;
}

static ssize_t memcpy_count_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(memcpy_count);

static ssize_t bytes_transferred_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->bytes_transferred;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(bytes_transferred);

static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan)
		err = sprintf(buf, "%d\n", chan->client_count);
	else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
static DEVICE_ATTR_RO(in_use);

static struct attribute *dma_dev_attrs[] = {
	&dev_attr_memcpy_count.attr,
	&dev_attr_bytes_transferred.attr,
	&dev_attr_in_use.attr,
	NULL,
};
ATTRIBUTE_GROUPS(dma_dev);

static void chan_dev_release(struct device *dev)
{
	struct dma_chan_dev *chan_dev;

	chan_dev = container_of(dev, typeof(*chan_dev), device);
	if (atomic_dec_and_test(chan_dev->idr_ref)) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, chan_dev->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(chan_dev->idr_ref);
	}
	kfree(chan_dev);
}

static struct class dma_devclass = {
	.name		= "dma",
	.dev_groups	= dma_dev_groups,
	.dev_release	= chan_dev_release,
};

/* --- client and device registration --- */

#define dma_device_satisfies_mask(device, mask) \
	__dma_device_satisfies_mask((device), &(mask))
static int
__dma_device_satisfies_mask(struct dma_device *device,
			    const dma_cap_mask_t *want)
{
	dma_cap_mask_t has;

	bitmap_and(has.bits, want->bits, device->cap_mask.bits,
		   DMA_TX_TYPE_END);
	return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
}

static struct module *dma_chan_to_owner(struct dma_chan *chan)
{
	return chan->device->dev->driver->owner;
}

/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 *
 * Must be called under dma_list_mutex.
 */
static void balance_ref_count(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);

	while (chan->client_count < dmaengine_ref_count) {
		__module_get(owner);
		chan->client_count++;
	}
}

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex.
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}

/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 *
 * Must be called under dma_list_mutex.
 */
static void dma_chan_put(struct dma_chan *chan)
{
	if (!chan->client_count)
		return; /* this channel failed alloc_chan_resources */
	chan->client_count--;
	module_put(dma_chan_to_owner(chan));
	if (chan->client_count == 0)
		chan->device->device_free_chan_resources(chan);
}

enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
{
	enum dma_status status;
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	dma_async_issue_pending(chan);
	do {
		status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s: timeout!\n", __func__);
			return DMA_ERROR;
		}
		if (status != DMA_IN_PROGRESS)
			break;
		cpu_relax();
	} while (1);

	return status;
}
EXPORT_SYMBOL(dma_sync_wait);

/**
 * dma_cap_mask_all - enable iteration over all operation types
 */
static dma_cap_mask_t dma_cap_mask_all;

/**
 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */
struct dma_chan_tbl_ent {
	struct dma_chan *chan;
};

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */
static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];

static int __init dma_channel_table_init(void)
{
	enum dma_transaction_type cap;
	int err = 0;

	bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);

	/* 'interrupt', 'private', and 'slave' are channel capabilities,
	 * but are not associated with an operation so they do not need
	 * an entry in the channel_table
	 */
	clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
	clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
	clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);

	for_each_dma_cap_mask(cap, dma_cap_mask_all) {
		channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
		if (!channel_table[cap]) {
			err = -ENOMEM;
			break;
		}
	}

	if (err) {
		pr_err("initialization failure\n");
		for_each_dma_cap_mask(cap, dma_cap_mask_all)
			if (channel_table[cap])
				free_percpu(channel_table[cap]);
	}

	return err;
}
arch_initcall(dma_channel_table_init);

/**
 * dma_find_channel - find a channel to carry out the operation
 * @tx_type: transaction type
 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
{
	return this_cpu_read(channel_table[tx_type]->chan);
}
EXPORT_SYMBOL(dma_find_channel);

/*
 * net_dma_find_channel - find a channel for net_dma
 * net_dma has alignment requirements
 */
struct dma_chan *net_dma_find_channel(void)
{
	struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);

	if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
		return NULL;

	return chan;
}
EXPORT_SYMBOL(net_dma_find_channel);

/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
void dma_issue_pending_all(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	rcu_read_lock();
	list_for_each_entry_rcu(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			if (chan->client_count)
				device->device_issue_pending(chan);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL(dma_issue_pending_all);

/**
 * dma_chan_is_local - checks if the channel is in the same NUMA node as the CPU
 * @chan: channel to test
 * @cpu: CPU index the channel should be close to
 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
{
	int node = dev_to_node(chan->device->dev);

	return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
}

/**
 * min_chan - returns the channel with min count and in the same NUMA node as the CPU
 * @cap: capability to match
 * @cpu: CPU index which the channel should be close to
 *
 * If some channels are close to the given CPU, the one with the lowest
 * reference count is returned. Otherwise, the CPU is ignored and only the
 * reference count is taken into account.
 * Must be called under dma_list_mutex.
 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
{
	struct dma_device *device;
	struct dma_chan *chan;
	struct dma_chan *min = NULL;
	struct dma_chan *localmin = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (!dma_has_cap(cap, device->cap_mask) ||
		    dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			if (!chan->client_count)
				continue;
			if (!min || chan->table_count < min->table_count)
				min = chan;

			if (dma_chan_is_local(chan, cpu))
				if (!localmin ||
				    chan->table_count < localmin->table_count)
					localmin = chan;
		}
	}

	chan = localmin ? localmin : min;

	if (chan)
		chan->table_count++;

	return chan;
}

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for CPU isolation (each CPU gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
static void dma_channel_rebalance(void)
{
	struct dma_chan *chan;
	struct dma_device *device;
	int cpu;
	int cap;

	/* undo the last distribution */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_possible_cpu(cpu)
			per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;

	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			chan->table_count = 0;
	}

	/* don't populate the channel_table if no clients are available */
	if (!dmaengine_ref_count)
		return;

	/* redistribute available channels */
	for_each_dma_cap_mask(cap, dma_cap_mask_all)
		for_each_online_cpu(cpu) {
			chan = min_chan(cap, cpu);
			per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
		}
}

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (!__dma_device_satisfies_mask(dev, mask)) {
		pr_debug("%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			pr_debug("%s: %s busy\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			pr_debug("%s: %s filter said false\n",
				 __func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		err = dma_chan_get(chan);
		if (err)
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;
	int err;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = private_candidate(&mask, device, NULL, NULL);
	if (chan) {
		err = dma_chan_get(chan);
		if (err) {
			pr_debug("%s: failed to get %s: (%d)\n",
				 __func__, dma_chan_name(chan), err);
			chan = NULL;
		}
	}

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to disposition available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;
	int err;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		chan = private_candidate(mask, device, fn, fn_param);
		if (chan) {
			/* Found a suitable channel, try to grab, prep, and
			 * return it. We first set DMA_PRIVATE to disable
			 * balance_ref_count as this channel will not be
			 * published in the general-purpose allocator
			 */
			dma_cap_set(DMA_PRIVATE, device->cap_mask);
			device->privatecnt++;
			err = dma_chan_get(chan);

			if (err == -ENODEV) {
				pr_debug("%s: %s module removed\n",
					 __func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
			else
				break;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
			chan = NULL;
		}
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
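
/*
 * Example (illustrative): a client needing a dedicated channel on a specific
 * device can pass a filter function via the dma_request_channel() wrapper
 * around __dma_request_channel(); "my_filter" and "my_dev" are hypothetical
 * names, and the filter's param pointer is entirely client-defined:
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 *	if (chan) {
 *		... use the channel, then ...
 *		dma_release_channel(chan);
 *	}
 */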

/**
 * dma_request_slave_channel_reason - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_slave_channel_reason(struct device *dev,
						  const char *name)
{
	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		return of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (ACPI_HANDLE(dev))
		return acpi_dma_request_slave_chan_by_name(dev, name);

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel_reason);

/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
					   const char *name)
{
	struct dma_chan *ch = dma_request_slave_channel_reason(dev, name);

	if (IS_ERR(ch))
		return NULL;
	return ch;
}
EXPORT_SYMBOL_GPL(dma_request_slave_channel);
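
/*
 * Example (illustrative): a slave-DMA consumer, e.g. a SPI or UART driver,
 * requests its channel by name at probe time; "tx" is a hypothetical name
 * that must match a channel in the device's DT or ACPI description:
 *
 *	chan = dma_request_slave_channel(&pdev->dev, "tx");
 *	if (!chan)
 *		return -ENODEV;	(or fall back to PIO)
 *	...
 *	dma_release_channel(chan);
 */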

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels
	 * waiting we need to rebalance to get those channels
	 * incorporated into the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry(device, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc;

	mutex_lock(&dma_list_mutex);

	rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
	if (rc >= 0)
		device->dev_id = rc;

	mutex_unlock(&dma_list_mutex);
	return rc < 0 ? rc : 0;
}

/**
 * dma_async_device_register - registers DMA devices found
 * @device: &dma_device
 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
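
/*
 * Example (illustrative): a minimal sketch of driver-side registration,
 * assuming a hypothetical driver structure my_dma that embeds a dma_device
 * and has already added its channels to my_dma->dma.channels; all my_*
 * names are placeholders for the driver's own callbacks:
 *
 *	dma_cap_set(DMA_MEMCPY, my_dma->dma.cap_mask);
 *	my_dma->dma.device_alloc_chan_resources = my_alloc_chan_resources;
 *	my_dma->dma.device_free_chan_resources = my_free_chan_resources;
 *	my_dma->dma.device_prep_dma_memcpy = my_prep_dma_memcpy;
 *	my_dma->dma.device_tx_status = my_tx_status;
 *	my_dma->dma.device_issue_pending = my_issue_pending;
 *	my_dma->dma.dev = &pdev->dev;
 *	err = dma_async_device_register(&my_dma->dma);
 */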

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
	default:
		BUG();
		return NULL;
	}
}
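
/*
 * For example, assuming CONFIG_DMA_ENGINE_RAID=y so that all four pools
 * exist: a request for nr = 9 addresses gives get_count_order(9) = 4, which
 * selects unmap_pool[1] ("dmaengine-unmap-16"), i.e. the smallest pool whose
 * entries have room for at least nr dma_addr_t slots.
 */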

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		if (p->pool)
			mempool_destroy(p->pool);
		p->pool = NULL;
		if (p->cache)
			kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
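
/*
 * Example (illustrative): the typical unmap-data lifecycle, as used by
 * dma_async_memcpy_pg_to_pg() below. dma_set_unmap() takes an extra kref on
 * behalf of the descriptor, so the caller drops its own reference with
 * dmaengine_unmap_put() once the descriptor has been submitted:
 *
 *	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
 *	unmap->to_cnt = 1;
 *	unmap->from_cnt = 1;
 *	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
 *				      DMA_TO_DEVICE);
 *	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
 *				      DMA_FROM_DEVICE);
 *	unmap->len = len;
 *	... prep the descriptor, then dma_set_unmap(tx, unmap) ...
 *	dmaengine_unmap_put(unmap);
 */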

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	struct dmaengine_unmap_data *unmap;
	dma_cookie_t cookie;
	unsigned long flags;

	unmap = dmaengine_get_unmap_data(dev->dev, 2, GFP_NOWAIT);
	if (!unmap)
		return -ENOMEM;

	unmap->to_cnt = 1;
	unmap->from_cnt = 1;
	unmap->addr[0] = dma_map_page(dev->dev, src_pg, src_off, len,
				      DMA_TO_DEVICE);
	unmap->addr[1] = dma_map_page(dev->dev, dest_pg, dest_off, len,
				      DMA_FROM_DEVICE);
	unmap->len = len;
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0],
					 len, flags);

	if (!tx) {
		dmaengine_unmap_put(unmap);
		return -ENOMEM;
	}

	dma_set_unmap(tx, unmap);
	cookie = tx->tx_submit(tx);
	dmaengine_unmap_put(unmap);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			    void *src, size_t len)
{
	return dma_async_memcpy_pg_to_pg(chan, virt_to_page(dest),
					 (unsigned long) dest & ~PAGE_MASK,
					 virt_to_page(src),
					 (unsigned long) src & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
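
/*
 * Example (illustrative): pairing the copy helpers above with
 * dma_sync_wait() gives a blocking copy; dest, src and len are assumed to
 * reference resident kernel memory:
 *
 *	cookie = dma_async_memcpy_buf_to_buf(chan, dest, src, len);
 *	if (cookie < 0)
 *		return cookie;
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		... handle the error ...
 */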

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			   unsigned int offset, void *kdata, size_t len)
{
	return dma_async_memcpy_pg_to_pg(chan, page, offset,
					 virt_to_page(kdata),
					 (unsigned long) kdata & ~PAGE_MASK, len);
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/* dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting up until a channel switch is detected
	 * in that case we will be called again as a result of
	 * processing the interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
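
/*
 * Example (illustrative): a driver's descriptor clean-up path typically ends
 * with the calls below so that queued async_tx dependencies get submitted;
 * "my_desc" is a hypothetical driver descriptor embedding a
 * dma_async_tx_descriptor named txd:
 *
 *	dma_cookie_complete(&my_desc->txd);
 *	dma_run_dependencies(&my_desc->txd);
 */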

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
1268
1269
1// SPDX-License-Identifier: GPL-2.0-or-later
2/*
3 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
4 */
5
6/*
7 * This code implements the DMA subsystem. It provides a HW-neutral interface
8 * for other kernel code to use asynchronous memory copy capabilities,
9 * if present, and allows different HW DMA drivers to register as providing
10 * this capability.
11 *
12 * Due to the fact we are accelerating what is already a relatively fast
13 * operation, the code goes to great lengths to avoid additional overhead,
14 * such as locking.
15 *
16 * LOCKING:
17 *
18 * The subsystem keeps a global list of dma_device structs it is protected by a
19 * mutex, dma_list_mutex.
20 *
21 * A subsystem can get access to a channel by calling dmaengine_get() followed
22 * by dma_find_channel(), or if it has need for an exclusive channel it can call
23 * dma_request_channel(). Once a channel is allocated a reference is taken
24 * against its corresponding driver to disable removal.
25 *
26 * Each device has a channels list, which runs unlocked but is never modified
27 * once the device is registered, it's just setup by the driver.
28 *
29 * See Documentation/driver-api/dmaengine for more details
30 */
31
32#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34#include <linux/platform_device.h>
35#include <linux/dma-mapping.h>
36#include <linux/init.h>
37#include <linux/module.h>
38#include <linux/mm.h>
39#include <linux/device.h>
40#include <linux/dmaengine.h>
41#include <linux/hardirq.h>
42#include <linux/spinlock.h>
43#include <linux/percpu.h>
44#include <linux/rcupdate.h>
45#include <linux/mutex.h>
46#include <linux/jiffies.h>
47#include <linux/rculist.h>
48#include <linux/idr.h>
49#include <linux/slab.h>
50#include <linux/acpi.h>
51#include <linux/acpi_dma.h>
52#include <linux/of_dma.h>
53#include <linux/mempool.h>
54#include <linux/numa.h>
55
56#include "dmaengine.h"
57
58static DEFINE_MUTEX(dma_list_mutex);
59static DEFINE_IDA(dma_ida);
60static LIST_HEAD(dma_device_list);
61static long dmaengine_ref_count;
62
63/* --- debugfs implementation --- */
64#ifdef CONFIG_DEBUG_FS
65#include <linux/debugfs.h>
66
67static struct dentry *rootdir;
68
69static void dmaengine_debug_register(struct dma_device *dma_dev)
70{
71 dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev),
72 rootdir);
73 if (IS_ERR(dma_dev->dbg_dev_root))
74 dma_dev->dbg_dev_root = NULL;
75}
76
77static void dmaengine_debug_unregister(struct dma_device *dma_dev)
78{
79 debugfs_remove_recursive(dma_dev->dbg_dev_root);
80 dma_dev->dbg_dev_root = NULL;
81}
82
83static void dmaengine_dbg_summary_show(struct seq_file *s,
84 struct dma_device *dma_dev)
85{
86 struct dma_chan *chan;
87
88 list_for_each_entry(chan, &dma_dev->channels, device_node) {
89 if (chan->client_count) {
90 seq_printf(s, " %-13s| %s", dma_chan_name(chan),
91 chan->dbg_client_name ?: "in-use");
92
93 if (chan->router)
94 seq_printf(s, " (via router: %s)\n",
95 dev_name(chan->router->dev));
96 else
97 seq_puts(s, "\n");
98 }
99 }
100}
101
102static int dmaengine_summary_show(struct seq_file *s, void *data)
103{
104 struct dma_device *dma_dev = NULL;
105
106 mutex_lock(&dma_list_mutex);
107 list_for_each_entry(dma_dev, &dma_device_list, global_node) {
108 seq_printf(s, "dma%d (%s): number of channels: %u\n",
109 dma_dev->dev_id, dev_name(dma_dev->dev),
110 dma_dev->chancnt);
111
112 if (dma_dev->dbg_summary_show)
113 dma_dev->dbg_summary_show(s, dma_dev);
114 else
115 dmaengine_dbg_summary_show(s, dma_dev);
116
117 if (!list_is_last(&dma_dev->global_node, &dma_device_list))
118 seq_puts(s, "\n");
119 }
120 mutex_unlock(&dma_list_mutex);
121
122 return 0;
123}
124DEFINE_SHOW_ATTRIBUTE(dmaengine_summary);
125
126static void __init dmaengine_debugfs_init(void)
127{
128 rootdir = debugfs_create_dir("dmaengine", NULL);
129
130 /* /sys/kernel/debug/dmaengine/summary */
131 debugfs_create_file("summary", 0444, rootdir, NULL,
132 &dmaengine_summary_fops);
133}
134#else
135static inline void dmaengine_debugfs_init(void) { }
136static inline int dmaengine_debug_register(struct dma_device *dma_dev)
137{
138 return 0;
139}
140
141static inline void dmaengine_debug_unregister(struct dma_device *dma_dev) { }
142#endif /* DEBUG_FS */
143
144/* --- sysfs implementation --- */
145
146#define DMA_SLAVE_NAME "slave"
147
148/**
149 * dev_to_dma_chan - convert a device pointer to its sysfs container object
150 * @dev: device node
151 *
152 * Must be called under dma_list_mutex.
153 */
154static struct dma_chan *dev_to_dma_chan(struct device *dev)
155{
156 struct dma_chan_dev *chan_dev;
157
158 chan_dev = container_of(dev, typeof(*chan_dev), device);
159 return chan_dev->chan;
160}
161
162static ssize_t memcpy_count_show(struct device *dev,
163 struct device_attribute *attr, char *buf)
164{
165 struct dma_chan *chan;
166 unsigned long count = 0;
167 int i;
168 int err;
169
170 mutex_lock(&dma_list_mutex);
171 chan = dev_to_dma_chan(dev);
172 if (chan) {
173 for_each_possible_cpu(i)
174 count += per_cpu_ptr(chan->local, i)->memcpy_count;
175 err = sprintf(buf, "%lu\n", count);
176 } else
177 err = -ENODEV;
178 mutex_unlock(&dma_list_mutex);
179
180 return err;
181}
182static DEVICE_ATTR_RO(memcpy_count);
183
184static ssize_t bytes_transferred_show(struct device *dev,
185 struct device_attribute *attr, char *buf)
186{
187 struct dma_chan *chan;
188 unsigned long count = 0;
189 int i;
190 int err;
191
192 mutex_lock(&dma_list_mutex);
193 chan = dev_to_dma_chan(dev);
194 if (chan) {
195 for_each_possible_cpu(i)
196 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
197 err = sprintf(buf, "%lu\n", count);
198 } else
199 err = -ENODEV;
200 mutex_unlock(&dma_list_mutex);
201
202 return err;
203}
204static DEVICE_ATTR_RO(bytes_transferred);
205
206static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
207 char *buf)
208{
209 struct dma_chan *chan;
210 int err;
211
212 mutex_lock(&dma_list_mutex);
213 chan = dev_to_dma_chan(dev);
214 if (chan)
215 err = sprintf(buf, "%d\n", chan->client_count);
216 else
217 err = -ENODEV;
218 mutex_unlock(&dma_list_mutex);
219
220 return err;
221}
222static DEVICE_ATTR_RO(in_use);
223
224static struct attribute *dma_dev_attrs[] = {
225 &dev_attr_memcpy_count.attr,
226 &dev_attr_bytes_transferred.attr,
227 &dev_attr_in_use.attr,
228 NULL,
229};
230ATTRIBUTE_GROUPS(dma_dev);
231
232static void chan_dev_release(struct device *dev)
233{
234 struct dma_chan_dev *chan_dev;
235
236 chan_dev = container_of(dev, typeof(*chan_dev), device);
237 kfree(chan_dev);
238}
239
240static struct class dma_devclass = {
241 .name = "dma",
242 .dev_groups = dma_dev_groups,
243 .dev_release = chan_dev_release,
244};
245
246/* --- client and device registration --- */
247
248/* enable iteration over all operation types */
249static dma_cap_mask_t dma_cap_mask_all;
250
251/**
252 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
253 * @chan: associated channel for this entry
254 */
255struct dma_chan_tbl_ent {
256 struct dma_chan *chan;
257};
258
259/* percpu lookup table for memory-to-memory offload providers */
260static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
261
262static int __init dma_channel_table_init(void)
263{
264 enum dma_transaction_type cap;
265 int err = 0;
266
267 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
268
269 /* 'interrupt', 'private', and 'slave' are channel capabilities,
270 * but are not associated with an operation so they do not need
271 * an entry in the channel_table
272 */
273 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
274 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
275 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
276
277 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
278 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
279 if (!channel_table[cap]) {
280 err = -ENOMEM;
281 break;
282 }
283 }
284
285 if (err) {
286 pr_err("dmaengine dma_channel_table_init failure: %d\n", err);
287 for_each_dma_cap_mask(cap, dma_cap_mask_all)
288 free_percpu(channel_table[cap]);
289 }
290
291 return err;
292}
293arch_initcall(dma_channel_table_init);
294
295/**
296 * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
297 * @chan: DMA channel to test
298 * @cpu: CPU index which the channel should be close to
299 *
300 * Returns true if the channel is in the same NUMA-node as the CPU.
301 */
302static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
303{
304 int node = dev_to_node(chan->device->dev);
305 return node == NUMA_NO_NODE ||
306 cpumask_test_cpu(cpu, cpumask_of_node(node));
307}
308
309/**
310 * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
311 * @cap: capability to match
312 * @cpu: CPU index which the channel should be close to
313 *
314 * If some channels are close to the given CPU, the one with the lowest
315 * reference count is returned. Otherwise, CPU is ignored and only the
316 * reference count is taken into account.
317 *
318 * Must be called under dma_list_mutex.
319 */
320static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
321{
322 struct dma_device *device;
323 struct dma_chan *chan;
324 struct dma_chan *min = NULL;
325 struct dma_chan *localmin = NULL;
326
327 list_for_each_entry(device, &dma_device_list, global_node) {
328 if (!dma_has_cap(cap, device->cap_mask) ||
329 dma_has_cap(DMA_PRIVATE, device->cap_mask))
330 continue;
331 list_for_each_entry(chan, &device->channels, device_node) {
332 if (!chan->client_count)
333 continue;
334 if (!min || chan->table_count < min->table_count)
335 min = chan;
336
337 if (dma_chan_is_local(chan, cpu))
338 if (!localmin ||
339 chan->table_count < localmin->table_count)
340 localmin = chan;
341 }
342 }
343
344 chan = localmin ? localmin : min;
345
346 if (chan)
347 chan->table_count++;
348
349 return chan;
350}
351
352/**
353 * dma_channel_rebalance - redistribute the available channels
354 *
355 * Optimize for CPU isolation (each CPU gets a dedicated channel for an
356 * operation type) in the SMP case, and operation isolation (avoid
357 * multi-tasking channels) in the non-SMP case.
358 *
359 * Must be called under dma_list_mutex.
360 */
361static void dma_channel_rebalance(void)
362{
363 struct dma_chan *chan;
364 struct dma_device *device;
365 int cpu;
366 int cap;
367
368 /* undo the last distribution */
369 for_each_dma_cap_mask(cap, dma_cap_mask_all)
370 for_each_possible_cpu(cpu)
371 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
372
373 list_for_each_entry(device, &dma_device_list, global_node) {
374 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
375 continue;
376 list_for_each_entry(chan, &device->channels, device_node)
377 chan->table_count = 0;
378 }
379
380 /* don't populate the channel_table if no clients are available */
381 if (!dmaengine_ref_count)
382 return;
383
384 /* redistribute available channels */
385 for_each_dma_cap_mask(cap, dma_cap_mask_all)
386 for_each_online_cpu(cpu) {
387 chan = min_chan(cap, cpu);
388 per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
389 }
390}
391
392static int dma_device_satisfies_mask(struct dma_device *device,
393 const dma_cap_mask_t *want)
394{
395 dma_cap_mask_t has;
396
397 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
398 DMA_TX_TYPE_END);
399 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
400}
401
402static struct module *dma_chan_to_owner(struct dma_chan *chan)
403{
404 return chan->device->owner;
405}
406
407/**
408 * balance_ref_count - catch up the channel reference count
409 * @chan: channel to balance ->client_count versus dmaengine_ref_count
410 *
411 * Must be called under dma_list_mutex.
412 */
413static void balance_ref_count(struct dma_chan *chan)
414{
415 struct module *owner = dma_chan_to_owner(chan);
416
417 while (chan->client_count < dmaengine_ref_count) {
418 __module_get(owner);
419 chan->client_count++;
420 }
421}
422
423static void dma_device_release(struct kref *ref)
424{
425 struct dma_device *device = container_of(ref, struct dma_device, ref);
426
427 list_del_rcu(&device->global_node);
428 dma_channel_rebalance();
429
430 if (device->device_release)
431 device->device_release(device);
432}
433
434static void dma_device_put(struct dma_device *device)
435{
436 lockdep_assert_held(&dma_list_mutex);
437 kref_put(&device->ref, dma_device_release);
438}
439
440/**
441 * dma_chan_get - try to grab a DMA channel's parent driver module
442 * @chan: channel to grab
443 *
444 * Must be called under dma_list_mutex.
445 */
446static int dma_chan_get(struct dma_chan *chan)
447{
448 struct module *owner = dma_chan_to_owner(chan);
449 int ret;
450
451 /* The channel is already in use, update client count */
452 if (chan->client_count) {
453 __module_get(owner);
454 goto out;
455 }
456
457 if (!try_module_get(owner))
458 return -ENODEV;
459
460 ret = kref_get_unless_zero(&chan->device->ref);
461 if (!ret) {
462 ret = -ENODEV;
463 goto module_put_out;
464 }
465
466 /* allocate upon first client reference */
467 if (chan->device->device_alloc_chan_resources) {
468 ret = chan->device->device_alloc_chan_resources(chan);
469 if (ret < 0)
470 goto err_out;
471 }
472
473 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
474 balance_ref_count(chan);
475
476out:
477 chan->client_count++;
478 return 0;
479
480err_out:
481 dma_device_put(chan->device);
482module_put_out:
483 module_put(owner);
484 return ret;
485}
486
487/**
488 * dma_chan_put - drop a reference to a DMA channel's parent driver module
489 * @chan: channel to release
490 *
491 * Must be called under dma_list_mutex.
492 */
493static void dma_chan_put(struct dma_chan *chan)
494{
495 /* This channel is not in use, bail out */
496 if (!chan->client_count)
497 return;
498
499 chan->client_count--;
500
501 /* This channel is not in use anymore, free it */
502 if (!chan->client_count && chan->device->device_free_chan_resources) {
503 /* Make sure all operations have completed */
504 dmaengine_synchronize(chan);
505 chan->device->device_free_chan_resources(chan);
506 }
507
508 /* If the channel is used via a DMA request router, free the mapping */
509 if (chan->router && chan->router->route_free) {
510 chan->router->route_free(chan->router->dev, chan->route_data);
511 chan->router = NULL;
512 chan->route_data = NULL;
513 }
514
515 dma_device_put(chan->device);
516 module_put(dma_chan_to_owner(chan));
517}
518
519enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
520{
521 enum dma_status status;
522 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
523
524 dma_async_issue_pending(chan);
525 do {
526 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
527 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
528 dev_err(chan->device->dev, "%s: timeout!\n", __func__);
529 return DMA_ERROR;
530 }
531 if (status != DMA_IN_PROGRESS)
532 break;
533 cpu_relax();
534 } while (1);
535
536 return status;
537}
538EXPORT_SYMBOL(dma_sync_wait);
539
540/**
541 * dma_find_channel - find a channel to carry out the operation
542 * @tx_type: transaction type
543 */
544struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
545{
546 return this_cpu_read(channel_table[tx_type]->chan);
547}
548EXPORT_SYMBOL(dma_find_channel);
549
550/**
551 * dma_issue_pending_all - flush all pending operations across all channels
552 */
553void dma_issue_pending_all(void)
554{
555 struct dma_device *device;
556 struct dma_chan *chan;
557
558 rcu_read_lock();
559 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
560 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
561 continue;
562 list_for_each_entry(chan, &device->channels, device_node)
563 if (chan->client_count)
564 device->device_issue_pending(chan);
565 }
566 rcu_read_unlock();
567}
568EXPORT_SYMBOL(dma_issue_pending_all);
569
570int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
571{
572 struct dma_device *device;
573
574 if (!chan || !caps)
575 return -EINVAL;
576
577 device = chan->device;
578
579 /* check if the channel supports slave transactions */
580 if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
581 test_bit(DMA_CYCLIC, device->cap_mask.bits)))
582 return -ENXIO;
583
584 /*
585 * Check whether it reports it uses the generic slave
586 * capabilities, if not, that means it doesn't support any
587 * kind of slave capabilities reporting.
588 */
589 if (!device->directions)
590 return -ENXIO;
591
592 caps->src_addr_widths = device->src_addr_widths;
593 caps->dst_addr_widths = device->dst_addr_widths;
594 caps->directions = device->directions;
595 caps->min_burst = device->min_burst;
596 caps->max_burst = device->max_burst;
597 caps->max_sg_burst = device->max_sg_burst;
598 caps->residue_granularity = device->residue_granularity;
599 caps->descriptor_reuse = device->descriptor_reuse;
600 caps->cmd_pause = !!device->device_pause;
601 caps->cmd_resume = !!device->device_resume;
602 caps->cmd_terminate = !!device->device_terminate_all;
603
604 /*
605 * DMA engine device might be configured with non-uniformly
606 * distributed slave capabilities per device channels. In this
607 * case the corresponding driver may provide the device_caps
608 * callback to override the generic capabilities with
609 * channel-specific ones.
610 */
611 if (device->device_caps)
612 device->device_caps(chan, caps);
613
614 return 0;
615}
616EXPORT_SYMBOL_GPL(dma_get_slave_caps);

static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
					  struct dma_device *dev,
					  dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan;

	if (mask && !dma_device_satisfies_mask(dev, mask)) {
		dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
		return NULL;
	}
	/* devices with multiple channels need special handling as we need to
	 * ensure that all channels are either private or public.
	 */
	if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
		list_for_each_entry(chan, &dev->channels, device_node) {
			/* some channels are already publicly allocated */
			if (chan->client_count)
				return NULL;
		}

	list_for_each_entry(chan, &dev->channels, device_node) {
		if (chan->client_count) {
			dev_dbg(dev->dev, "%s: %s busy\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		if (fn && !fn(chan, fn_param)) {
			dev_dbg(dev->dev, "%s: %s filter said false\n",
				__func__, dma_chan_name(chan));
			continue;
		}
		return chan;
	}

	return NULL;
}

static struct dma_chan *find_candidate(struct dma_device *device,
				       const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param)
{
	struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
	int err;

	if (chan) {
		/* Found a suitable channel, try to grab, prep, and return it.
		 * We first set DMA_PRIVATE to disable balance_ref_count as this
		 * channel will not be published in the general-purpose
		 * allocator.
		 */
		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);

		if (err) {
			if (err == -ENODEV) {
				dev_dbg(device->dev, "%s: %s module removed\n",
					__func__, dma_chan_name(chan));
				list_del_rcu(&device->global_node);
			} else
				dev_dbg(device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);

			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);

			chan = ERR_PTR(err);
		}
	}

	return chan ? chan : ERR_PTR(-EPROBE_DEFER);
}

/**
 * dma_get_slave_channel - try to get a specific channel exclusively
 * @chan: target channel
 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
{
	int err = -EBUSY;

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	if (chan->client_count == 0) {
		struct dma_device *device = chan->device;

		dma_cap_set(DMA_PRIVATE, device->cap_mask);
		device->privatecnt++;
		err = dma_chan_get(chan);
		if (err) {
			dev_dbg(chan->device->dev,
				"%s: failed to get %s: (%d)\n",
				__func__, dma_chan_name(chan), err);
			chan = NULL;
			if (--device->privatecnt == 0)
				dma_cap_clear(DMA_PRIVATE, device->cap_mask);
		}
	} else
		chan = NULL;

	mutex_unlock(&dma_list_mutex);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_get_slave_channel);
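
/*
 * Illustrative sketch (not part of this file): provider drivers commonly
 * call dma_get_slave_channel() from their OF translation callback. The
 * "foo" structures and the ".common" embedded channel are hypothetical.
 *
 *	static struct dma_chan *foo_of_xlate(struct of_phandle_args *spec,
 *					     struct of_dma *ofdma)
 *	{
 *		struct foo_dmadev *fdev = ofdma->of_dma_data;
 *		unsigned int id = spec->args[0];
 *
 *		if (id >= fdev->nr_channels)
 *			return NULL;
 *		return dma_get_slave_channel(&fdev->chan[id].common);
 *	}
 */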

struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
{
	dma_cap_mask_t mask;
	struct dma_chan *chan;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* lock against __dma_request_channel */
	mutex_lock(&dma_list_mutex);

	chan = find_candidate(device, &mask, NULL, NULL);

	mutex_unlock(&dma_list_mutex);

	return IS_ERR(chan) ? NULL : chan;
}
EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);

/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback to filter available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn()
 * @np: device node to look for DMA channels
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
				       dma_filter_fn fn, void *fn_param,
				       struct device_node *np)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan = NULL;

	/* Find a channel */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		/* Finds a DMA controller with matching device node */
		if (np && device->dev->of_node && np != device->dev->of_node)
			continue;

		chan = find_candidate(device, mask, fn, fn_param);
		if (!IS_ERR(chan))
			break;

		chan = NULL;
	}
	mutex_unlock(&dma_list_mutex);

	pr_debug("%s: %s (%s)\n",
		 __func__,
		 chan ? "success" : "fail",
		 chan ? dma_chan_name(chan) : NULL);

	return chan;
}
EXPORT_SYMBOL_GPL(__dma_request_channel);
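
/*
 * Illustrative sketch (not part of this file): requesting an exclusive
 * channel through the dma_request_channel() wrapper from dmaengine.h,
 * which expands to __dma_request_channel(). "foo_filter" and "foo_dev"
 * are hypothetical; the filter accepts only channels whose provider is
 * the device passed as the opaque parameter.
 *
 *	static bool foo_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, foo_filter, foo_dev);
 */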

static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
						    const char *name,
						    struct device *dev)
{
	int i;

	if (!device->filter.mapcnt)
		return NULL;

	for (i = 0; i < device->filter.mapcnt; i++) {
		const struct dma_slave_map *map = &device->filter.map[i];

		if (!strcmp(map->devname, dev_name(dev)) &&
		    !strcmp(map->slave, name))
			return map;
	}

	return NULL;
}
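
/*
 * Illustrative sketch (not part of this file): the slave maps matched
 * above are supplied by the DMA controller driver, typically as a static
 * table; the device names and request lines here are hypothetical.
 *
 *	static const struct dma_slave_map foo_slave_map[] = {
 *		{ "foo-uart.0", "rx", (void *)FOO_DMA_REQ_UART0_RX },
 *		{ "foo-uart.0", "tx", (void *)FOO_DMA_REQ_UART0_TX },
 *	};
 *
 * The provider points device->filter.map at such a table, sets
 * device->filter.mapcnt and device->filter.fn, and then registers the
 * device.
 */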

/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
{
	struct dma_device *d, *_d;
	struct dma_chan *chan = NULL;

	/* If device-tree is present get slave info from here */
	if (dev->of_node)
		chan = of_dma_request_slave_channel(dev->of_node, name);

	/* If device was enumerated by ACPI get slave info from here */
	if (has_acpi_companion(dev) && !chan)
		chan = acpi_dma_request_slave_chan_by_name(dev, name);

	if (PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;

	if (!IS_ERR_OR_NULL(chan))
		goto found;

	/* Try to find the channel via the DMA filter map(s) */
	mutex_lock(&dma_list_mutex);
	list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
		dma_cap_mask_t mask;
		const struct dma_slave_map *map = dma_filter_match(d, name, dev);

		if (!map)
			continue;

		dma_cap_zero(mask);
		dma_cap_set(DMA_SLAVE, mask);

		chan = find_candidate(d, &mask, d->filter.fn, map->param);
		if (!IS_ERR(chan))
			break;
	}
	mutex_unlock(&dma_list_mutex);

	if (IS_ERR(chan))
		return chan;
	if (!chan)
		return ERR_PTR(-EPROBE_DEFER);

found:
#ifdef CONFIG_DEBUG_FS
	chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev),
					  name);
#endif

	chan->name = kasprintf(GFP_KERNEL, "dma:%s", name);
	if (!chan->name)
		return chan;
	chan->slave = dev;

	if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj,
			      DMA_SLAVE_NAME))
		dev_warn(dev, "Cannot create DMA %s symlink\n", DMA_SLAVE_NAME);
	if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name))
		dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name);

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan);
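
/*
 * Illustrative sketch (not part of this file): typical client usage in a
 * probe path, paired with dma_release_channel() on teardown. The "rx"
 * name is hypothetical and would match DT/ACPI data or a slave map; note
 * that the error pointer may be -EPROBE_DEFER and should be propagated.
 *
 *	struct dma_chan *chan;
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	dma_release_channel(chan);
 */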

/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
{
	struct dma_chan *chan;

	if (!mask)
		return ERR_PTR(-ENODEV);

	chan = __dma_request_channel(mask, NULL, NULL, NULL);
	if (!chan) {
		mutex_lock(&dma_list_mutex);
		if (list_empty(&dma_device_list))
			chan = ERR_PTR(-EPROBE_DEFER);
		else
			chan = ERR_PTR(-ENODEV);
		mutex_unlock(&dma_list_mutex);
	}

	return chan;
}
EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
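
/*
 * Illustrative sketch (not part of this file): grabbing any
 * memcpy-capable channel by capability mask alone, with no filter or
 * device node involved.
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_chan_by_mask(&mask);
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 */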

void dma_release_channel(struct dma_chan *chan)
{
	mutex_lock(&dma_list_mutex);
	WARN_ONCE(chan->client_count != 1,
		  "chan reference count %d != 1\n", chan->client_count);
	dma_chan_put(chan);
	/* drop PRIVATE cap enabled by __dma_request_channel() */
	if (--chan->device->privatecnt == 0)
		dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);

	if (chan->slave) {
		sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME);
		sysfs_remove_link(&chan->slave->kobj, chan->name);
		kfree(chan->name);
		chan->name = NULL;
		chan->slave = NULL;
	}

#ifdef CONFIG_DEBUG_FS
	kfree(chan->dbg_client_name);
	chan->dbg_client_name = NULL;
#endif
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL_GPL(dma_release_channel);

/**
 * dmaengine_get - register interest in dma_channels
 */
void dmaengine_get(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count++;

	/* try to grab channels */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV) {
				/* module removed before we could use it */
				list_del_rcu(&device->global_node);
				break;
			} else if (err)
				dev_dbg(chan->device->dev,
					"%s: failed to get %s: (%d)\n",
					__func__, dma_chan_name(chan), err);
		}
	}

	/* if this is the first reference and there were channels waiting,
	 * we need to rebalance to get those channels incorporated into
	 * the channel table
	 */
	if (dmaengine_ref_count == 1)
		dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_get);

/**
 * dmaengine_put - let DMA drivers be removed when ref_count == 0
 */
void dmaengine_put(void)
{
	struct dma_device *device, *_d;
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	dmaengine_ref_count--;
	BUG_ON(dmaengine_ref_count < 0);
	/* drop channel references */
	list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node)
			dma_chan_put(chan);
	}
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dmaengine_put);
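
/*
 * Illustrative sketch (not part of this file): an opportunistic client
 * of the public channel pool brackets its usage with dmaengine_get() and
 * dmaengine_put(), and looks channels up with dma_find_channel().
 *
 *	dmaengine_get();
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan) {
 *		... issue memcpy transfers on chan ...
 *	}
 *	dmaengine_put();
 */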

static bool device_has_all_tx_types(struct dma_device *device)
{
	/* A device that satisfies this test has channels that will never cause
	 * an async_tx channel switch event as all possible operation types can
	 * be handled.
	 */
	#ifdef CONFIG_ASYNC_TX_DMA
	if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
	if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
		return false;
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_XOR)
	if (!dma_has_cap(DMA_XOR, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
	if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	#if IS_ENABLED(CONFIG_ASYNC_PQ)
	if (!dma_has_cap(DMA_PQ, device->cap_mask))
		return false;

	#ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
	if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
		return false;
	#endif
	#endif

	return true;
}

static int get_dma_id(struct dma_device *device)
{
	int rc = ida_alloc(&dma_ida, GFP_KERNEL);

	if (rc < 0)
		return rc;
	device->dev_id = rc;
	return 0;
}

static int __dma_async_device_channel_register(struct dma_device *device,
					       struct dma_chan *chan)
{
	int rc;

	chan->local = alloc_percpu(typeof(*chan->local));
	if (!chan->local)
		return -ENOMEM;
	chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
	if (!chan->dev) {
		rc = -ENOMEM;
		goto err_free_local;
	}

	/*
	 * When the chan_id is a negative value, we are dynamically adding
	 * the channel. Otherwise we are statically enumerating.
	 */
	mutex_lock(&device->chan_mutex);
	chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL);
	mutex_unlock(&device->chan_mutex);
	if (chan->chan_id < 0) {
		pr_err("%s: unable to alloc ida for chan: %d\n",
		       __func__, chan->chan_id);
		rc = chan->chan_id;
		goto err_free_dev;
	}

	chan->dev->device.class = &dma_devclass;
	chan->dev->device.parent = device->dev;
	chan->dev->chan = chan;
	chan->dev->dev_id = device->dev_id;
	dev_set_name(&chan->dev->device, "dma%dchan%d",
		     device->dev_id, chan->chan_id);
	rc = device_register(&chan->dev->device);
	if (rc)
		goto err_out_ida;
	chan->client_count = 0;
	device->chancnt++;

	return 0;

 err_out_ida:
	mutex_lock(&device->chan_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	mutex_unlock(&device->chan_mutex);
 err_free_dev:
	kfree(chan->dev);
 err_free_local:
	free_percpu(chan->local);
	chan->local = NULL;
	return rc;
}

int dma_async_device_channel_register(struct dma_device *device,
				      struct dma_chan *chan)
{
	int rc;

	rc = __dma_async_device_channel_register(device, chan);
	if (rc < 0)
		return rc;

	dma_channel_rebalance();
	return 0;
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_register);

static void __dma_async_device_channel_unregister(struct dma_device *device,
						  struct dma_chan *chan)
{
	WARN_ONCE(!device->device_release && chan->client_count,
		  "%s called while %d clients hold a reference\n",
		  __func__, chan->client_count);
	mutex_lock(&dma_list_mutex);
	device->chancnt--;
	chan->dev->chan = NULL;
	mutex_unlock(&dma_list_mutex);
	mutex_lock(&device->chan_mutex);
	ida_free(&device->chan_ida, chan->chan_id);
	mutex_unlock(&device->chan_mutex);
	device_unregister(&chan->dev->device);
	free_percpu(chan->local);
}

void dma_async_device_channel_unregister(struct dma_device *device,
					 struct dma_chan *chan)
{
	__dma_async_device_channel_unregister(device, chan);
	dma_channel_rebalance();
}
EXPORT_SYMBOL_GPL(dma_async_device_channel_unregister);

/**
 * dma_async_device_register - registers DMA devices found
 * @device: pointer to &struct dma_device
 *
 * After calling this routine the structure should not be freed except in the
 * device_release() callback which will be called after
 * dma_async_device_unregister() is called and no further references are taken.
 */
int dma_async_device_register(struct dma_device *device)
{
	int rc;
	struct dma_chan *chan;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	if (!device->dev) {
		pr_err("DMA device must have dev\n");
		return -EIO;
	}

	device->owner = device->dev->driver->owner;

	if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMCPY");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR");
		return -EIO;
	}

	if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_XOR_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ");
		return -EIO;
	}

	if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_PQ_VAL");
		return -EIO;
	}

	if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_MEMSET");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERRUPT");
		return -EIO;
	}

	if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_CYCLIC");
		return -EIO;
	}

	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
		dev_err(device->dev,
			"Device claims capability %s, but op is not defined\n",
			"DMA_INTERLEAVE");
		return -EIO;
	}

	if (!device->device_tx_status) {
		dev_err(device->dev, "Device tx_status is not defined\n");
		return -EIO;
	}

	if (!device->device_issue_pending) {
		dev_err(device->dev, "Device issue_pending is not defined\n");
		return -EIO;
	}

	if (!device->device_release)
		dev_dbg(device->dev,
			"WARN: Device release is not defined so it is not safe to unbind this driver while in use\n");

	kref_init(&device->ref);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	rc = get_dma_id(device);
	if (rc != 0)
		return rc;

	mutex_init(&device->chan_mutex);
	ida_init(&device->chan_ida);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = __dma_async_device_channel_register(device, chan);
		if (rc < 0)
			goto err_out;
	}

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	dmaengine_debug_register(device);

	return 0;

err_out:
	/* if we never registered a channel just release the ida */
	if (!device->chancnt) {
		ida_free(&dma_ida, device->dev_id);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: pointer to &struct dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan, *n;

	dmaengine_debug_unregister(device);

	list_for_each_entry_safe(chan, n, &device->channels, device_node)
		__dma_async_device_channel_unregister(device, chan);

	mutex_lock(&dma_list_mutex);
	/*
	 * setting DMA_PRIVATE ensures the device being torn down will not
	 * be used in the channel_table
	 */
	dma_cap_set(DMA_PRIVATE, device->cap_mask);
	dma_channel_rebalance();
	ida_free(&dma_ida, device->dev_id);
	dma_device_put(device);
	mutex_unlock(&dma_list_mutex);
}
EXPORT_SYMBOL(dma_async_device_unregister);

static void dmam_device_release(struct device *dev, void *res)
{
	struct dma_device *device;

	device = *(struct dma_device **)res;
	dma_async_device_unregister(device);
}

/**
 * dmaenginem_async_device_register - registers DMA devices found
 * @device: pointer to &struct dma_device
 *
 * The operation is managed and will be undone on driver detach.
 */
int dmaenginem_async_device_register(struct dma_device *device)
{
	void *p;
	int ret;

	p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = dma_async_device_register(device);
	if (!ret) {
		*(struct dma_device **)p = device;
		devres_add(device->dev, p);
	} else {
		devres_free(p);
	}

	return ret;
}
EXPORT_SYMBOL(dmaenginem_async_device_register);
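
/*
 * Illustrative sketch (not part of this file): a hypothetical provider
 * using the managed registration from its probe routine, so that
 * unregistration happens automatically on driver detach.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dmadev *fdev;
 *
 *		... allocate fdev, set up capabilities and callbacks ...
 *		fdev->ddev.dev = &pdev->dev;
 *		return dmaenginem_async_device_register(&fdev->ddev);
 *	}
 */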

struct dmaengine_unmap_pool {
	struct kmem_cache *cache;
	const char *name;
	mempool_t *pool;
	size_t size;
};

#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
static struct dmaengine_unmap_pool unmap_pool[] = {
	__UNMAP_POOL(2),
	#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	__UNMAP_POOL(16),
	__UNMAP_POOL(128),
	__UNMAP_POOL(256),
	#endif
};

static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
{
	int order = get_count_order(nr);

	switch (order) {
	case 0 ... 1:
		return &unmap_pool[0];
#if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
	case 2 ... 4:
		return &unmap_pool[1];
	case 5 ... 7:
		return &unmap_pool[2];
	case 8:
		return &unmap_pool[3];
#endif
	default:
		BUG();
		return NULL;
	}
}
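
/*
 * Worked example of the mapping above: a request for nr = 9 addresses
 * has get_count_order(9) == 4 (the smallest power of two >= 9 is 16),
 * which falls into the "case 2 ... 4" range and selects the 16-entry
 * "dmaengine-unmap-16" pool when CONFIG_DMA_ENGINE_RAID is enabled.
 */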

static void dmaengine_unmap(struct kref *kref)
{
	struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
	struct device *dev = unmap->dev;
	int cnt, i;

	cnt = unmap->to_cnt;
	for (i = 0; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_TO_DEVICE);
	cnt += unmap->from_cnt;
	for (; i < cnt; i++)
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_FROM_DEVICE);
	cnt += unmap->bidi_cnt;
	for (; i < cnt; i++) {
		if (unmap->addr[i] == 0)
			continue;
		dma_unmap_page(dev, unmap->addr[i], unmap->len,
			       DMA_BIDIRECTIONAL);
	}
	cnt = unmap->map_cnt;
	mempool_free(unmap, __get_unmap_pool(cnt)->pool);
}

void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
{
	if (unmap)
		kref_put(&unmap->kref, dmaengine_unmap);
}
EXPORT_SYMBOL_GPL(dmaengine_unmap_put);

static void dmaengine_destroy_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];

		mempool_destroy(p->pool);
		p->pool = NULL;
		kmem_cache_destroy(p->cache);
		p->cache = NULL;
	}
}

static int __init dmaengine_init_unmap_pool(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
		struct dmaengine_unmap_pool *p = &unmap_pool[i];
		size_t size;

		size = sizeof(struct dmaengine_unmap_data) +
		       sizeof(dma_addr_t) * p->size;

		p->cache = kmem_cache_create(p->name, size, 0,
					     SLAB_HWCACHE_ALIGN, NULL);
		if (!p->cache)
			break;
		p->pool = mempool_create_slab_pool(1, p->cache);
		if (!p->pool)
			break;
	}

	if (i == ARRAY_SIZE(unmap_pool))
		return 0;

	dmaengine_destroy_unmap_pool();
	return -ENOMEM;
}

struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
{
	struct dmaengine_unmap_data *unmap;

	unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
	if (!unmap)
		return NULL;

	memset(unmap, 0, sizeof(*unmap));
	kref_init(&unmap->kref);
	unmap->dev = dev;
	unmap->map_cnt = nr;

	return unmap;
}
EXPORT_SYMBOL(dmaengine_get_unmap_data);
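
/*
 * Illustrative sketch (not part of this file): a user batching the DMA
 * mappings of one copy operation. Addresses must be laid out as to_cnt
 * source pages first, then from_cnt destination pages, then bidi pages,
 * matching the unmap order in dmaengine_unmap() above.
 *
 *	struct dmaengine_unmap_data *unmap;
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	if (!unmap)
 *		return -ENOMEM;
 *	unmap->len = len;
 *	unmap->addr[0] = dma_map_page(dev, src_page, 0, len, DMA_TO_DEVICE);
 *	unmap->to_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, 0, len, DMA_FROM_DEVICE);
 *	unmap->from_cnt = 1;
 *	... hand the addresses to a prep callback ...
 *	dmaengine_unmap_put(unmap);
 */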

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
				  struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);

static inline int desc_check_and_set_metadata_mode(
	struct dma_async_tx_descriptor *desc, enum dma_desc_metadata_mode mode)
{
	/* Make sure that the metadata mode is not mixed */
	if (!desc->desc_metadata_mode) {
		if (dmaengine_is_metadata_mode_supported(desc->chan, mode))
			desc->desc_metadata_mode = mode;
		else
			return -ENOTSUPP;
	} else if (desc->desc_metadata_mode != mode) {
		return -EINVAL;
	}

	return 0;
}

int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
				   void *data, size_t len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_CLIENT);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->attach)
		return -ENOTSUPP;

	return desc->metadata_ops->attach(desc, data, len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_attach_metadata);
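
/*
 * Illustrative sketch (not part of this file): a client operating in
 * DESC_METADATA_CLIENT mode attaches its metadata buffer after preparing
 * a descriptor and before submitting it; "mdata" is hypothetical.
 *
 *	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
 *					   DMA_PREP_INTERRUPT);
 *	if (!desc)
 *		return -ENOMEM;
 *	ret = dmaengine_desc_attach_metadata(desc, mdata, mdata_len);
 *	if (ret)
 *		return ret;
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */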

void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
				      size_t *payload_len, size_t *max_len)
{
	int ret;

	if (!desc)
		return ERR_PTR(-EINVAL);

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ERR_PTR(ret);

	if (!desc->metadata_ops || !desc->metadata_ops->get_ptr)
		return ERR_PTR(-ENOTSUPP);

	return desc->metadata_ops->get_ptr(desc, payload_len, max_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_get_metadata_ptr);

int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
				    size_t payload_len)
{
	int ret;

	if (!desc)
		return -EINVAL;

	ret = desc_check_and_set_metadata_mode(desc, DESC_METADATA_ENGINE);
	if (ret)
		return ret;

	if (!desc->metadata_ops || !desc->metadata_ops->set_len)
		return -ENOTSUPP;

	return desc->metadata_ops->set_len(desc, payload_len);
}
EXPORT_SYMBOL_GPL(dmaengine_desc_set_metadata_len);

/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_COMPLETE;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			dev_err(tx->chan->device->dev,
				"%s timeout waiting for descriptor submission\n",
				__func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);

/**
 * dma_run_dependencies - process dependent operations on the target channel
 * @tx: transaction with dependencies
 *
 * Helper routine for DMA drivers to process (start) dependent operations
 * on their target channel.
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting until a channel switch is detected; in that case
	 * we will be called again as a result of processing the interrupt
	 * from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
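
/*
 * Illustrative sketch (not part of this file): a provider's descriptor
 * cleanup path typically kicks off dependent operations once a
 * transaction completes; dma_cookie_complete() comes from the
 * driver-private drivers/dma/dmaengine.h and "foo_desc" is hypothetical.
 *
 *	static void foo_desc_complete(struct foo_desc *d)
 *	{
 *		dma_cookie_complete(&d->txd);
 *		dma_run_dependencies(&d->txd);
 *	}
 */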

static int __init dma_bus_init(void)
{
	int err = dmaengine_init_unmap_pool();

	if (err)
		return err;

	err = class_register(&dma_devclass);
	if (!err)
		dmaengine_debugfs_init();

	return err;
}
arch_initcall(dma_bus_init);