1/*
2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * The full GNU General Public License is included in this distribution in the
15 * file called COPYING.
16 */
17
18/*
19 * This code implements the DMA subsystem. It provides a HW-neutral interface
20 * for other kernel code to use asynchronous memory copy capabilities,
21 * if present, and allows different HW DMA drivers to register as providing
22 * this capability.
23 *
24 * Because we are accelerating what is already a relatively fast operation,
25 * the code goes to great lengths to avoid additional overhead, such as
26 * locking.
27 *
28 * LOCKING:
29 *
30 * The subsystem keeps a global list of dma_device structs which is
31 * protected by a mutex, dma_list_mutex.
32 *
33 * A subsystem can get access to a channel by calling dmaengine_get() followed
34 * by dma_find_channel(), or, if it needs an exclusive channel, it can call
35 * dma_request_channel(). Once a channel is allocated a reference is taken
36 * against its corresponding driver to disable removal.
37 *
38 * Each device has a channels list, which runs unlocked but is never modified
39 * once the device is registered; it is just set up by the driver.
40 *
41 * See Documentation/dmaengine.txt for more details
42 */
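/*
 * Example (illustrative sketch, not part of this file): the typical client
 * pattern described in the comment above. A subsystem registers interest
 * with dmaengine_get(), looks up a public channel per operation type with
 * dma_find_channel(), and drops its interest with dmaengine_put(). The
 * descriptor preparation and submission step is omitted here.
 */
static void dmaengine_client_example(void)
{
	struct dma_chan *chan;

	dmaengine_get();	/* take a reference on all public channels */

	chan = dma_find_channel(DMA_MEMCPY);
	if (chan) {
		/* prepare and submit descriptors on @chan here ... */
		dma_async_issue_pending(chan);
	}

	dmaengine_put();	/* allow providers to be unloaded again */
}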
43
44#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
45
46#include <linux/platform_device.h>
47#include <linux/dma-mapping.h>
48#include <linux/init.h>
49#include <linux/module.h>
50#include <linux/mm.h>
51#include <linux/device.h>
52#include <linux/dmaengine.h>
53#include <linux/hardirq.h>
54#include <linux/spinlock.h>
55#include <linux/percpu.h>
56#include <linux/rcupdate.h>
57#include <linux/mutex.h>
58#include <linux/jiffies.h>
59#include <linux/rculist.h>
60#include <linux/idr.h>
61#include <linux/slab.h>
62#include <linux/acpi.h>
63#include <linux/acpi_dma.h>
64#include <linux/of_dma.h>
65#include <linux/mempool.h>
66
67static DEFINE_MUTEX(dma_list_mutex);
68static DEFINE_IDR(dma_idr);
69static LIST_HEAD(dma_device_list);
70static long dmaengine_ref_count;
71
72/* --- sysfs implementation --- */
73
74/**
75 * dev_to_dma_chan - convert a device pointer to its sysfs container object
76 * @dev - device node
77 *
78 * Must be called under dma_list_mutex
79 */
80static struct dma_chan *dev_to_dma_chan(struct device *dev)
81{
82 struct dma_chan_dev *chan_dev;
83
84 chan_dev = container_of(dev, typeof(*chan_dev), device);
85 return chan_dev->chan;
86}
87
88static ssize_t memcpy_count_show(struct device *dev,
89 struct device_attribute *attr, char *buf)
90{
91 struct dma_chan *chan;
92 unsigned long count = 0;
93 int i;
94 int err;
95
96 mutex_lock(&dma_list_mutex);
97 chan = dev_to_dma_chan(dev);
98 if (chan) {
99 for_each_possible_cpu(i)
100 count += per_cpu_ptr(chan->local, i)->memcpy_count;
101 err = sprintf(buf, "%lu\n", count);
102 } else
103 err = -ENODEV;
104 mutex_unlock(&dma_list_mutex);
105
106 return err;
107}
108static DEVICE_ATTR_RO(memcpy_count);
109
110static ssize_t bytes_transferred_show(struct device *dev,
111 struct device_attribute *attr, char *buf)
112{
113 struct dma_chan *chan;
114 unsigned long count = 0;
115 int i;
116 int err;
117
118 mutex_lock(&dma_list_mutex);
119 chan = dev_to_dma_chan(dev);
120 if (chan) {
121 for_each_possible_cpu(i)
122 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
123 err = sprintf(buf, "%lu\n", count);
124 } else
125 err = -ENODEV;
126 mutex_unlock(&dma_list_mutex);
127
128 return err;
129}
130static DEVICE_ATTR_RO(bytes_transferred);
131
132static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
133 char *buf)
134{
135 struct dma_chan *chan;
136 int err;
137
138 mutex_lock(&dma_list_mutex);
139 chan = dev_to_dma_chan(dev);
140 if (chan)
141 err = sprintf(buf, "%d\n", chan->client_count);
142 else
143 err = -ENODEV;
144 mutex_unlock(&dma_list_mutex);
145
146 return err;
147}
148static DEVICE_ATTR_RO(in_use);
149
150static struct attribute *dma_dev_attrs[] = {
151 &dev_attr_memcpy_count.attr,
152 &dev_attr_bytes_transferred.attr,
153 &dev_attr_in_use.attr,
154 NULL,
155};
156ATTRIBUTE_GROUPS(dma_dev);
157
158static void chan_dev_release(struct device *dev)
159{
160 struct dma_chan_dev *chan_dev;
161
162 chan_dev = container_of(dev, typeof(*chan_dev), device);
163 if (atomic_dec_and_test(chan_dev->idr_ref)) {
164 mutex_lock(&dma_list_mutex);
165 idr_remove(&dma_idr, chan_dev->dev_id);
166 mutex_unlock(&dma_list_mutex);
167 kfree(chan_dev->idr_ref);
168 }
169 kfree(chan_dev);
170}
171
172static struct class dma_devclass = {
173 .name = "dma",
174 .dev_groups = dma_dev_groups,
175 .dev_release = chan_dev_release,
176};
177
178/* --- client and device registration --- */
179
180#define dma_device_satisfies_mask(device, mask) \
181 __dma_device_satisfies_mask((device), &(mask))
182static int
183__dma_device_satisfies_mask(struct dma_device *device,
184 const dma_cap_mask_t *want)
185{
186 dma_cap_mask_t has;
187
188 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
189 DMA_TX_TYPE_END);
190 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
191}
192
193static struct module *dma_chan_to_owner(struct dma_chan *chan)
194{
195 return chan->device->dev->driver->owner;
196}
197
198/**
199 * balance_ref_count - catch up the channel reference count
200 * @chan - channel to balance ->client_count versus dmaengine_ref_count
201 *
202 * balance_ref_count must be called under dma_list_mutex
203 */
204static void balance_ref_count(struct dma_chan *chan)
205{
206 struct module *owner = dma_chan_to_owner(chan);
207
208 while (chan->client_count < dmaengine_ref_count) {
209 __module_get(owner);
210 chan->client_count++;
211 }
212}
213
214/**
215 * dma_chan_get - try to grab a dma channel's parent driver module
216 * @chan - channel to grab
217 *
218 * Must be called under dma_list_mutex
219 */
220static int dma_chan_get(struct dma_chan *chan)
221{
222 struct module *owner = dma_chan_to_owner(chan);
223 int ret;
224
225 /* The channel is already in use, update client count */
226 if (chan->client_count) {
227 __module_get(owner);
228 goto out;
229 }
230
231 if (!try_module_get(owner))
232 return -ENODEV;
233
234 /* allocate upon first client reference */
235 if (chan->device->device_alloc_chan_resources) {
236 ret = chan->device->device_alloc_chan_resources(chan);
237 if (ret < 0)
238 goto err_out;
239 }
240
241 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
242 balance_ref_count(chan);
243
244out:
245 chan->client_count++;
246 return 0;
247
248err_out:
249 module_put(owner);
250 return ret;
251}
252
253/**
254 * dma_chan_put - drop a reference to a dma channel's parent driver module
255 * @chan - channel to release
256 *
257 * Must be called under dma_list_mutex
258 */
259static void dma_chan_put(struct dma_chan *chan)
260{
261 /* This channel is not in use, bail out */
262 if (!chan->client_count)
263 return;
264
265 chan->client_count--;
266 module_put(dma_chan_to_owner(chan));
267
268 /* This channel is not in use anymore, free it */
269 if (!chan->client_count && chan->device->device_free_chan_resources) {
270 /* Make sure all operations have completed */
271 dmaengine_synchronize(chan);
272 chan->device->device_free_chan_resources(chan);
273 }
274
275 /* If the channel is used via a DMA request router, free the mapping */
276 if (chan->router && chan->router->route_free) {
277 chan->router->route_free(chan->router->dev, chan->route_data);
278 chan->router = NULL;
279 chan->route_data = NULL;
280 }
281}
282
283enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
284{
285 enum dma_status status;
286 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
287
288 dma_async_issue_pending(chan);
289 do {
290 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
291 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
292 pr_err("%s: timeout!\n", __func__);
293 return DMA_ERROR;
294 }
295 if (status != DMA_IN_PROGRESS)
296 break;
297 cpu_relax();
298 } while (1);
299
300 return status;
301}
302EXPORT_SYMBOL(dma_sync_wait);
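/*
 * Example (illustrative sketch, not part of this file): synchronously waiting
 * for a single memcpy descriptor with dma_sync_wait(). The buffers are
 * assumed to be DMA-mapped already; the descriptor goes through the
 * provider's device_prep_dma_memcpy() callback and dmaengine_submit() from
 * <linux/dmaengine.h>.
 */
static enum dma_status sync_memcpy_example(struct dma_chan *chan,
					   dma_addr_t dst, dma_addr_t src,
					   size_t len)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
						   DMA_CTRL_ACK);
	if (!tx)
		return DMA_ERROR;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return DMA_ERROR;

	/* issues pending work and spins until completion or timeout */
	return dma_sync_wait(chan, cookie);
}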
303
304/**
305 * dma_cap_mask_all - enable iteration over all operation types
306 */
307static dma_cap_mask_t dma_cap_mask_all;
308
309/**
310 * dma_chan_tbl_ent - tracks channel allocations per core/operation
311 * @chan - associated channel for this entry
312 */
313struct dma_chan_tbl_ent {
314 struct dma_chan *chan;
315};
316
317/**
318 * channel_table - percpu lookup table for memory-to-memory offload providers
319 */
320static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
321
322static int __init dma_channel_table_init(void)
323{
324 enum dma_transaction_type cap;
325 int err = 0;
326
327 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
328
329 /* 'interrupt', 'private', and 'slave' are channel capabilities,
330 * but are not associated with an operation so they do not need
331 * an entry in the channel_table
332 */
333 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
334 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
335 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
336
337 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
338 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
339 if (!channel_table[cap]) {
340 err = -ENOMEM;
341 break;
342 }
343 }
344
345 if (err) {
346 pr_err("initialization failure\n");
347 for_each_dma_cap_mask(cap, dma_cap_mask_all)
348 free_percpu(channel_table[cap]);
349 }
350
351 return err;
352}
353arch_initcall(dma_channel_table_init);
354
355/**
356 * dma_find_channel - find a channel to carry out the operation
357 * @tx_type: transaction type
358 */
359struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
360{
361 return this_cpu_read(channel_table[tx_type]->chan);
362}
363EXPORT_SYMBOL(dma_find_channel);
364
365/**
366 * dma_issue_pending_all - flush all pending operations across all channels
367 */
368void dma_issue_pending_all(void)
369{
370 struct dma_device *device;
371 struct dma_chan *chan;
372
373 rcu_read_lock();
374 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
375 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
376 continue;
377 list_for_each_entry(chan, &device->channels, device_node)
378 if (chan->client_count)
379 device->device_issue_pending(chan);
380 }
381 rcu_read_unlock();
382}
383EXPORT_SYMBOL(dma_issue_pending_all);
384
385/**
386 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
387 */
388static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
389{
390 int node = dev_to_node(chan->device->dev);
391 return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
392}
393
394/**
395 * min_chan - returns the channel with min count and in the same numa-node as the cpu
396 * @cap: capability to match
397 * @cpu: cpu index which the channel should be close to
398 *
399 * If some channels are close to the given cpu, the one with the lowest
400 * reference count is returned. Otherwise, cpu is ignored and only the
401 * reference count is taken into account.
402 * Must be called under dma_list_mutex.
403 */
404static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
405{
406 struct dma_device *device;
407 struct dma_chan *chan;
408 struct dma_chan *min = NULL;
409 struct dma_chan *localmin = NULL;
410
411 list_for_each_entry(device, &dma_device_list, global_node) {
412 if (!dma_has_cap(cap, device->cap_mask) ||
413 dma_has_cap(DMA_PRIVATE, device->cap_mask))
414 continue;
415 list_for_each_entry(chan, &device->channels, device_node) {
416 if (!chan->client_count)
417 continue;
418 if (!min || chan->table_count < min->table_count)
419 min = chan;
420
421 if (dma_chan_is_local(chan, cpu))
422 if (!localmin ||
423 chan->table_count < localmin->table_count)
424 localmin = chan;
425 }
426 }
427
428 chan = localmin ? localmin : min;
429
430 if (chan)
431 chan->table_count++;
432
433 return chan;
434}
435
436/**
437 * dma_channel_rebalance - redistribute the available channels
438 *
439 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
440 * operation type) in the SMP case, and operation isolation (avoid
441 * multi-tasking channels) in the non-SMP case. Must be called under
442 * dma_list_mutex.
443 */
444static void dma_channel_rebalance(void)
445{
446 struct dma_chan *chan;
447 struct dma_device *device;
448 int cpu;
449 int cap;
450
451 /* undo the last distribution */
452 for_each_dma_cap_mask(cap, dma_cap_mask_all)
453 for_each_possible_cpu(cpu)
454 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
455
456 list_for_each_entry(device, &dma_device_list, global_node) {
457 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
458 continue;
459 list_for_each_entry(chan, &device->channels, device_node)
460 chan->table_count = 0;
461 }
462
463 /* don't populate the channel_table if no clients are available */
464 if (!dmaengine_ref_count)
465 return;
466
467 /* redistribute available channels */
468 for_each_dma_cap_mask(cap, dma_cap_mask_all)
469 for_each_online_cpu(cpu) {
470 chan = min_chan(cap, cpu);
471 per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
472 }
473}
474
475int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
476{
477 struct dma_device *device;
478
479 if (!chan || !caps)
480 return -EINVAL;
481
482 device = chan->device;
483
484 /* check if the channel supports slave transactions */
485 if (!test_bit(DMA_SLAVE, device->cap_mask.bits))
486 return -ENXIO;
487
488 /*
489 * Check whether the device reports the generic slave
490 * capabilities; if not, it does not support any kind of
491 * slave capabilities reporting.
492 */
493 if (!device->directions)
494 return -ENXIO;
495
496 caps->src_addr_widths = device->src_addr_widths;
497 caps->dst_addr_widths = device->dst_addr_widths;
498 caps->directions = device->directions;
499 caps->max_burst = device->max_burst;
500 caps->residue_granularity = device->residue_granularity;
501 caps->descriptor_reuse = device->descriptor_reuse;
502
503 /*
504 * Some devices implement only pause (e.g. to read the residue) but no
505 * resume. However, cmd_pause is advertised as pause AND resume.
506 */
507 caps->cmd_pause = !!(device->device_pause && device->device_resume);
508 caps->cmd_terminate = !!device->device_terminate_all;
509
510 return 0;
511}
512EXPORT_SYMBOL_GPL(dma_get_slave_caps);
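/*
 * Example (illustrative sketch, not part of this file): a slave client
 * checking the advertised capabilities before configuring a channel. The
 * direction and bus-width values are arbitrary placeholders.
 */
static int check_slave_caps_example(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;

	/* require memory-to-device transfers with 4-byte writes */
	if (!(caps.directions & BIT(DMA_MEM_TO_DEV)))
		return -EINVAL;
	if (!(caps.dst_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
		return -EINVAL;

	return 0;
}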
513
514static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
515 struct dma_device *dev,
516 dma_filter_fn fn, void *fn_param)
517{
518 struct dma_chan *chan;
519
520 if (mask && !__dma_device_satisfies_mask(dev, mask)) {
521 pr_debug("%s: wrong capabilities\n", __func__);
522 return NULL;
523 }
524 /* devices with multiple channels need special handling as we need to
525 * ensure that all channels are either private or public.
526 */
527 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
528 list_for_each_entry(chan, &dev->channels, device_node) {
529 /* some channels are already publicly allocated */
530 if (chan->client_count)
531 return NULL;
532 }
533
534 list_for_each_entry(chan, &dev->channels, device_node) {
535 if (chan->client_count) {
536 pr_debug("%s: %s busy\n",
537 __func__, dma_chan_name(chan));
538 continue;
539 }
540 if (fn && !fn(chan, fn_param)) {
541 pr_debug("%s: %s filter said false\n",
542 __func__, dma_chan_name(chan));
543 continue;
544 }
545 return chan;
546 }
547
548 return NULL;
549}
550
551static struct dma_chan *find_candidate(struct dma_device *device,
552 const dma_cap_mask_t *mask,
553 dma_filter_fn fn, void *fn_param)
554{
555 struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
556 int err;
557
558 if (chan) {
559 /* Found a suitable channel, try to grab, prep, and return it.
560 * We first set DMA_PRIVATE to disable balance_ref_count as this
561 * channel will not be published in the general-purpose
562 * allocator
563 */
564 dma_cap_set(DMA_PRIVATE, device->cap_mask);
565 device->privatecnt++;
566 err = dma_chan_get(chan);
567
568 if (err) {
569 if (err == -ENODEV) {
570 pr_debug("%s: %s module removed\n", __func__,
571 dma_chan_name(chan));
572 list_del_rcu(&device->global_node);
573 } else
574 pr_debug("%s: failed to get %s: (%d)\n",
575 __func__, dma_chan_name(chan), err);
576
577 if (--device->privatecnt == 0)
578 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
579
580 chan = ERR_PTR(err);
581 }
582 }
583
584 return chan ? chan : ERR_PTR(-EPROBE_DEFER);
585}
586
587/**
588 * dma_get_slave_channel - try to get a specific channel exclusively
589 * @chan: target channel
590 */
591struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
592{
593 int err = -EBUSY;
594
595 /* lock against __dma_request_channel */
596 mutex_lock(&dma_list_mutex);
597
598 if (chan->client_count == 0) {
599 struct dma_device *device = chan->device;
600
601 dma_cap_set(DMA_PRIVATE, device->cap_mask);
602 device->privatecnt++;
603 err = dma_chan_get(chan);
604 if (err) {
605 pr_debug("%s: failed to get %s: (%d)\n",
606 __func__, dma_chan_name(chan), err);
607 chan = NULL;
608 if (--device->privatecnt == 0)
609 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
610 }
611 } else
612 chan = NULL;
613
614 mutex_unlock(&dma_list_mutex);
615
616
617 return chan;
618}
619EXPORT_SYMBOL_GPL(dma_get_slave_channel);
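/*
 * Example (illustrative sketch, not part of this file): a provider's of_dma
 * translation callback handing out one specific channel via
 * dma_get_slave_channel(). The lookup of the channel from @dma_spec is
 * driver specific; my_find_chan() is a hypothetical helper.
 */
static struct dma_chan *example_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct dma_chan *chan = my_find_chan(ofdma->of_dma_data, dma_spec);

	if (!chan)
		return NULL;

	/* take the channel exclusively for this client */
	return dma_get_slave_channel(chan);
}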
620
621struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
622{
623 dma_cap_mask_t mask;
624 struct dma_chan *chan;
625
626 dma_cap_zero(mask);
627 dma_cap_set(DMA_SLAVE, mask);
628
629 /* lock against __dma_request_channel */
630 mutex_lock(&dma_list_mutex);
631
632 chan = find_candidate(device, &mask, NULL, NULL);
633
634 mutex_unlock(&dma_list_mutex);
635
636 return IS_ERR(chan) ? NULL : chan;
637}
638EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
639
640/**
641 * __dma_request_channel - try to allocate an exclusive channel
642 * @mask: capabilities that the channel must satisfy
643 * @fn: optional callback used to filter the available channels
644 * @fn_param: opaque parameter to pass to dma_filter_fn
645 *
646 * Returns pointer to appropriate DMA channel on success or NULL.
647 */
648struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
649 dma_filter_fn fn, void *fn_param)
650{
651 struct dma_device *device, *_d;
652 struct dma_chan *chan = NULL;
653
654 /* Find a channel */
655 mutex_lock(&dma_list_mutex);
656 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
657 chan = find_candidate(device, mask, fn, fn_param);
658 if (!IS_ERR(chan))
659 break;
660
661 chan = NULL;
662 }
663 mutex_unlock(&dma_list_mutex);
664
665 pr_debug("%s: %s (%s)\n",
666 __func__,
667 chan ? "success" : "fail",
668 chan ? dma_chan_name(chan) : NULL);
669
670 return chan;
671}
672EXPORT_SYMBOL_GPL(__dma_request_channel);
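/*
 * Example (illustrative sketch, not part of this file): requesting an
 * exclusive memcpy-capable channel through the dma_request_channel()
 * wrapper, with an optional filter callback that vets each candidate.
 * The filter shown accepts any channel and exists only for illustration.
 */
static bool example_filter(struct dma_chan *chan, void *param)
{
	/* e.g. match @chan against a platform-specific request line in @param */
	return true;
}

static struct dma_chan *request_memcpy_chan_example(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	/* returns NULL if no channel satisfies @mask and the filter */
	return dma_request_channel(mask, example_filter, NULL);
}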
673
674static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
675 const char *name,
676 struct device *dev)
677{
678 int i;
679
680 if (!device->filter.mapcnt)
681 return NULL;
682
683 for (i = 0; i < device->filter.mapcnt; i++) {
684 const struct dma_slave_map *map = &device->filter.map[i];
685
686 if (!strcmp(map->devname, dev_name(dev)) &&
687 !strcmp(map->slave, name))
688 return map;
689 }
690
691 return NULL;
692}
693
694/**
695 * dma_request_chan - try to allocate an exclusive slave channel
696 * @dev: pointer to client device structure
697 * @name: slave channel name
698 *
699 * Returns pointer to appropriate DMA channel on success or an error pointer.
700 */
701struct dma_chan *dma_request_chan(struct device *dev, const char *name)
702{
703 struct dma_device *d, *_d;
704 struct dma_chan *chan = NULL;
705
706 /* If device-tree is present get slave info from here */
707 if (dev->of_node)
708 chan = of_dma_request_slave_channel(dev->of_node, name);
709
710 /* If device was enumerated by ACPI get slave info from here */
711 if (has_acpi_companion(dev) && !chan)
712 chan = acpi_dma_request_slave_chan_by_name(dev, name);
713
714 if (chan) {
715 /* Valid channel found or requester needs to be deferred */
716 if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
717 return chan;
718 }
719
720 /* Try to find the channel via the DMA filter map(s) */
721 mutex_lock(&dma_list_mutex);
722 list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
723 dma_cap_mask_t mask;
724 const struct dma_slave_map *map = dma_filter_match(d, name, dev);
725
726 if (!map)
727 continue;
728
729 dma_cap_zero(mask);
730 dma_cap_set(DMA_SLAVE, mask);
731
732 chan = find_candidate(d, &mask, d->filter.fn, map->param);
733 if (!IS_ERR(chan))
734 break;
735 }
736 mutex_unlock(&dma_list_mutex);
737
738 return chan ? chan : ERR_PTR(-EPROBE_DEFER);
739}
740EXPORT_SYMBOL_GPL(dma_request_chan);
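/*
 * Example (illustrative sketch, not part of this file): a slave driver's
 * probe path requesting its "rx" channel by name. dma_request_chan()
 * returns an ERR_PTR(), so -EPROBE_DEFER (and other errors) propagate
 * naturally back to the driver core.
 */
static int example_probe_rx_chan(struct device *dev,
				 struct dma_chan **rx_chan)
{
	struct dma_chan *chan = dma_request_chan(dev, "rx");

	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER */

	*rx_chan = chan;
	return 0;
}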
741
742/**
743 * dma_request_slave_channel - try to allocate an exclusive slave channel
744 * @dev: pointer to client device structure
745 * @name: slave channel name
746 *
747 * Returns pointer to appropriate DMA channel on success or NULL.
748 */
749struct dma_chan *dma_request_slave_channel(struct device *dev,
750 const char *name)
751{
752 struct dma_chan *ch = dma_request_chan(dev, name);
753 if (IS_ERR(ch))
754 return NULL;
755
756 return ch;
757}
758EXPORT_SYMBOL_GPL(dma_request_slave_channel);
759
760/**
761 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
762 * @mask: capabilities that the channel must satisfy
763 *
764 * Returns pointer to appropriate DMA channel on success or an error pointer.
765 */
766struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
767{
768 struct dma_chan *chan;
769
770 if (!mask)
771 return ERR_PTR(-ENODEV);
772
773 chan = __dma_request_channel(mask, NULL, NULL);
774 if (!chan)
775 chan = ERR_PTR(-ENODEV);
776
777 return chan;
778}
779EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
780
781void dma_release_channel(struct dma_chan *chan)
782{
783 mutex_lock(&dma_list_mutex);
784 WARN_ONCE(chan->client_count != 1,
785 "chan reference count %d != 1\n", chan->client_count);
786 dma_chan_put(chan);
787 /* drop PRIVATE cap enabled by __dma_request_channel() */
788 if (--chan->device->privatecnt == 0)
789 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
790 mutex_unlock(&dma_list_mutex);
791}
792EXPORT_SYMBOL_GPL(dma_release_channel);
793
794/**
795 * dmaengine_get - register interest in dma_channels
796 */
797void dmaengine_get(void)
798{
799 struct dma_device *device, *_d;
800 struct dma_chan *chan;
801 int err;
802
803 mutex_lock(&dma_list_mutex);
804 dmaengine_ref_count++;
805
806 /* try to grab channels */
807 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
808 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
809 continue;
810 list_for_each_entry(chan, &device->channels, device_node) {
811 err = dma_chan_get(chan);
812 if (err == -ENODEV) {
813 /* module removed before we could use it */
814 list_del_rcu(&device->global_node);
815 break;
816 } else if (err)
817 pr_debug("%s: failed to get %s: (%d)\n",
818 __func__, dma_chan_name(chan), err);
819 }
820 }
821
822 /* if this is the first reference and there were channels
823 * waiting we need to rebalance to get those channels
824 * incorporated into the channel table
825 */
826 if (dmaengine_ref_count == 1)
827 dma_channel_rebalance();
828 mutex_unlock(&dma_list_mutex);
829}
830EXPORT_SYMBOL(dmaengine_get);
831
832/**
833 * dmaengine_put - let dma drivers be removed when ref_count == 0
834 */
835void dmaengine_put(void)
836{
837 struct dma_device *device;
838 struct dma_chan *chan;
839
840 mutex_lock(&dma_list_mutex);
841 dmaengine_ref_count--;
842 BUG_ON(dmaengine_ref_count < 0);
843 /* drop channel references */
844 list_for_each_entry(device, &dma_device_list, global_node) {
845 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
846 continue;
847 list_for_each_entry(chan, &device->channels, device_node)
848 dma_chan_put(chan);
849 }
850 mutex_unlock(&dma_list_mutex);
851}
852EXPORT_SYMBOL(dmaengine_put);
853
854static bool device_has_all_tx_types(struct dma_device *device)
855{
856 /* A device that satisfies this test has channels that will never cause
857 * an async_tx channel switch event as all possible operation types can
858 * be handled.
859 */
860 #ifdef CONFIG_ASYNC_TX_DMA
861 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
862 return false;
863 #endif
864
865 #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
866 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
867 return false;
868 #endif
869
870 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
871 if (!dma_has_cap(DMA_XOR, device->cap_mask))
872 return false;
873
874 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
875 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
876 return false;
877 #endif
878 #endif
879
880 #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
881 if (!dma_has_cap(DMA_PQ, device->cap_mask))
882 return false;
883
884 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
885 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
886 return false;
887 #endif
888 #endif
889
890 return true;
891}
892
893static int get_dma_id(struct dma_device *device)
894{
895 int rc;
896
897 mutex_lock(&dma_list_mutex);
898
899 rc = idr_alloc(&dma_idr, NULL, 0, 0, GFP_KERNEL);
900 if (rc >= 0)
901 device->dev_id = rc;
902
903 mutex_unlock(&dma_list_mutex);
904 return rc < 0 ? rc : 0;
905}
906
907/**
908 * dma_async_device_register - registers DMA devices found
909 * @device: &dma_device
910 */
911int dma_async_device_register(struct dma_device *device)
912{
913 int chancnt = 0, rc;
914 struct dma_chan* chan;
915 atomic_t *idr_ref;
916
917 if (!device)
918 return -ENODEV;
919
920 /* validate device routines */
921 BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
922 !device->device_prep_dma_memcpy);
923 BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
924 !device->device_prep_dma_xor);
925 BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
926 !device->device_prep_dma_xor_val);
927 BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
928 !device->device_prep_dma_pq);
929 BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
930 !device->device_prep_dma_pq_val);
931 BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
932 !device->device_prep_dma_memset);
933 BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
934 !device->device_prep_dma_interrupt);
935 BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
936 !device->device_prep_dma_sg);
937 BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
938 !device->device_prep_dma_cyclic);
939 BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
940 !device->device_prep_interleaved_dma);
941
942 BUG_ON(!device->device_tx_status);
943 BUG_ON(!device->device_issue_pending);
944 BUG_ON(!device->dev);
945
946 /* note: this only matters in the
947 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
948 */
949 if (device_has_all_tx_types(device))
950 dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
951
952 idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
953 if (!idr_ref)
954 return -ENOMEM;
955 rc = get_dma_id(device);
956 if (rc != 0) {
957 kfree(idr_ref);
958 return rc;
959 }
960
961 atomic_set(idr_ref, 0);
962
963 /* represent channels in sysfs. Probably want devs too */
964 list_for_each_entry(chan, &device->channels, device_node) {
965 rc = -ENOMEM;
966 chan->local = alloc_percpu(typeof(*chan->local));
967 if (chan->local == NULL)
968 goto err_out;
969 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
970 if (chan->dev == NULL) {
971 free_percpu(chan->local);
972 chan->local = NULL;
973 goto err_out;
974 }
975
976 chan->chan_id = chancnt++;
977 chan->dev->device.class = &dma_devclass;
978 chan->dev->device.parent = device->dev;
979 chan->dev->chan = chan;
980 chan->dev->idr_ref = idr_ref;
981 chan->dev->dev_id = device->dev_id;
982 atomic_inc(idr_ref);
983 dev_set_name(&chan->dev->device, "dma%dchan%d",
984 device->dev_id, chan->chan_id);
985
986 rc = device_register(&chan->dev->device);
987 if (rc) {
988 free_percpu(chan->local);
989 chan->local = NULL;
990 kfree(chan->dev);
991 atomic_dec(idr_ref);
992 goto err_out;
993 }
994 chan->client_count = 0;
995 }
996 device->chancnt = chancnt;
997
998 mutex_lock(&dma_list_mutex);
999 /* take references on public channels */
1000 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1001 list_for_each_entry(chan, &device->channels, device_node) {
1002 /* if clients are already waiting for channels we need
1003 * to take references on their behalf
1004 */
1005 if (dma_chan_get(chan) == -ENODEV) {
1006 /* note we can only get here for the first
1007 * channel as the remaining channels are
1008 * guaranteed to get a reference
1009 */
1010 rc = -ENODEV;
1011 mutex_unlock(&dma_list_mutex);
1012 goto err_out;
1013 }
1014 }
1015 list_add_tail_rcu(&device->global_node, &dma_device_list);
1016 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1017 device->privatecnt++; /* Always private */
1018 dma_channel_rebalance();
1019 mutex_unlock(&dma_list_mutex);
1020
1021 return 0;
1022
1023err_out:
1024 /* if we never registered a channel just release the idr */
1025 if (atomic_read(idr_ref) == 0) {
1026 mutex_lock(&dma_list_mutex);
1027 idr_remove(&dma_idr, device->dev_id);
1028 mutex_unlock(&dma_list_mutex);
1029 kfree(idr_ref);
1030 return rc;
1031 }
1032
1033 list_for_each_entry(chan, &device->channels, device_node) {
1034 if (chan->local == NULL)
1035 continue;
1036 mutex_lock(&dma_list_mutex);
1037 chan->dev->chan = NULL;
1038 mutex_unlock(&dma_list_mutex);
1039 device_unregister(&chan->dev->device);
1040 free_percpu(chan->local);
1041 }
1042 return rc;
1043}
1044EXPORT_SYMBOL(dma_async_device_register);
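/*
 * Example (illustrative sketch, not part of this file): the skeleton of a
 * provider registering one memcpy-capable channel. The prep, status and
 * issue_pending callbacks are hypothetical stubs assumed to exist elsewhere
 * in the driver (my_prep_memcpy(), my_tx_status(), my_issue_pending()).
 */
static int example_register_provider(struct device *dev,
				     struct dma_device *dma_dev,
				     struct dma_chan *chan)
{
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);

	dma_dev->dev = dev;
	dma_dev->device_prep_dma_memcpy = my_prep_memcpy;
	dma_dev->device_tx_status = my_tx_status;
	dma_dev->device_issue_pending = my_issue_pending;

	INIT_LIST_HEAD(&dma_dev->channels);
	chan->device = dma_dev;
	list_add_tail(&chan->device_node, &dma_dev->channels);

	return dma_async_device_register(dma_dev);
}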
1045
1046/**
1047 * dma_async_device_unregister - unregister a DMA device
1048 * @device: &dma_device
1049 *
1050 * This routine is called by dma driver exit routines; dmaengine holds module
1051 * references to prevent it from being called while channels are in use.
1052 */
1053void dma_async_device_unregister(struct dma_device *device)
1054{
1055 struct dma_chan *chan;
1056
1057 mutex_lock(&dma_list_mutex);
1058 list_del_rcu(&device->global_node);
1059 dma_channel_rebalance();
1060 mutex_unlock(&dma_list_mutex);
1061
1062 list_for_each_entry(chan, &device->channels, device_node) {
1063 WARN_ONCE(chan->client_count,
1064 "%s called while %d clients hold a reference\n",
1065 __func__, chan->client_count);
1066 mutex_lock(&dma_list_mutex);
1067 chan->dev->chan = NULL;
1068 mutex_unlock(&dma_list_mutex);
1069 device_unregister(&chan->dev->device);
1070 free_percpu(chan->local);
1071 }
1072}
1073EXPORT_SYMBOL(dma_async_device_unregister);
1074
1075struct dmaengine_unmap_pool {
1076 struct kmem_cache *cache;
1077 const char *name;
1078 mempool_t *pool;
1079 size_t size;
1080};
1081
1082#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1083static struct dmaengine_unmap_pool unmap_pool[] = {
1084 __UNMAP_POOL(2),
1085 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1086 __UNMAP_POOL(16),
1087 __UNMAP_POOL(128),
1088 __UNMAP_POOL(256),
1089 #endif
1090};
1091
1092static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1093{
1094 int order = get_count_order(nr);
1095
1096 switch (order) {
1097 case 0 ... 1:
1098 return &unmap_pool[0];
1099 case 2 ... 4:
1100 return &unmap_pool[1];
1101 case 5 ... 7:
1102 return &unmap_pool[2];
1103 case 8:
1104 return &unmap_pool[3];
1105 default:
1106 BUG();
1107 return NULL;
1108 }
1109}
1110
1111static void dmaengine_unmap(struct kref *kref)
1112{
1113 struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1114 struct device *dev = unmap->dev;
1115 int cnt, i;
1116
1117 cnt = unmap->to_cnt;
1118 for (i = 0; i < cnt; i++)
1119 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1120 DMA_TO_DEVICE);
1121 cnt += unmap->from_cnt;
1122 for (; i < cnt; i++)
1123 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1124 DMA_FROM_DEVICE);
1125 cnt += unmap->bidi_cnt;
1126 for (; i < cnt; i++) {
1127 if (unmap->addr[i] == 0)
1128 continue;
1129 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1130 DMA_BIDIRECTIONAL);
1131 }
1132 cnt = unmap->map_cnt;
1133 mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1134}
1135
1136void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1137{
1138 if (unmap)
1139 kref_put(&unmap->kref, dmaengine_unmap);
1140}
1141EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1142
1143static void dmaengine_destroy_unmap_pool(void)
1144{
1145 int i;
1146
1147 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1148 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1149
1150 mempool_destroy(p->pool);
1151 p->pool = NULL;
1152 kmem_cache_destroy(p->cache);
1153 p->cache = NULL;
1154 }
1155}
1156
1157static int __init dmaengine_init_unmap_pool(void)
1158{
1159 int i;
1160
1161 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1162 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1163 size_t size;
1164
1165 size = sizeof(struct dmaengine_unmap_data) +
1166 sizeof(dma_addr_t) * p->size;
1167
1168 p->cache = kmem_cache_create(p->name, size, 0,
1169 SLAB_HWCACHE_ALIGN, NULL);
1170 if (!p->cache)
1171 break;
1172 p->pool = mempool_create_slab_pool(1, p->cache);
1173 if (!p->pool)
1174 break;
1175 }
1176
1177 if (i == ARRAY_SIZE(unmap_pool))
1178 return 0;
1179
1180 dmaengine_destroy_unmap_pool();
1181 return -ENOMEM;
1182}
1183
1184struct dmaengine_unmap_data *
1185dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1186{
1187 struct dmaengine_unmap_data *unmap;
1188
1189 unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1190 if (!unmap)
1191 return NULL;
1192
1193 memset(unmap, 0, sizeof(*unmap));
1194 kref_init(&unmap->kref);
1195 unmap->dev = dev;
1196 unmap->map_cnt = nr;
1197
1198 return unmap;
1199}
1200EXPORT_SYMBOL(dmaengine_get_unmap_data);
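/*
 * Example (illustrative sketch, not part of this file): how an offload user
 * might track a source and a destination page with the unmap API above.
 * Mapping-error checks are omitted for brevity; real users (e.g. async_tx)
 * also attach the unmap data to the descriptor with dma_set_unmap() before
 * submitting, and drop their reference with dmaengine_unmap_put() on failure.
 */
static struct dmaengine_unmap_data *
example_map_for_copy(struct device *dev, struct page *src, struct page *dst,
		     size_t len)
{
	struct dmaengine_unmap_data *unmap;

	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
	if (!unmap)
		return NULL;

	unmap->len = len;
	unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
	unmap->to_cnt = 1;
	unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
	unmap->from_cnt = 1;

	return unmap;
}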
1201
1202void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1203 struct dma_chan *chan)
1204{
1205 tx->chan = chan;
1206 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1207 spin_lock_init(&tx->lock);
1208 #endif
1209}
1210EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1211
1212/* dma_wait_for_async_tx - spin wait for a transaction to complete
1213 * @tx: in-flight transaction to wait on
1214 */
1215enum dma_status
1216dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1217{
1218 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1219
1220 if (!tx)
1221 return DMA_COMPLETE;
1222
1223 while (tx->cookie == -EBUSY) {
1224 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1225 pr_err("%s timeout waiting for descriptor submission\n",
1226 __func__);
1227 return DMA_ERROR;
1228 }
1229 cpu_relax();
1230 }
1231 return dma_sync_wait(tx->chan, tx->cookie);
1232}
1233EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1234
1235/* dma_run_dependencies - helper routine for dma drivers to process
1236 * (start) dependent operations on their target channel
1237 * @tx: transaction with dependencies
1238 */
1239void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1240{
1241 struct dma_async_tx_descriptor *dep = txd_next(tx);
1242 struct dma_async_tx_descriptor *dep_next;
1243 struct dma_chan *chan;
1244
1245 if (!dep)
1246 return;
1247
1248 /* we'll submit tx->next now, so clear the link */
1249 txd_clear_next(tx);
1250 chan = dep->chan;
1251
1252 /* keep submitting until a channel switch is detected;
1253 * in that case we will be called again as a result of
1254 * processing the interrupt from async_tx_channel_switch
1255 */
1256 for (; dep; dep = dep_next) {
1257 txd_lock(dep);
1258 txd_clear_parent(dep);
1259 dep_next = txd_next(dep);
1260 if (dep_next && dep_next->chan == chan)
1261 txd_clear_next(dep); /* ->next will be submitted */
1262 else
1263 dep_next = NULL; /* submit current dep and terminate */
1264 txd_unlock(dep);
1265
1266 dep->tx_submit(dep);
1267 }
1268
1269 chan->device->device_issue_pending(chan);
1270}
1271EXPORT_SYMBOL_GPL(dma_run_dependencies);
1272
1273static int __init dma_bus_init(void)
1274{
1275 int err = dmaengine_init_unmap_pool();
1276
1277 if (err)
1278 return err;
1279 return class_register(&dma_devclass);
1280}
1281arch_initcall(dma_bus_init);
1282
1283
1/*
2 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the Free
6 * Software Foundation; either version 2 of the License, or (at your option)
7 * any later version.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc., 59
16 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called COPYING.
20 */
21
22/*
23 * This code implements the DMA subsystem. It provides a HW-neutral interface
24 * for other kernel code to use asynchronous memory copy capabilities,
25 * if present, and allows different HW DMA drivers to register as providing
26 * this capability.
27 *
28 * Due to the fact we are accelerating what is already a relatively fast
29 * operation, the code goes to great lengths to avoid additional overhead,
30 * such as locking.
31 *
32 * LOCKING:
33 *
34 * The subsystem keeps a global list of dma_device structs it is protected by a
35 * mutex, dma_list_mutex.
36 *
37 * A subsystem can get access to a channel by calling dmaengine_get() followed
38 * by dma_find_channel(), or if it has need for an exclusive channel it can call
39 * dma_request_channel(). Once a channel is allocated a reference is taken
40 * against its corresponding driver to disable removal.
41 *
42 * Each device has a channels list, which runs unlocked but is never modified
43 * once the device is registered, it's just setup by the driver.
44 *
45 * See Documentation/dmaengine.txt for more details
46 */
47
48#include <linux/dma-mapping.h>
49#include <linux/init.h>
50#include <linux/module.h>
51#include <linux/mm.h>
52#include <linux/device.h>
53#include <linux/dmaengine.h>
54#include <linux/hardirq.h>
55#include <linux/spinlock.h>
56#include <linux/percpu.h>
57#include <linux/rcupdate.h>
58#include <linux/mutex.h>
59#include <linux/jiffies.h>
60#include <linux/rculist.h>
61#include <linux/idr.h>
62#include <linux/slab.h>
63
64static DEFINE_MUTEX(dma_list_mutex);
65static DEFINE_IDR(dma_idr);
66static LIST_HEAD(dma_device_list);
67static long dmaengine_ref_count;
68
69/* --- sysfs implementation --- */
70
71/**
72 * dev_to_dma_chan - convert a device pointer to the its sysfs container object
73 * @dev - device node
74 *
75 * Must be called under dma_list_mutex
76 */
77static struct dma_chan *dev_to_dma_chan(struct device *dev)
78{
79 struct dma_chan_dev *chan_dev;
80
81 chan_dev = container_of(dev, typeof(*chan_dev), device);
82 return chan_dev->chan;
83}
84
85static ssize_t show_memcpy_count(struct device *dev, struct device_attribute *attr, char *buf)
86{
87 struct dma_chan *chan;
88 unsigned long count = 0;
89 int i;
90 int err;
91
92 mutex_lock(&dma_list_mutex);
93 chan = dev_to_dma_chan(dev);
94 if (chan) {
95 for_each_possible_cpu(i)
96 count += per_cpu_ptr(chan->local, i)->memcpy_count;
97 err = sprintf(buf, "%lu\n", count);
98 } else
99 err = -ENODEV;
100 mutex_unlock(&dma_list_mutex);
101
102 return err;
103}
104
105static ssize_t show_bytes_transferred(struct device *dev, struct device_attribute *attr,
106 char *buf)
107{
108 struct dma_chan *chan;
109 unsigned long count = 0;
110 int i;
111 int err;
112
113 mutex_lock(&dma_list_mutex);
114 chan = dev_to_dma_chan(dev);
115 if (chan) {
116 for_each_possible_cpu(i)
117 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
118 err = sprintf(buf, "%lu\n", count);
119 } else
120 err = -ENODEV;
121 mutex_unlock(&dma_list_mutex);
122
123 return err;
124}
125
126static ssize_t show_in_use(struct device *dev, struct device_attribute *attr, char *buf)
127{
128 struct dma_chan *chan;
129 int err;
130
131 mutex_lock(&dma_list_mutex);
132 chan = dev_to_dma_chan(dev);
133 if (chan)
134 err = sprintf(buf, "%d\n", chan->client_count);
135 else
136 err = -ENODEV;
137 mutex_unlock(&dma_list_mutex);
138
139 return err;
140}
141
142static struct device_attribute dma_attrs[] = {
143 __ATTR(memcpy_count, S_IRUGO, show_memcpy_count, NULL),
144 __ATTR(bytes_transferred, S_IRUGO, show_bytes_transferred, NULL),
145 __ATTR(in_use, S_IRUGO, show_in_use, NULL),
146 __ATTR_NULL
147};
148
149static void chan_dev_release(struct device *dev)
150{
151 struct dma_chan_dev *chan_dev;
152
153 chan_dev = container_of(dev, typeof(*chan_dev), device);
154 if (atomic_dec_and_test(chan_dev->idr_ref)) {
155 mutex_lock(&dma_list_mutex);
156 idr_remove(&dma_idr, chan_dev->dev_id);
157 mutex_unlock(&dma_list_mutex);
158 kfree(chan_dev->idr_ref);
159 }
160 kfree(chan_dev);
161}
162
163static struct class dma_devclass = {
164 .name = "dma",
165 .dev_attrs = dma_attrs,
166 .dev_release = chan_dev_release,
167};
168
169/* --- client and device registration --- */
170
171#define dma_device_satisfies_mask(device, mask) \
172 __dma_device_satisfies_mask((device), &(mask))
173static int
174__dma_device_satisfies_mask(struct dma_device *device, dma_cap_mask_t *want)
175{
176 dma_cap_mask_t has;
177
178 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
179 DMA_TX_TYPE_END);
180 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
181}
182
183static struct module *dma_chan_to_owner(struct dma_chan *chan)
184{
185 return chan->device->dev->driver->owner;
186}
187
188/**
189 * balance_ref_count - catch up the channel reference count
190 * @chan - channel to balance ->client_count versus dmaengine_ref_count
191 *
192 * balance_ref_count must be called under dma_list_mutex
193 */
194static void balance_ref_count(struct dma_chan *chan)
195{
196 struct module *owner = dma_chan_to_owner(chan);
197
198 while (chan->client_count < dmaengine_ref_count) {
199 __module_get(owner);
200 chan->client_count++;
201 }
202}
203
204/**
205 * dma_chan_get - try to grab a dma channel's parent driver module
206 * @chan - channel to grab
207 *
208 * Must be called under dma_list_mutex
209 */
210static int dma_chan_get(struct dma_chan *chan)
211{
212 int err = -ENODEV;
213 struct module *owner = dma_chan_to_owner(chan);
214
215 if (chan->client_count) {
216 __module_get(owner);
217 err = 0;
218 } else if (try_module_get(owner))
219 err = 0;
220
221 if (err == 0)
222 chan->client_count++;
223
224 /* allocate upon first client reference */
225 if (chan->client_count == 1 && err == 0) {
226 int desc_cnt = chan->device->device_alloc_chan_resources(chan);
227
228 if (desc_cnt < 0) {
229 err = desc_cnt;
230 chan->client_count = 0;
231 module_put(owner);
232 } else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
233 balance_ref_count(chan);
234 }
235
236 return err;
237}
238
239/**
240 * dma_chan_put - drop a reference to a dma channel's parent driver module
241 * @chan - channel to release
242 *
243 * Must be called under dma_list_mutex
244 */
245static void dma_chan_put(struct dma_chan *chan)
246{
247 if (!chan->client_count)
248 return; /* this channel failed alloc_chan_resources */
249 chan->client_count--;
250 module_put(dma_chan_to_owner(chan));
251 if (chan->client_count == 0)
252 chan->device->device_free_chan_resources(chan);
253}
254
255enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
256{
257 enum dma_status status;
258 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
259
260 dma_async_issue_pending(chan);
261 do {
262 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
263 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
264 printk(KERN_ERR "dma_sync_wait_timeout!\n");
265 return DMA_ERROR;
266 }
267 } while (status == DMA_IN_PROGRESS);
268
269 return status;
270}
271EXPORT_SYMBOL(dma_sync_wait);
272
273/**
274 * dma_cap_mask_all - enable iteration over all operation types
275 */
276static dma_cap_mask_t dma_cap_mask_all;
277
278/**
279 * dma_chan_tbl_ent - tracks channel allocations per core/operation
280 * @chan - associated channel for this entry
281 */
282struct dma_chan_tbl_ent {
283 struct dma_chan *chan;
284};
285
286/**
287 * channel_table - percpu lookup table for memory-to-memory offload providers
288 */
289static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
290
291static int __init dma_channel_table_init(void)
292{
293 enum dma_transaction_type cap;
294 int err = 0;
295
296 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
297
298 /* 'interrupt', 'private', and 'slave' are channel capabilities,
299 * but are not associated with an operation so they do not need
300 * an entry in the channel_table
301 */
302 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
303 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
304 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
305
306 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
307 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
308 if (!channel_table[cap]) {
309 err = -ENOMEM;
310 break;
311 }
312 }
313
314 if (err) {
315 pr_err("dmaengine: initialization failure\n");
316 for_each_dma_cap_mask(cap, dma_cap_mask_all)
317 if (channel_table[cap])
318 free_percpu(channel_table[cap]);
319 }
320
321 return err;
322}
323arch_initcall(dma_channel_table_init);
324
325/**
326 * dma_find_channel - find a channel to carry out the operation
327 * @tx_type: transaction type
328 */
329struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
330{
331 return this_cpu_read(channel_table[tx_type]->chan);
332}
333EXPORT_SYMBOL(dma_find_channel);
334
335/*
336 * net_dma_find_channel - find a channel for net_dma
337 * net_dma has alignment requirements
338 */
339struct dma_chan *net_dma_find_channel(void)
340{
341 struct dma_chan *chan = dma_find_channel(DMA_MEMCPY);
342 if (chan && !is_dma_copy_aligned(chan->device, 1, 1, 1))
343 return NULL;
344
345 return chan;
346}
347EXPORT_SYMBOL(net_dma_find_channel);
348
349/**
350 * dma_issue_pending_all - flush all pending operations across all channels
351 */
352void dma_issue_pending_all(void)
353{
354 struct dma_device *device;
355 struct dma_chan *chan;
356
357 rcu_read_lock();
358 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
359 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
360 continue;
361 list_for_each_entry(chan, &device->channels, device_node)
362 if (chan->client_count)
363 device->device_issue_pending(chan);
364 }
365 rcu_read_unlock();
366}
367EXPORT_SYMBOL(dma_issue_pending_all);
368
369/**
370 * nth_chan - returns the nth channel of the given capability
371 * @cap: capability to match
372 * @n: nth channel desired
373 *
374 * Defaults to returning the channel with the desired capability and the
375 * lowest reference count when 'n' cannot be satisfied. Must be called
376 * under dma_list_mutex.
377 */
378static struct dma_chan *nth_chan(enum dma_transaction_type cap, int n)
379{
380 struct dma_device *device;
381 struct dma_chan *chan;
382 struct dma_chan *ret = NULL;
383 struct dma_chan *min = NULL;
384
385 list_for_each_entry(device, &dma_device_list, global_node) {
386 if (!dma_has_cap(cap, device->cap_mask) ||
387 dma_has_cap(DMA_PRIVATE, device->cap_mask))
388 continue;
389 list_for_each_entry(chan, &device->channels, device_node) {
390 if (!chan->client_count)
391 continue;
392 if (!min)
393 min = chan;
394 else if (chan->table_count < min->table_count)
395 min = chan;
396
397 if (n-- == 0) {
398 ret = chan;
399 break; /* done */
400 }
401 }
402 if (ret)
403 break; /* done */
404 }
405
406 if (!ret)
407 ret = min;
408
409 if (ret)
410 ret->table_count++;
411
412 return ret;
413}
414
415/**
416 * dma_channel_rebalance - redistribute the available channels
417 *
418 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
419 * operation type) in the SMP case, and operation isolation (avoid
420 * multi-tasking channels) in the non-SMP case. Must be called under
421 * dma_list_mutex.
422 */
423static void dma_channel_rebalance(void)
424{
425 struct dma_chan *chan;
426 struct dma_device *device;
427 int cpu;
428 int cap;
429 int n;
430
431 /* undo the last distribution */
432 for_each_dma_cap_mask(cap, dma_cap_mask_all)
433 for_each_possible_cpu(cpu)
434 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
435
436 list_for_each_entry(device, &dma_device_list, global_node) {
437 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
438 continue;
439 list_for_each_entry(chan, &device->channels, device_node)
440 chan->table_count = 0;
441 }
442
443 /* don't populate the channel_table if no clients are available */
444 if (!dmaengine_ref_count)
445 return;
446
447 /* redistribute available channels */
448 n = 0;
449 for_each_dma_cap_mask(cap, dma_cap_mask_all)
450 for_each_online_cpu(cpu) {
451 if (num_possible_cpus() > 1)
452 chan = nth_chan(cap, n++);
453 else
454 chan = nth_chan(cap, -1);
455
456 per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
457 }
458}
459
460static struct dma_chan *private_candidate(dma_cap_mask_t *mask, struct dma_device *dev,
461 dma_filter_fn fn, void *fn_param)
462{
463 struct dma_chan *chan;
464
465 if (!__dma_device_satisfies_mask(dev, mask)) {
466 pr_debug("%s: wrong capabilities\n", __func__);
467 return NULL;
468 }
469 /* devices with multiple channels need special handling as we need to
470 * ensure that all channels are either private or public.
471 */
472 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
473 list_for_each_entry(chan, &dev->channels, device_node) {
474 /* some channels are already publicly allocated */
475 if (chan->client_count)
476 return NULL;
477 }
478
479 list_for_each_entry(chan, &dev->channels, device_node) {
480 if (chan->client_count) {
481 pr_debug("%s: %s busy\n",
482 __func__, dma_chan_name(chan));
483 continue;
484 }
485 if (fn && !fn(chan, fn_param)) {
486 pr_debug("%s: %s filter said false\n",
487 __func__, dma_chan_name(chan));
488 continue;
489 }
490 return chan;
491 }
492
493 return NULL;
494}
495
496/**
497 * dma_request_channel - try to allocate an exclusive channel
498 * @mask: capabilities that the channel must satisfy
499 * @fn: optional callback to disposition available channels
500 * @fn_param: opaque parameter to pass to dma_filter_fn
501 */
502struct dma_chan *__dma_request_channel(dma_cap_mask_t *mask, dma_filter_fn fn, void *fn_param)
503{
504 struct dma_device *device, *_d;
505 struct dma_chan *chan = NULL;
506 int err;
507
508 /* Find a channel */
509 mutex_lock(&dma_list_mutex);
510 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
511 chan = private_candidate(mask, device, fn, fn_param);
512 if (chan) {
513 /* Found a suitable channel, try to grab, prep, and
514 * return it. We first set DMA_PRIVATE to disable
515 * balance_ref_count as this channel will not be
516 * published in the general-purpose allocator
517 */
518 dma_cap_set(DMA_PRIVATE, device->cap_mask);
519 device->privatecnt++;
520 err = dma_chan_get(chan);
521
522 if (err == -ENODEV) {
523 pr_debug("%s: %s module removed\n", __func__,
524 dma_chan_name(chan));
525 list_del_rcu(&device->global_node);
526 } else if (err)
527 pr_debug("%s: failed to get %s: (%d)\n",
528 __func__, dma_chan_name(chan), err);
529 else
530 break;
531 if (--device->privatecnt == 0)
532 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
533 chan = NULL;
534 }
535 }
536 mutex_unlock(&dma_list_mutex);
537
538 pr_debug("%s: %s (%s)\n", __func__, chan ? "success" : "fail",
539 chan ? dma_chan_name(chan) : NULL);
540
541 return chan;
542}
543EXPORT_SYMBOL_GPL(__dma_request_channel);
544
545void dma_release_channel(struct dma_chan *chan)
546{
547 mutex_lock(&dma_list_mutex);
548 WARN_ONCE(chan->client_count != 1,
549 "chan reference count %d != 1\n", chan->client_count);
550 dma_chan_put(chan);
551 /* drop PRIVATE cap enabled by __dma_request_channel() */
552 if (--chan->device->privatecnt == 0)
553 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
554 mutex_unlock(&dma_list_mutex);
555}
556EXPORT_SYMBOL_GPL(dma_release_channel);
557
558/**
559 * dmaengine_get - register interest in dma_channels
560 */
561void dmaengine_get(void)
562{
563 struct dma_device *device, *_d;
564 struct dma_chan *chan;
565 int err;
566
567 mutex_lock(&dma_list_mutex);
568 dmaengine_ref_count++;
569
570 /* try to grab channels */
571 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
572 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
573 continue;
574 list_for_each_entry(chan, &device->channels, device_node) {
575 err = dma_chan_get(chan);
576 if (err == -ENODEV) {
577 /* module removed before we could use it */
578 list_del_rcu(&device->global_node);
579 break;
580 } else if (err)
581 pr_err("%s: failed to get %s: (%d)\n",
582 __func__, dma_chan_name(chan), err);
583 }
584 }
585
586 /* if this is the first reference and there were channels
587 * waiting we need to rebalance to get those channels
588 * incorporated into the channel table
589 */
590 if (dmaengine_ref_count == 1)
591 dma_channel_rebalance();
592 mutex_unlock(&dma_list_mutex);
593}
594EXPORT_SYMBOL(dmaengine_get);
595
596/**
597 * dmaengine_put - let dma drivers be removed when ref_count == 0
598 */
599void dmaengine_put(void)
600{
601 struct dma_device *device;
602 struct dma_chan *chan;
603
604 mutex_lock(&dma_list_mutex);
605 dmaengine_ref_count--;
606 BUG_ON(dmaengine_ref_count < 0);
607 /* drop channel references */
608 list_for_each_entry(device, &dma_device_list, global_node) {
609 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
610 continue;
611 list_for_each_entry(chan, &device->channels, device_node)
612 dma_chan_put(chan);
613 }
614 mutex_unlock(&dma_list_mutex);
615}
616EXPORT_SYMBOL(dmaengine_put);
617
618static bool device_has_all_tx_types(struct dma_device *device)
619{
620 /* A device that satisfies this test has channels that will never cause
621 * an async_tx channel switch event as all possible operation types can
622 * be handled.
623 */
624 #ifdef CONFIG_ASYNC_TX_DMA
625 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
626 return false;
627 #endif
628
629 #if defined(CONFIG_ASYNC_MEMCPY) || defined(CONFIG_ASYNC_MEMCPY_MODULE)
630 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
631 return false;
632 #endif
633
634 #if defined(CONFIG_ASYNC_MEMSET) || defined(CONFIG_ASYNC_MEMSET_MODULE)
635 if (!dma_has_cap(DMA_MEMSET, device->cap_mask))
636 return false;
637 #endif
638
639 #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
640 if (!dma_has_cap(DMA_XOR, device->cap_mask))
641 return false;
642
643 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
644 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
645 return false;
646 #endif
647 #endif
648
649 #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
650 if (!dma_has_cap(DMA_PQ, device->cap_mask))
651 return false;
652
653 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
654 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
655 return false;
656 #endif
657 #endif
658
659 return true;
660}
661
662static int get_dma_id(struct dma_device *device)
663{
664 int rc;
665
666 idr_retry:
667 if (!idr_pre_get(&dma_idr, GFP_KERNEL))
668 return -ENOMEM;
669 mutex_lock(&dma_list_mutex);
670 rc = idr_get_new(&dma_idr, NULL, &device->dev_id);
671 mutex_unlock(&dma_list_mutex);
672 if (rc == -EAGAIN)
673 goto idr_retry;
674 else if (rc != 0)
675 return rc;
676
677 return 0;
678}
679
680/**
681 * dma_async_device_register - registers DMA devices found
682 * @device: &dma_device
683 */
int dma_async_device_register(struct dma_device *device)
{
	int chancnt = 0, rc;
	struct dma_chan *chan;
	atomic_t *idr_ref;

	if (!device)
		return -ENODEV;

	/* validate device routines */
	BUG_ON(dma_has_cap(DMA_MEMCPY, device->cap_mask) &&
		!device->device_prep_dma_memcpy);
	BUG_ON(dma_has_cap(DMA_XOR, device->cap_mask) &&
		!device->device_prep_dma_xor);
	BUG_ON(dma_has_cap(DMA_XOR_VAL, device->cap_mask) &&
		!device->device_prep_dma_xor_val);
	BUG_ON(dma_has_cap(DMA_PQ, device->cap_mask) &&
		!device->device_prep_dma_pq);
	BUG_ON(dma_has_cap(DMA_PQ_VAL, device->cap_mask) &&
		!device->device_prep_dma_pq_val);
	BUG_ON(dma_has_cap(DMA_MEMSET, device->cap_mask) &&
		!device->device_prep_dma_memset);
	BUG_ON(dma_has_cap(DMA_INTERRUPT, device->cap_mask) &&
		!device->device_prep_dma_interrupt);
	BUG_ON(dma_has_cap(DMA_SG, device->cap_mask) &&
		!device->device_prep_dma_sg);
	BUG_ON(dma_has_cap(DMA_CYCLIC, device->cap_mask) &&
		!device->device_prep_dma_cyclic);
	BUG_ON(dma_has_cap(DMA_SLAVE, device->cap_mask) &&
		!device->device_control);
	BUG_ON(dma_has_cap(DMA_INTERLEAVE, device->cap_mask) &&
		!device->device_prep_interleaved_dma);

	BUG_ON(!device->device_alloc_chan_resources);
	BUG_ON(!device->device_free_chan_resources);
	BUG_ON(!device->device_tx_status);
	BUG_ON(!device->device_issue_pending);
	BUG_ON(!device->dev);

	/* note: this only matters in the
	 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
	 */
	if (device_has_all_tx_types(device))
		dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

	idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
	if (!idr_ref)
		return -ENOMEM;
	rc = get_dma_id(device);
	if (rc != 0) {
		kfree(idr_ref);
		return rc;
	}

	atomic_set(idr_ref, 0);

	/* represent channels in sysfs. Probably want devs too */
	list_for_each_entry(chan, &device->channels, device_node) {
		rc = -ENOMEM;
		chan->local = alloc_percpu(typeof(*chan->local));
		if (chan->local == NULL)
			goto err_out;
		chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
		if (chan->dev == NULL) {
			free_percpu(chan->local);
			chan->local = NULL;
			goto err_out;
		}

		chan->chan_id = chancnt++;
		chan->dev->device.class = &dma_devclass;
		chan->dev->device.parent = device->dev;
		chan->dev->chan = chan;
		chan->dev->idr_ref = idr_ref;
		chan->dev->dev_id = device->dev_id;
		atomic_inc(idr_ref);
		dev_set_name(&chan->dev->device, "dma%dchan%d",
			     device->dev_id, chan->chan_id);

		rc = device_register(&chan->dev->device);
		if (rc) {
			free_percpu(chan->local);
			chan->local = NULL;
			kfree(chan->dev);
			atomic_dec(idr_ref);
			goto err_out;
		}
		chan->client_count = 0;
	}
	device->chancnt = chancnt;

	mutex_lock(&dma_list_mutex);
	/* take references on public channels */
	if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
		list_for_each_entry(chan, &device->channels, device_node) {
			/* if clients are already waiting for channels we need
			 * to take references on their behalf
			 */
			if (dma_chan_get(chan) == -ENODEV) {
				/* note we can only get here for the first
				 * channel as the remaining channels are
				 * guaranteed to get a reference
				 */
				rc = -ENODEV;
				mutex_unlock(&dma_list_mutex);
				goto err_out;
			}
		}
	list_add_tail_rcu(&device->global_node, &dma_device_list);
	if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
		device->privatecnt++;	/* Always private */
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	return 0;

err_out:
	/* if we never registered a channel just release the idr */
	if (atomic_read(idr_ref) == 0) {
		mutex_lock(&dma_list_mutex);
		idr_remove(&dma_idr, device->dev_id);
		mutex_unlock(&dma_list_mutex);
		kfree(idr_ref);
		return rc;
	}

	list_for_each_entry(chan, &device->channels, device_node) {
		if (chan->local == NULL)
			continue;
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
	return rc;
}
EXPORT_SYMBOL(dma_async_device_register);
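
/*
 * Non-authoritative sketch of the provider side of the interface (every
 * foo_* symbol is hypothetical): a driver advertises its capabilities,
 * supplies the matching callbacks checked by the BUG_ON()s above, links its
 * channels onto the device's channel list and only then registers.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct foo_dma *fd;
 *		struct dma_device *dd;
 *
 *		fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
 *		if (!fd)
 *			return -ENOMEM;
 *		dd = &fd->dma_dev;
 *
 *		dma_cap_set(DMA_MEMCPY, dd->cap_mask);
 *		dd->device_prep_dma_memcpy = foo_prep_memcpy;
 *		dd->device_alloc_chan_resources = foo_alloc_chan_resources;
 *		dd->device_free_chan_resources = foo_free_chan_resources;
 *		dd->device_tx_status = foo_tx_status;
 *		dd->device_issue_pending = foo_issue_pending;
 *		dd->dev = &pdev->dev;
 *
 *		INIT_LIST_HEAD(&dd->channels);
 *		fd->chan.device = dd;
 *		list_add_tail(&fd->chan.device_node, &dd->channels);
 *
 *		platform_set_drvdata(pdev, fd);
 *		return dma_async_device_register(dd);
 *	}
 */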

/**
 * dma_async_device_unregister - unregister a DMA device
 * @device: &dma_device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it from being called while channels are in use.
 */
void dma_async_device_unregister(struct dma_device *device)
{
	struct dma_chan *chan;

	mutex_lock(&dma_list_mutex);
	list_del_rcu(&device->global_node);
	dma_channel_rebalance();
	mutex_unlock(&dma_list_mutex);

	list_for_each_entry(chan, &device->channels, device_node) {
		WARN_ONCE(chan->client_count,
			  "%s called while %d clients hold a reference\n",
			  __func__, chan->client_count);
		mutex_lock(&dma_list_mutex);
		chan->dev->chan = NULL;
		mutex_unlock(&dma_list_mutex);
		device_unregister(&chan->dev->device);
		free_percpu(chan->local);
	}
}
EXPORT_SYMBOL(dma_async_device_unregister);
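
/*
 * Sketch of the matching driver teardown path (foo_* names hypothetical,
 * continuing the probe example above): the exit routine simply unregisters
 * the device it registered at probe time; the WARN_ONCE() above fires if a
 * client still holds a channel reference at that point.
 *
 *	static int foo_remove(struct platform_device *pdev)
 *	{
 *		struct foo_dma *fd = platform_get_drvdata(pdev);
 *
 *		dma_async_device_unregister(&fd->dma_dev);
 *		return 0;
 *	}
 */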

/**
 * dma_async_memcpy_buf_to_buf - offloaded copy between virtual addresses
 * @chan: DMA channel to offload copy to
 * @dest: destination address (virtual)
 * @src: source address (virtual)
 * @len: length
 *
 * Both @dest and @src must be mappable to a bus address according to the
 * DMA mapping API rules for streaming mappings.
 * Both @dest and @src must stay memory resident (kernel memory or locked
 * user space pages).
 */
dma_cookie_t
dma_async_memcpy_buf_to_buf(struct dma_chan *chan, void *dest,
			void *src, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, src, len, DMA_TO_DEVICE);
	dma_dest = dma_map_single(dev->dev, dest, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK |
		DMA_COMPL_SRC_UNMAP_SINGLE |
		DMA_COMPL_DEST_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_single(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_buf);
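
/*
 * Hedged usage sketch for a client that already holds a dmaengine reference
 * and a DMA_MEMCPY channel (foo_copy() is hypothetical): the cookie returned
 * above only makes progress once dma_async_issue_pending() is called, after
 * which dma_sync_wait() can poll it to completion.
 *
 *	static int foo_copy(struct dma_chan *chan, void *dst, void *src,
 *			    size_t len)
 *	{
 *		dma_cookie_t cookie;
 *
 *		cookie = dma_async_memcpy_buf_to_buf(chan, dst, src, len);
 *		if (cookie < 0)
 *			return cookie;
 *		dma_async_issue_pending(chan);
 *		return dma_sync_wait(chan, cookie) == DMA_SUCCESS ? 0 : -EIO;
 *	}
 */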

/**
 * dma_async_memcpy_buf_to_pg - offloaded copy from address to page
 * @chan: DMA channel to offload copy to
 * @page: destination page
 * @offset: offset in page to copy to
 * @kdata: source address (virtual)
 * @len: length
 *
 * Both @page/@offset and @kdata must be mappable to a bus address according
 * to the DMA mapping API rules for streaming mappings.
 * Both @page/@offset and @kdata must stay memory resident (kernel memory or
 * locked user space pages)
 */
dma_cookie_t
dma_async_memcpy_buf_to_pg(struct dma_chan *chan, struct page *page,
			unsigned int offset, void *kdata, size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_single(dev->dev, kdata, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, page, offset, len, DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK | DMA_COMPL_SRC_UNMAP_SINGLE;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_single(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_buf_to_pg);

/**
 * dma_async_memcpy_pg_to_pg - offloaded copy from page to page
 * @chan: DMA channel to offload copy to
 * @dest_pg: destination page
 * @dest_off: offset in page to copy to
 * @src_pg: source page
 * @src_off: offset in page to copy from
 * @len: length
 *
 * Both @dest_pg/@dest_off and @src_pg/@src_off must be mappable to a bus
 * address according to the DMA mapping API rules for streaming mappings.
 * Both @dest_pg/@dest_off and @src_pg/@src_off must stay memory resident
 * (kernel memory or locked user space pages).
 */
dma_cookie_t
dma_async_memcpy_pg_to_pg(struct dma_chan *chan, struct page *dest_pg,
	unsigned int dest_off, struct page *src_pg, unsigned int src_off,
	size_t len)
{
	struct dma_device *dev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_addr_t dma_dest, dma_src;
	dma_cookie_t cookie;
	unsigned long flags;

	dma_src = dma_map_page(dev->dev, src_pg, src_off, len, DMA_TO_DEVICE);
	dma_dest = dma_map_page(dev->dev, dest_pg, dest_off, len,
				DMA_FROM_DEVICE);
	flags = DMA_CTRL_ACK;
	tx = dev->device_prep_dma_memcpy(chan, dma_dest, dma_src, len, flags);

	if (!tx) {
		dma_unmap_page(dev->dev, dma_src, len, DMA_TO_DEVICE);
		dma_unmap_page(dev->dev, dma_dest, len, DMA_FROM_DEVICE);
		return -ENOMEM;
	}

	tx->callback = NULL;
	cookie = tx->tx_submit(tx);

	preempt_disable();
	__this_cpu_add(chan->local->bytes_transferred, len);
	__this_cpu_inc(chan->local->memcpy_count);
	preempt_enable();

	return cookie;
}
EXPORT_SYMBOL(dma_async_memcpy_pg_to_pg);

void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
	struct dma_chan *chan)
{
	tx->chan = chan;
	#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
	spin_lock_init(&tx->lock);
	#endif
}
EXPORT_SYMBOL(dma_async_tx_descriptor_init);
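
/*
 * Rough sketch of how a provider is expected to use the initializer above
 * from its prep callback (foo_desc, foo_alloc_desc and foo_tx_submit are
 * hypothetical): every descriptor handed back to a client must have been
 * through dma_async_tx_descriptor_init() so that ->chan, and the
 * channel-switch lock when configured, are valid before ->tx_submit() runs.
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 *			size_t len, unsigned long flags)
 *	{
 *		struct foo_desc *d = foo_alloc_desc(chan);
 *
 *		if (!d)
 *			return NULL;
 *
 *		dma_async_tx_descriptor_init(&d->txd, chan);
 *		d->txd.flags = flags;
 *		d->txd.tx_submit = foo_tx_submit;
 *		return &d->txd;
 *	}
 */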

/**
 * dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
{
	unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);

	if (!tx)
		return DMA_SUCCESS;

	while (tx->cookie == -EBUSY) {
		if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
			pr_err("%s timeout waiting for descriptor submission\n",
			       __func__);
			return DMA_ERROR;
		}
		cpu_relax();
	}
	return dma_sync_wait(tx->chan, tx->cookie);
}
EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
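
/*
 * Minimal illustration (foo_quiesce() is hypothetical) of the synchronous
 * fallback async_tx clients rely on: given a descriptor returned by an
 * earlier prep/submit, spin until it has been submitted and then wait for
 * the channel to complete it.
 *
 *	static void foo_quiesce(struct dma_async_tx_descriptor *tx)
 *	{
 *		if (dma_wait_for_async_tx(tx) == DMA_ERROR)
 *			pr_warn("foo: transaction failed or timed out\n");
 *	}
 */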

/**
 * dma_run_dependencies - helper routine for dma drivers to process
 *	(start) dependent operations on their target channel
 * @tx: transaction with dependencies
 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
{
	struct dma_async_tx_descriptor *dep = txd_next(tx);
	struct dma_async_tx_descriptor *dep_next;
	struct dma_chan *chan;

	if (!dep)
		return;

	/* we'll submit tx->next now, so clear the link */
	txd_clear_next(tx);
	chan = dep->chan;

	/* keep submitting until a channel switch is detected; in that
	 * case we will be called again as a result of processing the
	 * interrupt from async_tx_channel_switch
	 */
	for (; dep; dep = dep_next) {
		txd_lock(dep);
		txd_clear_parent(dep);
		dep_next = txd_next(dep);
		if (dep_next && dep_next->chan == chan)
			txd_clear_next(dep); /* ->next will be submitted */
		else
			dep_next = NULL; /* submit current dep and terminate */
		txd_unlock(dep);

		dep->tx_submit(dep);
	}

	chan->device->device_issue_pending(chan);
}
EXPORT_SYMBOL_GPL(dma_run_dependencies);
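
/*
 * Sketch of where a provider typically calls dma_run_dependencies() (the
 * foo_* names and the use of dma_cookie_complete() from the driver-private
 * header are illustrative): in its descriptor clean-up path, after the
 * completed transaction's callback has run, so that chained async_tx
 * operations are started on their target channels.
 *
 *	static void foo_complete_desc(struct foo_desc *d)
 *	{
 *		struct dma_async_tx_descriptor *txd = &d->txd;
 *
 *		dma_cookie_complete(txd);
 *		if (txd->callback)
 *			txd->callback(txd->callback_param);
 *		dma_run_dependencies(txd);
 *	}
 */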

static int __init dma_bus_init(void)
{
	return class_register(&dma_devclass);
}
arch_initcall(dma_bus_init);
