1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2008 Advanced Micro Devices, Inc.
4 *
5 * Author: Joerg Roedel <joerg.roedel@amd.com>
6 */
7
8#define pr_fmt(fmt) "DMA-API: " fmt
9
10#include <linux/sched/task_stack.h>
11#include <linux/scatterlist.h>
12#include <linux/dma-mapping.h>
13#include <linux/sched/task.h>
14#include <linux/stacktrace.h>
15#include <linux/dma-debug.h>
16#include <linux/spinlock.h>
17#include <linux/vmalloc.h>
18#include <linux/debugfs.h>
19#include <linux/uaccess.h>
20#include <linux/export.h>
21#include <linux/device.h>
22#include <linux/types.h>
23#include <linux/sched.h>
24#include <linux/ctype.h>
25#include <linux/list.h>
26#include <linux/slab.h>
27
28#include <asm/sections.h>
29
30#define HASH_SIZE 1024ULL
31#define HASH_FN_SHIFT 13
32#define HASH_FN_MASK (HASH_SIZE - 1)
33
34#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
35/* If the pool runs out, add this many new entries at once */
36#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
37
38enum {
39 dma_debug_single,
40 dma_debug_sg,
41 dma_debug_coherent,
42 dma_debug_resource,
43};
44
45enum map_err_types {
46 MAP_ERR_CHECK_NOT_APPLICABLE,
47 MAP_ERR_NOT_CHECKED,
48 MAP_ERR_CHECKED,
49};
50
51#define DMA_DEBUG_STACKTRACE_ENTRIES 5
52
53/**
54 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
55 * @list: node on pre-allocated free_entries list
56 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
57 * @type: single, page, sg, coherent
58 * @pfn: page frame of the start address
59 * @offset: offset of mapping relative to pfn
 * @dev_addr: dma address of the mapping
60 * @size: length of the mapping
61 * @direction: enum dma_data_direction
62 * @sg_call_ents: 'nents' from dma_map_sg
63 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
64 * @map_err_type: track whether dma_mapping_error() was checked
65 * @stack_len: number of backtrace entries in @stack_entries
 * @stack_entries: stack of backtrace history
66 */
67struct dma_debug_entry {
68 struct list_head list;
69 struct device *dev;
70 int type;
71 unsigned long pfn;
72 size_t offset;
73 u64 dev_addr;
74 u64 size;
75 int direction;
76 int sg_call_ents;
77 int sg_mapped_ents;
78 enum map_err_types map_err_type;
79#ifdef CONFIG_STACKTRACE
80 unsigned int stack_len;
81 unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
82#endif
83};
84
85typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
86
87struct hash_bucket {
88 struct list_head list;
89 spinlock_t lock;
90} ____cacheline_aligned_in_smp;
91
92/* Hash list to save the allocated dma addresses */
93static struct hash_bucket dma_entry_hash[HASH_SIZE];
94/* List of pre-allocated dma_debug_entry's */
95static LIST_HEAD(free_entries);
96/* Lock for the list above */
97static DEFINE_SPINLOCK(free_entries_lock);
98
99/* Global disable flag - will be set in case of an error */
100static bool global_disable __read_mostly;
101
102/* Early initialization disable flag, set at the end of dma_debug_init */
103static bool dma_debug_initialized __read_mostly;
104
105static inline bool dma_debug_disabled(void)
106{
107 return global_disable || !dma_debug_initialized;
108}
109
110/* Global error count */
111static u32 error_count;
112
113/* Global error show enable */
114static u32 show_all_errors __read_mostly;
115/* Number of errors to show */
116static u32 show_num_errors = 1;
117
118static u32 num_free_entries;
119static u32 min_free_entries;
120static u32 nr_total_entries;
121
122/* number of preallocated entries requested by kernel cmdline */
123static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
124
125/* per-driver filter related state */
126
127#define NAME_MAX_LEN 64
128
129static char current_driver_name[NAME_MAX_LEN] __read_mostly;
130static struct device_driver *current_driver __read_mostly;
131
132static DEFINE_RWLOCK(driver_name_lock);
133
134static const char *const maperr2str[] = {
135 [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
136 [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
137 [MAP_ERR_CHECKED] = "dma map error checked",
138};
139
140static const char *type2name[5] = { "single", "page",
141 "scather-gather", "coherent",
142 "resource" };
143
144static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
145 "DMA_FROM_DEVICE", "DMA_NONE" };
146
147/*
148 * The access to some variables in the err_printk() macro below is racy. We can't use atomic_t
149 * here because all these variables are exported to debugfs. Some of them are even
150 * writeable. This is also the reason why a lock won't help much. But anyway,
151 * the races are no big deal. Here is why:
152 *
153 * error_count: the addition is racy, but the worst thing that can happen is
154 * that we don't count some errors
155 * show_num_errors: the subtraction is racy. Also no big deal because in
156 * worst case this will result in one warning more in the
157 * system log than the user configured. This variable is
158 * writeable via debugfs.
159 */
160static inline void dump_entry_trace(struct dma_debug_entry *entry)
161{
162#ifdef CONFIG_STACKTRACE
163 if (entry) {
164 pr_warning("Mapped at:\n");
165 stack_trace_print(entry->stack_entries, entry->stack_len, 0);
166 }
167#endif
168}
169
170static bool driver_filter(struct device *dev)
171{
172 struct device_driver *drv;
173 unsigned long flags;
174 bool ret;
175
176 /* driver filter off */
177 if (likely(!current_driver_name[0]))
178 return true;
179
180 /* driver filter on and initialized */
181 if (current_driver && dev && dev->driver == current_driver)
182 return true;
183
184 /* driver filter on, but we can't filter on a NULL device... */
185 if (!dev)
186 return false;
187
188 if (current_driver || !current_driver_name[0])
189 return false;
190
191 /* driver filter on but not yet initialized */
192 drv = dev->driver;
193 if (!drv)
194 return false;
195
196 /* lock to protect against change of current_driver_name */
197 read_lock_irqsave(&driver_name_lock, flags);
198
199 ret = false;
200 if (drv->name &&
201 strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
202 current_driver = drv;
203 ret = true;
204 }
205
206 read_unlock_irqrestore(&driver_name_lock, flags);
207
208 return ret;
209}
210
211#define err_printk(dev, entry, format, arg...) do { \
212 error_count += 1; \
213 if (driver_filter(dev) && \
214 (show_all_errors || show_num_errors > 0)) { \
215 WARN(1, pr_fmt("%s %s: ") format, \
216 dev ? dev_driver_string(dev) : "NULL", \
217 dev ? dev_name(dev) : "NULL", ## arg); \
218 dump_entry_trace(entry); \
219 } \
220 if (!show_all_errors && show_num_errors > 0) \
221 show_num_errors -= 1; \
222 } while (0);
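
/*
 * For illustration, a typical use later in this file looks like:
 *
 *	err_printk(dev, entry,
 *		   "device driver frees DMA memory with different size "
 *		   "[map size=%llu bytes] [unmap size=%llu bytes]\n",
 *		   entry->size, ref->size);
 *
 * i.e. the error counter is bumped unconditionally, while the WARN() and
 * the backtrace are only emitted when the driver filter and the
 * show_all_errors/show_num_errors throttling allow it.
 */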
223
224/*
225 * Hash related functions
226 *
227 * Every DMA-API request is saved into a struct dma_debug_entry. To
228 * have quick access to these structs they are stored into a hash.
229 */
230static int hash_fn(struct dma_debug_entry *entry)
231{
232 /*
233 * Hash function is based on the dma address.
234 * We use bits 20-27 here as the index into the hash
235 */
236 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
237}
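
/*
 * For example, with HASH_FN_SHIFT == 13 and HASH_SIZE == 1024 a dev_addr
 * of 0x01234000 hashes to ((0x01234000 >> 13) & 0x3ff) == 0x11a, so all
 * mappings within the same 8 KiB granule of dma address space land in the
 * same bucket.
 */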
238
239/*
240 * Request exclusive access to a hash bucket for a given dma_debug_entry.
241 */
242static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
243 unsigned long *flags)
244 __acquires(&dma_entry_hash[idx].lock)
245{
246 int idx = hash_fn(entry);
247 unsigned long __flags;
248
249 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
250 *flags = __flags;
251 return &dma_entry_hash[idx];
252}
253
254/*
255 * Give up exclusive access to the hash bucket
256 */
257static void put_hash_bucket(struct hash_bucket *bucket,
258 unsigned long *flags)
259 __releases(&bucket->lock)
260{
261 unsigned long __flags = *flags;
262
263 spin_unlock_irqrestore(&bucket->lock, __flags);
264}
265
266static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
267{
268 return ((a->dev_addr == b->dev_addr) &&
269 (a->dev == b->dev)) ? true : false;
270}
271
272static bool containing_match(struct dma_debug_entry *a,
273 struct dma_debug_entry *b)
274{
275 if (a->dev != b->dev)
276 return false;
277
278 if ((b->dev_addr <= a->dev_addr) &&
279 ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
280 return true;
281
282 return false;
283}
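
/*
 * E.g. a partial sync of 256 bytes at dev_addr 0x1100 is matched by an
 * entry that maps 4096 bytes at dev_addr 0x1000, because the sync range
 * lies completely inside the mapped range.
 */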
284
285/*
286 * Search a given entry in the hash bucket list
287 */
288static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
289 struct dma_debug_entry *ref,
290 match_fn match)
291{
292 struct dma_debug_entry *entry, *ret = NULL;
293 int matches = 0, match_lvl, last_lvl = -1;
294
295 list_for_each_entry(entry, &bucket->list, list) {
296 if (!match(ref, entry))
297 continue;
298
299 /*
300 * Some drivers map the same physical address multiple
301 * times. Without a hardware IOMMU this results in the
302 * same device addresses being put into the dma-debug
303 * hash multiple times too. This can result in false
304 * positives being reported. Therefore we implement a
305 * best-fit algorithm here which returns the entry from
306 * the hash which fits best to the reference value
307 * instead of the first-fit.
308 */
309 matches += 1;
310 match_lvl = 0;
311 entry->size == ref->size ? ++match_lvl : 0;
312 entry->type == ref->type ? ++match_lvl : 0;
313 entry->direction == ref->direction ? ++match_lvl : 0;
314 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
315
316 if (match_lvl == 4) {
317 /* perfect-fit - return the result */
318 return entry;
319 } else if (match_lvl > last_lvl) {
320 /*
321 * We found an entry that fits better than the
322 * previous one or it is the 1st match.
323 */
324 last_lvl = match_lvl;
325 ret = entry;
326 }
327 }
328
329 /*
330 * If we have multiple matches but no perfect-fit, just return
331 * NULL.
332 */
333 ret = (matches == 1) ? ret : NULL;
334
335 return ret;
336}
337
338static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
339 struct dma_debug_entry *ref)
340{
341 return __hash_bucket_find(bucket, ref, exact_match);
342}
343
344static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
345 struct dma_debug_entry *ref,
346 unsigned long *flags)
347{
348
349 unsigned int max_range = dma_get_max_seg_size(ref->dev);
350 struct dma_debug_entry *entry, index = *ref;
351 unsigned int range = 0;
352
353 while (range <= max_range) {
354 entry = __hash_bucket_find(*bucket, ref, containing_match);
355
356 if (entry)
357 return entry;
358
359 /*
360 * Nothing found, go back a hash bucket
361 */
362 put_hash_bucket(*bucket, flags);
363 range += (1 << HASH_FN_SHIFT);
364 index.dev_addr -= (1 << HASH_FN_SHIFT);
365 *bucket = get_hash_bucket(&index, flags);
366 }
367
368 return NULL;
369}
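
/*
 * Example: a 64 KiB mapping starting at dev_addr 0x10000 hashes into the
 * bucket for 0x10000, but a sync of its last page (dev_addr 0x1f000) hashes
 * into a different bucket. The loop above walks back one 8 KiB hash granule
 * at a time, up to the device's maximum segment size (64 KiB by default),
 * until the containing entry is found.
 */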
370
371/*
372 * Add an entry to a hash bucket
373 */
374static void hash_bucket_add(struct hash_bucket *bucket,
375 struct dma_debug_entry *entry)
376{
377 list_add_tail(&entry->list, &bucket->list);
378}
379
380/*
381 * Remove entry from a hash bucket list
382 */
383static void hash_bucket_del(struct dma_debug_entry *entry)
384{
385 list_del(&entry->list);
386}
387
388static unsigned long long phys_addr(struct dma_debug_entry *entry)
389{
390 if (entry->type == dma_debug_resource)
391 return __pfn_to_phys(entry->pfn) + entry->offset;
392
393 return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
394}
395
396/*
397 * Dump mapping entries for debugging purposes
398 */
399void debug_dma_dump_mappings(struct device *dev)
400{
401 int idx;
402
403 for (idx = 0; idx < HASH_SIZE; idx++) {
404 struct hash_bucket *bucket = &dma_entry_hash[idx];
405 struct dma_debug_entry *entry;
406 unsigned long flags;
407
408 spin_lock_irqsave(&bucket->lock, flags);
409
410 list_for_each_entry(entry, &bucket->list, list) {
411 if (!dev || dev == entry->dev) {
412 dev_info(entry->dev,
413 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
414 type2name[entry->type], idx,
415 phys_addr(entry), entry->pfn,
416 entry->dev_addr, entry->size,
417 dir2name[entry->direction],
418 maperr2str[entry->map_err_type]);
419 }
420 }
421
422 spin_unlock_irqrestore(&bucket->lock, flags);
423 }
424}
425
426/*
427 * For each mapping (initial cacheline in the case of
428 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
429 * scatterlist, or the cacheline specified in dma_map_single) insert
430 * into this tree using the cacheline as the key. At
431 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
432 * the entry already exists at insertion time add a tag as a reference
433 * count for the overlapping mappings. For now, the overlap tracking
434 * just ensures that 'unmaps' balance 'maps' before marking the
435 * cacheline idle, but we should also be flagging overlaps as an API
436 * violation.
437 *
438 * Memory usage is mostly constrained by the maximum number of available
439 * dma-debug entries in that we need a free dma_debug_entry before
440 * inserting into the tree. In the case of dma_map_page and
441 * dma_alloc_coherent there is only one dma_debug_entry and one
442 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
443 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
444 * entries into the tree.
445 *
446 * At any time debug_dma_assert_idle() can be called to trigger a
447 * warning if any cachelines in the given page are in the active set.
448 */
449static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
450static DEFINE_SPINLOCK(radix_lock);
451#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
452#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
453#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
454
455static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
456{
457 return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
458 (entry->offset >> L1_CACHE_SHIFT);
459}
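
/*
 * With 4 KiB pages and 64-byte cachelines (PAGE_SHIFT == 12,
 * L1_CACHE_SHIFT == 6) this gives 64 cachelines per page, so e.g. pfn 2
 * with offset 0x80 maps to cacheline number 2 * 64 + 2 == 130.
 */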
460
461static int active_cacheline_read_overlap(phys_addr_t cln)
462{
463 int overlap = 0, i;
464
465 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
466 if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
467 overlap |= 1 << i;
468 return overlap;
469}
470
471static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
472{
473 int i;
474
475 if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
476 return overlap;
477
478 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
479 if (overlap & 1 << i)
480 radix_tree_tag_set(&dma_active_cacheline, cln, i);
481 else
482 radix_tree_tag_clear(&dma_active_cacheline, cln, i);
483
484 return overlap;
485}
486
487static void active_cacheline_inc_overlap(phys_addr_t cln)
488{
489 int overlap = active_cacheline_read_overlap(cln);
490
491 overlap = active_cacheline_set_overlap(cln, ++overlap);
492
493 /* If we overflowed the overlap counter then we're potentially
494 * leaking dma-mappings. Otherwise, if maps and unmaps are
495 * balanced then this overflow may cause false negatives in
496 * debug_dma_assert_idle() as the cacheline may be marked idle
497 * prematurely.
498 */
499 WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
500 pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
501 ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
502}
503
504static int active_cacheline_dec_overlap(phys_addr_t cln)
505{
506 int overlap = active_cacheline_read_overlap(cln);
507
508 return active_cacheline_set_overlap(cln, --overlap);
509}
510
511static int active_cacheline_insert(struct dma_debug_entry *entry)
512{
513 phys_addr_t cln = to_cacheline_number(entry);
514 unsigned long flags;
515 int rc;
516
517 /* If the device is not writing memory then we don't have any
518 * concerns about the cpu consuming stale data. This mitigates
519 * legitimate usages of overlapping mappings.
520 */
521 if (entry->direction == DMA_TO_DEVICE)
522 return 0;
523
524 spin_lock_irqsave(&radix_lock, flags);
525 rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
526 if (rc == -EEXIST)
527 active_cacheline_inc_overlap(cln);
528 spin_unlock_irqrestore(&radix_lock, flags);
529
530 return rc;
531}
532
533static void active_cacheline_remove(struct dma_debug_entry *entry)
534{
535 phys_addr_t cln = to_cacheline_number(entry);
536 unsigned long flags;
537
538 /* ...mirror the insert case */
539 if (entry->direction == DMA_TO_DEVICE)
540 return;
541
542 spin_lock_irqsave(&radix_lock, flags);
543 /* since we are counting overlaps the final put of the
544 * cacheline will occur when the overlap count is 0.
545 * active_cacheline_dec_overlap() returns -1 in that case
546 */
547 if (active_cacheline_dec_overlap(cln) < 0)
548 radix_tree_delete(&dma_active_cacheline, cln);
549 spin_unlock_irqrestore(&radix_lock, flags);
550}
551
552/**
553 * debug_dma_assert_idle() - assert that a page is not undergoing dma
554 * @page: page to lookup in the dma_active_cacheline tree
555 *
556 * Place a call to this routine in cases where the cpu touching the page
557 * before the dma completes (page is dma_unmapped) will lead to data
558 * corruption.
559 */
560void debug_dma_assert_idle(struct page *page)
561{
562 static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
563 struct dma_debug_entry *entry = NULL;
564 void **results = (void **) &ents;
565 unsigned int nents, i;
566 unsigned long flags;
567 phys_addr_t cln;
568
569 if (dma_debug_disabled())
570 return;
571
572 if (!page)
573 return;
574
575 cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
576 spin_lock_irqsave(&radix_lock, flags);
577 nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
578 CACHELINES_PER_PAGE);
579 for (i = 0; i < nents; i++) {
580 phys_addr_t ent_cln = to_cacheline_number(ents[i]);
581
582 if (ent_cln == cln) {
583 entry = ents[i];
584 break;
585 } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
586 break;
587 }
588 spin_unlock_irqrestore(&radix_lock, flags);
589
590 if (!entry)
591 return;
592
593 cln = to_cacheline_number(entry);
594 err_printk(entry->dev, entry,
595 "cpu touching an active dma mapped cacheline [cln=%pa]\n",
596 &cln);
597}
598
599/*
600 * Wrapper function for adding an entry to the hash.
601 * This function takes care of locking itself.
602 */
603static void add_dma_entry(struct dma_debug_entry *entry)
604{
605 struct hash_bucket *bucket;
606 unsigned long flags;
607 int rc;
608
609 bucket = get_hash_bucket(entry, &flags);
610 hash_bucket_add(bucket, entry);
611 put_hash_bucket(bucket, &flags);
612
613 rc = active_cacheline_insert(entry);
614 if (rc == -ENOMEM) {
615 pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
616 global_disable = true;
617 }
618
619 /* TODO: report -EEXIST errors here as overlapping mappings are
620 * not supported by the DMA API
621 */
622}
623
624static int dma_debug_create_entries(gfp_t gfp)
625{
626 struct dma_debug_entry *entry;
627 int i;
628
629 entry = (void *)get_zeroed_page(gfp);
630 if (!entry)
631 return -ENOMEM;
632
633 for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
634 list_add_tail(&entry[i].list, &free_entries);
635
636 num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
637 nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
638
639 return 0;
640}
641
642static struct dma_debug_entry *__dma_entry_alloc(void)
643{
644 struct dma_debug_entry *entry;
645
646 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
647 list_del(&entry->list);
648 memset(entry, 0, sizeof(*entry));
649
650 num_free_entries -= 1;
651 if (num_free_entries < min_free_entries)
652 min_free_entries = num_free_entries;
653
654 return entry;
655}
656
657static void __dma_entry_alloc_check_leak(void)
658{
659 u32 tmp = nr_total_entries % nr_prealloc_entries;
660
661 /* Shout each time we tick over some multiple of the initial pool */
662 if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
663 pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
664 nr_total_entries,
665 (nr_total_entries / nr_prealloc_entries));
666 }
667}
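
/*
 * E.g. with the default nr_prealloc_entries of 65536, the message is
 * printed each time the pool ticks over another multiple of 65536 total
 * entries: at ~131072 it reports "200%", at ~196608 "300%", and so on.
 */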
668
669/* struct dma_debug_entry allocator
670 *
671 * The next two functions implement the allocator for
672 * struct dma_debug_entries.
673 */
674static struct dma_debug_entry *dma_entry_alloc(void)
675{
676 struct dma_debug_entry *entry;
677 unsigned long flags;
678
679 spin_lock_irqsave(&free_entries_lock, flags);
680 if (num_free_entries == 0) {
681 if (dma_debug_create_entries(GFP_ATOMIC)) {
682 global_disable = true;
683 spin_unlock_irqrestore(&free_entries_lock, flags);
684 pr_err("debugging out of memory - disabling\n");
685 return NULL;
686 }
687 __dma_entry_alloc_check_leak();
688 }
689
690 entry = __dma_entry_alloc();
691
692 spin_unlock_irqrestore(&free_entries_lock, flags);
693
694#ifdef CONFIG_STACKTRACE
695 entry->stack_len = stack_trace_save(entry->stack_entries,
696 ARRAY_SIZE(entry->stack_entries),
697 1);
698#endif
699 return entry;
700}
701
702static void dma_entry_free(struct dma_debug_entry *entry)
703{
704 unsigned long flags;
705
706 active_cacheline_remove(entry);
707
708 /*
709 * add to beginning of the list - this way the entries are
710 * more likely cache hot when they are reallocated.
711 */
712 spin_lock_irqsave(&free_entries_lock, flags);
713 list_add(&entry->list, &free_entries);
714 num_free_entries += 1;
715 spin_unlock_irqrestore(&free_entries_lock, flags);
716}
717
718/*
719 * DMA-API debugging init code
720 *
721 * The init code does two things:
722 * 1. Initialize core data structures
723 * 2. Preallocate a given number of dma_debug_entry structs
724 */
725
726static ssize_t filter_read(struct file *file, char __user *user_buf,
727 size_t count, loff_t *ppos)
728{
729 char buf[NAME_MAX_LEN + 1];
730 unsigned long flags;
731 int len;
732
733 if (!current_driver_name[0])
734 return 0;
735
736 /*
737 * We can't copy to userspace directly because current_driver_name can
738 * only be read under the driver_name_lock with irqs disabled. So
739 * create a temporary copy first.
740 */
741 read_lock_irqsave(&driver_name_lock, flags);
742 len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
743 read_unlock_irqrestore(&driver_name_lock, flags);
744
745 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
746}
747
748static ssize_t filter_write(struct file *file, const char __user *userbuf,
749 size_t count, loff_t *ppos)
750{
751 char buf[NAME_MAX_LEN];
752 unsigned long flags;
753 size_t len;
754 int i;
755
756 /*
757 * We can't copy from userspace directly. Access to
758 * current_driver_name is protected with a write_lock with irqs
759 * disabled. Since copy_from_user can fault and may sleep we
760 * need to copy to temporary buffer first
761 */
762 len = min(count, (size_t)(NAME_MAX_LEN - 1));
763 if (copy_from_user(buf, userbuf, len))
764 return -EFAULT;
765
766 buf[len] = 0;
767
768 write_lock_irqsave(&driver_name_lock, flags);
769
770 /*
771 * Now handle the string we got from userspace very carefully.
772 * The rules are:
773 * - only use the first token we got
774 * - token delimiter is everything looking like a space
775 * character (' ', '\n', '\t' ...)
776 *
777 */
778 if (!isalnum(buf[0])) {
779 /*
780 * If the first character userspace gave us is not
781 * alphanumerical then assume the filter should be
782 * switched off.
783 */
784 if (current_driver_name[0])
785 pr_info("switching off dma-debug driver filter\n");
786 current_driver_name[0] = 0;
787 current_driver = NULL;
788 goto out_unlock;
789 }
790
791 /*
792 * Now parse out the first token and use it as the name for the
793 * driver to filter for.
794 */
795 for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
796 current_driver_name[i] = buf[i];
797 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
798 break;
799 }
800 current_driver_name[i] = 0;
801 current_driver = NULL;
802
803 pr_info("enable driver filter for driver [%s]\n",
804 current_driver_name);
805
806out_unlock:
807 write_unlock_irqrestore(&driver_name_lock, flags);
808
809 return count;
810}
811
812static const struct file_operations filter_fops = {
813 .read = filter_read,
814 .write = filter_write,
815 .llseek = default_llseek,
816};
817
818static int dump_show(struct seq_file *seq, void *v)
819{
820 int idx;
821
822 for (idx = 0; idx < HASH_SIZE; idx++) {
823 struct hash_bucket *bucket = &dma_entry_hash[idx];
824 struct dma_debug_entry *entry;
825 unsigned long flags;
826
827 spin_lock_irqsave(&bucket->lock, flags);
828 list_for_each_entry(entry, &bucket->list, list) {
829 seq_printf(seq,
830 "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
831 dev_name(entry->dev),
832 dev_driver_string(entry->dev),
833 type2name[entry->type], idx,
834 phys_addr(entry), entry->pfn,
835 entry->dev_addr, entry->size,
836 dir2name[entry->direction],
837 maperr2str[entry->map_err_type]);
838 }
839 spin_unlock_irqrestore(&bucket->lock, flags);
840 }
841 return 0;
842}
843DEFINE_SHOW_ATTRIBUTE(dump);
844
845static void dma_debug_fs_init(void)
846{
847 struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
848
849 debugfs_create_bool("disabled", 0444, dentry, &global_disable);
850 debugfs_create_u32("error_count", 0444, dentry, &error_count);
851 debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
852 debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
853 debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
854 debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
855 debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
856 debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
857 debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
858}
859
860static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
861{
862 struct dma_debug_entry *entry;
863 unsigned long flags;
864 int count = 0, i;
865
866 for (i = 0; i < HASH_SIZE; ++i) {
867 spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
868 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
869 if (entry->dev == dev) {
870 count += 1;
871 *out_entry = entry;
872 }
873 }
874 spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
875 }
876
877 return count;
878}
879
880static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
881{
882 struct device *dev = data;
883 struct dma_debug_entry *uninitialized_var(entry);
884 int count;
885
886 if (dma_debug_disabled())
887 return 0;
888
889 switch (action) {
890 case BUS_NOTIFY_UNBOUND_DRIVER:
891 count = device_dma_allocations(dev, &entry);
892 if (count == 0)
893 break;
894 err_printk(dev, entry, "device driver has pending "
895 "DMA allocations while released from device "
896 "[count=%d]\n"
897 "One of leaked entries details: "
898 "[device address=0x%016llx] [size=%llu bytes] "
899 "[mapped with %s] [mapped as %s]\n",
900 count, entry->dev_addr, entry->size,
901 dir2name[entry->direction], type2name[entry->type]);
902 break;
903 default:
904 break;
905 }
906
907 return 0;
908}
909
910void dma_debug_add_bus(struct bus_type *bus)
911{
912 struct notifier_block *nb;
913
914 if (dma_debug_disabled())
915 return;
916
917 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
918 if (nb == NULL) {
919 pr_err("dma_debug_add_bus: out of memory\n");
920 return;
921 }
922
923 nb->notifier_call = dma_debug_device_change;
924
925 bus_register_notifier(bus, nb);
926}
927
928static int dma_debug_init(void)
929{
930 int i, nr_pages;
931
932 /* Do not use dma_debug_initialized here, since we really want to be
933 * called to set dma_debug_initialized
934 */
935 if (global_disable)
936 return 0;
937
938 for (i = 0; i < HASH_SIZE; ++i) {
939 INIT_LIST_HEAD(&dma_entry_hash[i].list);
940 spin_lock_init(&dma_entry_hash[i].lock);
941 }
942
943 dma_debug_fs_init();
944
945 nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
946 for (i = 0; i < nr_pages; ++i)
947 dma_debug_create_entries(GFP_KERNEL);
948 if (num_free_entries >= nr_prealloc_entries) {
949 pr_info("preallocated %d debug entries\n", nr_total_entries);
950 } else if (num_free_entries > 0) {
951 pr_warn("%d debug entries requested but only %d allocated\n",
952 nr_prealloc_entries, nr_total_entries);
953 } else {
954 pr_err("debugging out of memory error - disabled\n");
955 global_disable = true;
956
957 return 0;
958 }
959 min_free_entries = num_free_entries;
960
961 dma_debug_initialized = true;
962
963 pr_info("debugging enabled by kernel config\n");
964 return 0;
965}
966core_initcall(dma_debug_init);
967
968static __init int dma_debug_cmdline(char *str)
969{
970 if (!str)
971 return -EINVAL;
972
973 if (strncmp(str, "off", 3) == 0) {
974 pr_info("debugging disabled on kernel command line\n");
975 global_disable = true;
976 }
977
978 return 0;
979}
980
981static __init int dma_debug_entries_cmdline(char *str)
982{
983 if (!str)
984 return -EINVAL;
985 if (!get_option(&str, &nr_prealloc_entries))
986 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
987 return 0;
988}
989
990__setup("dma_debug=", dma_debug_cmdline);
991__setup("dma_debug_entries=", dma_debug_entries_cmdline);
992
993static void check_unmap(struct dma_debug_entry *ref)
994{
995 struct dma_debug_entry *entry;
996 struct hash_bucket *bucket;
997 unsigned long flags;
998
999 bucket = get_hash_bucket(ref, &flags);
1000 entry = bucket_find_exact(bucket, ref);
1001
1002 if (!entry) {
1003 /* must drop lock before calling dma_mapping_error */
1004 put_hash_bucket(bucket, &flags);
1005
1006 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
1007 err_printk(ref->dev, NULL,
1008 "device driver tries to free an "
1009 "invalid DMA memory address\n");
1010 } else {
1011 err_printk(ref->dev, NULL,
1012 "device driver tries to free DMA "
1013 "memory it has not allocated [device "
1014 "address=0x%016llx] [size=%llu bytes]\n",
1015 ref->dev_addr, ref->size);
1016 }
1017 return;
1018 }
1019
1020 if (ref->size != entry->size) {
1021 err_printk(ref->dev, entry, "device driver frees "
1022 "DMA memory with different size "
1023 "[device address=0x%016llx] [map size=%llu bytes] "
1024 "[unmap size=%llu bytes]\n",
1025 ref->dev_addr, entry->size, ref->size);
1026 }
1027
1028 if (ref->type != entry->type) {
1029 err_printk(ref->dev, entry, "device driver frees "
1030 "DMA memory with wrong function "
1031 "[device address=0x%016llx] [size=%llu bytes] "
1032 "[mapped as %s] [unmapped as %s]\n",
1033 ref->dev_addr, ref->size,
1034 type2name[entry->type], type2name[ref->type]);
1035 } else if ((entry->type == dma_debug_coherent) &&
1036 (phys_addr(ref) != phys_addr(entry))) {
1037 err_printk(ref->dev, entry, "device driver frees "
1038 "DMA memory with different CPU address "
1039 "[device address=0x%016llx] [size=%llu bytes] "
1040 "[cpu alloc address=0x%016llx] "
1041 "[cpu free address=0x%016llx]",
1042 ref->dev_addr, ref->size,
1043 phys_addr(entry),
1044 phys_addr(ref));
1045 }
1046
1047 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1048 ref->sg_call_ents != entry->sg_call_ents) {
1049 err_printk(ref->dev, entry, "device driver frees "
1050 "DMA sg list with different entry count "
1051 "[map count=%d] [unmap count=%d]\n",
1052 entry->sg_call_ents, ref->sg_call_ents);
1053 }
1054
1055 /*
1056 * This may be no bug in reality - but most implementations of the
1057 * DMA API don't handle this properly, so check for it here
1058 */
1059 if (ref->direction != entry->direction) {
1060 err_printk(ref->dev, entry, "device driver frees "
1061 "DMA memory with different direction "
1062 "[device address=0x%016llx] [size=%llu bytes] "
1063 "[mapped with %s] [unmapped with %s]\n",
1064 ref->dev_addr, ref->size,
1065 dir2name[entry->direction],
1066 dir2name[ref->direction]);
1067 }
1068
1069 /*
1070 * Drivers should use dma_mapping_error() to check the returned
1071 * addresses of dma_map_single() and dma_map_page().
1072 * If not, print this warning message. See Documentation/DMA-API.txt.
1073 */
1074 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1075 err_printk(ref->dev, entry,
1076 "device driver failed to check map error"
1077 "[device address=0x%016llx] [size=%llu bytes] "
1078 "[mapped as %s]",
1079 ref->dev_addr, ref->size,
1080 type2name[entry->type]);
1081 }
1082
1083 hash_bucket_del(entry);
1084 dma_entry_free(entry);
1085
1086 put_hash_bucket(bucket, &flags);
1087}
1088
1089static void check_for_stack(struct device *dev,
1090 struct page *page, size_t offset)
1091{
1092 void *addr;
1093 struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1094
1095 if (!stack_vm_area) {
1096 /* Stack is direct-mapped. */
1097 if (PageHighMem(page))
1098 return;
1099 addr = page_address(page) + offset;
1100 if (object_is_on_stack(addr))
1101 err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1102 } else {
1103 /* Stack is vmalloced. */
1104 int i;
1105
1106 for (i = 0; i < stack_vm_area->nr_pages; i++) {
1107 if (page != stack_vm_area->pages[i])
1108 continue;
1109
1110 addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
1111 err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1112 break;
1113 }
1114 }
1115}
1116
1117static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
1118{
1119 unsigned long a1 = (unsigned long)addr;
1120 unsigned long b1 = a1 + len;
1121 unsigned long a2 = (unsigned long)start;
1122 unsigned long b2 = (unsigned long)end;
1123
1124 return !(b1 <= a2 || a1 >= b2);
1125}
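
/*
 * i.e. the two half-open ranges [addr, addr + len) and [start, end)
 * intersect. For example a 0x100-byte buffer at 0x1000 does not overlap
 * a region [0x1100, 0x2000), while a buffer of the same size at 0x10c0
 * does.
 */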
1126
1127static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1128{
1129 if (overlap(addr, len, _stext, _etext) ||
1130 overlap(addr, len, __start_rodata, __end_rodata))
1131 err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1132}
1133
1134static void check_sync(struct device *dev,
1135 struct dma_debug_entry *ref,
1136 bool to_cpu)
1137{
1138 struct dma_debug_entry *entry;
1139 struct hash_bucket *bucket;
1140 unsigned long flags;
1141
1142 bucket = get_hash_bucket(ref, &flags);
1143
1144 entry = bucket_find_contain(&bucket, ref, &flags);
1145
1146 if (!entry) {
1147 err_printk(dev, NULL, "device driver tries "
1148 "to sync DMA memory it has not allocated "
1149 "[device address=0x%016llx] [size=%llu bytes]\n",
1150 (unsigned long long)ref->dev_addr, ref->size);
1151 goto out;
1152 }
1153
1154 if (ref->size > entry->size) {
1155 err_printk(dev, entry, "device driver syncs"
1156 " DMA memory outside allocated range "
1157 "[device address=0x%016llx] "
1158 "[allocation size=%llu bytes] "
1159 "[sync offset+size=%llu]\n",
1160 entry->dev_addr, entry->size,
1161 ref->size);
1162 }
1163
1164 if (entry->direction == DMA_BIDIRECTIONAL)
1165 goto out;
1166
1167 if (ref->direction != entry->direction) {
1168 err_printk(dev, entry, "device driver syncs "
1169 "DMA memory with different direction "
1170 "[device address=0x%016llx] [size=%llu bytes] "
1171 "[mapped with %s] [synced with %s]\n",
1172 (unsigned long long)ref->dev_addr, entry->size,
1173 dir2name[entry->direction],
1174 dir2name[ref->direction]);
1175 }
1176
1177 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1178 !(ref->direction == DMA_TO_DEVICE))
1179 err_printk(dev, entry, "device driver syncs "
1180 "device read-only DMA memory for cpu "
1181 "[device address=0x%016llx] [size=%llu bytes] "
1182 "[mapped with %s] [synced with %s]\n",
1183 (unsigned long long)ref->dev_addr, entry->size,
1184 dir2name[entry->direction],
1185 dir2name[ref->direction]);
1186
1187 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1188 !(ref->direction == DMA_FROM_DEVICE))
1189 err_printk(dev, entry, "device driver syncs "
1190 "device write-only DMA memory to device "
1191 "[device address=0x%016llx] [size=%llu bytes] "
1192 "[mapped with %s] [synced with %s]\n",
1193 (unsigned long long)ref->dev_addr, entry->size,
1194 dir2name[entry->direction],
1195 dir2name[ref->direction]);
1196
1197 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1198 ref->sg_call_ents != entry->sg_call_ents) {
1199 err_printk(ref->dev, entry, "device driver syncs "
1200 "DMA sg list with different entry count "
1201 "[map count=%d] [sync count=%d]\n",
1202 entry->sg_call_ents, ref->sg_call_ents);
1203 }
1204
1205out:
1206 put_hash_bucket(bucket, &flags);
1207}
1208
1209static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1210{
1211#ifdef CONFIG_DMA_API_DEBUG_SG
1212 unsigned int max_seg = dma_get_max_seg_size(dev);
1213 u64 start, end, boundary = dma_get_seg_boundary(dev);
1214
1215 /*
1216 * Either the driver forgot to set dma_parms appropriately, or
1217 * whoever generated the list forgot to check them.
1218 */
1219 if (sg->length > max_seg)
1220 err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1221 sg->length, max_seg);
1222 /*
1223 * In some cases this could potentially be the DMA API
1224 * implementation's fault, but it would usually imply that
1225 * the scatterlist was built inappropriately to begin with.
1226 */
1227 start = sg_dma_address(sg);
1228 end = start + sg_dma_len(sg) - 1;
1229 if ((start ^ end) & ~boundary)
1230 err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1231 start, end, boundary);
1232#endif
1233}
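
/*
 * For example, with a segment boundary mask of 0xffffffff (4 GiB), a
 * segment mapped from 0xfffff000 to 0x100000fff has start and end
 * differing in bit 32, so (start ^ end) & ~boundary is non-zero and the
 * boundary crossing is reported.
 */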
1234
1235void debug_dma_map_single(struct device *dev, const void *addr,
1236 unsigned long len)
1237{
1238 if (unlikely(dma_debug_disabled()))
1239 return;
1240
1241 if (!virt_addr_valid(addr))
1242 err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
1243 addr, len);
1244
1245 if (is_vmalloc_addr(addr))
1246 err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
1247 addr, len);
1248}
1249EXPORT_SYMBOL(debug_dma_map_single);
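
/*
 * A minimal sketch of the driver-side pattern these hooks validate
 * (hypothetical device and buffer):
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... hardware performs the transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 *
 * dma-debug records the mapping, remembers whether dma_mapping_error()
 * was called, and complains in check_unmap() if the size, direction or
 * address passed at unmap time do not match the original map.
 */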
1250
1251void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1252 size_t size, int direction, dma_addr_t dma_addr)
1253{
1254 struct dma_debug_entry *entry;
1255
1256 if (unlikely(dma_debug_disabled()))
1257 return;
1258
1259 if (dma_mapping_error(dev, dma_addr))
1260 return;
1261
1262 entry = dma_entry_alloc();
1263 if (!entry)
1264 return;
1265
1266 entry->dev = dev;
1267 entry->type = dma_debug_single;
1268 entry->pfn = page_to_pfn(page);
1269 entry->offset = offset;
1270 entry->dev_addr = dma_addr;
1271 entry->size = size;
1272 entry->direction = direction;
1273 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1274
1275 check_for_stack(dev, page, offset);
1276
1277 if (!PageHighMem(page)) {
1278 void *addr = page_address(page) + offset;
1279
1280 check_for_illegal_area(dev, addr, size);
1281 }
1282
1283 add_dma_entry(entry);
1284}
1285EXPORT_SYMBOL(debug_dma_map_page);
1286
1287void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1288{
1289 struct dma_debug_entry ref;
1290 struct dma_debug_entry *entry;
1291 struct hash_bucket *bucket;
1292 unsigned long flags;
1293
1294 if (unlikely(dma_debug_disabled()))
1295 return;
1296
1297 ref.dev = dev;
1298 ref.dev_addr = dma_addr;
1299 bucket = get_hash_bucket(&ref, &flags);
1300
1301 list_for_each_entry(entry, &bucket->list, list) {
1302 if (!exact_match(&ref, entry))
1303 continue;
1304
1305 /*
1306 * The same physical address can be mapped multiple
1307 * times. Without a hardware IOMMU this results in the
1308 * same device addresses being put into the dma-debug
1309 * hash multiple times too. This can result in false
1310 * positives being reported. Therefore we implement a
1311 * best-fit algorithm here which updates the first entry
1312 * from the hash which fits the reference value and is
1313 * not currently listed as being checked.
1314 */
1315 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1316 entry->map_err_type = MAP_ERR_CHECKED;
1317 break;
1318 }
1319 }
1320
1321 put_hash_bucket(bucket, &flags);
1322}
1323EXPORT_SYMBOL(debug_dma_mapping_error);
1324
1325void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
1326 size_t size, int direction)
1327{
1328 struct dma_debug_entry ref = {
1329 .type = dma_debug_single,
1330 .dev = dev,
1331 .dev_addr = addr,
1332 .size = size,
1333 .direction = direction,
1334 };
1335
1336 if (unlikely(dma_debug_disabled()))
1337 return;
1338 check_unmap(&ref);
1339}
1340EXPORT_SYMBOL(debug_dma_unmap_page);
1341
1342void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1343 int nents, int mapped_ents, int direction)
1344{
1345 struct dma_debug_entry *entry;
1346 struct scatterlist *s;
1347 int i;
1348
1349 if (unlikely(dma_debug_disabled()))
1350 return;
1351
1352 for_each_sg(sg, s, mapped_ents, i) {
1353 entry = dma_entry_alloc();
1354 if (!entry)
1355 return;
1356
1357 entry->type = dma_debug_sg;
1358 entry->dev = dev;
1359 entry->pfn = page_to_pfn(sg_page(s));
1360 entry->offset = s->offset;
1361 entry->size = sg_dma_len(s);
1362 entry->dev_addr = sg_dma_address(s);
1363 entry->direction = direction;
1364 entry->sg_call_ents = nents;
1365 entry->sg_mapped_ents = mapped_ents;
1366
1367 check_for_stack(dev, sg_page(s), s->offset);
1368
1369 if (!PageHighMem(sg_page(s))) {
1370 check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
1371 }
1372
1373 check_sg_segment(dev, s);
1374
1375 add_dma_entry(entry);
1376 }
1377}
1378EXPORT_SYMBOL(debug_dma_map_sg);
1379
1380static int get_nr_mapped_entries(struct device *dev,
1381 struct dma_debug_entry *ref)
1382{
1383 struct dma_debug_entry *entry;
1384 struct hash_bucket *bucket;
1385 unsigned long flags;
1386 int mapped_ents;
1387
1388 bucket = get_hash_bucket(ref, &flags);
1389 entry = bucket_find_exact(bucket, ref);
1390 mapped_ents = 0;
1391
1392 if (entry)
1393 mapped_ents = entry->sg_mapped_ents;
1394 put_hash_bucket(bucket, &flags);
1395
1396 return mapped_ents;
1397}
1398
1399void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1400 int nelems, int dir)
1401{
1402 struct scatterlist *s;
1403 int mapped_ents = 0, i;
1404
1405 if (unlikely(dma_debug_disabled()))
1406 return;
1407
1408 for_each_sg(sglist, s, nelems, i) {
1409
1410 struct dma_debug_entry ref = {
1411 .type = dma_debug_sg,
1412 .dev = dev,
1413 .pfn = page_to_pfn(sg_page(s)),
1414 .offset = s->offset,
1415 .dev_addr = sg_dma_address(s),
1416 .size = sg_dma_len(s),
1417 .direction = dir,
1418 .sg_call_ents = nelems,
1419 };
1420
1421 if (mapped_ents && i >= mapped_ents)
1422 break;
1423
1424 if (!i)
1425 mapped_ents = get_nr_mapped_entries(dev, &ref);
1426
1427 check_unmap(&ref);
1428 }
1429}
1430EXPORT_SYMBOL(debug_dma_unmap_sg);
1431
1432void debug_dma_alloc_coherent(struct device *dev, size_t size,
1433 dma_addr_t dma_addr, void *virt)
1434{
1435 struct dma_debug_entry *entry;
1436
1437 if (unlikely(dma_debug_disabled()))
1438 return;
1439
1440 if (unlikely(virt == NULL))
1441 return;
1442
1443 /* handle vmalloc and linear addresses */
1444 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1445 return;
1446
1447 entry = dma_entry_alloc();
1448 if (!entry)
1449 return;
1450
1451 entry->type = dma_debug_coherent;
1452 entry->dev = dev;
1453 entry->offset = offset_in_page(virt);
1454 entry->size = size;
1455 entry->dev_addr = dma_addr;
1456 entry->direction = DMA_BIDIRECTIONAL;
1457
1458 if (is_vmalloc_addr(virt))
1459 entry->pfn = vmalloc_to_pfn(virt);
1460 else
1461 entry->pfn = page_to_pfn(virt_to_page(virt));
1462
1463 add_dma_entry(entry);
1464}
1465
1466void debug_dma_free_coherent(struct device *dev, size_t size,
1467 void *virt, dma_addr_t addr)
1468{
1469 struct dma_debug_entry ref = {
1470 .type = dma_debug_coherent,
1471 .dev = dev,
1472 .offset = offset_in_page(virt),
1473 .dev_addr = addr,
1474 .size = size,
1475 .direction = DMA_BIDIRECTIONAL,
1476 };
1477
1478 /* handle vmalloc and linear addresses */
1479 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1480 return;
1481
1482 if (is_vmalloc_addr(virt))
1483 ref.pfn = vmalloc_to_pfn(virt);
1484 else
1485 ref.pfn = page_to_pfn(virt_to_page(virt));
1486
1487 if (unlikely(dma_debug_disabled()))
1488 return;
1489
1490 check_unmap(&ref);
1491}
1492
1493void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
1494 int direction, dma_addr_t dma_addr)
1495{
1496 struct dma_debug_entry *entry;
1497
1498 if (unlikely(dma_debug_disabled()))
1499 return;
1500
1501 entry = dma_entry_alloc();
1502 if (!entry)
1503 return;
1504
1505 entry->type = dma_debug_resource;
1506 entry->dev = dev;
1507 entry->pfn = PHYS_PFN(addr);
1508 entry->offset = offset_in_page(addr);
1509 entry->size = size;
1510 entry->dev_addr = dma_addr;
1511 entry->direction = direction;
1512 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1513
1514 add_dma_entry(entry);
1515}
1516EXPORT_SYMBOL(debug_dma_map_resource);
1517
1518void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1519 size_t size, int direction)
1520{
1521 struct dma_debug_entry ref = {
1522 .type = dma_debug_resource,
1523 .dev = dev,
1524 .dev_addr = dma_addr,
1525 .size = size,
1526 .direction = direction,
1527 };
1528
1529 if (unlikely(dma_debug_disabled()))
1530 return;
1531
1532 check_unmap(&ref);
1533}
1534EXPORT_SYMBOL(debug_dma_unmap_resource);
1535
1536void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1537 size_t size, int direction)
1538{
1539 struct dma_debug_entry ref;
1540
1541 if (unlikely(dma_debug_disabled()))
1542 return;
1543
1544 ref.type = dma_debug_single;
1545 ref.dev = dev;
1546 ref.dev_addr = dma_handle;
1547 ref.size = size;
1548 ref.direction = direction;
1549 ref.sg_call_ents = 0;
1550
1551 check_sync(dev, &ref, true);
1552}
1553EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
1554
1555void debug_dma_sync_single_for_device(struct device *dev,
1556 dma_addr_t dma_handle, size_t size,
1557 int direction)
1558{
1559 struct dma_debug_entry ref;
1560
1561 if (unlikely(dma_debug_disabled()))
1562 return;
1563
1564 ref.type = dma_debug_single;
1565 ref.dev = dev;
1566 ref.dev_addr = dma_handle;
1567 ref.size = size;
1568 ref.direction = direction;
1569 ref.sg_call_ents = 0;
1570
1571 check_sync(dev, &ref, false);
1572}
1573EXPORT_SYMBOL(debug_dma_sync_single_for_device);
1574
1575void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1576 int nelems, int direction)
1577{
1578 struct scatterlist *s;
1579 int mapped_ents = 0, i;
1580
1581 if (unlikely(dma_debug_disabled()))
1582 return;
1583
1584 for_each_sg(sg, s, nelems, i) {
1585
1586 struct dma_debug_entry ref = {
1587 .type = dma_debug_sg,
1588 .dev = dev,
1589 .pfn = page_to_pfn(sg_page(s)),
1590 .offset = s->offset,
1591 .dev_addr = sg_dma_address(s),
1592 .size = sg_dma_len(s),
1593 .direction = direction,
1594 .sg_call_ents = nelems,
1595 };
1596
1597 if (!i)
1598 mapped_ents = get_nr_mapped_entries(dev, &ref);
1599
1600 if (i >= mapped_ents)
1601 break;
1602
1603 check_sync(dev, &ref, true);
1604 }
1605}
1606EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
1607
1608void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1609 int nelems, int direction)
1610{
1611 struct scatterlist *s;
1612 int mapped_ents = 0, i;
1613
1614 if (unlikely(dma_debug_disabled()))
1615 return;
1616
1617 for_each_sg(sg, s, nelems, i) {
1618
1619 struct dma_debug_entry ref = {
1620 .type = dma_debug_sg,
1621 .dev = dev,
1622 .pfn = page_to_pfn(sg_page(s)),
1623 .offset = s->offset,
1624 .dev_addr = sg_dma_address(s),
1625 .size = sg_dma_len(s),
1626 .direction = direction,
1627 .sg_call_ents = nelems,
1628 };
1629 if (!i)
1630 mapped_ents = get_nr_mapped_entries(dev, &ref);
1631
1632 if (i >= mapped_ents)
1633 break;
1634
1635 check_sync(dev, &ref, false);
1636 }
1637}
1638EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
1639
1640static int __init dma_debug_driver_setup(char *str)
1641{
1642 int i;
1643
1644 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1645 current_driver_name[i] = *str;
1646 if (*str == 0)
1647 break;
1648 }
1649
1650 if (current_driver_name[0])
1651 pr_info("enable driver filter for driver [%s]\n",
1652 current_driver_name);
1653
1654
1655 return 1;
1656}
1657__setup("dma_debug_driver=", dma_debug_driver_setup);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2008 Advanced Micro Devices, Inc.
4 *
5 * Author: Joerg Roedel <joerg.roedel@amd.com>
6 */
7
8#define pr_fmt(fmt) "DMA-API: " fmt
9
10#include <linux/sched/task_stack.h>
11#include <linux/scatterlist.h>
12#include <linux/dma-map-ops.h>
13#include <linux/sched/task.h>
14#include <linux/stacktrace.h>
15#include <linux/spinlock.h>
16#include <linux/vmalloc.h>
17#include <linux/debugfs.h>
18#include <linux/uaccess.h>
19#include <linux/export.h>
20#include <linux/device.h>
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/ctype.h>
24#include <linux/list.h>
25#include <linux/slab.h>
26#include <asm/sections.h>
27#include "debug.h"
28
29#define HASH_SIZE 16384ULL
30#define HASH_FN_SHIFT 13
31#define HASH_FN_MASK (HASH_SIZE - 1)
32
33#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
34/* If the pool runs out, add this many new entries at once */
35#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
36
37enum {
38 dma_debug_single,
39 dma_debug_sg,
40 dma_debug_coherent,
41 dma_debug_resource,
42};
43
44enum map_err_types {
45 MAP_ERR_CHECK_NOT_APPLICABLE,
46 MAP_ERR_NOT_CHECKED,
47 MAP_ERR_CHECKED,
48};
49
50#define DMA_DEBUG_STACKTRACE_ENTRIES 5
51
52/**
53 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
54 * @list: node on pre-allocated free_entries list
55 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
56 * @dev_addr: dma address
57 * @size: length of the mapping
58 * @type: single, page, sg, coherent
59 * @direction: enum dma_data_direction
60 * @sg_call_ents: 'nents' from dma_map_sg
61 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
62 * @paddr: physical start address of the mapping
63 * @map_err_type: track whether dma_mapping_error() was checked
64 * @stack_len: number of backtrace entries in @stack_entries
65 * @stack_entries: stack of backtrace history
66 */
67struct dma_debug_entry {
68 struct list_head list;
69 struct device *dev;
70 u64 dev_addr;
71 u64 size;
72 int type;
73 int direction;
74 int sg_call_ents;
75 int sg_mapped_ents;
76 phys_addr_t paddr;
77 enum map_err_types map_err_type;
78#ifdef CONFIG_STACKTRACE
79 unsigned int stack_len;
80 unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
81#endif
82} ____cacheline_aligned_in_smp;
83
84typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
85
86struct hash_bucket {
87 struct list_head list;
88 spinlock_t lock;
89};
90
91/* Hash list to save the allocated dma addresses */
92static struct hash_bucket dma_entry_hash[HASH_SIZE];
93/* List of pre-allocated dma_debug_entry's */
94static LIST_HEAD(free_entries);
95/* Lock for the list above */
96static DEFINE_SPINLOCK(free_entries_lock);
97
98/* Global disable flag - will be set in case of an error */
99static bool global_disable __read_mostly;
100
101/* Early initialization disable flag, set at the end of dma_debug_init */
102static bool dma_debug_initialized __read_mostly;
103
104static inline bool dma_debug_disabled(void)
105{
106 return global_disable || !dma_debug_initialized;
107}
108
109/* Global error count */
110static u32 error_count;
111
112/* Global error show enable*/
113static u32 show_all_errors __read_mostly;
114/* Number of errors to show */
115static u32 show_num_errors = 1;
116
117static u32 num_free_entries;
118static u32 min_free_entries;
119static u32 nr_total_entries;
120
121/* number of preallocated entries requested by kernel cmdline */
122static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
123
124/* per-driver filter related state */
125
126#define NAME_MAX_LEN 64
127
128static char current_driver_name[NAME_MAX_LEN] __read_mostly;
129static struct device_driver *current_driver __read_mostly;
130
131static DEFINE_RWLOCK(driver_name_lock);
132
133static const char *const maperr2str[] = {
134 [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
135 [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
136 [MAP_ERR_CHECKED] = "dma map error checked",
137};
138
139static const char *type2name[] = {
140 [dma_debug_single] = "single",
141 [dma_debug_sg] = "scatter-gather",
142 [dma_debug_coherent] = "coherent",
143 [dma_debug_resource] = "resource",
144};
145
146static const char *dir2name[] = {
147 [DMA_BIDIRECTIONAL] = "DMA_BIDIRECTIONAL",
148 [DMA_TO_DEVICE] = "DMA_TO_DEVICE",
149 [DMA_FROM_DEVICE] = "DMA_FROM_DEVICE",
150 [DMA_NONE] = "DMA_NONE",
151};
152
153/*
154 * The access to some variables in this macro is racy. We can't use atomic_t
155 * here because all these variables are exported to debugfs. Some of them even
156 * writeable. This is also the reason why a lock won't help much. But anyway,
157 * the races are no big deal. Here is why:
158 *
159 * error_count: the addition is racy, but the worst thing that can happen is
160 * that we don't count some errors
161 * show_num_errors: the subtraction is racy. Also no big deal because in
162 * worst case this will result in one warning more in the
163 * system log than the user configured. This variable is
164 * writeable via debugfs.
165 */
166static inline void dump_entry_trace(struct dma_debug_entry *entry)
167{
168#ifdef CONFIG_STACKTRACE
169 if (entry) {
170 pr_warn("Mapped at:\n");
171 stack_trace_print(entry->stack_entries, entry->stack_len, 0);
172 }
173#endif
174}
175
176static bool driver_filter(struct device *dev)
177{
178 struct device_driver *drv;
179 unsigned long flags;
180 bool ret;
181
182 /* driver filter off */
183 if (likely(!current_driver_name[0]))
184 return true;
185
186 /* driver filter on and initialized */
187 if (current_driver && dev && dev->driver == current_driver)
188 return true;
189
190 /* driver filter on, but we can't filter on a NULL device... */
191 if (!dev)
192 return false;
193
194 if (current_driver || !current_driver_name[0])
195 return false;
196
197 /* driver filter on but not yet initialized */
198 drv = dev->driver;
199 if (!drv)
200 return false;
201
202 /* lock to protect against change of current_driver_name */
203 read_lock_irqsave(&driver_name_lock, flags);
204
205 ret = false;
206 if (drv->name &&
207 strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
208 current_driver = drv;
209 ret = true;
210 }
211
212 read_unlock_irqrestore(&driver_name_lock, flags);
213
214 return ret;
215}
216
217#define err_printk(dev, entry, format, arg...) do { \
218 error_count += 1; \
219 if (driver_filter(dev) && \
220 (show_all_errors || show_num_errors > 0)) { \
221 WARN(1, pr_fmt("%s %s: ") format, \
222 dev ? dev_driver_string(dev) : "NULL", \
223 dev ? dev_name(dev) : "NULL", ## arg); \
224 dump_entry_trace(entry); \
225 } \
226 if (!show_all_errors && show_num_errors > 0) \
227 show_num_errors -= 1; \
228 } while (0);
229
230/*
231 * Hash related functions
232 *
233 * Every DMA-API request is saved into a struct dma_debug_entry. To
234 * have quick access to these structs they are stored into a hash.
235 */
236static int hash_fn(struct dma_debug_entry *entry)
237{
238 /*
239 * Hash function is based on the dma address.
240 * We use bits 20-27 here as the index into the hash
241 */
242 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
243}
244
245/*
246 * Request exclusive access to a hash bucket for a given dma_debug_entry.
247 */
248static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
249 unsigned long *flags)
250 __acquires(&dma_entry_hash[idx].lock)
251{
252 int idx = hash_fn(entry);
253 unsigned long __flags;
254
255 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
256 *flags = __flags;
257 return &dma_entry_hash[idx];
258}
259
260/*
261 * Give up exclusive access to the hash bucket
262 */
263static void put_hash_bucket(struct hash_bucket *bucket,
264 unsigned long flags)
265 __releases(&bucket->lock)
266{
267 spin_unlock_irqrestore(&bucket->lock, flags);
268}
269
270static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
271{
272 return ((a->dev_addr == b->dev_addr) &&
273 (a->dev == b->dev)) ? true : false;
274}
275
276static bool containing_match(struct dma_debug_entry *a,
277 struct dma_debug_entry *b)
278{
279 if (a->dev != b->dev)
280 return false;
281
282 if ((b->dev_addr <= a->dev_addr) &&
283 ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
284 return true;
285
286 return false;
287}
288
289/*
290 * Search a given entry in the hash bucket list
291 */
292static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
293 struct dma_debug_entry *ref,
294 match_fn match)
295{
296 struct dma_debug_entry *entry, *ret = NULL;
297 int matches = 0, match_lvl, last_lvl = -1;
298
299 list_for_each_entry(entry, &bucket->list, list) {
300 if (!match(ref, entry))
301 continue;
302
303 /*
304 * Some drivers map the same physical address multiple
305 * times. Without a hardware IOMMU this results in the
306 * same device addresses being put into the dma-debug
307 * hash multiple times too. This can result in false
308 * positives being reported. Therefore we implement a
309 * best-fit algorithm here which returns the entry from
310 * the hash which fits best to the reference value
311 * instead of the first-fit.
312 */
313 matches += 1;
314 match_lvl = 0;
315 entry->size == ref->size ? ++match_lvl : 0;
316 entry->type == ref->type ? ++match_lvl : 0;
317 entry->direction == ref->direction ? ++match_lvl : 0;
318 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
319
320 if (match_lvl == 4) {
321 /* perfect-fit - return the result */
322 return entry;
323 } else if (match_lvl > last_lvl) {
324 /*
325 * We found an entry that fits better than the
326 * previous one, or it is the first match.
327 */
328 last_lvl = match_lvl;
329 ret = entry;
330 }
331 }
332
333 /*
334 * If we have multiple matches but no perfect-fit, just return
335 * NULL.
336 */
337 ret = (matches == 1) ? ret : NULL;
338
339 return ret;
340}
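
/*
 * Scoring illustration (hypothetical values): a bucket holds two entries for
 * the same dev_addr and type, one of size 4096/DMA_TO_DEVICE and one of size
 * 512/DMA_TO_DEVICE. An unmap reference of size 512/DMA_TO_DEVICE scores
 * match_lvl 3 against the first entry and 4 against the second, so the
 * second (perfect fit) is returned at once. Had the reference used
 * DMA_FROM_DEVICE instead, the scores would be 2 and 3 with no perfect fit;
 * since more than one entry matched, NULL is returned.
 */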
341
342static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
343 struct dma_debug_entry *ref)
344{
345 return __hash_bucket_find(bucket, ref, exact_match);
346}
347
348static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
349 struct dma_debug_entry *ref,
350 unsigned long *flags)
351{
352
353 struct dma_debug_entry *entry, index = *ref;
354 int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);
355
356 for (int i = 0; i < limit; i++) {
357 entry = __hash_bucket_find(*bucket, ref, containing_match);
358
359 if (entry)
360 return entry;
361
362 /*
363 * Nothing found, go back a hash bucket
364 */
365 put_hash_bucket(*bucket, *flags);
366 index.dev_addr -= (1 << HASH_FN_SHIFT);
367 *bucket = get_hash_bucket(&index, flags);
368 }
369
370 return NULL;
371}
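
/*
 * Walk-back illustration (hypothetical values, HASH_FN_SHIFT == 13): a
 * mapping starting at dev_addr 0x5000 hashes into bucket 2, but a partial
 * sync at dev_addr 0x8000 hashes to bucket 4. Since only the mapping's
 * start address is in the hash, the lookup steps back one bucket at a time
 * (4 -> 3 -> 2) until containing_match() succeeds, giving up after at most
 * (dev_addr >> HASH_FN_SHIFT) + 1 steps (capped at HASH_SIZE).
 */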
372
373/*
374 * Add an entry to a hash bucket
375 */
376static void hash_bucket_add(struct hash_bucket *bucket,
377 struct dma_debug_entry *entry)
378{
379 list_add_tail(&entry->list, &bucket->list);
380}
381
382/*
383 * Remove entry from a hash bucket list
384 */
385static void hash_bucket_del(struct dma_debug_entry *entry)
386{
387 list_del(&entry->list);
388}
389
390/*
391 * For each mapping (initial cacheline in the case of
392 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
393 * scatterlist, or the cacheline specified in dma_map_single) insert
394 * into this tree using the cacheline as the key. At
395 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
396 * the entry already exists at insertion time add a tag as a reference
397 * count for the overlapping mappings. For now, the overlap tracking
398 * just ensures that 'unmaps' balance 'maps' before marking the
399 * cacheline idle, but we should also be flagging overlaps as an API
400 * violation.
401 *
402 * Memory usage is mostly constrained by the maximum number of available
403 * dma-debug entries in that we need a free dma_debug_entry before
404 * inserting into the tree. In the case of dma_map_page and
405 * dma_alloc_coherent there is only one dma_debug_entry and one
406 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
407 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
408 * entries into the tree.
409 *
410 * Use __GFP_NOWARN because the printk from an OOM, to netconsole, could end
411 * up right back in the DMA debugging code, leading to a deadlock.
412 */
413static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC | __GFP_NOWARN);
414static DEFINE_SPINLOCK(radix_lock);
415#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
416#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
417#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
418
419static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
420{
421 return ((entry->paddr >> PAGE_SHIFT) << CACHELINE_PER_PAGE_SHIFT) +
422 (offset_in_page(entry->paddr) >> L1_CACHE_SHIFT);
423}
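
/*
 * Worked example (illustrative; assumes PAGE_SHIFT == 12 and
 * L1_CACHE_SHIFT == 6, i.e. 4 KiB pages holding 64 cachelines each):
 *
 *   paddr = 0x12345678
 *   page frame            = 0x12345678 >> 12      == 0x12345
 *   cacheline within page = 0x678 >> 6            == 0x19
 *   cln                   = (0x12345 << 6) + 0x19 == 0x48d159
 */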
424
425static int active_cacheline_read_overlap(phys_addr_t cln)
426{
427 int overlap = 0, i;
428
429 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
430 if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
431 overlap |= 1 << i;
432 return overlap;
433}
434
435static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
436{
437 int i;
438
439 if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
440 return overlap;
441
442 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
443 if (overlap & 1 << i)
444 radix_tree_tag_set(&dma_active_cacheline, cln, i);
445 else
446 radix_tree_tag_clear(&dma_active_cacheline, cln, i);
447
448 return overlap;
449}
450
451static void active_cacheline_inc_overlap(phys_addr_t cln)
452{
453 int overlap = active_cacheline_read_overlap(cln);
454
455 overlap = active_cacheline_set_overlap(cln, ++overlap);
456
457 /* If we overflowed the overlap counter then we're potentially
458 * leaking dma-mappings.
459 */
460 WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
461 pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
462 ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
463}
464
465static int active_cacheline_dec_overlap(phys_addr_t cln)
466{
467 int overlap = active_cacheline_read_overlap(cln);
468
469 return active_cacheline_set_overlap(cln, --overlap);
470}
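
/*
 * Overlap-count illustration: the per-cacheline reference count is encoded
 * in the radix tree's per-entry tag bits rather than stored separately.
 * With RADIX_TREE_MAX_TAGS tags (typically 3) the count saturates at
 * ACTIVE_CACHELINE_MAX_OVERLAP == 7; mapping the same cacheline yet again
 * trips the WARN_ONCE in active_cacheline_inc_overlap() above, since that
 * many overlapping mappings usually indicates a leak.
 */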
471
472static int active_cacheline_insert(struct dma_debug_entry *entry)
473{
474 phys_addr_t cln = to_cacheline_number(entry);
475 unsigned long flags;
476 int rc;
477
478 /* If the device is not writing memory then we don't have any
479 * concerns about the cpu consuming stale data. This mitigates
480 * legitimate usages of overlapping mappings.
481 */
482 if (entry->direction == DMA_TO_DEVICE)
483 return 0;
484
485 spin_lock_irqsave(&radix_lock, flags);
486 rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
487 if (rc == -EEXIST)
488 active_cacheline_inc_overlap(cln);
489 spin_unlock_irqrestore(&radix_lock, flags);
490
491 return rc;
492}
493
494static void active_cacheline_remove(struct dma_debug_entry *entry)
495{
496 phys_addr_t cln = to_cacheline_number(entry);
497 unsigned long flags;
498
499 /* ...mirror the insert case */
500 if (entry->direction == DMA_TO_DEVICE)
501 return;
502
503 spin_lock_irqsave(&radix_lock, flags);
504 /* since we are counting overlaps the final put of the
505 * cacheline will occur when the overlap count is 0.
506 * active_cacheline_dec_overlap() returns -1 in that case
507 */
508 if (active_cacheline_dec_overlap(cln) < 0)
509 radix_tree_delete(&dma_active_cacheline, cln);
510 spin_unlock_irqrestore(&radix_lock, flags);
511}
512
513/*
514 * Dump mapping entries to the kernel log for debugging purposes
515 */
516void debug_dma_dump_mappings(struct device *dev)
517{
518 int idx;
519 phys_addr_t cln;
520
521 for (idx = 0; idx < HASH_SIZE; idx++) {
522 struct hash_bucket *bucket = &dma_entry_hash[idx];
523 struct dma_debug_entry *entry;
524 unsigned long flags;
525
526 spin_lock_irqsave(&bucket->lock, flags);
527 list_for_each_entry(entry, &bucket->list, list) {
528 if (!dev || dev == entry->dev) {
529 cln = to_cacheline_number(entry);
530 dev_info(entry->dev,
531 "%s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
532 type2name[entry->type], idx,
533 &entry->paddr, entry->dev_addr,
534 entry->size, &cln,
535 dir2name[entry->direction],
536 maperr2str[entry->map_err_type]);
537 }
538 }
539 spin_unlock_irqrestore(&bucket->lock, flags);
540
541 cond_resched();
542 }
543}
544
545/*
547 * Dump mapping entries to user space via debugfs
547 */
548static int dump_show(struct seq_file *seq, void *v)
549{
550 int idx;
551 phys_addr_t cln;
552
553 for (idx = 0; idx < HASH_SIZE; idx++) {
554 struct hash_bucket *bucket = &dma_entry_hash[idx];
555 struct dma_debug_entry *entry;
556 unsigned long flags;
557
558 spin_lock_irqsave(&bucket->lock, flags);
559 list_for_each_entry(entry, &bucket->list, list) {
560 cln = to_cacheline_number(entry);
561 seq_printf(seq,
562 "%s %s %s idx %d P=%pa D=%llx L=%llx cln=%pa %s %s\n",
563 dev_driver_string(entry->dev),
564 dev_name(entry->dev),
565 type2name[entry->type], idx,
566 &entry->paddr, entry->dev_addr,
567 entry->size, &cln,
568 dir2name[entry->direction],
569 maperr2str[entry->map_err_type]);
570 }
571 spin_unlock_irqrestore(&bucket->lock, flags);
572 }
573 return 0;
574}
575DEFINE_SHOW_ATTRIBUTE(dump);
576
577/*
578 * Wrapper function for adding an entry to the hash.
579 * This function takes care of locking itself.
580 */
581static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
582{
583 struct hash_bucket *bucket;
584 unsigned long flags;
585 int rc;
586
587 bucket = get_hash_bucket(entry, &flags);
588 hash_bucket_add(bucket, entry);
589 put_hash_bucket(bucket, flags);
590
591 rc = active_cacheline_insert(entry);
592 if (rc == -ENOMEM) {
593 pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
594 global_disable = true;
595 } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
596 err_printk(entry->dev, entry,
597 "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
598 }
599}
600
601static int dma_debug_create_entries(gfp_t gfp)
602{
603 struct dma_debug_entry *entry;
604 int i;
605
606 entry = (void *)get_zeroed_page(gfp);
607 if (!entry)
608 return -ENOMEM;
609
610 for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
611 list_add_tail(&entry[i].list, &free_entries);
612
613 num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
614 nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
615
616 return 0;
617}
618
619static struct dma_debug_entry *__dma_entry_alloc(void)
620{
621 struct dma_debug_entry *entry;
622
623 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
624 list_del(&entry->list);
625 memset(entry, 0, sizeof(*entry));
626
627 num_free_entries -= 1;
628 if (num_free_entries < min_free_entries)
629 min_free_entries = num_free_entries;
630
631 return entry;
632}
633
634/*
635 * This should be called outside of free_entries_lock scope to avoid potential
636 * deadlocks with serial consoles that use DMA.
637 */
638static void __dma_entry_alloc_check_leak(u32 nr_entries)
639{
640 u32 tmp = nr_entries % nr_prealloc_entries;
641
642 /* Shout each time we tick over some multiple of the initial pool */
643 if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
644 pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
645 nr_entries,
646 (nr_entries / nr_prealloc_entries));
647 }
648}
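
/*
 * Example (illustrative): with the default preallocation of 65536 entries,
 * the message fires each time the pool crosses a multiple of 65536, e.g.
 * "dma_debug_entry pool grown to 131072 (200%)" once dynamic allocation
 * has doubled the original pool.
 */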
649
650/* struct dma_entry allocator
651 *
652 * The next two functions implement the allocator for
653 * struct dma_debug_entries.
654 */
655static struct dma_debug_entry *dma_entry_alloc(void)
656{
657 bool alloc_check_leak = false;
658 struct dma_debug_entry *entry;
659 unsigned long flags;
660 u32 nr_entries;
661
662 spin_lock_irqsave(&free_entries_lock, flags);
663 if (num_free_entries == 0) {
664 if (dma_debug_create_entries(GFP_ATOMIC)) {
665 global_disable = true;
666 spin_unlock_irqrestore(&free_entries_lock, flags);
667 pr_err("debugging out of memory - disabling\n");
668 return NULL;
669 }
670 alloc_check_leak = true;
671 nr_entries = nr_total_entries;
672 }
673
674 entry = __dma_entry_alloc();
675
676 spin_unlock_irqrestore(&free_entries_lock, flags);
677
678 if (alloc_check_leak)
679 __dma_entry_alloc_check_leak(nr_entries);
680
681#ifdef CONFIG_STACKTRACE
682 entry->stack_len = stack_trace_save(entry->stack_entries,
683 ARRAY_SIZE(entry->stack_entries),
684 1);
685#endif
686 return entry;
687}
688
689static void dma_entry_free(struct dma_debug_entry *entry)
690{
691 unsigned long flags;
692
693 active_cacheline_remove(entry);
694
695 /*
696 * add to beginning of the list - this way the entries are
697 * more likely cache hot when they are reallocated.
698 */
699 spin_lock_irqsave(&free_entries_lock, flags);
700 list_add(&entry->list, &free_entries);
701 num_free_entries += 1;
702 spin_unlock_irqrestore(&free_entries_lock, flags);
703}
704
705/*
706 * DMA-API debugging init code
707 *
708 * The init code does two things:
709 * 1. Initialize core data structures
710 * 2. Preallocate a given number of dma_debug_entry structs
711 */
712
713static ssize_t filter_read(struct file *file, char __user *user_buf,
714 size_t count, loff_t *ppos)
715{
716 char buf[NAME_MAX_LEN + 1];
717 unsigned long flags;
718 int len;
719
720 if (!current_driver_name[0])
721 return 0;
722
723 /*
724 * We can't copy to userspace directly because current_driver_name can
725 * only be read under the driver_name_lock with irqs disabled. So
726 * create a temporary copy first.
727 */
728 read_lock_irqsave(&driver_name_lock, flags);
729 len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
730 read_unlock_irqrestore(&driver_name_lock, flags);
731
732 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
733}
734
735static ssize_t filter_write(struct file *file, const char __user *userbuf,
736 size_t count, loff_t *ppos)
737{
738 char buf[NAME_MAX_LEN];
739 unsigned long flags;
740 size_t len;
741 int i;
742
743 /*
744 * We can't copy from userspace directly. Access to
745 * current_driver_name is protected with a write_lock with irqs
746 * disabled. Since copy_from_user can fault and may sleep we
747 * need to copy to temporary buffer first
748 */
749 len = min(count, (size_t)(NAME_MAX_LEN - 1));
750 if (copy_from_user(buf, userbuf, len))
751 return -EFAULT;
752
753 buf[len] = 0;
754
755 write_lock_irqsave(&driver_name_lock, flags);
756
757 /*
758 * Now handle the string we got from userspace very carefully.
759 * The rules are:
760 * - only use the first token we got
761 * - token delimiter is everything looking like a space
762 * character (' ', '\n', '\t' ...)
763 *
764 */
765 if (!isalnum(buf[0])) {
766 /*
767 * If the first character userspace gave us is not
768 * alphanumerical then assume the filter should be
769 * switched off.
770 */
771 if (current_driver_name[0])
772 pr_info("switching off dma-debug driver filter\n");
773 current_driver_name[0] = 0;
774 current_driver = NULL;
775 goto out_unlock;
776 }
777
778 /*
779 * Now parse out the first token and use it as the name for the
780 * driver to filter for.
781 */
782 for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
783 current_driver_name[i] = buf[i];
784 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
785 break;
786 }
787 current_driver_name[i] = 0;
788 current_driver = NULL;
789
790 pr_info("enable driver filter for driver [%s]\n",
791 current_driver_name);
792
793out_unlock:
794 write_unlock_irqrestore(&driver_name_lock, flags);
795
796 return count;
797}
798
799static const struct file_operations filter_fops = {
800 .read = filter_read,
801 .write = filter_write,
802 .llseek = default_llseek,
803};
804
805static int __init dma_debug_fs_init(void)
806{
807 struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
808
809 debugfs_create_bool("disabled", 0444, dentry, &global_disable);
810 debugfs_create_u32("error_count", 0444, dentry, &error_count);
811 debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
812 debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
813 debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
814 debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
815 debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
816 debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
817 debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
818
819 return 0;
820}
821core_initcall_sync(dma_debug_fs_init);
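
/*
 * Typical usage from user space (assuming debugfs is mounted at
 * /sys/kernel/debug; the driver name is just an example):
 *
 *   echo e1000e > /sys/kernel/debug/dma-api/driver_filter   # filter on one driver
 *   echo ""     > /sys/kernel/debug/dma-api/driver_filter   # disable the filter
 *   cat /sys/kernel/debug/dma-api/dump                      # list active mappings
 *   cat /sys/kernel/debug/dma-api/error_count               # total errors seen
 */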
822
823static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
824{
825 struct dma_debug_entry *entry;
826 unsigned long flags;
827 int count = 0, i;
828
829 for (i = 0; i < HASH_SIZE; ++i) {
830 spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
831 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
832 if (entry->dev == dev) {
833 count += 1;
834 *out_entry = entry;
835 }
836 }
837 spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
838 }
839
840 return count;
841}
842
843static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
844{
845 struct device *dev = data;
846 struct dma_debug_entry *entry;
847 int count;
848
849 if (dma_debug_disabled())
850 return 0;
851
852 switch (action) {
853 case BUS_NOTIFY_UNBOUND_DRIVER:
854 count = device_dma_allocations(dev, &entry);
855 if (count == 0)
856 break;
857 err_printk(dev, entry, "device driver has pending "
858 "DMA allocations while it is being unbound "
859 "from the device [count=%d]\n"
860 "Details of one leaked entry: "
861 "[device address=0x%016llx] [size=%llu bytes] "
862 "[mapped with %s] [mapped as %s]\n",
863 count, entry->dev_addr, entry->size,
864 dir2name[entry->direction], type2name[entry->type]);
865 break;
866 default:
867 break;
868 }
869
870 return 0;
871}
872
873void dma_debug_add_bus(const struct bus_type *bus)
874{
875 struct notifier_block *nb;
876
877 if (dma_debug_disabled())
878 return;
879
880 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
881 if (nb == NULL) {
882 pr_err("dma_debug_add_bus: out of memory\n");
883 return;
884 }
885
886 nb->notifier_call = dma_debug_device_change;
887
888 bus_register_notifier(bus, nb);
889}
890
891static int dma_debug_init(void)
892{
893 int i, nr_pages;
894
895 /* Do not check dma_debug_initialized here, since we really want this
896 * function to run so that it can set dma_debug_initialized.
897 */
898 if (global_disable)
899 return 0;
900
901 for (i = 0; i < HASH_SIZE; ++i) {
902 INIT_LIST_HEAD(&dma_entry_hash[i].list);
903 spin_lock_init(&dma_entry_hash[i].lock);
904 }
905
906 nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
907 for (i = 0; i < nr_pages; ++i)
908 dma_debug_create_entries(GFP_KERNEL);
909 if (num_free_entries >= nr_prealloc_entries) {
910 pr_info("preallocated %d debug entries\n", nr_total_entries);
911 } else if (num_free_entries > 0) {
912 pr_warn("%d debug entries requested but only %d allocated\n",
913 nr_prealloc_entries, nr_total_entries);
914 } else {
915 pr_err("debugging out of memory error - disabled\n");
916 global_disable = true;
917
918 return 0;
919 }
920 min_free_entries = num_free_entries;
921
922 dma_debug_initialized = true;
923
924 pr_info("debugging enabled by kernel config\n");
925 return 0;
926}
927core_initcall(dma_debug_init);
928
929static __init int dma_debug_cmdline(char *str)
930{
931 if (!str)
932 return -EINVAL;
933
934 if (strncmp(str, "off", 3) == 0) {
935 pr_info("debugging disabled on kernel command line\n");
936 global_disable = true;
937 }
938
939 return 1;
940}
941
942static __init int dma_debug_entries_cmdline(char *str)
943{
944 if (!str)
945 return -EINVAL;
946 if (!get_option(&str, &nr_prealloc_entries))
947 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
948 return 1;
949}
950
951__setup("dma_debug=", dma_debug_cmdline);
952__setup("dma_debug_entries=", dma_debug_entries_cmdline);
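
/*
 * Boot-time examples (illustrative): "dma_debug=off" on the kernel command
 * line disables the checks entirely, while "dma_debug_entries=131072"
 * doubles the default preallocation for systems with many concurrent
 * mappings.
 */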
953
954static void check_unmap(struct dma_debug_entry *ref)
955{
956 struct dma_debug_entry *entry;
957 struct hash_bucket *bucket;
958 unsigned long flags;
959
960 bucket = get_hash_bucket(ref, &flags);
961 entry = bucket_find_exact(bucket, ref);
962
963 if (!entry) {
964 /* must drop lock before calling dma_mapping_error */
965 put_hash_bucket(bucket, flags);
966
967 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
968 err_printk(ref->dev, NULL,
969 "device driver tries to free an "
970 "invalid DMA memory address\n");
971 } else {
972 err_printk(ref->dev, NULL,
973 "device driver tries to free DMA "
974 "memory it has not allocated [device "
975 "address=0x%016llx] [size=%llu bytes]\n",
976 ref->dev_addr, ref->size);
977 }
978 return;
979 }
980
981 if (ref->size != entry->size) {
982 err_printk(ref->dev, entry, "device driver frees "
983 "DMA memory with different size "
984 "[device address=0x%016llx] [map size=%llu bytes] "
985 "[unmap size=%llu bytes]\n",
986 ref->dev_addr, entry->size, ref->size);
987 }
988
989 if (ref->type != entry->type) {
990 err_printk(ref->dev, entry, "device driver frees "
991 "DMA memory with wrong function "
992 "[device address=0x%016llx] [size=%llu bytes] "
993 "[mapped as %s] [unmapped as %s]\n",
994 ref->dev_addr, ref->size,
995 type2name[entry->type], type2name[ref->type]);
996 } else if (entry->type == dma_debug_coherent &&
997 ref->paddr != entry->paddr) {
998 err_printk(ref->dev, entry, "device driver frees "
999 "DMA memory with different CPU address "
1000 "[device address=0x%016llx] [size=%llu bytes] "
1001 "[cpu alloc address=0x%pa] "
1002 "[cpu free address=0x%pa]",
1003 ref->dev_addr, ref->size,
1004 &entry->paddr,
1005 &ref->paddr);
1006 }
1007
1008 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1009 ref->sg_call_ents != entry->sg_call_ents) {
1010 err_printk(ref->dev, entry, "device driver frees "
1011 "DMA sg list with different entry count "
1012 "[map count=%d] [unmap count=%d]\n",
1013 entry->sg_call_ents, ref->sg_call_ents);
1014 }
1015
1016 /*
1017 * This may be no bug in reality - but most implementations of the
1018 * DMA API don't handle this properly, so check for it here
1019 */
1020 if (ref->direction != entry->direction) {
1021 err_printk(ref->dev, entry, "device driver frees "
1022 "DMA memory with different direction "
1023 "[device address=0x%016llx] [size=%llu bytes] "
1024 "[mapped with %s] [unmapped with %s]\n",
1025 ref->dev_addr, ref->size,
1026 dir2name[entry->direction],
1027 dir2name[ref->direction]);
1028 }
1029
1030 /*
1031 * Drivers should use dma_mapping_error() to check the returned
1032 * addresses of dma_map_single() and dma_map_page().
1033 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
1034 */
1035 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1036 err_printk(ref->dev, entry,
1037 "device driver failed to check map error"
1038 "[device address=0x%016llx] [size=%llu bytes] "
1039 "[mapped as %s]",
1040 ref->dev_addr, ref->size,
1041 type2name[entry->type]);
1042 }
1043
1044 hash_bucket_del(entry);
1045 put_hash_bucket(bucket, flags);
1046
1047 /*
1048 * Free the entry outside of bucket_lock to avoid ABBA deadlocks
1049 * between that and radix_lock.
1050 */
1051 dma_entry_free(entry);
1052}
1053
1054static void check_for_stack(struct device *dev,
1055 struct page *page, size_t offset)
1056{
1057 void *addr;
1058 struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1059
1060 if (!stack_vm_area) {
1061 /* Stack is direct-mapped. */
1062 if (PageHighMem(page))
1063 return;
1064 addr = page_address(page) + offset;
1065 if (object_is_on_stack(addr))
1066 err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1067 } else {
1068 /* Stack is vmalloced. */
1069 int i;
1070
1071 for (i = 0; i < stack_vm_area->nr_pages; i++) {
1072 if (page != stack_vm_area->pages[i])
1073 continue;
1074
1075 addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
1076 err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1077 break;
1078 }
1079 }
1080}
1081
1082static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1083{
1084 if (memory_intersects(_stext, _etext, addr, len) ||
1085 memory_intersects(__start_rodata, __end_rodata, addr, len))
1086 err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1087}
1088
1089static void check_sync(struct device *dev,
1090 struct dma_debug_entry *ref,
1091 bool to_cpu)
1092{
1093 struct dma_debug_entry *entry;
1094 struct hash_bucket *bucket;
1095 unsigned long flags;
1096
1097 bucket = get_hash_bucket(ref, &flags);
1098
1099 entry = bucket_find_contain(&bucket, ref, &flags);
1100
1101 if (!entry) {
1102 err_printk(dev, NULL, "device driver tries "
1103 "to sync DMA memory it has not allocated "
1104 "[device address=0x%016llx] [size=%llu bytes]\n",
1105 (unsigned long long)ref->dev_addr, ref->size);
1106 goto out;
1107 }
1108
1109 if (ref->size > entry->size) {
1110 err_printk(dev, entry, "device driver syncs"
1111 " DMA memory outside allocated range "
1112 "[device address=0x%016llx] "
1113 "[allocation size=%llu bytes] "
1114 "[sync offset+size=%llu]\n",
1115 entry->dev_addr, entry->size,
1116 ref->size);
1117 }
1118
1119 if (entry->direction == DMA_BIDIRECTIONAL)
1120 goto out;
1121
1122 if (ref->direction != entry->direction) {
1123 err_printk(dev, entry, "device driver syncs "
1124 "DMA memory with different direction "
1125 "[device address=0x%016llx] [size=%llu bytes] "
1126 "[mapped with %s] [synced with %s]\n",
1127 (unsigned long long)ref->dev_addr, entry->size,
1128 dir2name[entry->direction],
1129 dir2name[ref->direction]);
1130 }
1131
1132 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1133 !(ref->direction == DMA_TO_DEVICE))
1134 err_printk(dev, entry, "device driver syncs "
1135 "device read-only DMA memory for cpu "
1136 "[device address=0x%016llx] [size=%llu bytes] "
1137 "[mapped with %s] [synced with %s]\n",
1138 (unsigned long long)ref->dev_addr, entry->size,
1139 dir2name[entry->direction],
1140 dir2name[ref->direction]);
1141
1142 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1143 !(ref->direction == DMA_FROM_DEVICE))
1144 err_printk(dev, entry, "device driver syncs "
1145 "device write-only DMA memory to device "
1146 "[device address=0x%016llx] [size=%llu bytes] "
1147 "[mapped with %s] [synced with %s]\n",
1148 (unsigned long long)ref->dev_addr, entry->size,
1149 dir2name[entry->direction],
1150 dir2name[ref->direction]);
1151
1152 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1153 ref->sg_call_ents != entry->sg_call_ents) {
1154 err_printk(ref->dev, entry, "device driver syncs "
1155 "DMA sg list with different entry count "
1156 "[map count=%d] [sync count=%d]\n",
1157 entry->sg_call_ents, ref->sg_call_ents);
1158 }
1159
1160out:
1161 put_hash_bucket(bucket, flags);
1162}
1163
1164static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1165{
1166 unsigned int max_seg = dma_get_max_seg_size(dev);
1167 u64 start, end, boundary = dma_get_seg_boundary(dev);
1168
1169 /*
1170 * Either the driver forgot to set dma_parms appropriately, or
1171 * whoever generated the list forgot to check them.
1172 */
1173 if (sg->length > max_seg)
1174 err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1175 sg->length, max_seg);
1176 /*
1177 * In some cases this could potentially be the DMA API
1178 * implementation's fault, but it would usually imply that
1179 * the scatterlist was built inappropriately to begin with.
1180 */
1181 start = sg_dma_address(sg);
1182 end = start + sg_dma_len(sg) - 1;
1183 if ((start ^ end) & ~boundary)
1184 err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1185 start, end, boundary);
1186}
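
/*
 * Boundary-check example (hypothetical values): with a reported segment
 * boundary mask of 0xffff (64 KiB), a segment with start == 0xfff0 and
 * end == 0x1000f gives (start ^ end) & ~0xffff == 0x10000, which is
 * non-zero, so the segment illegally straddles a 64 KiB boundary and the
 * error above is reported.
 */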
1187
1188void debug_dma_map_single(struct device *dev, const void *addr,
1189 unsigned long len)
1190{
1191 if (unlikely(dma_debug_disabled()))
1192 return;
1193
1194 if (!virt_addr_valid(addr))
1195 err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
1196 addr, len);
1197
1198 if (is_vmalloc_addr(addr))
1199 err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
1200 addr, len);
1201}
1202EXPORT_SYMBOL(debug_dma_map_single);
1203
1204void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1205 size_t size, int direction, dma_addr_t dma_addr,
1206 unsigned long attrs)
1207{
1208 struct dma_debug_entry *entry;
1209
1210 if (unlikely(dma_debug_disabled()))
1211 return;
1212
1213 if (dma_mapping_error(dev, dma_addr))
1214 return;
1215
1216 entry = dma_entry_alloc();
1217 if (!entry)
1218 return;
1219
1220 entry->dev = dev;
1221 entry->type = dma_debug_single;
1222 entry->paddr = page_to_phys(page) + offset;
1223 entry->dev_addr = dma_addr;
1224 entry->size = size;
1225 entry->direction = direction;
1226 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1227
1228 check_for_stack(dev, page, offset);
1229
1230 if (!PageHighMem(page)) {
1231 void *addr = page_address(page) + offset;
1232
1233 check_for_illegal_area(dev, addr, size);
1234 }
1235
1236 add_dma_entry(entry, attrs);
1237}
1238
1239void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1240{
1241 struct dma_debug_entry ref;
1242 struct dma_debug_entry *entry;
1243 struct hash_bucket *bucket;
1244 unsigned long flags;
1245
1246 if (unlikely(dma_debug_disabled()))
1247 return;
1248
1249 ref.dev = dev;
1250 ref.dev_addr = dma_addr;
1251 bucket = get_hash_bucket(&ref, &flags);
1252
1253 list_for_each_entry(entry, &bucket->list, list) {
1254 if (!exact_match(&ref, entry))
1255 continue;
1256
1257 /*
1258 * The same physical address can be mapped multiple
1259 * times. Without a hardware IOMMU this results in the
1260 * same device addresses being put into the dma-debug
1261 * hash multiple times too. This can result in false
1262 * positives being reported. Therefore we implement a
1263 * best-fit algorithm here which updates the first entry
1264 * from the hash which fits the reference value and is
1265 * not currently listed as being checked.
1266 */
1267 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1268 entry->map_err_type = MAP_ERR_CHECKED;
1269 break;
1270 }
1271 }
1272
1273 put_hash_bucket(bucket, flags);
1274}
1275EXPORT_SYMBOL(debug_dma_mapping_error);
1276
1277void debug_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
1278 size_t size, int direction)
1279{
1280 struct dma_debug_entry ref = {
1281 .type = dma_debug_single,
1282 .dev = dev,
1283 .dev_addr = dma_addr,
1284 .size = size,
1285 .direction = direction,
1286 };
1287
1288 if (unlikely(dma_debug_disabled()))
1289 return;
1290 check_unmap(&ref);
1291}
1292
1293void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1294 int nents, int mapped_ents, int direction,
1295 unsigned long attrs)
1296{
1297 struct dma_debug_entry *entry;
1298 struct scatterlist *s;
1299 int i;
1300
1301 if (unlikely(dma_debug_disabled()))
1302 return;
1303
1304 for_each_sg(sg, s, nents, i) {
1305 check_for_stack(dev, sg_page(s), s->offset);
1306 if (!PageHighMem(sg_page(s)))
1307 check_for_illegal_area(dev, sg_virt(s), s->length);
1308 }
1309
1310 for_each_sg(sg, s, mapped_ents, i) {
1311 entry = dma_entry_alloc();
1312 if (!entry)
1313 return;
1314
1315 entry->type = dma_debug_sg;
1316 entry->dev = dev;
1317 entry->paddr = sg_phys(s);
1318 entry->size = sg_dma_len(s);
1319 entry->dev_addr = sg_dma_address(s);
1320 entry->direction = direction;
1321 entry->sg_call_ents = nents;
1322 entry->sg_mapped_ents = mapped_ents;
1323
1324 check_sg_segment(dev, s);
1325
1326 add_dma_entry(entry, attrs);
1327 }
1328}
1329
1330static int get_nr_mapped_entries(struct device *dev,
1331 struct dma_debug_entry *ref)
1332{
1333 struct dma_debug_entry *entry;
1334 struct hash_bucket *bucket;
1335 unsigned long flags;
1336 int mapped_ents;
1337
1338 bucket = get_hash_bucket(ref, &flags);
1339 entry = bucket_find_exact(bucket, ref);
1340 mapped_ents = 0;
1341
1342 if (entry)
1343 mapped_ents = entry->sg_mapped_ents;
1344 put_hash_bucket(bucket, flags);
1345
1346 return mapped_ents;
1347}
1348
1349void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1350 int nelems, int dir)
1351{
1352 struct scatterlist *s;
1353 int mapped_ents = 0, i;
1354
1355 if (unlikely(dma_debug_disabled()))
1356 return;
1357
1358 for_each_sg(sglist, s, nelems, i) {
1359
1360 struct dma_debug_entry ref = {
1361 .type = dma_debug_sg,
1362 .dev = dev,
1363 .paddr = sg_phys(s),
1364 .dev_addr = sg_dma_address(s),
1365 .size = sg_dma_len(s),
1366 .direction = dir,
1367 .sg_call_ents = nelems,
1368 };
1369
1370 if (mapped_ents && i >= mapped_ents)
1371 break;
1372
1373 if (!i)
1374 mapped_ents = get_nr_mapped_entries(dev, &ref);
1375
1376 check_unmap(&ref);
1377 }
1378}
1379
1380static phys_addr_t virt_to_paddr(void *virt)
1381{
1382 struct page *page;
1383
1384 if (is_vmalloc_addr(virt))
1385 page = vmalloc_to_page(virt);
1386 else
1387 page = virt_to_page(virt);
1388
1389 return page_to_phys(page) + offset_in_page(virt);
1390}
1391
1392void debug_dma_alloc_coherent(struct device *dev, size_t size,
1393 dma_addr_t dma_addr, void *virt,
1394 unsigned long attrs)
1395{
1396 struct dma_debug_entry *entry;
1397
1398 if (unlikely(dma_debug_disabled()))
1399 return;
1400
1401 if (unlikely(virt == NULL))
1402 return;
1403
1404 /* handle vmalloc and linear addresses */
1405 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1406 return;
1407
1408 entry = dma_entry_alloc();
1409 if (!entry)
1410 return;
1411
1412 entry->type = dma_debug_coherent;
1413 entry->dev = dev;
1414 entry->paddr = virt_to_paddr(virt);
1415 entry->size = size;
1416 entry->dev_addr = dma_addr;
1417 entry->direction = DMA_BIDIRECTIONAL;
1418
1419 add_dma_entry(entry, attrs);
1420}
1421
1422void debug_dma_free_coherent(struct device *dev, size_t size,
1423 void *virt, dma_addr_t dma_addr)
1424{
1425 struct dma_debug_entry ref = {
1426 .type = dma_debug_coherent,
1427 .dev = dev,
1428 .dev_addr = dma_addr,
1429 .size = size,
1430 .direction = DMA_BIDIRECTIONAL,
1431 };
1432
1433 /* handle vmalloc and linear addresses */
1434 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1435 return;
1436
1437 ref.paddr = virt_to_paddr(virt);
1438
1439 if (unlikely(dma_debug_disabled()))
1440 return;
1441
1442 check_unmap(&ref);
1443}
1444
1445void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
1446 int direction, dma_addr_t dma_addr,
1447 unsigned long attrs)
1448{
1449 struct dma_debug_entry *entry;
1450
1451 if (unlikely(dma_debug_disabled()))
1452 return;
1453
1454 entry = dma_entry_alloc();
1455 if (!entry)
1456 return;
1457
1458 entry->type = dma_debug_resource;
1459 entry->dev = dev;
1460 entry->paddr = addr;
1461 entry->size = size;
1462 entry->dev_addr = dma_addr;
1463 entry->direction = direction;
1464 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1465
1466 add_dma_entry(entry, attrs);
1467}
1468
1469void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1470 size_t size, int direction)
1471{
1472 struct dma_debug_entry ref = {
1473 .type = dma_debug_resource,
1474 .dev = dev,
1475 .dev_addr = dma_addr,
1476 .size = size,
1477 .direction = direction,
1478 };
1479
1480 if (unlikely(dma_debug_disabled()))
1481 return;
1482
1483 check_unmap(&ref);
1484}
1485
1486void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1487 size_t size, int direction)
1488{
1489 struct dma_debug_entry ref;
1490
1491 if (unlikely(dma_debug_disabled()))
1492 return;
1493
1494 ref.type = dma_debug_single;
1495 ref.dev = dev;
1496 ref.dev_addr = dma_handle;
1497 ref.size = size;
1498 ref.direction = direction;
1499 ref.sg_call_ents = 0;
1500
1501 check_sync(dev, &ref, true);
1502}
1503
1504void debug_dma_sync_single_for_device(struct device *dev,
1505 dma_addr_t dma_handle, size_t size,
1506 int direction)
1507{
1508 struct dma_debug_entry ref;
1509
1510 if (unlikely(dma_debug_disabled()))
1511 return;
1512
1513 ref.type = dma_debug_single;
1514 ref.dev = dev;
1515 ref.dev_addr = dma_handle;
1516 ref.size = size;
1517 ref.direction = direction;
1518 ref.sg_call_ents = 0;
1519
1520 check_sync(dev, &ref, false);
1521}
1522
1523void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1524 int nelems, int direction)
1525{
1526 struct scatterlist *s;
1527 int mapped_ents = 0, i;
1528
1529 if (unlikely(dma_debug_disabled()))
1530 return;
1531
1532 for_each_sg(sg, s, nelems, i) {
1533
1534 struct dma_debug_entry ref = {
1535 .type = dma_debug_sg,
1536 .dev = dev,
1537 .paddr = sg_phys(s),
1538 .dev_addr = sg_dma_address(s),
1539 .size = sg_dma_len(s),
1540 .direction = direction,
1541 .sg_call_ents = nelems,
1542 };
1543
1544 if (!i)
1545 mapped_ents = get_nr_mapped_entries(dev, &ref);
1546
1547 if (i >= mapped_ents)
1548 break;
1549
1550 check_sync(dev, &ref, true);
1551 }
1552}
1553
1554void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1555 int nelems, int direction)
1556{
1557 struct scatterlist *s;
1558 int mapped_ents = 0, i;
1559
1560 if (unlikely(dma_debug_disabled()))
1561 return;
1562
1563 for_each_sg(sg, s, nelems, i) {
1564
1565 struct dma_debug_entry ref = {
1566 .type = dma_debug_sg,
1567 .dev = dev,
1568 .paddr = sg_phys(s),
1569 .dev_addr = sg_dma_address(s),
1570 .size = sg_dma_len(s),
1571 .direction = direction,
1572 .sg_call_ents = nelems,
1573 };
1574 if (!i)
1575 mapped_ents = get_nr_mapped_entries(dev, &ref);
1576
1577 if (i >= mapped_ents)
1578 break;
1579
1580 check_sync(dev, &ref, false);
1581 }
1582}
1583
1584static int __init dma_debug_driver_setup(char *str)
1585{
1586 int i;
1587
1588 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1589 current_driver_name[i] = *str;
1590 if (*str == 0)
1591 break;
1592 }
1593
1594 if (current_driver_name[0])
1595 pr_info("enable driver filter for driver [%s]\n",
1596 current_driver_name);
1597
1598
1599 return 1;
1600}
1601__setup("dma_debug_driver=", dma_debug_driver_setup);