1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2008 Advanced Micro Devices, Inc.
4 *
5 * Author: Joerg Roedel <joerg.roedel@amd.com>
6 */
7
8#define pr_fmt(fmt) "DMA-API: " fmt
9
10#include <linux/sched/task_stack.h>
11#include <linux/scatterlist.h>
12#include <linux/dma-map-ops.h>
13#include <linux/sched/task.h>
14#include <linux/stacktrace.h>
15#include <linux/spinlock.h>
16#include <linux/vmalloc.h>
17#include <linux/debugfs.h>
18#include <linux/uaccess.h>
19#include <linux/export.h>
20#include <linux/device.h>
21#include <linux/types.h>
22#include <linux/sched.h>
23#include <linux/ctype.h>
24#include <linux/list.h>
25#include <linux/slab.h>
26#include <asm/sections.h>
27#include "debug.h"
28
29#define HASH_SIZE 16384ULL
30#define HASH_FN_SHIFT 13
31#define HASH_FN_MASK (HASH_SIZE - 1)
32
33#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
34/* If the pool runs out, add this many new entries at once */
35#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
36
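/* Mapping types tracked by dma-debug; these index the type2name[] table below */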
37enum {
38 dma_debug_single,
39 dma_debug_sg,
40 dma_debug_coherent,
41 dma_debug_resource,
42};
43
44enum map_err_types {
45 MAP_ERR_CHECK_NOT_APPLICABLE,
46 MAP_ERR_NOT_CHECKED,
47 MAP_ERR_CHECKED,
48};
49
50#define DMA_DEBUG_STACKTRACE_ENTRIES 5
51
/**
 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
 * @list: node on pre-allocated free_entries list
 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
 * @dev_addr: dma address returned by the mapping function
 * @size: length of the mapping
 * @type: single, page, sg, coherent
 * @direction: enum dma_data_direction
 * @sg_call_ents: 'nents' from dma_map_sg
 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
 * @pfn: page frame of the start address
 * @offset: offset of mapping relative to pfn
 * @map_err_type: track whether dma_mapping_error() was checked
 * @stack_len: number of backtrace entries in @stack_entries
 * @stack_entries: stack of backtrace history
 */
66struct dma_debug_entry {
67 struct list_head list;
68 struct device *dev;
69 u64 dev_addr;
70 u64 size;
71 int type;
72 int direction;
73 int sg_call_ents;
74 int sg_mapped_ents;
75 unsigned long pfn;
76 size_t offset;
77 enum map_err_types map_err_type;
78#ifdef CONFIG_STACKTRACE
79 unsigned int stack_len;
80 unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
81#endif
82} ____cacheline_aligned_in_smp;
83
84typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
85
86struct hash_bucket {
87 struct list_head list;
88 spinlock_t lock;
89};
90
91/* Hash list to save the allocated dma addresses */
92static struct hash_bucket dma_entry_hash[HASH_SIZE];
93/* List of pre-allocated dma_debug_entry's */
94static LIST_HEAD(free_entries);
95/* Lock for the list above */
96static DEFINE_SPINLOCK(free_entries_lock);
97
98/* Global disable flag - will be set in case of an error */
99static bool global_disable __read_mostly;
100
101/* Early initialization disable flag, set at the end of dma_debug_init */
102static bool dma_debug_initialized __read_mostly;
103
104static inline bool dma_debug_disabled(void)
105{
106 return global_disable || !dma_debug_initialized;
107}
108
109/* Global error count */
110static u32 error_count;
111
112/* Global error show enable*/
113static u32 show_all_errors __read_mostly;
114/* Number of errors to show */
115static u32 show_num_errors = 1;
116
117static u32 num_free_entries;
118static u32 min_free_entries;
119static u32 nr_total_entries;
120
121/* number of preallocated entries requested by kernel cmdline */
122static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
123
124/* per-driver filter related state */
125
126#define NAME_MAX_LEN 64
127
128static char current_driver_name[NAME_MAX_LEN] __read_mostly;
129static struct device_driver *current_driver __read_mostly;
130
131static DEFINE_RWLOCK(driver_name_lock);
132
133static const char *const maperr2str[] = {
134 [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
135 [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
136 [MAP_ERR_CHECKED] = "dma map error checked",
137};
138
139static const char *type2name[] = {
140 [dma_debug_single] = "single",
	[dma_debug_sg] = "scatter-gather",
142 [dma_debug_coherent] = "coherent",
143 [dma_debug_resource] = "resource",
144};
145
146static const char *dir2name[] = {
147 [DMA_BIDIRECTIONAL] = "DMA_BIDIRECTIONAL",
148 [DMA_TO_DEVICE] = "DMA_TO_DEVICE",
149 [DMA_FROM_DEVICE] = "DMA_FROM_DEVICE",
150 [DMA_NONE] = "DMA_NONE",
151};
152
/*
 * The access to some variables in the err_printk() macro below is racy.
 * We can't use atomic_t here because all these variables are exported to
 * debugfs and some of them are even writeable. This is also the reason why
 * a lock won't help much. But anyway, the races are no big deal. Here is
 * why:
 *
 *   error_count: the addition is racy, but the worst thing that can happen
 *                is that we don't count some errors
 *   show_num_errors: the subtraction is racy. Also no big deal because in
 *                    the worst case this will result in one warning more in
 *                    the system log than the user configured. This variable
 *                    is writeable via debugfs.
 */
166static inline void dump_entry_trace(struct dma_debug_entry *entry)
167{
168#ifdef CONFIG_STACKTRACE
169 if (entry) {
170 pr_warn("Mapped at:\n");
171 stack_trace_print(entry->stack_entries, entry->stack_len, 0);
172 }
173#endif
174}
175
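/*
 * Decide whether an error for this device should be reported. A driver
 * filter can be set via the dma-api/driver_filter debugfs file or the
 * dma_debug_driver= kernel parameter; matching is done by driver name
 * until the driver pointer itself has been cached in current_driver.
 */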
176static bool driver_filter(struct device *dev)
177{
178 struct device_driver *drv;
179 unsigned long flags;
180 bool ret;
181
182 /* driver filter off */
183 if (likely(!current_driver_name[0]))
184 return true;
185
186 /* driver filter on and initialized */
187 if (current_driver && dev && dev->driver == current_driver)
188 return true;
189
190 /* driver filter on, but we can't filter on a NULL device... */
191 if (!dev)
192 return false;
193
194 if (current_driver || !current_driver_name[0])
195 return false;
196
197 /* driver filter on but not yet initialized */
198 drv = dev->driver;
199 if (!drv)
200 return false;
201
202 /* lock to protect against change of current_driver_name */
203 read_lock_irqsave(&driver_name_lock, flags);
204
205 ret = false;
206 if (drv->name &&
207 strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
208 current_driver = drv;
209 ret = true;
210 }
211
212 read_unlock_irqrestore(&driver_name_lock, flags);
213
214 return ret;
215}
216
217#define err_printk(dev, entry, format, arg...) do { \
218 error_count += 1; \
219 if (driver_filter(dev) && \
220 (show_all_errors || show_num_errors > 0)) { \
221 WARN(1, pr_fmt("%s %s: ") format, \
222 dev ? dev_driver_string(dev) : "NULL", \
223 dev ? dev_name(dev) : "NULL", ## arg); \
224 dump_entry_trace(entry); \
225 } \
226 if (!show_all_errors && show_num_errors > 0) \
227 show_num_errors -= 1; \
228 } while (0);
229
230/*
231 * Hash related functions
232 *
233 * Every DMA-API request is saved into a struct dma_debug_entry. To
234 * have quick access to these structs they are stored into a hash.
235 */
236static int hash_fn(struct dma_debug_entry *entry)
237{
238 /*
	 * The hash function is based on the dma address. With HASH_FN_SHIFT
	 * of 13, bits 13-26 of the address select one of the HASH_SIZE buckets.
241 */
242 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
243}
244
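/*
 * Illustrative example of the bucket selection above: with HASH_FN_SHIFT
 * of 13, dma addresses 0x0000-0x1fff all hash to bucket 0, 0x2000 hashes
 * to bucket 1, and the index wraps modulo HASH_SIZE for larger addresses.
 */
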
245/*
246 * Request exclusive access to a hash bucket for a given dma_debug_entry.
247 */
248static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
249 unsigned long *flags)
250 __acquires(&dma_entry_hash[idx].lock)
251{
252 int idx = hash_fn(entry);
253 unsigned long __flags;
254
255 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
256 *flags = __flags;
257 return &dma_entry_hash[idx];
258}
259
260/*
261 * Give up exclusive access to the hash bucket
262 */
263static void put_hash_bucket(struct hash_bucket *bucket,
264 unsigned long flags)
265 __releases(&bucket->lock)
266{
267 spin_unlock_irqrestore(&bucket->lock, flags);
268}
269
270static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
271{
	return a->dev_addr == b->dev_addr && a->dev == b->dev;
274}
275
276static bool containing_match(struct dma_debug_entry *a,
277 struct dma_debug_entry *b)
278{
279 if (a->dev != b->dev)
280 return false;
281
282 if ((b->dev_addr <= a->dev_addr) &&
283 ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
284 return true;
285
286 return false;
287}
288
289/*
290 * Search a given entry in the hash bucket list
291 */
292static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
293 struct dma_debug_entry *ref,
294 match_fn match)
295{
296 struct dma_debug_entry *entry, *ret = NULL;
297 int matches = 0, match_lvl, last_lvl = -1;
298
299 list_for_each_entry(entry, &bucket->list, list) {
300 if (!match(ref, entry))
301 continue;
302
303 /*
304 * Some drivers map the same physical address multiple
305 * times. Without a hardware IOMMU this results in the
306 * same device addresses being put into the dma-debug
307 * hash multiple times too. This can result in false
308 * positives being reported. Therefore we implement a
309 * best-fit algorithm here which returns the entry from
310 * the hash which fits best to the reference value
311 * instead of the first-fit.
312 */
313 matches += 1;
314 match_lvl = 0;
		if (entry->size == ref->size)
			match_lvl++;
		if (entry->type == ref->type)
			match_lvl++;
		if (entry->direction == ref->direction)
			match_lvl++;
		if (entry->sg_call_ents == ref->sg_call_ents)
			match_lvl++;
319
320 if (match_lvl == 4) {
321 /* perfect-fit - return the result */
322 return entry;
323 } else if (match_lvl > last_lvl) {
324 /*
			 * We found an entry that fits better than the
			 * previous one, or it is the first match.
327 */
328 last_lvl = match_lvl;
329 ret = entry;
330 }
331 }
332
333 /*
334 * If we have multiple matches but no perfect-fit, just return
335 * NULL.
336 */
337 ret = (matches == 1) ? ret : NULL;
338
339 return ret;
340}
341
342static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
343 struct dma_debug_entry *ref)
344{
345 return __hash_bucket_find(bucket, ref, exact_match);
346}
347
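/*
 * A sync may reference the middle of a mapping, in which case the
 * containing entry lives in an earlier hash bucket. Walk backwards one
 * bucket (1 << HASH_FN_SHIFT worth of dma address space) at a time until
 * a containing mapping is found or the buckets below are exhausted.
 */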
348static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
349 struct dma_debug_entry *ref,
350 unsigned long *flags)
351{
352
353 struct dma_debug_entry *entry, index = *ref;
354 int limit = min(HASH_SIZE, (index.dev_addr >> HASH_FN_SHIFT) + 1);
355
356 for (int i = 0; i < limit; i++) {
357 entry = __hash_bucket_find(*bucket, ref, containing_match);
358
359 if (entry)
360 return entry;
361
362 /*
363 * Nothing found, go back a hash bucket
364 */
365 put_hash_bucket(*bucket, *flags);
366 index.dev_addr -= (1 << HASH_FN_SHIFT);
367 *bucket = get_hash_bucket(&index, flags);
368 }
369
370 return NULL;
371}
372
373/*
374 * Add an entry to a hash bucket
375 */
376static void hash_bucket_add(struct hash_bucket *bucket,
377 struct dma_debug_entry *entry)
378{
379 list_add_tail(&entry->list, &bucket->list);
380}
381
382/*
383 * Remove entry from a hash bucket list
384 */
385static void hash_bucket_del(struct dma_debug_entry *entry)
386{
387 list_del(&entry->list);
388}
389
390static unsigned long long phys_addr(struct dma_debug_entry *entry)
391{
392 if (entry->type == dma_debug_resource)
393 return __pfn_to_phys(entry->pfn) + entry->offset;
394
395 return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
396}
397
398/*
399 * Dump mapping entries for debugging purposes
400 */
401void debug_dma_dump_mappings(struct device *dev)
402{
403 int idx;
404
405 for (idx = 0; idx < HASH_SIZE; idx++) {
406 struct hash_bucket *bucket = &dma_entry_hash[idx];
407 struct dma_debug_entry *entry;
408 unsigned long flags;
409
410 spin_lock_irqsave(&bucket->lock, flags);
411
412 list_for_each_entry(entry, &bucket->list, list) {
413 if (!dev || dev == entry->dev) {
414 dev_info(entry->dev,
415 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
416 type2name[entry->type], idx,
417 phys_addr(entry), entry->pfn,
418 entry->dev_addr, entry->size,
419 dir2name[entry->direction],
420 maperr2str[entry->map_err_type]);
421 }
422 }
423
424 spin_unlock_irqrestore(&bucket->lock, flags);
425 cond_resched();
426 }
427}
428
429/*
430 * For each mapping (initial cacheline in the case of
431 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
432 * scatterlist, or the cacheline specified in dma_map_single) insert
433 * into this tree using the cacheline as the key. At
434 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
435 * the entry already exists at insertion time add a tag as a reference
436 * count for the overlapping mappings. For now, the overlap tracking
437 * just ensures that 'unmaps' balance 'maps' before marking the
438 * cacheline idle, but we should also be flagging overlaps as an API
439 * violation.
440 *
441 * Memory usage is mostly constrained by the maximum number of available
442 * dma-debug entries in that we need a free dma_debug_entry before
443 * inserting into the tree. In the case of dma_map_page and
444 * dma_alloc_coherent there is only one dma_debug_entry and one
445 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
446 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
447 * entries into the tree.
448 */
449static RADIX_TREE(dma_active_cacheline, GFP_ATOMIC);
450static DEFINE_SPINLOCK(radix_lock);
451#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
452#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
453#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
454
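/*
 * Convert a mapping to its global cacheline number: the pfn scaled to
 * cacheline granularity plus the cacheline index within the page. Purely
 * illustrative example: with 4K pages and 64-byte cachelines, pfn 2 with
 * offset 0x80 yields cacheline number 2 * 64 + 2 = 130.
 */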
455static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
456{
457 return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
458 (entry->offset >> L1_CACHE_SHIFT);
459}
460
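/*
 * The overlap count of a cacheline is encoded in the radix tree tag bits:
 * tag i set means bit i of the counter is set, so with RADIX_TREE_MAX_TAGS
 * tags (typically 3) up to ACTIVE_CACHELINE_MAX_OVERLAP extra overlapping
 * mappings can be tracked before the counter saturates.
 */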
461static int active_cacheline_read_overlap(phys_addr_t cln)
462{
463 int overlap = 0, i;
464
465 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
466 if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
467 overlap |= 1 << i;
468 return overlap;
469}
470
471static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
472{
473 int i;
474
475 if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
476 return overlap;
477
478 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
479 if (overlap & 1 << i)
480 radix_tree_tag_set(&dma_active_cacheline, cln, i);
481 else
482 radix_tree_tag_clear(&dma_active_cacheline, cln, i);
483
484 return overlap;
485}
486
487static void active_cacheline_inc_overlap(phys_addr_t cln)
488{
489 int overlap = active_cacheline_read_overlap(cln);
490
491 overlap = active_cacheline_set_overlap(cln, ++overlap);
492
493 /* If we overflowed the overlap counter then we're potentially
494 * leaking dma-mappings.
495 */
496 WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
497 pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
498 ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
499}
500
501static int active_cacheline_dec_overlap(phys_addr_t cln)
502{
503 int overlap = active_cacheline_read_overlap(cln);
504
505 return active_cacheline_set_overlap(cln, --overlap);
506}
507
508static int active_cacheline_insert(struct dma_debug_entry *entry)
509{
510 phys_addr_t cln = to_cacheline_number(entry);
511 unsigned long flags;
512 int rc;
513
514 /* If the device is not writing memory then we don't have any
515 * concerns about the cpu consuming stale data. This mitigates
516 * legitimate usages of overlapping mappings.
517 */
518 if (entry->direction == DMA_TO_DEVICE)
519 return 0;
520
521 spin_lock_irqsave(&radix_lock, flags);
522 rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
523 if (rc == -EEXIST)
524 active_cacheline_inc_overlap(cln);
525 spin_unlock_irqrestore(&radix_lock, flags);
526
527 return rc;
528}
529
530static void active_cacheline_remove(struct dma_debug_entry *entry)
531{
532 phys_addr_t cln = to_cacheline_number(entry);
533 unsigned long flags;
534
535 /* ...mirror the insert case */
536 if (entry->direction == DMA_TO_DEVICE)
537 return;
538
539 spin_lock_irqsave(&radix_lock, flags);
540 /* since we are counting overlaps the final put of the
541 * cacheline will occur when the overlap count is 0.
542 * active_cacheline_dec_overlap() returns -1 in that case
543 */
544 if (active_cacheline_dec_overlap(cln) < 0)
545 radix_tree_delete(&dma_active_cacheline, cln);
546 spin_unlock_irqrestore(&radix_lock, flags);
547}
548
549/*
550 * Wrapper function for adding an entry to the hash.
551 * This function takes care of locking itself.
552 */
553static void add_dma_entry(struct dma_debug_entry *entry, unsigned long attrs)
554{
555 struct hash_bucket *bucket;
556 unsigned long flags;
557 int rc;
558
559 bucket = get_hash_bucket(entry, &flags);
560 hash_bucket_add(bucket, entry);
561 put_hash_bucket(bucket, flags);
562
563 rc = active_cacheline_insert(entry);
564 if (rc == -ENOMEM) {
565 pr_err_once("cacheline tracking ENOMEM, dma-debug disabled\n");
566 global_disable = true;
567 } else if (rc == -EEXIST && !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
568 err_printk(entry->dev, entry,
569 "cacheline tracking EEXIST, overlapping mappings aren't supported\n");
570 }
571}
572
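/*
 * Grow the free list by one page worth of entries (DMA_DEBUG_DYNAMIC_ENTRIES
 * of them). Called under free_entries_lock when refilling at runtime;
 * dma_debug_init() calls it without the lock during early boot.
 */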
573static int dma_debug_create_entries(gfp_t gfp)
574{
575 struct dma_debug_entry *entry;
576 int i;
577
578 entry = (void *)get_zeroed_page(gfp);
579 if (!entry)
580 return -ENOMEM;
581
582 for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
583 list_add_tail(&entry[i].list, &free_entries);
584
585 num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
586 nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
587
588 return 0;
589}
590
591static struct dma_debug_entry *__dma_entry_alloc(void)
592{
593 struct dma_debug_entry *entry;
594
595 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
596 list_del(&entry->list);
597 memset(entry, 0, sizeof(*entry));
598
599 num_free_entries -= 1;
600 if (num_free_entries < min_free_entries)
601 min_free_entries = num_free_entries;
602
603 return entry;
604}
605
606static void __dma_entry_alloc_check_leak(void)
607{
608 u32 tmp = nr_total_entries % nr_prealloc_entries;
609
610 /* Shout each time we tick over some multiple of the initial pool */
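	/*
	 * Illustrative example: with the default 65536-entry prealloc pool,
	 * growing past 131072 total entries prints "pool grown to 131072
	 * (200%)", since nr_total_entries / nr_prealloc_entries == 2 and the
	 * "%u00%%" format renders that multiple as a percentage.
	 */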
611 if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
612 pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
613 nr_total_entries,
614 (nr_total_entries / nr_prealloc_entries));
615 }
616}
617
618/* struct dma_entry allocator
619 *
620 * The next two functions implement the allocator for
621 * struct dma_debug_entries.
622 */
623static struct dma_debug_entry *dma_entry_alloc(void)
624{
625 struct dma_debug_entry *entry;
626 unsigned long flags;
627
628 spin_lock_irqsave(&free_entries_lock, flags);
629 if (num_free_entries == 0) {
630 if (dma_debug_create_entries(GFP_ATOMIC)) {
631 global_disable = true;
632 spin_unlock_irqrestore(&free_entries_lock, flags);
633 pr_err("debugging out of memory - disabling\n");
634 return NULL;
635 }
636 __dma_entry_alloc_check_leak();
637 }
638
639 entry = __dma_entry_alloc();
640
641 spin_unlock_irqrestore(&free_entries_lock, flags);
642
643#ifdef CONFIG_STACKTRACE
644 entry->stack_len = stack_trace_save(entry->stack_entries,
645 ARRAY_SIZE(entry->stack_entries),
646 1);
647#endif
648 return entry;
649}
650
651static void dma_entry_free(struct dma_debug_entry *entry)
652{
653 unsigned long flags;
654
655 active_cacheline_remove(entry);
656
657 /*
658 * add to beginning of the list - this way the entries are
659 * more likely cache hot when they are reallocated.
660 */
661 spin_lock_irqsave(&free_entries_lock, flags);
662 list_add(&entry->list, &free_entries);
663 num_free_entries += 1;
664 spin_unlock_irqrestore(&free_entries_lock, flags);
665}
666
667/*
668 * DMA-API debugging init code
669 *
670 * The init code does two things:
671 * 1. Initialize core data structures
672 * 2. Preallocate a given number of dma_debug_entry structs
673 */
674
675static ssize_t filter_read(struct file *file, char __user *user_buf,
676 size_t count, loff_t *ppos)
677{
678 char buf[NAME_MAX_LEN + 1];
679 unsigned long flags;
680 int len;
681
682 if (!current_driver_name[0])
683 return 0;
684
685 /*
686 * We can't copy to userspace directly because current_driver_name can
687 * only be read under the driver_name_lock with irqs disabled. So
688 * create a temporary copy first.
689 */
690 read_lock_irqsave(&driver_name_lock, flags);
691 len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
692 read_unlock_irqrestore(&driver_name_lock, flags);
693
694 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
695}
696
697static ssize_t filter_write(struct file *file, const char __user *userbuf,
698 size_t count, loff_t *ppos)
699{
700 char buf[NAME_MAX_LEN];
701 unsigned long flags;
702 size_t len;
703 int i;
704
705 /*
706 * We can't copy from userspace directly. Access to
707 * current_driver_name is protected with a write_lock with irqs
708 * disabled. Since copy_from_user can fault and may sleep we
709 * need to copy to temporary buffer first
710 */
711 len = min(count, (size_t)(NAME_MAX_LEN - 1));
712 if (copy_from_user(buf, userbuf, len))
713 return -EFAULT;
714
715 buf[len] = 0;
716
717 write_lock_irqsave(&driver_name_lock, flags);
718
719 /*
720 * Now handle the string we got from userspace very carefully.
721 * The rules are:
722 * - only use the first token we got
723 * - token delimiter is everything looking like a space
724 * character (' ', '\n', '\t' ...)
725 *
726 */
727 if (!isalnum(buf[0])) {
728 /*
729 * If the first character userspace gave us is not
730 * alphanumerical then assume the filter should be
731 * switched off.
732 */
733 if (current_driver_name[0])
734 pr_info("switching off dma-debug driver filter\n");
735 current_driver_name[0] = 0;
736 current_driver = NULL;
737 goto out_unlock;
738 }
739
740 /*
741 * Now parse out the first token and use it as the name for the
742 * driver to filter for.
743 */
744 for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
745 current_driver_name[i] = buf[i];
746 if (isspace(buf[i]) || buf[i] == ' ' || buf[i] == 0)
747 break;
748 }
749 current_driver_name[i] = 0;
750 current_driver = NULL;
751
752 pr_info("enable driver filter for driver [%s]\n",
753 current_driver_name);
754
755out_unlock:
756 write_unlock_irqrestore(&driver_name_lock, flags);
757
758 return count;
759}
760
761static const struct file_operations filter_fops = {
762 .read = filter_read,
763 .write = filter_write,
764 .llseek = default_llseek,
765};
766
767static int dump_show(struct seq_file *seq, void *v)
768{
769 int idx;
770
771 for (idx = 0; idx < HASH_SIZE; idx++) {
772 struct hash_bucket *bucket = &dma_entry_hash[idx];
773 struct dma_debug_entry *entry;
774 unsigned long flags;
775
776 spin_lock_irqsave(&bucket->lock, flags);
777 list_for_each_entry(entry, &bucket->list, list) {
778 seq_printf(seq,
779 "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
780 dev_name(entry->dev),
781 dev_driver_string(entry->dev),
782 type2name[entry->type], idx,
783 phys_addr(entry), entry->pfn,
784 entry->dev_addr, entry->size,
785 dir2name[entry->direction],
786 maperr2str[entry->map_err_type]);
787 }
788 spin_unlock_irqrestore(&bucket->lock, flags);
789 }
790 return 0;
791}
792DEFINE_SHOW_ATTRIBUTE(dump);
793
794static int __init dma_debug_fs_init(void)
795{
796 struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
797
798 debugfs_create_bool("disabled", 0444, dentry, &global_disable);
799 debugfs_create_u32("error_count", 0444, dentry, &error_count);
800 debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
801 debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
802 debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
803 debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
804 debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
805 debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
806 debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
807
808 return 0;
809}
810core_initcall_sync(dma_debug_fs_init);
811
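/*
 * Count the active mappings of a device and remember one of them so the
 * unbind notifier below can report a representative leaked entry.
 */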
812static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
813{
814 struct dma_debug_entry *entry;
815 unsigned long flags;
816 int count = 0, i;
817
818 for (i = 0; i < HASH_SIZE; ++i) {
819 spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
820 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
821 if (entry->dev == dev) {
822 count += 1;
823 *out_entry = entry;
824 }
825 }
826 spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
827 }
828
829 return count;
830}
831
832static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
833{
834 struct device *dev = data;
835 struct dma_debug_entry *entry;
836 int count;
837
838 if (dma_debug_disabled())
839 return 0;
840
841 switch (action) {
842 case BUS_NOTIFY_UNBOUND_DRIVER:
843 count = device_dma_allocations(dev, &entry);
844 if (count == 0)
845 break;
846 err_printk(dev, entry, "device driver has pending "
847 "DMA allocations while released from device "
848 "[count=%d]\n"
849 "One of leaked entries details: "
850 "[device address=0x%016llx] [size=%llu bytes] "
851 "[mapped with %s] [mapped as %s]\n",
852 count, entry->dev_addr, entry->size,
853 dir2name[entry->direction], type2name[entry->type]);
854 break;
855 default:
856 break;
857 }
858
859 return 0;
860}
861
862void dma_debug_add_bus(struct bus_type *bus)
863{
864 struct notifier_block *nb;
865
866 if (dma_debug_disabled())
867 return;
868
869 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
870 if (nb == NULL) {
871 pr_err("dma_debug_add_bus: out of memory\n");
872 return;
873 }
874
875 nb->notifier_call = dma_debug_device_change;
876
877 bus_register_notifier(bus, nb);
878}
879
880static int dma_debug_init(void)
881{
882 int i, nr_pages;
883
884 /* Do not use dma_debug_initialized here, since we really want to be
885 * called to set dma_debug_initialized
886 */
887 if (global_disable)
888 return 0;
889
890 for (i = 0; i < HASH_SIZE; ++i) {
891 INIT_LIST_HEAD(&dma_entry_hash[i].list);
892 spin_lock_init(&dma_entry_hash[i].lock);
893 }
894
895 nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
896 for (i = 0; i < nr_pages; ++i)
897 dma_debug_create_entries(GFP_KERNEL);
898 if (num_free_entries >= nr_prealloc_entries) {
899 pr_info("preallocated %d debug entries\n", nr_total_entries);
900 } else if (num_free_entries > 0) {
901 pr_warn("%d debug entries requested but only %d allocated\n",
902 nr_prealloc_entries, nr_total_entries);
903 } else {
904 pr_err("debugging out of memory error - disabled\n");
905 global_disable = true;
906
907 return 0;
908 }
909 min_free_entries = num_free_entries;
910
911 dma_debug_initialized = true;
912
913 pr_info("debugging enabled by kernel config\n");
914 return 0;
915}
916core_initcall(dma_debug_init);
917
918static __init int dma_debug_cmdline(char *str)
919{
920 if (!str)
921 return -EINVAL;
922
923 if (strncmp(str, "off", 3) == 0) {
924 pr_info("debugging disabled on kernel command line\n");
925 global_disable = true;
926 }
927
928 return 1;
929}
930
931static __init int dma_debug_entries_cmdline(char *str)
932{
933 if (!str)
934 return -EINVAL;
935 if (!get_option(&str, &nr_prealloc_entries))
936 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
937 return 1;
938}
939
940__setup("dma_debug=", dma_debug_cmdline);
941__setup("dma_debug_entries=", dma_debug_entries_cmdline);
942
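/*
 * Look up the reference in the hash and report any mismatch between how
 * the memory was mapped and how it is now being unmapped (size, type,
 * direction, CPU address), then release the tracking entry.
 */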
943static void check_unmap(struct dma_debug_entry *ref)
944{
945 struct dma_debug_entry *entry;
946 struct hash_bucket *bucket;
947 unsigned long flags;
948
949 bucket = get_hash_bucket(ref, &flags);
950 entry = bucket_find_exact(bucket, ref);
951
952 if (!entry) {
953 /* must drop lock before calling dma_mapping_error */
954 put_hash_bucket(bucket, flags);
955
956 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
957 err_printk(ref->dev, NULL,
958 "device driver tries to free an "
959 "invalid DMA memory address\n");
960 } else {
961 err_printk(ref->dev, NULL,
962 "device driver tries to free DMA "
963 "memory it has not allocated [device "
964 "address=0x%016llx] [size=%llu bytes]\n",
965 ref->dev_addr, ref->size);
966 }
967 return;
968 }
969
970 if (ref->size != entry->size) {
971 err_printk(ref->dev, entry, "device driver frees "
972 "DMA memory with different size "
973 "[device address=0x%016llx] [map size=%llu bytes] "
974 "[unmap size=%llu bytes]\n",
975 ref->dev_addr, entry->size, ref->size);
976 }
977
978 if (ref->type != entry->type) {
979 err_printk(ref->dev, entry, "device driver frees "
980 "DMA memory with wrong function "
981 "[device address=0x%016llx] [size=%llu bytes] "
982 "[mapped as %s] [unmapped as %s]\n",
983 ref->dev_addr, ref->size,
984 type2name[entry->type], type2name[ref->type]);
985 } else if ((entry->type == dma_debug_coherent) &&
986 (phys_addr(ref) != phys_addr(entry))) {
987 err_printk(ref->dev, entry, "device driver frees "
988 "DMA memory with different CPU address "
989 "[device address=0x%016llx] [size=%llu bytes] "
990 "[cpu alloc address=0x%016llx] "
991 "[cpu free address=0x%016llx]",
992 ref->dev_addr, ref->size,
993 phys_addr(entry),
994 phys_addr(ref));
995 }
996
997 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
998 ref->sg_call_ents != entry->sg_call_ents) {
999 err_printk(ref->dev, entry, "device driver frees "
1000 "DMA sg list with different entry count "
1001 "[map count=%d] [unmap count=%d]\n",
1002 entry->sg_call_ents, ref->sg_call_ents);
1003 }
1004
1005 /*
1006 * This may be no bug in reality - but most implementations of the
1007 * DMA API don't handle this properly, so check for it here
1008 */
1009 if (ref->direction != entry->direction) {
1010 err_printk(ref->dev, entry, "device driver frees "
1011 "DMA memory with different direction "
1012 "[device address=0x%016llx] [size=%llu bytes] "
1013 "[mapped with %s] [unmapped with %s]\n",
1014 ref->dev_addr, ref->size,
1015 dir2name[entry->direction],
1016 dir2name[ref->direction]);
1017 }
1018
1019 /*
1020 * Drivers should use dma_mapping_error() to check the returned
1021 * addresses of dma_map_single() and dma_map_page().
1022 * If not, print this warning message. See Documentation/core-api/dma-api.rst.
1023 */
1024 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1025 err_printk(ref->dev, entry,
			   "device driver failed to check map error "
1027 "[device address=0x%016llx] [size=%llu bytes] "
1028 "[mapped as %s]",
1029 ref->dev_addr, ref->size,
1030 type2name[entry->type]);
1031 }
1032
1033 hash_bucket_del(entry);
1034 dma_entry_free(entry);
1035
1036 put_hash_bucket(bucket, flags);
1037}
1038
1039static void check_for_stack(struct device *dev,
1040 struct page *page, size_t offset)
1041{
1042 void *addr;
1043 struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1044
1045 if (!stack_vm_area) {
1046 /* Stack is direct-mapped. */
1047 if (PageHighMem(page))
1048 return;
1049 addr = page_address(page) + offset;
1050 if (object_is_on_stack(addr))
1051 err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1052 } else {
1053 /* Stack is vmalloced. */
1054 int i;
1055
1056 for (i = 0; i < stack_vm_area->nr_pages; i++) {
1057 if (page != stack_vm_area->pages[i])
1058 continue;
1059
1060 addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
1061 err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1062 break;
1063 }
1064 }
1065}
1066
1067static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1068{
1069 if (memory_intersects(_stext, _etext, addr, len) ||
1070 memory_intersects(__start_rodata, __end_rodata, addr, len))
1071 err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1072}
1073
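/*
 * Validate a dma_sync_* call against the tracked mapping: the synced range
 * must lie within an active mapping and the sync direction must be
 * compatible with the direction the memory was originally mapped with.
 */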
1074static void check_sync(struct device *dev,
1075 struct dma_debug_entry *ref,
1076 bool to_cpu)
1077{
1078 struct dma_debug_entry *entry;
1079 struct hash_bucket *bucket;
1080 unsigned long flags;
1081
1082 bucket = get_hash_bucket(ref, &flags);
1083
1084 entry = bucket_find_contain(&bucket, ref, &flags);
1085
1086 if (!entry) {
1087 err_printk(dev, NULL, "device driver tries "
1088 "to sync DMA memory it has not allocated "
1089 "[device address=0x%016llx] [size=%llu bytes]\n",
1090 (unsigned long long)ref->dev_addr, ref->size);
1091 goto out;
1092 }
1093
1094 if (ref->size > entry->size) {
1095 err_printk(dev, entry, "device driver syncs"
1096 " DMA memory outside allocated range "
1097 "[device address=0x%016llx] "
1098 "[allocation size=%llu bytes] "
1099 "[sync offset+size=%llu]\n",
1100 entry->dev_addr, entry->size,
1101 ref->size);
1102 }
1103
1104 if (entry->direction == DMA_BIDIRECTIONAL)
1105 goto out;
1106
1107 if (ref->direction != entry->direction) {
1108 err_printk(dev, entry, "device driver syncs "
1109 "DMA memory with different direction "
1110 "[device address=0x%016llx] [size=%llu bytes] "
1111 "[mapped with %s] [synced with %s]\n",
1112 (unsigned long long)ref->dev_addr, entry->size,
1113 dir2name[entry->direction],
1114 dir2name[ref->direction]);
1115 }
1116
1117 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1118 !(ref->direction == DMA_TO_DEVICE))
1119 err_printk(dev, entry, "device driver syncs "
1120 "device read-only DMA memory for cpu "
1121 "[device address=0x%016llx] [size=%llu bytes] "
1122 "[mapped with %s] [synced with %s]\n",
1123 (unsigned long long)ref->dev_addr, entry->size,
1124 dir2name[entry->direction],
1125 dir2name[ref->direction]);
1126
1127 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1128 !(ref->direction == DMA_FROM_DEVICE))
1129 err_printk(dev, entry, "device driver syncs "
1130 "device write-only DMA memory to device "
1131 "[device address=0x%016llx] [size=%llu bytes] "
1132 "[mapped with %s] [synced with %s]\n",
1133 (unsigned long long)ref->dev_addr, entry->size,
1134 dir2name[entry->direction],
1135 dir2name[ref->direction]);
1136
1137 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1138 ref->sg_call_ents != entry->sg_call_ents) {
1139 err_printk(ref->dev, entry, "device driver syncs "
1140 "DMA sg list with different entry count "
1141 "[map count=%d] [sync count=%d]\n",
1142 entry->sg_call_ents, ref->sg_call_ents);
1143 }
1144
1145out:
1146 put_hash_bucket(bucket, flags);
1147}
1148
1149static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1150{
1151#ifdef CONFIG_DMA_API_DEBUG_SG
1152 unsigned int max_seg = dma_get_max_seg_size(dev);
1153 u64 start, end, boundary = dma_get_seg_boundary(dev);
1154
1155 /*
1156 * Either the driver forgot to set dma_parms appropriately, or
1157 * whoever generated the list forgot to check them.
1158 */
1159 if (sg->length > max_seg)
1160 err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1161 sg->length, max_seg);
1162 /*
1163 * In some cases this could potentially be the DMA API
1164 * implementation's fault, but it would usually imply that
1165 * the scatterlist was built inappropriately to begin with.
1166 */
1167 start = sg_dma_address(sg);
1168 end = start + sg_dma_len(sg) - 1;
1169 if ((start ^ end) & ~boundary)
1170 err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1171 start, end, boundary);
1172#endif
1173}
1174
1175void debug_dma_map_single(struct device *dev, const void *addr,
1176 unsigned long len)
1177{
1178 if (unlikely(dma_debug_disabled()))
1179 return;
1180
1181 if (!virt_addr_valid(addr))
1182 err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
1183 addr, len);
1184
1185 if (is_vmalloc_addr(addr))
1186 err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
1187 addr, len);
1188}
1189EXPORT_SYMBOL(debug_dma_map_single);
1190
1191void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1192 size_t size, int direction, dma_addr_t dma_addr,
1193 unsigned long attrs)
1194{
1195 struct dma_debug_entry *entry;
1196
1197 if (unlikely(dma_debug_disabled()))
1198 return;
1199
1200 if (dma_mapping_error(dev, dma_addr))
1201 return;
1202
1203 entry = dma_entry_alloc();
1204 if (!entry)
1205 return;
1206
1207 entry->dev = dev;
1208 entry->type = dma_debug_single;
1209 entry->pfn = page_to_pfn(page);
1210 entry->offset = offset;
1211 entry->dev_addr = dma_addr;
1212 entry->size = size;
1213 entry->direction = direction;
1214 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1215
1216 check_for_stack(dev, page, offset);
1217
1218 if (!PageHighMem(page)) {
1219 void *addr = page_address(page) + offset;
1220
1221 check_for_illegal_area(dev, addr, size);
1222 }
1223
1224 add_dma_entry(entry, attrs);
1225}
1226
1227void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1228{
1229 struct dma_debug_entry ref;
1230 struct dma_debug_entry *entry;
1231 struct hash_bucket *bucket;
1232 unsigned long flags;
1233
1234 if (unlikely(dma_debug_disabled()))
1235 return;
1236
1237 ref.dev = dev;
1238 ref.dev_addr = dma_addr;
1239 bucket = get_hash_bucket(&ref, &flags);
1240
1241 list_for_each_entry(entry, &bucket->list, list) {
1242 if (!exact_match(&ref, entry))
1243 continue;
1244
1245 /*
1246 * The same physical address can be mapped multiple
1247 * times. Without a hardware IOMMU this results in the
1248 * same device addresses being put into the dma-debug
1249 * hash multiple times too. This can result in false
1250 * positives being reported. Therefore we implement a
1251 * best-fit algorithm here which updates the first entry
1252 * from the hash which fits the reference value and is
1253 * not currently listed as being checked.
1254 */
1255 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1256 entry->map_err_type = MAP_ERR_CHECKED;
1257 break;
1258 }
1259 }
1260
1261 put_hash_bucket(bucket, flags);
1262}
1263EXPORT_SYMBOL(debug_dma_mapping_error);
1264
1265void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
1266 size_t size, int direction)
1267{
1268 struct dma_debug_entry ref = {
1269 .type = dma_debug_single,
1270 .dev = dev,
1271 .dev_addr = addr,
1272 .size = size,
1273 .direction = direction,
1274 };
1275
1276 if (unlikely(dma_debug_disabled()))
1277 return;
1278 check_unmap(&ref);
1279}
1280
1281void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1282 int nents, int mapped_ents, int direction,
1283 unsigned long attrs)
1284{
1285 struct dma_debug_entry *entry;
1286 struct scatterlist *s;
1287 int i;
1288
1289 if (unlikely(dma_debug_disabled()))
1290 return;
1291
1292 for_each_sg(sg, s, nents, i) {
1293 check_for_stack(dev, sg_page(s), s->offset);
1294 if (!PageHighMem(sg_page(s)))
1295 check_for_illegal_area(dev, sg_virt(s), s->length);
1296 }
1297
1298 for_each_sg(sg, s, mapped_ents, i) {
1299 entry = dma_entry_alloc();
1300 if (!entry)
1301 return;
1302
1303 entry->type = dma_debug_sg;
1304 entry->dev = dev;
1305 entry->pfn = page_to_pfn(sg_page(s));
1306 entry->offset = s->offset;
1307 entry->size = sg_dma_len(s);
1308 entry->dev_addr = sg_dma_address(s);
1309 entry->direction = direction;
1310 entry->sg_call_ents = nents;
1311 entry->sg_mapped_ents = mapped_ents;
1312
1313 check_sg_segment(dev, s);
1314
1315 add_dma_entry(entry, attrs);
1316 }
1317}
1318
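/*
 * Look up the tracked entry for a scatterlist segment and return how many
 * segments were actually mapped, so unmap/sync checks know when to stop
 * iterating.
 */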
1319static int get_nr_mapped_entries(struct device *dev,
1320 struct dma_debug_entry *ref)
1321{
1322 struct dma_debug_entry *entry;
1323 struct hash_bucket *bucket;
1324 unsigned long flags;
1325 int mapped_ents;
1326
1327 bucket = get_hash_bucket(ref, &flags);
1328 entry = bucket_find_exact(bucket, ref);
1329 mapped_ents = 0;
1330
1331 if (entry)
1332 mapped_ents = entry->sg_mapped_ents;
1333 put_hash_bucket(bucket, flags);
1334
1335 return mapped_ents;
1336}
1337
1338void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1339 int nelems, int dir)
1340{
1341 struct scatterlist *s;
1342 int mapped_ents = 0, i;
1343
1344 if (unlikely(dma_debug_disabled()))
1345 return;
1346
1347 for_each_sg(sglist, s, nelems, i) {
1348
1349 struct dma_debug_entry ref = {
1350 .type = dma_debug_sg,
1351 .dev = dev,
1352 .pfn = page_to_pfn(sg_page(s)),
1353 .offset = s->offset,
1354 .dev_addr = sg_dma_address(s),
1355 .size = sg_dma_len(s),
1356 .direction = dir,
1357 .sg_call_ents = nelems,
1358 };
1359
1360 if (mapped_ents && i >= mapped_ents)
1361 break;
1362
1363 if (!i)
1364 mapped_ents = get_nr_mapped_entries(dev, &ref);
1365
1366 check_unmap(&ref);
1367 }
1368}
1369
1370void debug_dma_alloc_coherent(struct device *dev, size_t size,
1371 dma_addr_t dma_addr, void *virt,
1372 unsigned long attrs)
1373{
1374 struct dma_debug_entry *entry;
1375
1376 if (unlikely(dma_debug_disabled()))
1377 return;
1378
1379 if (unlikely(virt == NULL))
1380 return;
1381
1382 /* handle vmalloc and linear addresses */
1383 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1384 return;
1385
1386 entry = dma_entry_alloc();
1387 if (!entry)
1388 return;
1389
1390 entry->type = dma_debug_coherent;
1391 entry->dev = dev;
1392 entry->offset = offset_in_page(virt);
1393 entry->size = size;
1394 entry->dev_addr = dma_addr;
1395 entry->direction = DMA_BIDIRECTIONAL;
1396
1397 if (is_vmalloc_addr(virt))
1398 entry->pfn = vmalloc_to_pfn(virt);
1399 else
1400 entry->pfn = page_to_pfn(virt_to_page(virt));
1401
1402 add_dma_entry(entry, attrs);
1403}
1404
1405void debug_dma_free_coherent(struct device *dev, size_t size,
1406 void *virt, dma_addr_t addr)
1407{
1408 struct dma_debug_entry ref = {
1409 .type = dma_debug_coherent,
1410 .dev = dev,
1411 .offset = offset_in_page(virt),
1412 .dev_addr = addr,
1413 .size = size,
1414 .direction = DMA_BIDIRECTIONAL,
1415 };
1416
1417 /* handle vmalloc and linear addresses */
1418 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1419 return;
1420
1421 if (is_vmalloc_addr(virt))
1422 ref.pfn = vmalloc_to_pfn(virt);
1423 else
1424 ref.pfn = page_to_pfn(virt_to_page(virt));
1425
1426 if (unlikely(dma_debug_disabled()))
1427 return;
1428
1429 check_unmap(&ref);
1430}
1431
1432void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
1433 int direction, dma_addr_t dma_addr,
1434 unsigned long attrs)
1435{
1436 struct dma_debug_entry *entry;
1437
1438 if (unlikely(dma_debug_disabled()))
1439 return;
1440
1441 entry = dma_entry_alloc();
1442 if (!entry)
1443 return;
1444
1445 entry->type = dma_debug_resource;
1446 entry->dev = dev;
1447 entry->pfn = PHYS_PFN(addr);
1448 entry->offset = offset_in_page(addr);
1449 entry->size = size;
1450 entry->dev_addr = dma_addr;
1451 entry->direction = direction;
1452 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1453
1454 add_dma_entry(entry, attrs);
1455}
1456
1457void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1458 size_t size, int direction)
1459{
1460 struct dma_debug_entry ref = {
1461 .type = dma_debug_resource,
1462 .dev = dev,
1463 .dev_addr = dma_addr,
1464 .size = size,
1465 .direction = direction,
1466 };
1467
1468 if (unlikely(dma_debug_disabled()))
1469 return;
1470
1471 check_unmap(&ref);
1472}
1473
1474void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1475 size_t size, int direction)
1476{
1477 struct dma_debug_entry ref;
1478
1479 if (unlikely(dma_debug_disabled()))
1480 return;
1481
1482 ref.type = dma_debug_single;
1483 ref.dev = dev;
1484 ref.dev_addr = dma_handle;
1485 ref.size = size;
1486 ref.direction = direction;
1487 ref.sg_call_ents = 0;
1488
1489 check_sync(dev, &ref, true);
1490}
1491
1492void debug_dma_sync_single_for_device(struct device *dev,
1493 dma_addr_t dma_handle, size_t size,
1494 int direction)
1495{
1496 struct dma_debug_entry ref;
1497
1498 if (unlikely(dma_debug_disabled()))
1499 return;
1500
1501 ref.type = dma_debug_single;
1502 ref.dev = dev;
1503 ref.dev_addr = dma_handle;
1504 ref.size = size;
1505 ref.direction = direction;
1506 ref.sg_call_ents = 0;
1507
1508 check_sync(dev, &ref, false);
1509}
1510
1511void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1512 int nelems, int direction)
1513{
1514 struct scatterlist *s;
1515 int mapped_ents = 0, i;
1516
1517 if (unlikely(dma_debug_disabled()))
1518 return;
1519
1520 for_each_sg(sg, s, nelems, i) {
1521
1522 struct dma_debug_entry ref = {
1523 .type = dma_debug_sg,
1524 .dev = dev,
1525 .pfn = page_to_pfn(sg_page(s)),
1526 .offset = s->offset,
1527 .dev_addr = sg_dma_address(s),
1528 .size = sg_dma_len(s),
1529 .direction = direction,
1530 .sg_call_ents = nelems,
1531 };
1532
1533 if (!i)
1534 mapped_ents = get_nr_mapped_entries(dev, &ref);
1535
1536 if (i >= mapped_ents)
1537 break;
1538
1539 check_sync(dev, &ref, true);
1540 }
1541}
1542
1543void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1544 int nelems, int direction)
1545{
1546 struct scatterlist *s;
1547 int mapped_ents = 0, i;
1548
1549 if (unlikely(dma_debug_disabled()))
1550 return;
1551
1552 for_each_sg(sg, s, nelems, i) {
1553
1554 struct dma_debug_entry ref = {
1555 .type = dma_debug_sg,
1556 .dev = dev,
1557 .pfn = page_to_pfn(sg_page(s)),
1558 .offset = s->offset,
1559 .dev_addr = sg_dma_address(s),
1560 .size = sg_dma_len(s),
1561 .direction = direction,
1562 .sg_call_ents = nelems,
1563 };
1564 if (!i)
1565 mapped_ents = get_nr_mapped_entries(dev, &ref);
1566
1567 if (i >= mapped_ents)
1568 break;
1569
1570 check_sync(dev, &ref, false);
1571 }
1572}
1573
1574static int __init dma_debug_driver_setup(char *str)
1575{
1576 int i;
1577
1578 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1579 current_driver_name[i] = *str;
1580 if (*str == 0)
1581 break;
1582 }
1583
1584 if (current_driver_name[0])
1585 pr_info("enable driver filter for driver [%s]\n",
1586 current_driver_name);
1587
1588
1589 return 1;
1590}
1591__setup("dma_debug_driver=", dma_debug_driver_setup);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Copyright (C) 2008 Advanced Micro Devices, Inc.
4 *
5 * Author: Joerg Roedel <joerg.roedel@amd.com>
6 */
7
8#define pr_fmt(fmt) "DMA-API: " fmt
9
10#include <linux/sched/task_stack.h>
11#include <linux/scatterlist.h>
12#include <linux/dma-mapping.h>
13#include <linux/sched/task.h>
14#include <linux/stacktrace.h>
15#include <linux/dma-debug.h>
16#include <linux/spinlock.h>
17#include <linux/vmalloc.h>
18#include <linux/debugfs.h>
19#include <linux/uaccess.h>
20#include <linux/export.h>
21#include <linux/device.h>
22#include <linux/types.h>
23#include <linux/sched.h>
24#include <linux/ctype.h>
25#include <linux/list.h>
26#include <linux/slab.h>
27
28#include <asm/sections.h>
29
30#define HASH_SIZE 1024ULL
31#define HASH_FN_SHIFT 13
32#define HASH_FN_MASK (HASH_SIZE - 1)
33
34#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
35/* If the pool runs out, add this many new entries at once */
36#define DMA_DEBUG_DYNAMIC_ENTRIES (PAGE_SIZE / sizeof(struct dma_debug_entry))
37
38enum {
39 dma_debug_single,
40 dma_debug_sg,
41 dma_debug_coherent,
42 dma_debug_resource,
43};
44
45enum map_err_types {
46 MAP_ERR_CHECK_NOT_APPLICABLE,
47 MAP_ERR_NOT_CHECKED,
48 MAP_ERR_CHECKED,
49};
50
51#define DMA_DEBUG_STACKTRACE_ENTRIES 5
52
53/**
54 * struct dma_debug_entry - track a dma_map* or dma_alloc_coherent mapping
55 * @list: node on pre-allocated free_entries list
56 * @dev: 'dev' argument to dma_map_{page|single|sg} or dma_alloc_coherent
57 * @type: single, page, sg, coherent
58 * @pfn: page frame of the start address
59 * @offset: offset of mapping relative to pfn
60 * @size: length of the mapping
61 * @direction: enum dma_data_direction
62 * @sg_call_ents: 'nents' from dma_map_sg
63 * @sg_mapped_ents: 'mapped_ents' from dma_map_sg
64 * @map_err_type: track whether dma_mapping_error() was checked
65 * @stacktrace: support backtraces when a violation is detected
66 */
67struct dma_debug_entry {
68 struct list_head list;
69 struct device *dev;
70 int type;
71 unsigned long pfn;
72 size_t offset;
73 u64 dev_addr;
74 u64 size;
75 int direction;
76 int sg_call_ents;
77 int sg_mapped_ents;
78 enum map_err_types map_err_type;
79#ifdef CONFIG_STACKTRACE
80 unsigned int stack_len;
81 unsigned long stack_entries[DMA_DEBUG_STACKTRACE_ENTRIES];
82#endif
83};
84
85typedef bool (*match_fn)(struct dma_debug_entry *, struct dma_debug_entry *);
86
87struct hash_bucket {
88 struct list_head list;
89 spinlock_t lock;
90} ____cacheline_aligned_in_smp;
91
92/* Hash list to save the allocated dma addresses */
93static struct hash_bucket dma_entry_hash[HASH_SIZE];
94/* List of pre-allocated dma_debug_entry's */
95static LIST_HEAD(free_entries);
96/* Lock for the list above */
97static DEFINE_SPINLOCK(free_entries_lock);
98
99/* Global disable flag - will be set in case of an error */
100static bool global_disable __read_mostly;
101
102/* Early initialization disable flag, set at the end of dma_debug_init */
103static bool dma_debug_initialized __read_mostly;
104
105static inline bool dma_debug_disabled(void)
106{
107 return global_disable || !dma_debug_initialized;
108}
109
110/* Global error count */
111static u32 error_count;
112
113/* Global error show enable*/
114static u32 show_all_errors __read_mostly;
115/* Number of errors to show */
116static u32 show_num_errors = 1;
117
118static u32 num_free_entries;
119static u32 min_free_entries;
120static u32 nr_total_entries;
121
122/* number of preallocated entries requested by kernel cmdline */
123static u32 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
124
125/* per-driver filter related state */
126
127#define NAME_MAX_LEN 64
128
129static char current_driver_name[NAME_MAX_LEN] __read_mostly;
130static struct device_driver *current_driver __read_mostly;
131
132static DEFINE_RWLOCK(driver_name_lock);
133
134static const char *const maperr2str[] = {
135 [MAP_ERR_CHECK_NOT_APPLICABLE] = "dma map error check not applicable",
136 [MAP_ERR_NOT_CHECKED] = "dma map error not checked",
137 [MAP_ERR_CHECKED] = "dma map error checked",
138};
139
140static const char *type2name[5] = { "single", "page",
141 "scather-gather", "coherent",
142 "resource" };
143
144static const char *dir2name[4] = { "DMA_BIDIRECTIONAL", "DMA_TO_DEVICE",
145 "DMA_FROM_DEVICE", "DMA_NONE" };
146
147/*
148 * The access to some variables in this macro is racy. We can't use atomic_t
149 * here because all these variables are exported to debugfs. Some of them even
150 * writeable. This is also the reason why a lock won't help much. But anyway,
151 * the races are no big deal. Here is why:
152 *
153 * error_count: the addition is racy, but the worst thing that can happen is
154 * that we don't count some errors
155 * show_num_errors: the subtraction is racy. Also no big deal because in
156 * worst case this will result in one warning more in the
157 * system log than the user configured. This variable is
158 * writeable via debugfs.
159 */
160static inline void dump_entry_trace(struct dma_debug_entry *entry)
161{
162#ifdef CONFIG_STACKTRACE
163 if (entry) {
164 pr_warning("Mapped at:\n");
165 stack_trace_print(entry->stack_entries, entry->stack_len, 0);
166 }
167#endif
168}
169
170static bool driver_filter(struct device *dev)
171{
172 struct device_driver *drv;
173 unsigned long flags;
174 bool ret;
175
176 /* driver filter off */
177 if (likely(!current_driver_name[0]))
178 return true;
179
180 /* driver filter on and initialized */
181 if (current_driver && dev && dev->driver == current_driver)
182 return true;
183
184 /* driver filter on, but we can't filter on a NULL device... */
185 if (!dev)
186 return false;
187
188 if (current_driver || !current_driver_name[0])
189 return false;
190
191 /* driver filter on but not yet initialized */
192 drv = dev->driver;
193 if (!drv)
194 return false;
195
196 /* lock to protect against change of current_driver_name */
197 read_lock_irqsave(&driver_name_lock, flags);
198
199 ret = false;
200 if (drv->name &&
201 strncmp(current_driver_name, drv->name, NAME_MAX_LEN - 1) == 0) {
202 current_driver = drv;
203 ret = true;
204 }
205
206 read_unlock_irqrestore(&driver_name_lock, flags);
207
208 return ret;
209}
210
211#define err_printk(dev, entry, format, arg...) do { \
212 error_count += 1; \
213 if (driver_filter(dev) && \
214 (show_all_errors || show_num_errors > 0)) { \
215 WARN(1, pr_fmt("%s %s: ") format, \
216 dev ? dev_driver_string(dev) : "NULL", \
217 dev ? dev_name(dev) : "NULL", ## arg); \
218 dump_entry_trace(entry); \
219 } \
220 if (!show_all_errors && show_num_errors > 0) \
221 show_num_errors -= 1; \
222 } while (0);
223
224/*
225 * Hash related functions
226 *
227 * Every DMA-API request is saved into a struct dma_debug_entry. To
228 * have quick access to these structs they are stored into a hash.
229 */
230static int hash_fn(struct dma_debug_entry *entry)
231{
232 /*
233 * Hash function is based on the dma address.
234 * We use bits 20-27 here as the index into the hash
235 */
236 return (entry->dev_addr >> HASH_FN_SHIFT) & HASH_FN_MASK;
237}
238
239/*
240 * Request exclusive access to a hash bucket for a given dma_debug_entry.
241 */
242static struct hash_bucket *get_hash_bucket(struct dma_debug_entry *entry,
243 unsigned long *flags)
244 __acquires(&dma_entry_hash[idx].lock)
245{
246 int idx = hash_fn(entry);
247 unsigned long __flags;
248
249 spin_lock_irqsave(&dma_entry_hash[idx].lock, __flags);
250 *flags = __flags;
251 return &dma_entry_hash[idx];
252}
253
254/*
255 * Give up exclusive access to the hash bucket
256 */
257static void put_hash_bucket(struct hash_bucket *bucket,
258 unsigned long *flags)
259 __releases(&bucket->lock)
260{
261 unsigned long __flags = *flags;
262
263 spin_unlock_irqrestore(&bucket->lock, __flags);
264}
265
266static bool exact_match(struct dma_debug_entry *a, struct dma_debug_entry *b)
267{
268 return ((a->dev_addr == b->dev_addr) &&
269 (a->dev == b->dev)) ? true : false;
270}
271
272static bool containing_match(struct dma_debug_entry *a,
273 struct dma_debug_entry *b)
274{
275 if (a->dev != b->dev)
276 return false;
277
278 if ((b->dev_addr <= a->dev_addr) &&
279 ((b->dev_addr + b->size) >= (a->dev_addr + a->size)))
280 return true;
281
282 return false;
283}
284
285/*
286 * Search a given entry in the hash bucket list
287 */
288static struct dma_debug_entry *__hash_bucket_find(struct hash_bucket *bucket,
289 struct dma_debug_entry *ref,
290 match_fn match)
291{
292 struct dma_debug_entry *entry, *ret = NULL;
293 int matches = 0, match_lvl, last_lvl = -1;
294
295 list_for_each_entry(entry, &bucket->list, list) {
296 if (!match(ref, entry))
297 continue;
298
299 /*
300 * Some drivers map the same physical address multiple
301 * times. Without a hardware IOMMU this results in the
302 * same device addresses being put into the dma-debug
303 * hash multiple times too. This can result in false
304 * positives being reported. Therefore we implement a
305 * best-fit algorithm here which returns the entry from
306 * the hash which fits best to the reference value
307 * instead of the first-fit.
308 */
309 matches += 1;
310 match_lvl = 0;
311 entry->size == ref->size ? ++match_lvl : 0;
312 entry->type == ref->type ? ++match_lvl : 0;
313 entry->direction == ref->direction ? ++match_lvl : 0;
314 entry->sg_call_ents == ref->sg_call_ents ? ++match_lvl : 0;
315
316 if (match_lvl == 4) {
317 /* perfect-fit - return the result */
318 return entry;
319 } else if (match_lvl > last_lvl) {
320 /*
321 * We found an entry that fits better then the
322 * previous one or it is the 1st match.
323 */
324 last_lvl = match_lvl;
325 ret = entry;
326 }
327 }
328
329 /*
330 * If we have multiple matches but no perfect-fit, just return
331 * NULL.
332 */
333 ret = (matches == 1) ? ret : NULL;
334
335 return ret;
336}
337
338static struct dma_debug_entry *bucket_find_exact(struct hash_bucket *bucket,
339 struct dma_debug_entry *ref)
340{
341 return __hash_bucket_find(bucket, ref, exact_match);
342}
343
344static struct dma_debug_entry *bucket_find_contain(struct hash_bucket **bucket,
345 struct dma_debug_entry *ref,
346 unsigned long *flags)
347{
348
349 unsigned int max_range = dma_get_max_seg_size(ref->dev);
350 struct dma_debug_entry *entry, index = *ref;
351 unsigned int range = 0;
352
353 while (range <= max_range) {
354 entry = __hash_bucket_find(*bucket, ref, containing_match);
355
356 if (entry)
357 return entry;
358
359 /*
360 * Nothing found, go back a hash bucket
361 */
362 put_hash_bucket(*bucket, flags);
363 range += (1 << HASH_FN_SHIFT);
364 index.dev_addr -= (1 << HASH_FN_SHIFT);
365 *bucket = get_hash_bucket(&index, flags);
366 }
367
368 return NULL;
369}
370
371/*
372 * Add an entry to a hash bucket
373 */
374static void hash_bucket_add(struct hash_bucket *bucket,
375 struct dma_debug_entry *entry)
376{
377 list_add_tail(&entry->list, &bucket->list);
378}
379
380/*
381 * Remove entry from a hash bucket list
382 */
383static void hash_bucket_del(struct dma_debug_entry *entry)
384{
385 list_del(&entry->list);
386}
387
388static unsigned long long phys_addr(struct dma_debug_entry *entry)
389{
390 if (entry->type == dma_debug_resource)
391 return __pfn_to_phys(entry->pfn) + entry->offset;
392
393 return page_to_phys(pfn_to_page(entry->pfn)) + entry->offset;
394}
395
396/*
397 * Dump mapping entries for debugging purposes
398 */
399void debug_dma_dump_mappings(struct device *dev)
400{
401 int idx;
402
403 for (idx = 0; idx < HASH_SIZE; idx++) {
404 struct hash_bucket *bucket = &dma_entry_hash[idx];
405 struct dma_debug_entry *entry;
406 unsigned long flags;
407
408 spin_lock_irqsave(&bucket->lock, flags);
409
410 list_for_each_entry(entry, &bucket->list, list) {
411 if (!dev || dev == entry->dev) {
412 dev_info(entry->dev,
413 "%s idx %d P=%Lx N=%lx D=%Lx L=%Lx %s %s\n",
414 type2name[entry->type], idx,
415 phys_addr(entry), entry->pfn,
416 entry->dev_addr, entry->size,
417 dir2name[entry->direction],
418 maperr2str[entry->map_err_type]);
419 }
420 }
421
422 spin_unlock_irqrestore(&bucket->lock, flags);
423 }
424}
425
426/*
427 * For each mapping (initial cacheline in the case of
428 * dma_alloc_coherent/dma_map_page, initial cacheline in each page of a
429 * scatterlist, or the cacheline specified in dma_map_single) insert
430 * into this tree using the cacheline as the key. At
431 * dma_unmap_{single|sg|page} or dma_free_coherent delete the entry. If
432 * the entry already exists at insertion time add a tag as a reference
433 * count for the overlapping mappings. For now, the overlap tracking
434 * just ensures that 'unmaps' balance 'maps' before marking the
435 * cacheline idle, but we should also be flagging overlaps as an API
436 * violation.
437 *
438 * Memory usage is mostly constrained by the maximum number of available
439 * dma-debug entries in that we need a free dma_debug_entry before
440 * inserting into the tree. In the case of dma_map_page and
441 * dma_alloc_coherent there is only one dma_debug_entry and one
442 * dma_active_cacheline entry to track per event. dma_map_sg(), on the
443 * other hand, consumes a single dma_debug_entry, but inserts 'nents'
444 * entries into the tree.
445 *
446 * At any time debug_dma_assert_idle() can be called to trigger a
447 * warning if any cachelines in the given page are in the active set.
448 */
449static RADIX_TREE(dma_active_cacheline, GFP_NOWAIT);
450static DEFINE_SPINLOCK(radix_lock);
451#define ACTIVE_CACHELINE_MAX_OVERLAP ((1 << RADIX_TREE_MAX_TAGS) - 1)
452#define CACHELINE_PER_PAGE_SHIFT (PAGE_SHIFT - L1_CACHE_SHIFT)
453#define CACHELINES_PER_PAGE (1 << CACHELINE_PER_PAGE_SHIFT)
454
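/*
 * Convert an entry's pfn/offset pair into a global cacheline number,
 * which is used as the key into the dma_active_cacheline radix tree.
 */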
455static phys_addr_t to_cacheline_number(struct dma_debug_entry *entry)
456{
457 return (entry->pfn << CACHELINE_PER_PAGE_SHIFT) +
458 (entry->offset >> L1_CACHE_SHIFT);
459}
460
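/*
 * The radix tree provides RADIX_TREE_MAX_TAGS tag bits per slot; the
 * overlap counter for a cacheline is encoded across those bits.  The
 * two helpers below reassemble and update that counter.
 */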
461static int active_cacheline_read_overlap(phys_addr_t cln)
462{
463 int overlap = 0, i;
464
465 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
466 if (radix_tree_tag_get(&dma_active_cacheline, cln, i))
467 overlap |= 1 << i;
468 return overlap;
469}
470
471static int active_cacheline_set_overlap(phys_addr_t cln, int overlap)
472{
473 int i;
474
475 if (overlap > ACTIVE_CACHELINE_MAX_OVERLAP || overlap < 0)
476 return overlap;
477
478 for (i = RADIX_TREE_MAX_TAGS - 1; i >= 0; i--)
479 if (overlap & 1 << i)
480 radix_tree_tag_set(&dma_active_cacheline, cln, i);
481 else
482 radix_tree_tag_clear(&dma_active_cacheline, cln, i);
483
484 return overlap;
485}
486
487static void active_cacheline_inc_overlap(phys_addr_t cln)
488{
489 int overlap = active_cacheline_read_overlap(cln);
490
491 overlap = active_cacheline_set_overlap(cln, ++overlap);
492
493 /* If we overflowed the overlap counter then we're potentially
494 * leaking dma-mappings. Otherwise, if maps and unmaps are
495 * balanced then this overflow may cause false negatives in
496 * debug_dma_assert_idle() as the cacheline may be marked idle
497 * prematurely.
498 */
499 WARN_ONCE(overlap > ACTIVE_CACHELINE_MAX_OVERLAP,
500 pr_fmt("exceeded %d overlapping mappings of cacheline %pa\n"),
501 ACTIVE_CACHELINE_MAX_OVERLAP, &cln);
502}
503
504static int active_cacheline_dec_overlap(phys_addr_t cln)
505{
506 int overlap = active_cacheline_read_overlap(cln);
507
508 return active_cacheline_set_overlap(cln, --overlap);
509}
510
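/*
 * Track the cacheline as actively mapped.  -EEXIST from the radix tree
 * means the cacheline is already active, so bump its overlap count
 * instead of inserting a second time.
 */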
511static int active_cacheline_insert(struct dma_debug_entry *entry)
512{
513 phys_addr_t cln = to_cacheline_number(entry);
514 unsigned long flags;
515 int rc;
516
517 /* If the device is not writing memory then we don't have any
518 * concerns about the cpu consuming stale data. This mitigates
519 * legitimate usages of overlapping mappings.
520 */
521 if (entry->direction == DMA_TO_DEVICE)
522 return 0;
523
524 spin_lock_irqsave(&radix_lock, flags);
525 rc = radix_tree_insert(&dma_active_cacheline, cln, entry);
526 if (rc == -EEXIST)
527 active_cacheline_inc_overlap(cln);
528 spin_unlock_irqrestore(&radix_lock, flags);
529
530 return rc;
531}
532
533static void active_cacheline_remove(struct dma_debug_entry *entry)
534{
535 phys_addr_t cln = to_cacheline_number(entry);
536 unsigned long flags;
537
538 /* ...mirror the insert case */
539 if (entry->direction == DMA_TO_DEVICE)
540 return;
541
542 spin_lock_irqsave(&radix_lock, flags);
543 /* since we are counting overlaps the final put of the
544 * cacheline will occur when the overlap count is 0.
545 * active_cacheline_dec_overlap() returns -1 in that case
546 */
547 if (active_cacheline_dec_overlap(cln) < 0)
548 radix_tree_delete(&dma_active_cacheline, cln);
549 spin_unlock_irqrestore(&radix_lock, flags);
550}
551
552/**
553 * debug_dma_assert_idle() - assert that a page is not undergoing dma
554 * @page: page to lookup in the dma_active_cacheline tree
555 *
556 * Place a call to this routine in cases where the cpu touching the page
557 * before the dma completes (page is dma_unmapped) will lead to data
558 * corruption.
559 */
560void debug_dma_assert_idle(struct page *page)
561{
562 static struct dma_debug_entry *ents[CACHELINES_PER_PAGE];
563 struct dma_debug_entry *entry = NULL;
564 void **results = (void **) &ents;
565 unsigned int nents, i;
566 unsigned long flags;
567 phys_addr_t cln;
568
569 if (dma_debug_disabled())
570 return;
571
572 if (!page)
573 return;
574
575 cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;
576 spin_lock_irqsave(&radix_lock, flags);
577 nents = radix_tree_gang_lookup(&dma_active_cacheline, results, cln,
578 CACHELINES_PER_PAGE);
579 for (i = 0; i < nents; i++) {
580 phys_addr_t ent_cln = to_cacheline_number(ents[i]);
581
582 if (ent_cln == cln) {
583 entry = ents[i];
584 break;
585 } else if (ent_cln >= cln + CACHELINES_PER_PAGE)
586 break;
587 }
588 spin_unlock_irqrestore(&radix_lock, flags);
589
590 if (!entry)
591 return;
592
593 cln = to_cacheline_number(entry);
594 err_printk(entry->dev, entry,
595 "cpu touching an active dma mapped cacheline [cln=%pa]\n",
596 &cln);
597}
598
599/*
600 * Wrapper function for adding an entry to the hash.
601 * This function takes care of locking itself.
602 */
603static void add_dma_entry(struct dma_debug_entry *entry)
604{
605 struct hash_bucket *bucket;
606 unsigned long flags;
607 int rc;
608
609 bucket = get_hash_bucket(entry, &flags);
610 hash_bucket_add(bucket, entry);
611 put_hash_bucket(bucket, &flags);
612
613 rc = active_cacheline_insert(entry);
614 if (rc == -ENOMEM) {
615 pr_err("cacheline tracking ENOMEM, dma-debug disabled\n");
616 global_disable = true;
617 }
618
619 /* TODO: report -EEXIST errors here as overlapping mappings are
620 * not supported by the DMA API
621 */
622}
623
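/*
 * Grow the entry pool by one page worth of dma_debug_entry structs
 * (DMA_DEBUG_DYNAMIC_ENTRIES of them) and put them on the free list.
 */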
624static int dma_debug_create_entries(gfp_t gfp)
625{
626 struct dma_debug_entry *entry;
627 int i;
628
629 entry = (void *)get_zeroed_page(gfp);
630 if (!entry)
631 return -ENOMEM;
632
633 for (i = 0; i < DMA_DEBUG_DYNAMIC_ENTRIES; i++)
634 list_add_tail(&entry[i].list, &free_entries);
635
636 num_free_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
637 nr_total_entries += DMA_DEBUG_DYNAMIC_ENTRIES;
638
639 return 0;
640}
641
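/*
 * Take one entry off the free list.  The caller must hold
 * free_entries_lock and must ensure the list is not empty.
 */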
642static struct dma_debug_entry *__dma_entry_alloc(void)
643{
644 struct dma_debug_entry *entry;
645
646 entry = list_entry(free_entries.next, struct dma_debug_entry, list);
647 list_del(&entry->list);
648 memset(entry, 0, sizeof(*entry));
649
650 num_free_entries -= 1;
651 if (num_free_entries < min_free_entries)
652 min_free_entries = num_free_entries;
653
654 return entry;
655}
656
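/*
 * Report every time the dynamically grown pool passes another multiple
 * of the initial preallocation, since unbounded growth of the pool
 * usually points at leaked mappings.
 */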
657static void __dma_entry_alloc_check_leak(void)
658{
659 u32 tmp = nr_total_entries % nr_prealloc_entries;
660
661 /* Shout each time we tick over some multiple of the initial pool */
662 if (tmp < DMA_DEBUG_DYNAMIC_ENTRIES) {
663 pr_info("dma_debug_entry pool grown to %u (%u00%%)\n",
664 nr_total_entries,
665 (nr_total_entries / nr_prealloc_entries));
666 }
667}
668
669/* struct dma_entry allocator
670 *
671 * The next two functions implement the allocator for
672 * struct dma_debug_entries.
673 */
674static struct dma_debug_entry *dma_entry_alloc(void)
675{
676 struct dma_debug_entry *entry;
677 unsigned long flags;
678
679 spin_lock_irqsave(&free_entries_lock, flags);
680 if (num_free_entries == 0) {
681 if (dma_debug_create_entries(GFP_ATOMIC)) {
682 global_disable = true;
683 spin_unlock_irqrestore(&free_entries_lock, flags);
684 pr_err("debugging out of memory - disabling\n");
685 return NULL;
686 }
687 __dma_entry_alloc_check_leak();
688 }
689
690 entry = __dma_entry_alloc();
691
692 spin_unlock_irqrestore(&free_entries_lock, flags);
693
694#ifdef CONFIG_STACKTRACE
695 entry->stack_len = stack_trace_save(entry->stack_entries,
696 ARRAY_SIZE(entry->stack_entries),
697 1);
698#endif
699 return entry;
700}
701
702static void dma_entry_free(struct dma_debug_entry *entry)
703{
704 unsigned long flags;
705
706 active_cacheline_remove(entry);
707
708 /*
709 * add to beginning of the list - this way the entries are
710 * more likely cache hot when they are reallocated.
711 */
712 spin_lock_irqsave(&free_entries_lock, flags);
713 list_add(&entry->list, &free_entries);
714 num_free_entries += 1;
715 spin_unlock_irqrestore(&free_entries_lock, flags);
716}
717
718/*
719 * DMA-API debugging init code
720 *
721 * The init code does two things:
722 * 1. Initialize core data structures
723 * 2. Preallocate a given number of dma_debug_entry structs
724 */
725
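/*
 * debugfs "driver_filter" read side: report the currently active driver
 * name filter, if any.
 */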
726static ssize_t filter_read(struct file *file, char __user *user_buf,
727 size_t count, loff_t *ppos)
728{
729 char buf[NAME_MAX_LEN + 1];
730 unsigned long flags;
731 int len;
732
733 if (!current_driver_name[0])
734 return 0;
735
736 /*
737 * We can't copy to userspace directly because current_driver_name can
738 * only be read under the driver_name_lock with irqs disabled. So
739 * create a temporary copy first.
740 */
741 read_lock_irqsave(&driver_name_lock, flags);
742 len = scnprintf(buf, NAME_MAX_LEN + 1, "%s\n", current_driver_name);
743 read_unlock_irqrestore(&driver_name_lock, flags);
744
745 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
746}
747
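/*
 * debugfs "driver_filter" write side: take the first whitespace-delimited
 * token as the new driver-name filter.  A leading non-alphanumeric
 * character switches the filter off.
 */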
748static ssize_t filter_write(struct file *file, const char __user *userbuf,
749 size_t count, loff_t *ppos)
750{
751 char buf[NAME_MAX_LEN];
752 unsigned long flags;
753 size_t len;
754 int i;
755
756 /*
757 * We can't copy from userspace directly. Access to
758 * current_driver_name is protected with a write_lock with irqs
759 * disabled. Since copy_from_user can fault and may sleep we
760	 * need to copy it to a temporary buffer first.
761 */
762 len = min(count, (size_t)(NAME_MAX_LEN - 1));
763 if (copy_from_user(buf, userbuf, len))
764 return -EFAULT;
765
766 buf[len] = 0;
767
768 write_lock_irqsave(&driver_name_lock, flags);
769
770 /*
771 * Now handle the string we got from userspace very carefully.
772 * The rules are:
773 * - only use the first token we got
774 * - token delimiter is everything looking like a space
775 * character (' ', '\n', '\t' ...)
776 *
777 */
778 if (!isalnum(buf[0])) {
779 /*
780 * If the first character userspace gave us is not
781 * alphanumerical then assume the filter should be
782 * switched off.
783 */
784 if (current_driver_name[0])
785 pr_info("switching off dma-debug driver filter\n");
786 current_driver_name[0] = 0;
787 current_driver = NULL;
788 goto out_unlock;
789 }
790
791 /*
792 * Now parse out the first token and use it as the name for the
793 * driver to filter for.
794 */
795 for (i = 0; i < NAME_MAX_LEN - 1; ++i) {
796 current_driver_name[i] = buf[i];
797		if (isspace(buf[i]) || buf[i] == 0)
798 break;
799 }
800 current_driver_name[i] = 0;
801 current_driver = NULL;
802
803 pr_info("enable driver filter for driver [%s]\n",
804 current_driver_name);
805
806out_unlock:
807 write_unlock_irqrestore(&driver_name_lock, flags);
808
809 return count;
810}
811
812static const struct file_operations filter_fops = {
813 .read = filter_read,
814 .write = filter_write,
815 .llseek = default_llseek,
816};
817
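/* Dump every active mapping entry to the debugfs "dump" file. */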
818static int dump_show(struct seq_file *seq, void *v)
819{
820 int idx;
821
822 for (idx = 0; idx < HASH_SIZE; idx++) {
823 struct hash_bucket *bucket = &dma_entry_hash[idx];
824 struct dma_debug_entry *entry;
825 unsigned long flags;
826
827 spin_lock_irqsave(&bucket->lock, flags);
828 list_for_each_entry(entry, &bucket->list, list) {
829 seq_printf(seq,
830 "%s %s %s idx %d P=%llx N=%lx D=%llx L=%llx %s %s\n",
831 dev_name(entry->dev),
832 dev_driver_string(entry->dev),
833 type2name[entry->type], idx,
834 phys_addr(entry), entry->pfn,
835 entry->dev_addr, entry->size,
836 dir2name[entry->direction],
837 maperr2str[entry->map_err_type]);
838 }
839 spin_unlock_irqrestore(&bucket->lock, flags);
840 }
841 return 0;
842}
843DEFINE_SHOW_ATTRIBUTE(dump);
844
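/*
 * Create the dma-api debugfs directory with its global state flags,
 * error reporting controls, pool statistics, driver filter and mapping
 * dump files.
 */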
845static void dma_debug_fs_init(void)
846{
847 struct dentry *dentry = debugfs_create_dir("dma-api", NULL);
848
849 debugfs_create_bool("disabled", 0444, dentry, &global_disable);
850 debugfs_create_u32("error_count", 0444, dentry, &error_count);
851 debugfs_create_u32("all_errors", 0644, dentry, &show_all_errors);
852 debugfs_create_u32("num_errors", 0644, dentry, &show_num_errors);
853 debugfs_create_u32("num_free_entries", 0444, dentry, &num_free_entries);
854 debugfs_create_u32("min_free_entries", 0444, dentry, &min_free_entries);
855 debugfs_create_u32("nr_total_entries", 0444, dentry, &nr_total_entries);
856 debugfs_create_file("driver_filter", 0644, dentry, NULL, &filter_fops);
857 debugfs_create_file("dump", 0444, dentry, NULL, &dump_fops);
858}
859
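/*
 * Count all mapping entries that belong to @dev.  *out_entry is set to
 * the last matching entry so the caller can report an example.
 */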
860static int device_dma_allocations(struct device *dev, struct dma_debug_entry **out_entry)
861{
862 struct dma_debug_entry *entry;
863 unsigned long flags;
864 int count = 0, i;
865
866 for (i = 0; i < HASH_SIZE; ++i) {
867 spin_lock_irqsave(&dma_entry_hash[i].lock, flags);
868 list_for_each_entry(entry, &dma_entry_hash[i].list, list) {
869 if (entry->dev == dev) {
870 count += 1;
871 *out_entry = entry;
872 }
873 }
874 spin_unlock_irqrestore(&dma_entry_hash[i].lock, flags);
875 }
876
877 return count;
878}
879
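/*
 * Bus notifier callback: when a driver is unbound from a device, warn if
 * it still has DMA mappings outstanding for that device.
 */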
880static int dma_debug_device_change(struct notifier_block *nb, unsigned long action, void *data)
881{
882 struct device *dev = data;
883 struct dma_debug_entry *uninitialized_var(entry);
884 int count;
885
886 if (dma_debug_disabled())
887 return 0;
888
889 switch (action) {
890 case BUS_NOTIFY_UNBOUND_DRIVER:
891 count = device_dma_allocations(dev, &entry);
892 if (count == 0)
893 break;
894 err_printk(dev, entry, "device driver has pending "
895 "DMA allocations while released from device "
896 "[count=%d]\n"
897			   "Details of one of the leaked entries: "
898 "[device address=0x%016llx] [size=%llu bytes] "
899 "[mapped with %s] [mapped as %s]\n",
900 count, entry->dev_addr, entry->size,
901 dir2name[entry->direction], type2name[entry->type]);
902 break;
903 default:
904 break;
905 }
906
907 return 0;
908}
909
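/*
 * Register a notifier on @bus so that dma-debug is told when a driver is
 * unbound from a device and can report any still-pending DMA mappings.
 */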
910void dma_debug_add_bus(struct bus_type *bus)
911{
912 struct notifier_block *nb;
913
914 if (dma_debug_disabled())
915 return;
916
917 nb = kzalloc(sizeof(struct notifier_block), GFP_KERNEL);
918 if (nb == NULL) {
919 pr_err("dma_debug_add_bus: out of memory\n");
920 return;
921 }
922
923 nb->notifier_call = dma_debug_device_change;
924
925 bus_register_notifier(bus, nb);
926}
927
928static int dma_debug_init(void)
929{
930 int i, nr_pages;
931
932 /* Do not use dma_debug_initialized here, since we really want to be
933 * called to set dma_debug_initialized
934 */
935 if (global_disable)
936 return 0;
937
938 for (i = 0; i < HASH_SIZE; ++i) {
939 INIT_LIST_HEAD(&dma_entry_hash[i].list);
940 spin_lock_init(&dma_entry_hash[i].lock);
941 }
942
943 dma_debug_fs_init();
944
945 nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);
946 for (i = 0; i < nr_pages; ++i)
947 dma_debug_create_entries(GFP_KERNEL);
948 if (num_free_entries >= nr_prealloc_entries) {
949 pr_info("preallocated %d debug entries\n", nr_total_entries);
950 } else if (num_free_entries > 0) {
951 pr_warn("%d debug entries requested but only %d allocated\n",
952 nr_prealloc_entries, nr_total_entries);
953 } else {
954 pr_err("debugging out of memory error - disabled\n");
955 global_disable = true;
956
957 return 0;
958 }
959 min_free_entries = num_free_entries;
960
961 dma_debug_initialized = true;
962
963 pr_info("debugging enabled by kernel config\n");
964 return 0;
965}
966core_initcall(dma_debug_init);
967
968static __init int dma_debug_cmdline(char *str)
969{
970 if (!str)
971 return -EINVAL;
972
973 if (strncmp(str, "off", 3) == 0) {
974 pr_info("debugging disabled on kernel command line\n");
975 global_disable = true;
976 }
977
978	return 1;
979}
980
981static __init int dma_debug_entries_cmdline(char *str)
982{
983 if (!str)
984 return -EINVAL;
985 if (!get_option(&str, &nr_prealloc_entries))
986 nr_prealloc_entries = PREALLOC_DMA_DEBUG_ENTRIES;
987	return 1;
988}
989
990__setup("dma_debug=", dma_debug_cmdline);
991__setup("dma_debug_entries=", dma_debug_entries_cmdline);
992
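/*
 * Look up the entry for an unmap/free request and validate it: the
 * mapping must exist, and size, type, direction, sg entry count, CPU
 * address (for coherent allocations) and the dma_mapping_error() check
 * are compared against what was recorded at map time.  The entry is then
 * removed from the hash and returned to the free list.
 */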
993static void check_unmap(struct dma_debug_entry *ref)
994{
995 struct dma_debug_entry *entry;
996 struct hash_bucket *bucket;
997 unsigned long flags;
998
999 bucket = get_hash_bucket(ref, &flags);
1000 entry = bucket_find_exact(bucket, ref);
1001
1002 if (!entry) {
1003 /* must drop lock before calling dma_mapping_error */
1004 put_hash_bucket(bucket, &flags);
1005
1006 if (dma_mapping_error(ref->dev, ref->dev_addr)) {
1007 err_printk(ref->dev, NULL,
1008 "device driver tries to free an "
1009 "invalid DMA memory address\n");
1010 } else {
1011 err_printk(ref->dev, NULL,
1012 "device driver tries to free DMA "
1013 "memory it has not allocated [device "
1014 "address=0x%016llx] [size=%llu bytes]\n",
1015 ref->dev_addr, ref->size);
1016 }
1017 return;
1018 }
1019
1020 if (ref->size != entry->size) {
1021 err_printk(ref->dev, entry, "device driver frees "
1022 "DMA memory with different size "
1023 "[device address=0x%016llx] [map size=%llu bytes] "
1024 "[unmap size=%llu bytes]\n",
1025 ref->dev_addr, entry->size, ref->size);
1026 }
1027
1028 if (ref->type != entry->type) {
1029 err_printk(ref->dev, entry, "device driver frees "
1030 "DMA memory with wrong function "
1031 "[device address=0x%016llx] [size=%llu bytes] "
1032 "[mapped as %s] [unmapped as %s]\n",
1033 ref->dev_addr, ref->size,
1034 type2name[entry->type], type2name[ref->type]);
1035 } else if ((entry->type == dma_debug_coherent) &&
1036 (phys_addr(ref) != phys_addr(entry))) {
1037 err_printk(ref->dev, entry, "device driver frees "
1038 "DMA memory with different CPU address "
1039 "[device address=0x%016llx] [size=%llu bytes] "
1040 "[cpu alloc address=0x%016llx] "
1041 "[cpu free address=0x%016llx]",
1042 ref->dev_addr, ref->size,
1043 phys_addr(entry),
1044 phys_addr(ref));
1045 }
1046
1047 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1048 ref->sg_call_ents != entry->sg_call_ents) {
1049 err_printk(ref->dev, entry, "device driver frees "
1050 "DMA sg list with different entry count "
1051 "[map count=%d] [unmap count=%d]\n",
1052 entry->sg_call_ents, ref->sg_call_ents);
1053 }
1054
1055 /*
1056	 * This may not be an actual bug - but most implementations of the
1057	 * DMA API don't handle it properly, so check for it here
1058 */
1059 if (ref->direction != entry->direction) {
1060 err_printk(ref->dev, entry, "device driver frees "
1061 "DMA memory with different direction "
1062 "[device address=0x%016llx] [size=%llu bytes] "
1063 "[mapped with %s] [unmapped with %s]\n",
1064 ref->dev_addr, ref->size,
1065 dir2name[entry->direction],
1066 dir2name[ref->direction]);
1067 }
1068
1069 /*
1070 * Drivers should use dma_mapping_error() to check the returned
1071 * addresses of dma_map_single() and dma_map_page().
1072 * If not, print this warning message. See Documentation/DMA-API.txt.
1073 */
1074 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1075 err_printk(ref->dev, entry,
1076			   "device driver failed to check map error "
1077 "[device address=0x%016llx] [size=%llu bytes] "
1078 "[mapped as %s]",
1079 ref->dev_addr, ref->size,
1080 type2name[entry->type]);
1081 }
1082
1083 hash_bucket_del(entry);
1084 dma_entry_free(entry);
1085
1086 put_hash_bucket(bucket, &flags);
1087}
1088
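/*
 * Warn if the page being mapped backs the current task's kernel stack,
 * handling both directly-mapped stacks and vmalloc'ed stacks
 * (CONFIG_VMAP_STACK).
 */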
1089static void check_for_stack(struct device *dev,
1090 struct page *page, size_t offset)
1091{
1092 void *addr;
1093 struct vm_struct *stack_vm_area = task_stack_vm_area(current);
1094
1095 if (!stack_vm_area) {
1096 /* Stack is direct-mapped. */
1097 if (PageHighMem(page))
1098 return;
1099 addr = page_address(page) + offset;
1100 if (object_is_on_stack(addr))
1101 err_printk(dev, NULL, "device driver maps memory from stack [addr=%p]\n", addr);
1102 } else {
1103 /* Stack is vmalloced. */
1104 int i;
1105
1106 for (i = 0; i < stack_vm_area->nr_pages; i++) {
1107 if (page != stack_vm_area->pages[i])
1108 continue;
1109
1110 addr = (u8 *)current->stack + i * PAGE_SIZE + offset;
1111 err_printk(dev, NULL, "device driver maps memory from stack [probable addr=%p]\n", addr);
1112 break;
1113 }
1114 }
1115}
1116
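/* Return true if [addr, addr + len) intersects the [start, end) range. */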
1117static inline bool overlap(void *addr, unsigned long len, void *start, void *end)
1118{
1119 unsigned long a1 = (unsigned long)addr;
1120 unsigned long b1 = a1 + len;
1121 unsigned long a2 = (unsigned long)start;
1122 unsigned long b2 = (unsigned long)end;
1123
1124 return !(b1 <= a2 || a1 >= b2);
1125}
1126
1127static void check_for_illegal_area(struct device *dev, void *addr, unsigned long len)
1128{
1129 if (overlap(addr, len, _stext, _etext) ||
1130 overlap(addr, len, __start_rodata, __end_rodata))
1131 err_printk(dev, NULL, "device driver maps memory from kernel text or rodata [addr=%p] [len=%lu]\n", addr, len);
1132}
1133
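/*
 * Validate a dma_sync_* call against the recorded mapping: the region
 * must be mapped, must lie inside the original allocation, and the sync
 * direction must be compatible with the direction used at map time.
 */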
1134static void check_sync(struct device *dev,
1135 struct dma_debug_entry *ref,
1136 bool to_cpu)
1137{
1138 struct dma_debug_entry *entry;
1139 struct hash_bucket *bucket;
1140 unsigned long flags;
1141
1142 bucket = get_hash_bucket(ref, &flags);
1143
1144 entry = bucket_find_contain(&bucket, ref, &flags);
1145
1146 if (!entry) {
1147 err_printk(dev, NULL, "device driver tries "
1148 "to sync DMA memory it has not allocated "
1149 "[device address=0x%016llx] [size=%llu bytes]\n",
1150 (unsigned long long)ref->dev_addr, ref->size);
1151 goto out;
1152 }
1153
1154 if (ref->size > entry->size) {
1155 err_printk(dev, entry, "device driver syncs"
1156 " DMA memory outside allocated range "
1157 "[device address=0x%016llx] "
1158 "[allocation size=%llu bytes] "
1159 "[sync offset+size=%llu]\n",
1160 entry->dev_addr, entry->size,
1161 ref->size);
1162 }
1163
1164 if (entry->direction == DMA_BIDIRECTIONAL)
1165 goto out;
1166
1167 if (ref->direction != entry->direction) {
1168 err_printk(dev, entry, "device driver syncs "
1169 "DMA memory with different direction "
1170 "[device address=0x%016llx] [size=%llu bytes] "
1171 "[mapped with %s] [synced with %s]\n",
1172 (unsigned long long)ref->dev_addr, entry->size,
1173 dir2name[entry->direction],
1174 dir2name[ref->direction]);
1175 }
1176
1177 if (to_cpu && !(entry->direction == DMA_FROM_DEVICE) &&
1178 !(ref->direction == DMA_TO_DEVICE))
1179 err_printk(dev, entry, "device driver syncs "
1180 "device read-only DMA memory for cpu "
1181 "[device address=0x%016llx] [size=%llu bytes] "
1182 "[mapped with %s] [synced with %s]\n",
1183 (unsigned long long)ref->dev_addr, entry->size,
1184 dir2name[entry->direction],
1185 dir2name[ref->direction]);
1186
1187 if (!to_cpu && !(entry->direction == DMA_TO_DEVICE) &&
1188 !(ref->direction == DMA_FROM_DEVICE))
1189 err_printk(dev, entry, "device driver syncs "
1190 "device write-only DMA memory to device "
1191 "[device address=0x%016llx] [size=%llu bytes] "
1192 "[mapped with %s] [synced with %s]\n",
1193 (unsigned long long)ref->dev_addr, entry->size,
1194 dir2name[entry->direction],
1195 dir2name[ref->direction]);
1196
1197 if (ref->sg_call_ents && ref->type == dma_debug_sg &&
1198 ref->sg_call_ents != entry->sg_call_ents) {
1199 err_printk(ref->dev, entry, "device driver syncs "
1200 "DMA sg list with different entry count "
1201 "[map count=%d] [sync count=%d]\n",
1202 entry->sg_call_ents, ref->sg_call_ents);
1203 }
1204
1205out:
1206 put_hash_bucket(bucket, &flags);
1207}
1208
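/*
 * Only effective when CONFIG_DMA_API_DEBUG_SG is enabled: check that a
 * mapped scatterlist segment respects the device's maximum segment size
 * and does not cross its DMA segment boundary mask.
 */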
1209static void check_sg_segment(struct device *dev, struct scatterlist *sg)
1210{
1211#ifdef CONFIG_DMA_API_DEBUG_SG
1212 unsigned int max_seg = dma_get_max_seg_size(dev);
1213 u64 start, end, boundary = dma_get_seg_boundary(dev);
1214
1215 /*
1216 * Either the driver forgot to set dma_parms appropriately, or
1217 * whoever generated the list forgot to check them.
1218 */
1219 if (sg->length > max_seg)
1220 err_printk(dev, NULL, "mapping sg segment longer than device claims to support [len=%u] [max=%u]\n",
1221 sg->length, max_seg);
1222 /*
1223 * In some cases this could potentially be the DMA API
1224 * implementation's fault, but it would usually imply that
1225 * the scatterlist was built inappropriately to begin with.
1226 */
1227 start = sg_dma_address(sg);
1228 end = start + sg_dma_len(sg) - 1;
1229 if ((start ^ end) & ~boundary)
1230 err_printk(dev, NULL, "mapping sg segment across boundary [start=0x%016llx] [end=0x%016llx] [boundary=0x%016llx]\n",
1231 start, end, boundary);
1232#endif
1233}
1234
1235void debug_dma_map_single(struct device *dev, const void *addr,
1236 unsigned long len)
1237{
1238 if (unlikely(dma_debug_disabled()))
1239 return;
1240
1241 if (!virt_addr_valid(addr))
1242 err_printk(dev, NULL, "device driver maps memory from invalid area [addr=%p] [len=%lu]\n",
1243 addr, len);
1244
1245 if (is_vmalloc_addr(addr))
1246 err_printk(dev, NULL, "device driver maps memory from vmalloc area [addr=%p] [len=%lu]\n",
1247 addr, len);
1248}
1249EXPORT_SYMBOL(debug_dma_map_single);
1250
1251void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,
1252 size_t size, int direction, dma_addr_t dma_addr)
1253{
1254 struct dma_debug_entry *entry;
1255
1256 if (unlikely(dma_debug_disabled()))
1257 return;
1258
1259 if (dma_mapping_error(dev, dma_addr))
1260 return;
1261
1262 entry = dma_entry_alloc();
1263 if (!entry)
1264 return;
1265
1266 entry->dev = dev;
1267 entry->type = dma_debug_single;
1268 entry->pfn = page_to_pfn(page);
1269	entry->offset = offset;
1270 entry->dev_addr = dma_addr;
1271 entry->size = size;
1272 entry->direction = direction;
1273 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1274
1275 check_for_stack(dev, page, offset);
1276
1277 if (!PageHighMem(page)) {
1278 void *addr = page_address(page) + offset;
1279
1280 check_for_illegal_area(dev, addr, size);
1281 }
1282
1283 add_dma_entry(entry);
1284}
1285EXPORT_SYMBOL(debug_dma_map_page);
1286
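/*
 * Invoked when a driver calls dma_mapping_error().  A minimal sketch of
 * the driver-side pattern this instruments (assuming driver-owned 'dev',
 * 'buf' and 'len') would be:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *
 * Only once that check has been made does the matching entry move from
 * MAP_ERR_NOT_CHECKED to MAP_ERR_CHECKED, which check_unmap() later
 * verifies.
 */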
1287void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
1288{
1289 struct dma_debug_entry ref;
1290 struct dma_debug_entry *entry;
1291 struct hash_bucket *bucket;
1292 unsigned long flags;
1293
1294 if (unlikely(dma_debug_disabled()))
1295 return;
1296
1297 ref.dev = dev;
1298 ref.dev_addr = dma_addr;
1299 bucket = get_hash_bucket(&ref, &flags);
1300
1301 list_for_each_entry(entry, &bucket->list, list) {
1302 if (!exact_match(&ref, entry))
1303 continue;
1304
1305 /*
1306 * The same physical address can be mapped multiple
1307 * times. Without a hardware IOMMU this results in the
1308 * same device addresses being put into the dma-debug
1309 * hash multiple times too. This can result in false
1310		 * positives being reported. Therefore we simply update the
1311		 * first entry from the hash which matches the reference
1312		 * value and has not yet been marked as checked, rather
1313		 * than trying to find a unique best fit.
1314 */
1315 if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
1316 entry->map_err_type = MAP_ERR_CHECKED;
1317 break;
1318 }
1319 }
1320
1321 put_hash_bucket(bucket, &flags);
1322}
1323EXPORT_SYMBOL(debug_dma_mapping_error);
1324
1325void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
1326 size_t size, int direction)
1327{
1328 struct dma_debug_entry ref = {
1329 .type = dma_debug_single,
1330 .dev = dev,
1331 .dev_addr = addr,
1332 .size = size,
1333 .direction = direction,
1334 };
1335
1336 if (unlikely(dma_debug_disabled()))
1337 return;
1338 check_unmap(&ref);
1339}
1340EXPORT_SYMBOL(debug_dma_unmap_page);
1341
1342void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
1343 int nents, int mapped_ents, int direction)
1344{
1345 struct dma_debug_entry *entry;
1346 struct scatterlist *s;
1347 int i;
1348
1349 if (unlikely(dma_debug_disabled()))
1350 return;
1351
1352 for_each_sg(sg, s, mapped_ents, i) {
1353 entry = dma_entry_alloc();
1354 if (!entry)
1355 return;
1356
1357 entry->type = dma_debug_sg;
1358 entry->dev = dev;
1359 entry->pfn = page_to_pfn(sg_page(s));
1360		entry->offset = s->offset;
1361 entry->size = sg_dma_len(s);
1362 entry->dev_addr = sg_dma_address(s);
1363 entry->direction = direction;
1364 entry->sg_call_ents = nents;
1365 entry->sg_mapped_ents = mapped_ents;
1366
1367 check_for_stack(dev, sg_page(s), s->offset);
1368
1369		if (!PageHighMem(sg_page(s)))
1370			check_for_illegal_area(dev, sg_virt(s), sg_dma_len(s));
1372
1373 check_sg_segment(dev, s);
1374
1375 add_dma_entry(entry);
1376 }
1377}
1378EXPORT_SYMBOL(debug_dma_map_sg);
1379
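/*
 * Look up how many scatterlist entries were actually mapped for the
 * mapping that @ref describes (0 if the mapping is unknown).
 */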
1380static int get_nr_mapped_entries(struct device *dev,
1381 struct dma_debug_entry *ref)
1382{
1383 struct dma_debug_entry *entry;
1384 struct hash_bucket *bucket;
1385 unsigned long flags;
1386 int mapped_ents;
1387
1388 bucket = get_hash_bucket(ref, &flags);
1389 entry = bucket_find_exact(bucket, ref);
1390 mapped_ents = 0;
1391
1392 if (entry)
1393 mapped_ents = entry->sg_mapped_ents;
1394 put_hash_bucket(bucket, &flags);
1395
1396 return mapped_ents;
1397}
1398
1399void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
1400 int nelems, int dir)
1401{
1402 struct scatterlist *s;
1403 int mapped_ents = 0, i;
1404
1405 if (unlikely(dma_debug_disabled()))
1406 return;
1407
1408 for_each_sg(sglist, s, nelems, i) {
1409
1410 struct dma_debug_entry ref = {
1411 .type = dma_debug_sg,
1412 .dev = dev,
1413 .pfn = page_to_pfn(sg_page(s)),
1414 .offset = s->offset,
1415 .dev_addr = sg_dma_address(s),
1416 .size = sg_dma_len(s),
1417 .direction = dir,
1418 .sg_call_ents = nelems,
1419 };
1420
1421 if (mapped_ents && i >= mapped_ents)
1422 break;
1423
1424 if (!i)
1425 mapped_ents = get_nr_mapped_entries(dev, &ref);
1426
1427 check_unmap(&ref);
1428 }
1429}
1430EXPORT_SYMBOL(debug_dma_unmap_sg);
1431
1432void debug_dma_alloc_coherent(struct device *dev, size_t size,
1433 dma_addr_t dma_addr, void *virt)
1434{
1435 struct dma_debug_entry *entry;
1436
1437 if (unlikely(dma_debug_disabled()))
1438 return;
1439
1440 if (unlikely(virt == NULL))
1441 return;
1442
1443 /* handle vmalloc and linear addresses */
1444 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1445 return;
1446
1447 entry = dma_entry_alloc();
1448 if (!entry)
1449 return;
1450
1451 entry->type = dma_debug_coherent;
1452 entry->dev = dev;
1453 entry->offset = offset_in_page(virt);
1454 entry->size = size;
1455 entry->dev_addr = dma_addr;
1456 entry->direction = DMA_BIDIRECTIONAL;
1457
1458 if (is_vmalloc_addr(virt))
1459 entry->pfn = vmalloc_to_pfn(virt);
1460 else
1461 entry->pfn = page_to_pfn(virt_to_page(virt));
1462
1463 add_dma_entry(entry);
1464}
1465
1466void debug_dma_free_coherent(struct device *dev, size_t size,
1467 void *virt, dma_addr_t addr)
1468{
1469 struct dma_debug_entry ref = {
1470 .type = dma_debug_coherent,
1471 .dev = dev,
1472 .offset = offset_in_page(virt),
1473 .dev_addr = addr,
1474 .size = size,
1475 .direction = DMA_BIDIRECTIONAL,
1476 };
1477
1478 /* handle vmalloc and linear addresses */
1479 if (!is_vmalloc_addr(virt) && !virt_addr_valid(virt))
1480 return;
1481
1482 if (is_vmalloc_addr(virt))
1483 ref.pfn = vmalloc_to_pfn(virt);
1484 else
1485 ref.pfn = page_to_pfn(virt_to_page(virt));
1486
1487 if (unlikely(dma_debug_disabled()))
1488 return;
1489
1490 check_unmap(&ref);
1491}
1492
1493void debug_dma_map_resource(struct device *dev, phys_addr_t addr, size_t size,
1494 int direction, dma_addr_t dma_addr)
1495{
1496 struct dma_debug_entry *entry;
1497
1498 if (unlikely(dma_debug_disabled()))
1499 return;
1500
1501 entry = dma_entry_alloc();
1502 if (!entry)
1503 return;
1504
1505 entry->type = dma_debug_resource;
1506 entry->dev = dev;
1507 entry->pfn = PHYS_PFN(addr);
1508 entry->offset = offset_in_page(addr);
1509 entry->size = size;
1510 entry->dev_addr = dma_addr;
1511 entry->direction = direction;
1512 entry->map_err_type = MAP_ERR_NOT_CHECKED;
1513
1514 add_dma_entry(entry);
1515}
1516EXPORT_SYMBOL(debug_dma_map_resource);
1517
1518void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
1519 size_t size, int direction)
1520{
1521 struct dma_debug_entry ref = {
1522 .type = dma_debug_resource,
1523 .dev = dev,
1524 .dev_addr = dma_addr,
1525 .size = size,
1526 .direction = direction,
1527 };
1528
1529 if (unlikely(dma_debug_disabled()))
1530 return;
1531
1532 check_unmap(&ref);
1533}
1534EXPORT_SYMBOL(debug_dma_unmap_resource);
1535
1536void debug_dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
1537 size_t size, int direction)
1538{
1539 struct dma_debug_entry ref;
1540
1541 if (unlikely(dma_debug_disabled()))
1542 return;
1543
1544 ref.type = dma_debug_single;
1545 ref.dev = dev;
1546 ref.dev_addr = dma_handle;
1547 ref.size = size;
1548 ref.direction = direction;
1549 ref.sg_call_ents = 0;
1550
1551 check_sync(dev, &ref, true);
1552}
1553EXPORT_SYMBOL(debug_dma_sync_single_for_cpu);
1554
1555void debug_dma_sync_single_for_device(struct device *dev,
1556 dma_addr_t dma_handle, size_t size,
1557 int direction)
1558{
1559 struct dma_debug_entry ref;
1560
1561 if (unlikely(dma_debug_disabled()))
1562 return;
1563
1564 ref.type = dma_debug_single;
1565 ref.dev = dev;
1566 ref.dev_addr = dma_handle;
1567 ref.size = size;
1568 ref.direction = direction;
1569 ref.sg_call_ents = 0;
1570
1571 check_sync(dev, &ref, false);
1572}
1573EXPORT_SYMBOL(debug_dma_sync_single_for_device);
1574
1575void debug_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1576 int nelems, int direction)
1577{
1578 struct scatterlist *s;
1579 int mapped_ents = 0, i;
1580
1581 if (unlikely(dma_debug_disabled()))
1582 return;
1583
1584 for_each_sg(sg, s, nelems, i) {
1585
1586 struct dma_debug_entry ref = {
1587 .type = dma_debug_sg,
1588 .dev = dev,
1589 .pfn = page_to_pfn(sg_page(s)),
1590 .offset = s->offset,
1591 .dev_addr = sg_dma_address(s),
1592 .size = sg_dma_len(s),
1593 .direction = direction,
1594 .sg_call_ents = nelems,
1595 };
1596
1597 if (!i)
1598 mapped_ents = get_nr_mapped_entries(dev, &ref);
1599
1600 if (i >= mapped_ents)
1601 break;
1602
1603 check_sync(dev, &ref, true);
1604 }
1605}
1606EXPORT_SYMBOL(debug_dma_sync_sg_for_cpu);
1607
1608void debug_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1609 int nelems, int direction)
1610{
1611 struct scatterlist *s;
1612 int mapped_ents = 0, i;
1613
1614 if (unlikely(dma_debug_disabled()))
1615 return;
1616
1617 for_each_sg(sg, s, nelems, i) {
1618
1619 struct dma_debug_entry ref = {
1620 .type = dma_debug_sg,
1621 .dev = dev,
1622 .pfn = page_to_pfn(sg_page(s)),
1623 .offset = s->offset,
1624 .dev_addr = sg_dma_address(s),
1625 .size = sg_dma_len(s),
1626 .direction = direction,
1627 .sg_call_ents = nelems,
1628 };
1629 if (!i)
1630 mapped_ents = get_nr_mapped_entries(dev, &ref);
1631
1632 if (i >= mapped_ents)
1633 break;
1634
1635 check_sync(dev, &ref, false);
1636 }
1637}
1638EXPORT_SYMBOL(debug_dma_sync_sg_for_device);
1639
1640static int __init dma_debug_driver_setup(char *str)
1641{
1642 int i;
1643
1644 for (i = 0; i < NAME_MAX_LEN - 1; ++i, ++str) {
1645 current_driver_name[i] = *str;
1646 if (*str == 0)
1647 break;
1648 }
1649
1650 if (current_driver_name[0])
1651 pr_info("enable driver filter for driver [%s]\n",
1652 current_driver_name);
1653
1654
1655 return 1;
1656}
1657__setup("dma_debug_driver=", dma_debug_driver_setup);