// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm/kmemleak.c
 *
 * Copyright (C) 2008 ARM Limited
 * Written by Catalin Marinas <catalin.marinas@arm.com>
 *
 * For more information on the algorithm and kmemleak usage, please see
 * Documentation/dev-tools/kmemleak.rst.
 *
 * Notes on locking
 * ----------------
 *
 * The following locks and mutexes are used by kmemleak:
 *
 * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
 *   accesses to the object_tree_root (or object_phys_tree_root). The
 *   object_list is the main list holding the metadata (struct kmemleak_object)
 *   for the allocated memory blocks. The object_tree_root and object_phys_tree_root
 *   are red black trees used to look-up metadata based on a pointer to the
 *   corresponding memory block. The object_phys_tree_root is for objects
 *   allocated with physical address. The kmemleak_object structures are
 *   added to the object_list and object_tree_root (or object_phys_tree_root)
 *   in the create_object() function called from the kmemleak_alloc() (or
 *   kmemleak_alloc_phys()) callback and removed in delete_object() called from
 *   the kmemleak_free() callback
 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
 *   that some members of this structure may be protected by other means
 *   (atomic or kmemleak_lock). This lock is also held when scanning the
 *   corresponding memory block to avoid the kernel freeing it via the
 *   kmemleak_free() callback. This is less heavyweight than holding a global
 *   lock like kmemleak_lock during scanning.
 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
 *   unreferenced objects at a time. The gray_list contains the objects which
 *   are already referenced or marked as false positives and need to be
 *   scanned. This list is only modified during a scanning episode when the
 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
 *   Note that the kmemleak_object.use_count is incremented when an object is
 *   added to the gray_list and therefore cannot be freed. This mutex also
 *   prevents multiple users of the "kmemleak" debugfs file together with
 *   modifications to the memory scanning parameters including the scan_thread
 *   pointer
 *
 * Locks and mutexes are acquired/nested in the following order:
 *
 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
 *
 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
 * regions.
 *
 * The kmemleak_object structures have a use_count incremented or decremented
 * using the get_object()/put_object() functions. When the use_count becomes
 * 0, this count can no longer be incremented and put_object() schedules the
 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
 * function must be protected by rcu_read_lock() to avoid accessing a freed
 * structure.
 */
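/*
 * Illustrative sketch (not part of this file's code) of the use_count
 * protocol described above, using the get_object()/put_object() helpers
 * defined later in this file; the loop body is hypothetical:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(object, &object_list, object_list) {
 *		if (!get_object(object))	// use_count 0, being freed
 *			continue;
 *		// ... inspect the metadata under object->lock ...
 *		put_object(object);		// last ref -> RCU freeing
 *	}
 *	rcu_read_unlock();
 */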

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/jiffies.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kthread.h>
#include <linux/rbtree.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/stacktrace.h>
#include <linux/stackdepot.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/memblock.h>
#include <linux/pfn.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/thread_info.h>
#include <linux/err.h>
#include <linux/uaccess.h>
#include <linux/string.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>

#include <asm/sections.h>
#include <asm/processor.h>
#include <linux/atomic.h>

#include <linux/kasan.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/memory_hotplug.h>

/*
 * Kmemleak configuration and common defines.
 */
#define MAX_TRACE		16	/* stack trace length */
#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
#define SECS_FIRST_SCAN		60	/* delay before the first scan */
#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */

#define BYTES_PER_POINTER	sizeof(void *)

/* GFP bitmask for kmemleak internal allocations */
#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
					   __GFP_NOLOCKDEP)) | \
				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
				 __GFP_NOWARN)

/* scanning area inside a memory block */
struct kmemleak_scan_area {
	struct hlist_node node;
	unsigned long start;
	size_t size;
};

#define KMEMLEAK_GREY	0
#define KMEMLEAK_BLACK	-1

/*
 * Structure holding the metadata for each allocated memory block.
 * Modifications to such objects should be made while holding the
 * object->lock. Insertions or deletions from object_list, gray_list or
 * rb_node are already protected by the corresponding locks or mutex (see
 * the notes on locking above). These objects are reference-counted
 * (use_count) and freed using the RCU mechanism.
 */
struct kmemleak_object {
	raw_spinlock_t lock;
	unsigned int flags;		/* object status flags */
	struct list_head object_list;
	struct list_head gray_list;
	struct rb_node rb_node;
	struct rcu_head rcu;		/* object_list lockless traversal */
	/* object usage count; object freed when use_count == 0 */
	atomic_t use_count;
	unsigned long pointer;
	size_t size;
	/* pass surplus references to this pointer */
	unsigned long excess_ref;
	/* minimum number of pointers found before the object is considered a leak */
	int min_count;
	/* the total number of pointers found pointing to this object */
	int count;
	/* checksum for detecting modified objects */
	u32 checksum;
	/* memory ranges to be scanned inside an object (empty for all) */
	struct hlist_head area_list;
	depot_stack_handle_t trace_handle;
	unsigned long jiffies;		/* creation timestamp */
	pid_t pid;			/* pid of the current task */
	char comm[TASK_COMM_LEN];	/* executable name */
};

/* flag representing the memory block allocation status */
#define OBJECT_ALLOCATED	(1 << 0)
/* flag set after the first reporting of an unreferenced object */
#define OBJECT_REPORTED		(1 << 1)
/* flag set to not scan the object */
#define OBJECT_NO_SCAN		(1 << 2)
/* flag set to fully scan the object when scan_area allocation failed */
#define OBJECT_FULL_SCAN	(1 << 3)
/* flag set for object allocated with physical address */
#define OBJECT_PHYS		(1 << 4)

#define HEX_PREFIX		"    "
/* number of bytes to print per line; must be 16 or 32 */
#define HEX_ROW_SIZE		16
/* number of bytes to print at a time (1, 2, 4, 8) */
#define HEX_GROUP_SIZE		1
/* include ASCII after the hex output */
#define HEX_ASCII		1
/* max number of lines to be printed */
#define HEX_MAX_LINES		2

/* the list of all allocated objects */
static LIST_HEAD(object_list);
/* the list of gray-colored objects (see color_gray comment below) */
static LIST_HEAD(gray_list);
/* memory pool allocation */
static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
static LIST_HEAD(mem_pool_free_list);
/* search tree for object boundaries */
static struct rb_root object_tree_root = RB_ROOT;
/* search tree for object (with OBJECT_PHYS flag) boundaries */
static struct rb_root object_phys_tree_root = RB_ROOT;
/* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
static DEFINE_RAW_SPINLOCK(kmemleak_lock);

/* allocation caches for kmemleak internal data */
static struct kmem_cache *object_cache;
static struct kmem_cache *scan_area_cache;

/* set if tracing memory operations is enabled */
static int kmemleak_enabled = 1;
/* same as above but only for the kmemleak_free() callback */
static int kmemleak_free_enabled = 1;
/* set in the late_initcall if there were no errors */
static int kmemleak_initialized;
/* set if a kmemleak warning was issued */
static int kmemleak_warning;
/* set if a fatal kmemleak error has occurred */
static int kmemleak_error;

/* minimum and maximum address that may be valid pointers */
static unsigned long min_addr = ULONG_MAX;
static unsigned long max_addr;

static struct task_struct *scan_thread;
/* used to avoid reporting of recently allocated objects */
static unsigned long jiffies_min_age;
static unsigned long jiffies_last_scan;
/* delay between automatic memory scannings */
static unsigned long jiffies_scan_wait;
/* enables or disables the task stacks scanning */
static int kmemleak_stack_scan = 1;
/* protects the memory scanning, parameters and debug/kmemleak file access */
static DEFINE_MUTEX(scan_mutex);
/* setting kmemleak=on will set this var, skipping the disable */
static int kmemleak_skip_disable;
/* set if there are leaks that can be reported */
static bool kmemleak_found_leaks;

static bool kmemleak_verbose;
module_param_named(verbose, kmemleak_verbose, bool, 0600);

static void kmemleak_disable(void);

/*
 * Print a warning and dump the stack trace.
 */
#define kmemleak_warn(x...)	do {		\
	pr_warn(x);				\
	dump_stack();				\
	kmemleak_warning = 1;			\
} while (0)

/*
 * Macro invoked when a serious kmemleak condition has occurred that cannot
 * be recovered from. Kmemleak will be disabled and further allocation/freeing
 * tracing will no longer be available.
 */
#define kmemleak_stop(x...)	do {	\
	kmemleak_warn(x);		\
	kmemleak_disable();		\
} while (0)

#define warn_or_seq_printf(seq, fmt, ...)	do {	\
	if (seq)					\
		seq_printf(seq, fmt, ##__VA_ARGS__);	\
	else						\
		pr_warn(fmt, ##__VA_ARGS__);		\
} while (0)

static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
				 int rowsize, int groupsize, const void *buf,
				 size_t len, bool ascii)
{
	if (seq)
		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
			     buf, len, ascii);
	else
		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
			       rowsize, groupsize, buf, len, ascii);
}

/*
 * Printing of the object's hex dump to the seq file. The number of lines to be
 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 * with the object->lock held.
 */
static void hex_dump_object(struct seq_file *seq,
			    struct kmemleak_object *object)
{
	const u8 *ptr = (const u8 *)object->pointer;
	size_t len;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return;

	/* limit the number of lines to HEX_MAX_LINES */
	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);

	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
	kasan_disable_current();
	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
	kasan_enable_current();
}

/*
 * Object colors, encoded with count and min_count:
 * - white - orphan object, not enough references to it (count < min_count)
 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 *		sufficient references to it (count >= min_count)
 * - black - ignore, it doesn't contain references (e.g. text section)
 *		(min_count == -1). No function defined for this color.
 * Newly created objects don't have any color assigned (object->count == -1)
 * before the next memory scan when they become white.
 */
static bool color_white(const struct kmemleak_object *object)
{
	return object->count != KMEMLEAK_BLACK &&
		object->count < object->min_count;
}

static bool color_gray(const struct kmemleak_object *object)
{
	return object->min_count != KMEMLEAK_BLACK &&
		object->count >= object->min_count;
}
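
/*
 * Concrete example of the encoding above: a kmalloc'ed block is registered
 * with min_count == 1 and its count is reset to 0 at the start of a scan,
 * i.e. white; the first pointer found to it makes count == 1 >= min_count,
 * turning it gray. A block registered with min_count == -1 (KMEMLEAK_BLACK)
 * is neither white nor gray and is never reported.
 */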

/*
 * Objects are considered unreferenced only if their color is white, they have
 * not been deleted and have a minimum age to avoid false positives caused by
 * pointers temporarily stored in CPU registers.
 */
static bool unreferenced_object(struct kmemleak_object *object)
{
	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
		time_before_eq(object->jiffies + jiffies_min_age,
			       jiffies_last_scan);
}

/*
 * Printing of the unreferenced object's information to the seq file. The
 * print_unreferenced function must be called with the object->lock held.
 */
static void print_unreferenced(struct seq_file *seq,
			       struct kmemleak_object *object)
{
	int i;
	unsigned long *entries;
	unsigned int nr_entries;
	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);

	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
			   object->pointer, object->size);
	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
			   object->comm, object->pid, object->jiffies,
			   msecs_age / 1000, msecs_age % 1000);
	hex_dump_object(seq, object);
	warn_or_seq_printf(seq, "  backtrace:\n");

	for (i = 0; i < nr_entries; i++) {
		void *ptr = (void *)entries[i];
		warn_or_seq_printf(seq, "    [<%pK>] %pS\n", ptr, ptr);
	}
}

/*
 * Print the kmemleak_object information. This function is used mainly for
 * debugging special cases during kmemleak operations. It must be called with
 * the object->lock held.
 */
static void dump_object_info(struct kmemleak_object *object)
{
	pr_notice("Object 0x%08lx (size %zu):\n",
		  object->pointer, object->size);
	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
		  object->comm, object->pid, object->jiffies);
	pr_notice("  min_count = %d\n", object->min_count);
	pr_notice("  count = %d\n", object->count);
	pr_notice("  flags = 0x%x\n", object->flags);
	pr_notice("  checksum = %u\n", object->checksum);
	pr_notice("  backtrace:\n");
	if (object->trace_handle)
		stack_depot_print(object->trace_handle);
}

/*
 * Look-up a memory block metadata (kmemleak_object) in the object search
 * tree based on a pointer value. If alias is 0, only values pointing to the
 * beginning of the memory block are allowed. The kmemleak_lock must be held
 * when calling this function.
 */
static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
					       bool is_phys)
{
	struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
			     object_tree_root.rb_node;
	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);

	while (rb) {
		struct kmemleak_object *object;
		unsigned long untagged_objp;

		object = rb_entry(rb, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

		if (untagged_ptr < untagged_objp)
			rb = object->rb_node.rb_left;
		else if (untagged_objp + object->size <= untagged_ptr)
			rb = object->rb_node.rb_right;
		else if (untagged_objp == untagged_ptr || alias)
			return object;
		else {
			kmemleak_warn("Found object by alias at 0x%08lx\n",
				      ptr);
			dump_object_info(object);
			break;
		}
	}
	return NULL;
}

/* Look up a kmemleak object which was allocated with a virtual address. */
static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
{
	return __lookup_object(ptr, alias, false);
}

/*
 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 * that once an object's use_count reaches 0, the RCU freeing has already been
 * registered and the object should no longer be used. This function must be
 * called under the protection of rcu_read_lock().
 */
static int get_object(struct kmemleak_object *object)
{
	return atomic_inc_not_zero(&object->use_count);
}

/*
 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 */
static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;

	/* try the slab allocator first */
	if (object_cache) {
		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
		if (object)
			return object;
	}

	/* slab allocation failed, try the memory pool */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = list_first_entry_or_null(&mem_pool_free_list,
					  typeof(*object), object_list);
	if (object)
		list_del(&object->object_list);
	else if (mem_pool_free_count)
		object = &mem_pool[--mem_pool_free_count];
	else
		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

/*
 * Return the object to either the slab allocator or the memory pool.
 */
static void mem_pool_free(struct kmemleak_object *object)
{
	unsigned long flags;

	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
		kmem_cache_free(object_cache, object);
		return;
	}

	/* add the object to the memory pool free list */
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	list_add(&object->object_list, &mem_pool_free_list);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * RCU callback to free a kmemleak_object.
 */
static void free_object_rcu(struct rcu_head *rcu)
{
	struct hlist_node *tmp;
	struct kmemleak_scan_area *area;
	struct kmemleak_object *object =
		container_of(rcu, struct kmemleak_object, rcu);

	/*
	 * Once use_count is 0 (guaranteed by put_object), there is no other
	 * code accessing this object, hence no need for locking.
	 */
	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
		hlist_del(&area->node);
		kmem_cache_free(scan_area_cache, area);
	}
	mem_pool_free(object);
}

/*
 * Decrement the object use_count. Once the count is 0, free the object using
 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 * delete_object() path, the delayed RCU freeing ensures that there is no
 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 * is also possible.
 */
static void put_object(struct kmemleak_object *object)
{
	if (!atomic_dec_and_test(&object->use_count))
		return;

	/* should only get here after delete_object was called */
	WARN_ON(object->flags & OBJECT_ALLOCATED);

	/*
	 * It may be too early for the RCU callbacks, however, there is no
	 * concurrent object_list traversal when !object_cache and all objects
	 * came from the memory pool. Free the object directly.
	 */
	if (object_cache)
		call_rcu(&object->rcu, free_object_rcu);
	else
		free_object_rcu(&object->rcu);
}
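
/*
 * Reference lifecycle sketch: create_object() below sets use_count to 1 and
 * the matching put_object() runs in __delete_object() on the kmemleak_free()
 * path. Any temporary reference must be paired in the same code path, e.g.
 * (hypothetical caller):
 *
 *	object = find_and_get_object(ptr, 0);
 *	if (object) {
 *		// ... use the metadata ...
 *		put_object(object);
 *	}
 */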

/*
 * Look up an object in the object search tree and increase its use_count.
 */
static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
						     bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object;

	rcu_read_lock();
	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, is_phys);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	/* check whether the object is still available */
	if (object && !get_object(object))
		object = NULL;
	rcu_read_unlock();

	return object;
}

/* Look up and get an object which was allocated with a virtual address. */
static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
{
	return __find_and_get_object(ptr, alias, false);
}

/*
 * Remove an object from the object_tree_root (or object_phys_tree_root)
 * and object_list. Must be called with the kmemleak_lock held _if_ kmemleak
 * is still enabled.
 */
static void __remove_object(struct kmemleak_object *object)
{
	rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
				   &object_phys_tree_root :
				   &object_tree_root);
	list_del_rcu(&object->object_list);
}

/*
 * Look up an object in the object search tree and remove it from both
 * object_tree_root (or object_phys_tree_root) and object_list. The
 * returned object's use_count should be at least 1, as initially set
 * by create_object().
 */
static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
						      bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	object = __lookup_object(ptr, alias, is_phys);
	if (object)
		__remove_object(object);
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);

	return object;
}

static noinline depot_stack_handle_t set_track_prepare(void)
{
	depot_stack_handle_t trace_handle;
	unsigned long entries[MAX_TRACE];
	unsigned int nr_entries;

	if (!kmemleak_initialized)
		return 0;
	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);

	return trace_handle;
}

/*
 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 * memory block and add it to the object_list and object_tree_root (or
 * object_phys_tree_root).
 */
static void __create_object(unsigned long ptr, size_t size,
			    int min_count, gfp_t gfp, bool is_phys)
{
	unsigned long flags;
	struct kmemleak_object *object, *parent;
	struct rb_node **link, *rb_parent;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = mem_pool_alloc(gfp);
	if (!object) {
		pr_warn("Cannot allocate a kmemleak_object structure\n");
		kmemleak_disable();
		return;
	}

	INIT_LIST_HEAD(&object->object_list);
	INIT_LIST_HEAD(&object->gray_list);
	INIT_HLIST_HEAD(&object->area_list);
	raw_spin_lock_init(&object->lock);
	atomic_set(&object->use_count, 1);
	object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
	object->pointer = ptr;
	object->size = kfence_ksize((void *)ptr) ?: size;
	object->excess_ref = 0;
	object->min_count = min_count;
	object->count = 0;			/* white color initially */
	object->jiffies = jiffies;
	object->checksum = 0;

	/* task information */
	if (in_hardirq()) {
		object->pid = 0;
		strncpy(object->comm, "hardirq", sizeof(object->comm));
	} else if (in_serving_softirq()) {
		object->pid = 0;
		strncpy(object->comm, "softirq", sizeof(object->comm));
	} else {
		object->pid = current->pid;
		/*
		 * There is a small chance of a race with set_task_comm(),
		 * however using get_task_comm() here may cause locking
		 * dependency issues with current->alloc_lock. In the worst
		 * case, the command line is not correct.
		 */
		strncpy(object->comm, current->comm, sizeof(object->comm));
	}

	/* kernel backtrace */
	object->trace_handle = set_track_prepare();

	raw_spin_lock_irqsave(&kmemleak_lock, flags);

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	/*
	 * Only update min_addr and max_addr with object
	 * storing virtual address.
	 */
	if (!is_phys) {
		min_addr = min(min_addr, untagged_ptr);
		max_addr = max(max_addr, untagged_ptr + size);
	}
	link = is_phys ? &object_phys_tree_root.rb_node :
		&object_tree_root.rb_node;
	rb_parent = NULL;
	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
		if (untagged_ptr + size <= untagged_objp)
			link = &parent->rb_node.rb_left;
		else if (untagged_objp + parent->size <= untagged_ptr)
			link = &parent->rb_node.rb_right;
		else {
			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
				      ptr);
			/*
			 * No need for parent->lock here since "parent" cannot
			 * be freed while the kmemleak_lock is held.
			 */
			dump_object_info(parent);
			kmem_cache_free(object_cache, object);
			goto out;
		}
	}
	rb_link_node(&object->rb_node, rb_parent, link);
	rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
					  &object_tree_root);
	list_add_tail_rcu(&object->object_list, &object_list);
out:
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/* Create a kmemleak object for a block allocated with a virtual address. */
static void create_object(unsigned long ptr, size_t size,
			  int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, false);
}

/* Create a kmemleak object for a block allocated with a physical address. */
static void create_object_phys(unsigned long ptr, size_t size,
			       int min_count, gfp_t gfp)
{
	__create_object(ptr, size, min_count, gfp, true);
}

/*
 * Mark the object as not allocated and schedule RCU freeing via put_object().
 */
static void __delete_object(struct kmemleak_object *object)
{
	unsigned long flags;

	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
	WARN_ON(atomic_read(&object->use_count) < 1);

	/*
	 * Locking here also ensures that the corresponding memory block
	 * cannot be freed when it is being scanned.
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags &= ~OBJECT_ALLOCATED;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it.
 */
static void delete_object_full(unsigned long ptr)
{
	struct kmemleak_object *object;

	object = find_and_remove_object(ptr, 0, false);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
			      ptr);
#endif
		return;
	}
	__delete_object(object);
}

/*
 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 * delete it. If the memory block is partially freed, the function may create
 * additional metadata for the remaining parts of the block.
 */
static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
{
	struct kmemleak_object *object;
	unsigned long start, end;

	object = find_and_remove_object(ptr, 1, is_phys);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
			      ptr, size);
#endif
		return;
	}

	/*
	 * Create one or two objects that may result from the memory block
	 * split. Note that partial freeing is only done by free_bootmem() and
	 * this happens before kmemleak_init() is called.
	 */
	start = object->pointer;
	end = object->pointer + object->size;
	if (ptr > start)
		__create_object(start, ptr - start, object->min_count,
				GFP_KERNEL, is_phys);
	if (ptr + size < end)
		__create_object(ptr + size, end - ptr - size, object->min_count,
				GFP_KERNEL, is_phys);

	__delete_object(object);
}

static void __paint_it(struct kmemleak_object *object, int color)
{
	object->min_count = color;
	if (color == KMEMLEAK_BLACK)
		object->flags |= OBJECT_NO_SCAN;
}

static void paint_it(struct kmemleak_object *object, int color)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	__paint_it(object, color);
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

static void paint_ptr(unsigned long ptr, int color, bool is_phys)
{
	struct kmemleak_object *object;

	object = __find_and_get_object(ptr, 0, is_phys);
	if (!object) {
		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
			      ptr,
			      (color == KMEMLEAK_GREY) ? "Grey" :
			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
		return;
	}
	paint_it(object, color);
	put_object(object);
}

/*
 * Mark an object permanently as gray-colored so that it can no longer be
 * reported as a leak. This is used in general to mark a false positive.
 */
static void make_gray_object(unsigned long ptr)
{
	paint_ptr(ptr, KMEMLEAK_GREY, false);
}

/*
 * Mark the object as black-colored so that it is ignored from scans and
 * reporting.
 */
static void make_black_object(unsigned long ptr, bool is_phys)
{
	paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
}

/*
 * Add a scanning area to the object. If at least one such area is added,
 * kmemleak will only scan these ranges rather than the whole memory block.
 */
static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
{
	unsigned long flags;
	struct kmemleak_object *object;
	struct kmemleak_scan_area *area = NULL;
	unsigned long untagged_ptr;
	unsigned long untagged_objp;

	object = find_and_get_object(ptr, 1);
	if (!object) {
		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);

	if (scan_area_cache)
		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));

	raw_spin_lock_irqsave(&object->lock, flags);
	if (!area) {
		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
		/* mark the object for full scan to avoid false positives */
		object->flags |= OBJECT_FULL_SCAN;
		goto out_unlock;
	}
	if (size == SIZE_MAX) {
		size = untagged_objp + object->size - untagged_ptr;
	} else if (untagged_ptr + size > untagged_objp + object->size) {
		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
		dump_object_info(object);
		kmem_cache_free(scan_area_cache, area);
		goto out_unlock;
	}

	INIT_HLIST_NODE(&area->node);
	area->start = ptr;
	area->size = size;

	hlist_add_head(&area->node, &object->area_list);
out_unlock:
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Any surplus references (object already gray) to 'ptr' are passed to
 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 * (see free_thread_stack()).
 */
static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
			      ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->excess_ref = excess_ref;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/*
 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 * pointer. Such an object will not be scanned by kmemleak, but references
 * to it are still searched for.
 */
static void object_no_scan(unsigned long ptr)
{
	unsigned long flags;
	struct kmemleak_object *object;

	object = find_and_get_object(ptr, 0);
	if (!object) {
		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->flags |= OBJECT_NO_SCAN;
	raw_spin_unlock_irqrestore(&object->lock, flags);
	put_object(object);
}

/**
 * kmemleak_alloc - register a newly allocated object
 * @ptr:	pointer to beginning of the object
 * @size:	size of the object
 * @min_count:	minimum number of references to this object. If during memory
 *		scanning a number of references less than @min_count is found,
 *		the object is reported as a memory leak. If @min_count is 0,
 *		the object is never reported as a leak. If @min_count is -1,
 *		the object is ignored (not scanned and not reported as a leak)
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel allocators when a new object
 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 */
void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
			  gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		create_object((unsigned long)ptr, size, min_count, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc);
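
/*
 * Illustrative use (hypothetical driver code; the slab and vmalloc hooks
 * call this automatically): a buffer carved out of a gen_pool can be
 * registered and unregistered by hand:
 *
 *	unsigned long buf = gen_pool_alloc(pool, SZ_4K);
 *
 *	kmemleak_alloc((void *)buf, SZ_4K, 1, GFP_KERNEL);	// min_count 1
 *	...
 *	kmemleak_free((void *)buf);
 *	gen_pool_free(pool, buf, SZ_4K);
 */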

/**
 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 * @size:	size of the object
 * @gfp:	flags used for kmemleak internal memory allocations
 *
 * This function is called from the kernel percpu allocator when a new object
 * (memory block) is allocated (alloc_percpu).
 */
void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
				 gfp_t gfp)
{
	unsigned int cpu;

	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);

	/*
	 * Percpu allocations are only scanned and not reported as leaks
	 * (min_count is set to 0).
	 */
	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
				      size, 0, gfp);
}
EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);

/**
 * kmemleak_vmalloc - register a newly vmalloc'ed object
 * @area:	pointer to vm_struct
 * @size:	size of the object
 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is called from the vmalloc() kernel allocator when a new
 * object (memory block) is allocated.
 */
void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);

	/*
	 * A min_count = 2 is needed because vm_struct contains a reference to
	 * the virtual address of the vmalloc'ed block.
	 */
	if (kmemleak_enabled) {
		create_object((unsigned long)area->addr, size, 2, gfp);
		object_set_excess_ref((unsigned long)area,
				      (unsigned long)area->addr);
	}
}
EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
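
/*
 * The min_count of 2 above covers the two expected references to a
 * vmalloc'ed block: the pointer returned to the caller and area->addr
 * stored inside the vm_struct. The excess_ref link then allows a pointer
 * to the vm_struct itself (e.g. a cached task stack, see
 * free_thread_stack()) to be credited as a reference to the block.
 */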

/**
 * kmemleak_free - unregister a previously registered object
 * @ptr:	pointer to beginning of the object
 *
 * This function is called from the kernel allocators when an object (memory
 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 */
void __ref kmemleak_free(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		delete_object_full((unsigned long)ptr);
}
EXPORT_SYMBOL_GPL(kmemleak_free);

/**
 * kmemleak_free_part - partially unregister a previously registered object
 * @ptr:	pointer to the beginning or inside the object. This also
 *		represents the start of the range to be freed
 * @size:	size to be unregistered
 *
 * This function is called when only a part of a memory block is freed
 * (usually from the bootmem allocator).
 */
void __ref kmemleak_free_part(const void *ptr, size_t size)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		delete_object_part((unsigned long)ptr, size, false);
}
EXPORT_SYMBOL_GPL(kmemleak_free_part);

/**
 * kmemleak_free_percpu - unregister a previously registered __percpu object
 * @ptr:	__percpu pointer to beginning of the object
 *
 * This function is called from the kernel percpu allocator when an object
 * (memory block) is freed (free_percpu).
 */
void __ref kmemleak_free_percpu(const void __percpu *ptr)
{
	unsigned int cpu;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
		for_each_possible_cpu(cpu)
			delete_object_full((unsigned long)per_cpu_ptr(ptr,
								      cpu));
}
EXPORT_SYMBOL_GPL(kmemleak_free_percpu);

/**
 * kmemleak_update_trace - update object allocation stack trace
 * @ptr:	pointer to beginning of the object
 *
 * Override the object allocation stack trace for cases where the actual
 * allocation place is not always useful.
 */
void __ref kmemleak_update_trace(const void *ptr)
{
	struct kmemleak_object *object;
	unsigned long flags;

	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
		return;

	object = find_and_get_object((unsigned long)ptr, 1);
	if (!object) {
#ifdef DEBUG
		kmemleak_warn("Updating stack trace for unknown object at %p\n",
			      ptr);
#endif
		return;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	object->trace_handle = set_track_prepare();
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
}
EXPORT_SYMBOL(kmemleak_update_trace);

/**
 * kmemleak_not_leak - mark an allocated object as false positive
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to no longer
 * be reported as leak and always be scanned.
 */
void __ref kmemleak_not_leak(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_gray_object((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_not_leak);
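
/*
 * Illustrative use (hypothetical): an object whose only reference is handed
 * to firmware or stored where kmemleak cannot see it can be marked right
 * after allocation:
 *
 *	obj = kmalloc(size, GFP_KERNEL);
 *	if (obj)
 *		kmemleak_not_leak(obj);
 */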

/**
 * kmemleak_ignore - ignore an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * Calling this function on an object will cause the memory block to be
 * ignored (not scanned and not reported as a leak). This is usually done when
 * it is known that the corresponding block is not a leak and does not contain
 * any references to other allocated memory blocks.
 */
void __ref kmemleak_ignore(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		make_black_object((unsigned long)ptr, false);
}
EXPORT_SYMBOL(kmemleak_ignore);

/**
 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 * @ptr:	pointer to beginning or inside the object. This also
 *		represents the start of the scan area
 * @size:	size of the scan area
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 *
 * This function is used when it is known that only certain parts of an object
 * contain references to other objects. Kmemleak will only scan these areas,
 * reducing the number of false negatives.
 */
void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
		add_scan_area((unsigned long)ptr, size, gfp);
}
EXPORT_SYMBOL(kmemleak_scan_area);
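
/*
 * Illustrative use (hypothetical structure): if only the 'ptrs' member of a
 * large, mostly pointer-free buffer can hold references, scanning can be
 * narrowed to it:
 *
 *	struct big_buf *b = kmalloc(sizeof(*b), GFP_KERNEL);
 *
 *	if (b)
 *		kmemleak_scan_area(&b->ptrs, sizeof(b->ptrs), GFP_KERNEL);
 */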

/**
 * kmemleak_no_scan - do not scan an allocated object
 * @ptr:	pointer to beginning of the object
 *
 * This function notifies kmemleak not to scan the given memory block. Useful
 * in situations where it is known that the given object does not contain any
 * references to other objects. Kmemleak will not scan such objects, reducing
 * the number of false negatives.
 */
void __ref kmemleak_no_scan(const void *ptr)
{
	pr_debug("%s(0x%p)\n", __func__, ptr);

	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
		object_no_scan((unsigned long)ptr);
}
EXPORT_SYMBOL(kmemleak_no_scan);

/**
 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
 *			 address argument
 * @phys:	physical address of the object
 * @size:	size of the object
 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 */
void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
{
	pr_debug("%s(0x%pa, %zu)\n", __func__, &phys, size);

	if (kmemleak_enabled)
		/*
		 * Create object with OBJECT_PHYS flag and
		 * assume min_count 0.
		 */
		create_object_phys((unsigned long)phys, size, 0, gfp);
}
EXPORT_SYMBOL(kmemleak_alloc_phys);

/**
 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
 *			     physical address argument
 * @phys:	physical address of the beginning or inside an object. This
 *		also represents the start of the range to be freed
 * @size:	size to be unregistered
 */
void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
{
	pr_debug("%s(0x%pa)\n", __func__, &phys);

	if (kmemleak_enabled)
		delete_object_part((unsigned long)phys, size, true);
}
EXPORT_SYMBOL(kmemleak_free_part_phys);

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	pr_debug("%s(0x%pa)\n", __func__, &phys);

	if (kmemleak_enabled)
		make_black_object((unsigned long)phys, true);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return false;

	kasan_disable_current();
	kcsan_disable_current();
	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}
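
/*
 * E.g. for an object with min_count == 1: the first pointer found during a
 * scan moves count from 0 to 1, color_gray() becomes true and the object is
 * queued on the gray_list so that its own memory is scanned in turn.
 */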

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such interrupt condition occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);
	else
		return kthread_should_stop();

	return 0;
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;
	unsigned long untagged_ptr;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		struct kmemleak_object *object;
		unsigned long pointer;
		unsigned long excess_ref;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			continue;

		/*
		 * No need for get_object() here since we hold kmemleak_lock.
		 * object->use_count cannot be dropped to 0 while the object
		 * is still present in object_tree_root and object_list
		 * (with updates protected by kmemleak_lock).
		 */
		object = lookup_object(pointer, 1);
		if (!object)
			continue;
		if (object == scanned)
			/* self referenced, ignore */
			continue;

		/*
		 * Avoid the lockdep recursive warning on object->lock being
		 * previously acquired in scan_object(). These locks are
		 * enclosed by scan_mutex.
		 */
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		/* only pass surplus references (object already gray) */
		if (color_gray(object)) {
			excess_ref = object->excess_ref;
			/* no need for update_refs() if object already gray */
		} else {
			excess_ref = 0;
			update_refs(object);
		}
		raw_spin_unlock(&object->lock);

		if (excess_ref) {
			object = lookup_object(excess_ref, 0);
			if (!object)
				continue;
			if (object == scanned)
				/* circular reference, ignore */
				continue;
			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
			update_refs(object);
			raw_spin_unlock(&object->lock);
		}
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * ensure that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;
	void *obj_ptr;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;

	obj_ptr = object->flags & OBJECT_PHYS ?
		  __va((phys_addr_t)object->pointer) :
		  (void *)object->pointer;

	if (hlist_empty(&object->area_list) ||
	    object->flags & OBJECT_FULL_SCAN) {
		void *start = obj_ptr;
		void *end = obj_ptr + object->size;
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Conditionally call cond_resched() in an object iteration loop while making
 * sure that the given object won't go away while the RCU read lock is
 * dropped, by performing a get_object() if !pinned.
 *
 * Return: false if can't do a cond_resched() due to get_object() failure
 *	   true otherwise
 */
static bool kmemleak_cond_resched(struct kmemleak_object *object, bool pinned)
{
	if (!pinned && !get_object(object))
		return false;

	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
	if (!pinned)
		put_object(object);
	return true;
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;
	int loop_cnt = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		bool obj_pinned = false;

		raw_spin_lock_irq(&object->lock);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif

		/* ignore objects outside lowmem (paint them black) */
		if ((object->flags & OBJECT_PHYS) &&
		    !(object->flags & OBJECT_NO_SCAN)) {
			unsigned long phys = object->pointer;

			if (PHYS_PFN(phys) < min_low_pfn ||
			    PHYS_PFN(phys + object->size) >= max_low_pfn)
				__paint_it(object, KMEMLEAK_BLACK);
		}

		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object)) {
			list_add_tail(&object->gray_list, &gray_list);
			obj_pinned = true;
		}

		raw_spin_unlock_irq(&object->lock);

		/*
		 * Do a cond_resched() every 64k objects to avoid soft lockup.
		 */
		if (!(++loop_cnt & 0xffff) &&
		    !kmemleak_cond_resched(object, obj_pinned))
			loop_cnt--;	/* Try again on next object */
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
			if (!(pfn & 63))
				cond_resched();
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);
			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	loop_cnt = 0;
	list_for_each_entry_rcu(object, &object_list, object_list) {
		/*
		 * Do a cond_resched() every 64k objects to avoid soft lockup.
		 */
		if (!(++loop_cnt & 0xffff) &&
		    !kmemleak_cond_resched(object, false))
			loop_cnt--;	/* Try again on next object */

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	loop_cnt = 0;
	list_for_each_entry_rcu(object, &object_list, object_list) {
		/*
		 * Do a cond_resched() every 64k objects to avoid soft lockup.
		 */
		if (!(++loop_cnt & 0xffff) &&
		    !kmemleak_cond_resched(object, false))
			loop_cnt--;	/* Try again on next object */

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);

		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}
1761
1762/*
1763 * Iterate over the object_list and return the first valid object at or after
1764 * the required position with its use_count incremented. Note that reading the
1765 * file no longer triggers a scan; use the "scan" command instead.
1766 */
1767static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1768{
1769 struct kmemleak_object *object;
1770 loff_t n = *pos;
1771 int err;
1772
1773 err = mutex_lock_interruptible(&scan_mutex);
1774 if (err < 0)
1775 return ERR_PTR(err);
1776
1777 rcu_read_lock();
1778 list_for_each_entry_rcu(object, &object_list, object_list) {
1779 if (n-- > 0)
1780 continue;
1781 if (get_object(object))
1782 goto out;
1783 }
1784 object = NULL;
1785out:
1786 return object;
1787}
1788
1789/*
1790 * Return the next object in the object_list. The function decrements the
1791 * use_count of the previous object and increases that of the next one.
1792 */
1793static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1794{
1795 struct kmemleak_object *prev_obj = v;
1796 struct kmemleak_object *next_obj = NULL;
1797 struct kmemleak_object *obj = prev_obj;
1798
1799 ++(*pos);
1800
1801 list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1802 if (get_object(obj)) {
1803 next_obj = obj;
1804 break;
1805 }
1806 }
1807
1808 put_object(prev_obj);
1809 return next_obj;
1810}
1811
1812/*
1813 * Decrement the use_count of the last object returned, if any.
1814 */
1815static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1816{
1817 if (!IS_ERR(v)) {
1818 /*
1819 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1820 * waiting was interrupted, so only release it if !IS_ERR.
1821 */
1822 rcu_read_unlock();
1823 mutex_unlock(&scan_mutex);
1824 if (v)
1825 put_object(v);
1826 }
1827}
1828
1829/*
1830 * Print the information for an unreferenced object to the seq file.
1831 */
1832static int kmemleak_seq_show(struct seq_file *seq, void *v)
1833{
1834 struct kmemleak_object *object = v;
1835 unsigned long flags;
1836
1837 raw_spin_lock_irqsave(&object->lock, flags);
1838 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1839 print_unreferenced(seq, object);
1840 raw_spin_unlock_irqrestore(&object->lock, flags);
1841 return 0;
1842}
1843
1844static const struct seq_operations kmemleak_seq_ops = {
1845 .start = kmemleak_seq_start,
1846 .next = kmemleak_seq_next,
1847 .stop = kmemleak_seq_stop,
1848 .show = kmemleak_seq_show,
1849};
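
/*
 * Illustrative note: these seq_file callbacks back the debugfs read path,
 * so dumping the current leak report is simply (assuming the usual debugfs
 * mount point):
 *
 *	# cat /sys/kernel/debug/kmemleak
 *
 * seq_read() drives the start -> show -> next -> ... -> stop sequence
 * above, with scan_mutex taken in kmemleak_seq_start() and released in
 * kmemleak_seq_stop().
 */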
1850
1851static int kmemleak_open(struct inode *inode, struct file *file)
1852{
1853 return seq_open(file, &kmemleak_seq_ops);
1854}
1855
1856static int dump_str_object_info(const char *str)
1857{
1858 unsigned long flags;
1859 struct kmemleak_object *object;
1860 unsigned long addr;
1861
1862 if (kstrtoul(str, 0, &addr))
1863 return -EINVAL;
1864 object = find_and_get_object(addr, 0);
1865 if (!object) {
1866 pr_info("Unknown object at 0x%08lx\n", addr);
1867 return -EINVAL;
1868 }
1869
1870 raw_spin_lock_irqsave(&object->lock, flags);
1871 dump_object_info(object);
1872 raw_spin_unlock_irqrestore(&object->lock, flags);
1873
1874 put_object(object);
1875 return 0;
1876}
1877
1878/*
1879 * We use grey instead of black to ensure we can do future scans on the same
1880 * objects. If we did not do future scans these black objects could
1881 * potentially contain references to newly allocated objects in the future and
1882 * we'd end up with false positives.
1883 */
1884static void kmemleak_clear(void)
1885{
1886 struct kmemleak_object *object;
1887
1888 rcu_read_lock();
1889 list_for_each_entry_rcu(object, &object_list, object_list) {
1890 raw_spin_lock_irq(&object->lock);
1891 if ((object->flags & OBJECT_REPORTED) &&
1892 unreferenced_object(object))
1893 __paint_it(object, KMEMLEAK_GREY);
1894 raw_spin_unlock_irq(&object->lock);
1895 }
1896 rcu_read_unlock();
1897
1898 kmemleak_found_leaks = false;
1899}
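
/*
 * Illustrative workflow (shell commands, assuming the usual debugfs mount
 * point): baseline the current report so that only leaks introduced
 * afterwards are shown:
 *
 *	# echo clear > /sys/kernel/debug/kmemleak
 *	# ... exercise the code under test ...
 *	# echo scan > /sys/kernel/debug/kmemleak
 *	# cat /sys/kernel/debug/kmemleak
 */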
1900
1901static void __kmemleak_do_cleanup(void);
1902
1903/*
1904 * File write operation to configure kmemleak at run-time. The following
1905 * commands can be written to the /sys/kernel/debug/kmemleak file:
1906 * off - disable kmemleak (irreversible)
1907 * stack=on - enable the task stacks scanning
1908 * stack=off - disable the tasks stacks scanning
1909 * scan=on - start the automatic memory scanning thread
1910 * scan=off - stop the automatic memory scanning thread
1911 * scan=... - set the automatic memory scanning period in seconds (0 to
1912 * disable it)
1913 * scan - trigger a memory scan
1914 * clear - mark all currently reported unreferenced kmemleak objects as
1915 * grey so that they are no longer printed, or free all kmemleak
1916 * objects if kmemleak has been disabled.
1917 * dump=... - dump information about the object found at the given address
1918 */
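/*
 * For example (illustrative; the dump address is hypothetical and should be
 * taken from a previous report):
 *
 *	# echo scan=600 > /sys/kernel/debug/kmemleak
 *	# echo stack=off > /sys/kernel/debug/kmemleak
 *	# echo dump=0xffff888012345678 > /sys/kernel/debug/kmemleak
 */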
1919static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1920 size_t size, loff_t *ppos)
1921{
1922 char buf[64];
1923 int buf_size;
1924 int ret;
1925
1926 buf_size = min(size, (sizeof(buf) - 1));
1927 if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1928 return -EFAULT;
1929 buf[buf_size] = 0;
1930
1931 ret = mutex_lock_interruptible(&scan_mutex);
1932 if (ret < 0)
1933 return ret;
1934
1935 if (strncmp(buf, "clear", 5) == 0) {
1936 if (kmemleak_enabled)
1937 kmemleak_clear();
1938 else
1939 __kmemleak_do_cleanup();
1940 goto out;
1941 }
1942
1943 if (!kmemleak_enabled) {
1944 ret = -EPERM;
1945 goto out;
1946 }
1947
1948 if (strncmp(buf, "off", 3) == 0)
1949 kmemleak_disable();
1950 else if (strncmp(buf, "stack=on", 8) == 0)
1951 kmemleak_stack_scan = 1;
1952 else if (strncmp(buf, "stack=off", 9) == 0)
1953 kmemleak_stack_scan = 0;
1954 else if (strncmp(buf, "scan=on", 7) == 0)
1955 start_scan_thread();
1956 else if (strncmp(buf, "scan=off", 8) == 0)
1957 stop_scan_thread();
1958 else if (strncmp(buf, "scan=", 5) == 0) {
1959 unsigned secs;
1960 unsigned long msecs;
1961
1962 ret = kstrtouint(buf + 5, 0, &secs);
1963 if (ret < 0)
1964 goto out;
1965
1966 msecs = secs * MSEC_PER_SEC;
1967 if (msecs > UINT_MAX)
1968 msecs = UINT_MAX;
1969
1970 stop_scan_thread();
1971 if (msecs) {
1972 WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
1973 start_scan_thread();
1974 }
1975 } else if (strncmp(buf, "scan", 4) == 0)
1976 kmemleak_scan();
1977 else if (strncmp(buf, "dump=", 5) == 0)
1978 ret = dump_str_object_info(buf + 5);
1979 else
1980 ret = -EINVAL;
1981
1982out:
1983 mutex_unlock(&scan_mutex);
1984 if (ret < 0)
1985 return ret;
1986
1987 /* ignore the rest of the buffer, only one command at a time */
1988 *ppos += size;
1989 return size;
1990}
1991
1992static const struct file_operations kmemleak_fops = {
1993 .owner = THIS_MODULE,
1994 .open = kmemleak_open,
1995 .read = seq_read,
1996 .write = kmemleak_write,
1997 .llseek = seq_lseek,
1998 .release = seq_release,
1999};
2000
2001static void __kmemleak_do_cleanup(void)
2002{
2003 struct kmemleak_object *object, *tmp;
2004
2005 /*
2006 * Kmemleak has already been disabled, no need for RCU list traversal
2007 * or kmemleak_lock held.
2008 */
2009 list_for_each_entry_safe(object, tmp, &object_list, object_list) {
2010 __remove_object(object);
2011 __delete_object(object);
2012 }
2013}
2014
2015/*
2016 * Stop the memory scanning thread and free the kmemleak internal objects if
2017 * no memory leaks were found (otherwise, kmemleak may still have some useful
2018 * information on memory leaks).
2019 */
2020static void kmemleak_do_cleanup(struct work_struct *work)
2021{
2022 stop_scan_thread();
2023
2024 mutex_lock(&scan_mutex);
2025 /*
2026 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
2027 * longer track object freeing. Ordering of the scan thread stopping and
2028 * the memory accesses below is guaranteed by the kthread_stop()
2029 * function.
2030 */
2031 kmemleak_free_enabled = 0;
2032 mutex_unlock(&scan_mutex);
2033
2034 if (!kmemleak_found_leaks)
2035 __kmemleak_do_cleanup();
2036 else
2037 pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
2038}
2039
2040static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
2041
2042/*
2043 * Disable kmemleak. No memory allocation/freeing will be traced once this
2044 * function is called. Disabling kmemleak is an irreversible operation.
2045 */
2046static void kmemleak_disable(void)
2047{
2048 /* atomically check whether it was already invoked */
2049 if (cmpxchg(&kmemleak_error, 0, 1))
2050 return;
2051
2052 /* stop any memory operation tracing */
2053 kmemleak_enabled = 0;
2054
2055 /* check whether it is too early for a kernel thread */
2056 if (kmemleak_initialized)
2057 schedule_work(&cleanup_work);
2058 else
2059 kmemleak_free_enabled = 0;
2060
2061 pr_info("Kernel memory leak detector disabled\n");
2062}
2063
2064/*
2065 * Allow boot-time kmemleak disabling (enabled by default).
2066 */
2067static int __init kmemleak_boot_config(char *str)
2068{
2069 if (!str)
2070 return -EINVAL;
2071 if (strcmp(str, "off") == 0)
2072 kmemleak_disable();
2073 else if (strcmp(str, "on") == 0) {
2074 kmemleak_skip_disable = 1;
2075 stack_depot_want_early_init();
2076 }
2077 else
2078 return -EINVAL;
2079 return 0;
2080}
2081early_param("kmemleak", kmemleak_boot_config);
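
/*
 * For example, with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y the detector stays
 * off unless the kernel is booted with (illustrative command line fragment):
 *
 *	kmemleak=on
 *
 * while "kmemleak=off" irreversibly disables it on kernels where it
 * defaults to on.
 */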
2082
2083/*
2084 * Kmemleak initialization.
2085 */
2086void __init kmemleak_init(void)
2087{
2088#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2089 if (!kmemleak_skip_disable) {
2090 kmemleak_disable();
2091 return;
2092 }
2093#endif
2094
2095 if (kmemleak_error)
2096 return;
2097
2098 jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2099 jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
2100
2101 object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2102 scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2103
2104 /* register the data/bss sections */
2105 create_object((unsigned long)_sdata, _edata - _sdata,
2106 KMEMLEAK_GREY, GFP_ATOMIC);
2107 create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
2108 KMEMLEAK_GREY, GFP_ATOMIC);
2109 /* only register .data..ro_after_init if not within .data */
2110 if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
2111 create_object((unsigned long)__start_ro_after_init,
2112 __end_ro_after_init - __start_ro_after_init,
2113 KMEMLEAK_GREY, GFP_ATOMIC);
2114}
2115
2116/*
2117 * Late initialization function.
2118 */
2119static int __init kmemleak_late_init(void)
2120{
2121 kmemleak_initialized = 1;
2122
2123 debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
2124
2125 if (kmemleak_error) {
2126 /*
2127 * Some error occurred and kmemleak was disabled. There is a
2128 * small chance that kmemleak_disable() was called immediately
2129 * after setting kmemleak_initialized and we may end up with
2130 * two clean-up threads but serialized by scan_mutex.
2131 */
2132 schedule_work(&cleanup_work);
2133 return -ENOMEM;
2134 }
2135
2136 if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2137 mutex_lock(&scan_mutex);
2138 start_scan_thread();
2139 mutex_unlock(&scan_mutex);
2140 }
2141
2142 pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
2143 mem_pool_free_count);
2144
2145 return 0;
2146}
2147late_initcall(kmemleak_late_init);
75#include <linux/cpumask.h>
76#include <linux/spinlock.h>
77#include <linux/module.h>
78#include <linux/mutex.h>
79#include <linux/rcupdate.h>
80#include <linux/stacktrace.h>
81#include <linux/stackdepot.h>
82#include <linux/cache.h>
83#include <linux/percpu.h>
84#include <linux/memblock.h>
85#include <linux/pfn.h>
86#include <linux/mmzone.h>
87#include <linux/slab.h>
88#include <linux/thread_info.h>
89#include <linux/err.h>
90#include <linux/uaccess.h>
91#include <linux/string.h>
92#include <linux/nodemask.h>
93#include <linux/mm.h>
94#include <linux/workqueue.h>
95#include <linux/crc32.h>
96
97#include <asm/sections.h>
98#include <asm/processor.h>
99#include <linux/atomic.h>
100
101#include <linux/kasan.h>
102#include <linux/kfence.h>
103#include <linux/kmemleak.h>
104#include <linux/memory_hotplug.h>
105
106/*
107 * Kmemleak configuration and common defines.
108 */
109#define MAX_TRACE 16 /* stack trace length */
110#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
111#define SECS_FIRST_SCAN 60 /* delay before the first scan */
112#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
113#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
114
115#define BYTES_PER_POINTER sizeof(void *)
116
117/* scanning area inside a memory block */
118struct kmemleak_scan_area {
119 struct hlist_node node;
120 unsigned long start;
121 size_t size;
122};
123
124#define KMEMLEAK_GREY 0
125#define KMEMLEAK_BLACK -1
126
127/*
128 * Structure holding the metadata for each allocated memory block.
129 * Modifications to such objects should be made while holding the
130 * object->lock. Insertions or deletions from object_list, gray_list or
131 * rb_node are already protected by the corresponding locks or mutex (see
132 * the notes on locking above). These objects are reference-counted
133 * (use_count) and freed using the RCU mechanism.
134 */
135struct kmemleak_object {
136 raw_spinlock_t lock;
137 unsigned int flags; /* object status flags */
138 struct list_head object_list;
139 struct list_head gray_list;
140 struct rb_node rb_node;
141 struct rcu_head rcu; /* object_list lockless traversal */
142 /* object usage count; object freed when use_count == 0 */
143 atomic_t use_count;
144 unsigned int del_state; /* deletion state */
145 unsigned long pointer;
146 size_t size;
147 /* pass surplus references to this pointer */
148 unsigned long excess_ref;
149 /* minimum number of pointers found before it is considered a leak */
150 int min_count;
151 /* the total number of pointers found pointing to this object */
152 int count;
153 /* checksum for detecting modified objects */
154 u32 checksum;
155 depot_stack_handle_t trace_handle;
156 /* memory ranges to be scanned inside an object (empty for all) */
157 struct hlist_head area_list;
158 unsigned long jiffies; /* creation timestamp */
159 pid_t pid; /* pid of the current task */
160 char comm[TASK_COMM_LEN]; /* executable name */
161};
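
/*
 * Editorial sketch of the typical object lifecycle, based on the functions
 * below: create_object() links the metadata into object_list and one of the
 * object trees with use_count == 1; memory scans update count and hence the
 * object's color; when the tracked block is freed, delete_object_full()
 * clears OBJECT_ALLOCATED and drops the initial reference, and put_object()
 * frees the metadata via RCU once use_count reaches 0.
 */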
162
163/* flag representing the memory block allocation status */
164#define OBJECT_ALLOCATED (1 << 0)
165/* flag set after the first reporting of an unreferenced object */
166#define OBJECT_REPORTED (1 << 1)
167/* flag set to not scan the object */
168#define OBJECT_NO_SCAN (1 << 2)
169/* flag set to fully scan the object when scan_area allocation failed */
170#define OBJECT_FULL_SCAN (1 << 3)
171/* flag set for object allocated with physical address */
172#define OBJECT_PHYS (1 << 4)
173/* flag set for per-CPU pointers */
174#define OBJECT_PERCPU (1 << 5)
175
176/* set when __remove_object() called */
177#define DELSTATE_REMOVED (1 << 0)
178/* set to temporarily prevent deletion from object_list */
179#define DELSTATE_NO_DELETE (1 << 1)
180
181#define HEX_PREFIX " "
182/* number of bytes to print per line; must be 16 or 32 */
183#define HEX_ROW_SIZE 16
184/* number of bytes to print at a time (1, 2, 4, 8) */
185#define HEX_GROUP_SIZE 1
186/* include ASCII after the hex output */
187#define HEX_ASCII 1
188/* max number of lines to be printed */
189#define HEX_MAX_LINES 2
190
191/* the list of all allocated objects */
192static LIST_HEAD(object_list);
193/* the list of gray-colored objects (see color_gray comment below) */
194static LIST_HEAD(gray_list);
195/* memory pool allocation */
196static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
197static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
198static LIST_HEAD(mem_pool_free_list);
199/* search tree for object boundaries */
200static struct rb_root object_tree_root = RB_ROOT;
201/* search tree for object (with OBJECT_PHYS flag) boundaries */
202static struct rb_root object_phys_tree_root = RB_ROOT;
203/* search tree for object (with OBJECT_PERCPU flag) boundaries */
204static struct rb_root object_percpu_tree_root = RB_ROOT;
205/* protecting the access to object_list, del_state and the three object trees */
206static DEFINE_RAW_SPINLOCK(kmemleak_lock);
207
208/* allocation caches for kmemleak internal data */
209static struct kmem_cache *object_cache;
210static struct kmem_cache *scan_area_cache;
211
212/* set if tracing memory operations is enabled */
213static int kmemleak_enabled = 1;
214/* same as above but only for the kmemleak_free() callback */
215static int kmemleak_free_enabled = 1;
216/* set in the late_initcall if there were no errors */
217static int kmemleak_late_initialized;
218/* set if a kmemleak warning was issued */
219static int kmemleak_warning;
220/* set if a fatal kmemleak error has occurred */
221static int kmemleak_error;
222
223/* minimum and maximum address that may be valid pointers */
224static unsigned long min_addr = ULONG_MAX;
225static unsigned long max_addr;
226
227/* minimum and maximum address that may be valid per-CPU pointers */
228static unsigned long min_percpu_addr = ULONG_MAX;
229static unsigned long max_percpu_addr;
230
231static struct task_struct *scan_thread;
232/* used to avoid reporting of recently allocated objects */
233static unsigned long jiffies_min_age;
234static unsigned long jiffies_last_scan;
235/* delay between automatic memory scannings */
236static unsigned long jiffies_scan_wait;
237/* enables or disables the task stacks scanning */
238static int kmemleak_stack_scan = 1;
239/* protects the memory scanning, parameters and debug/kmemleak file access */
240static DEFINE_MUTEX(scan_mutex);
242/* setting kmemleak=on will set this var, skipping the disable */
242static int kmemleak_skip_disable;
243/* If there are leaks that can be reported */
244static bool kmemleak_found_leaks;
245
246static bool kmemleak_verbose;
247module_param_named(verbose, kmemleak_verbose, bool, 0600);
248
249static void kmemleak_disable(void);
250
251/*
252 * Print a warning and dump the stack trace.
253 */
254#define kmemleak_warn(x...) do { \
255 pr_warn(x); \
256 dump_stack(); \
257 kmemleak_warning = 1; \
258} while (0)
259
260/*
261 * Macro invoked when a serious kmemleak condition has occurred and cannot be
262 * recovered from. Kmemleak will be disabled and further allocation/freeing
263 * tracing is no longer available.
264 */
265#define kmemleak_stop(x...) do { \
266 kmemleak_warn(x); \
267 kmemleak_disable(); \
268} while (0)
269
270#define warn_or_seq_printf(seq, fmt, ...) do { \
271 if (seq) \
272 seq_printf(seq, fmt, ##__VA_ARGS__); \
273 else \
274 pr_warn(fmt, ##__VA_ARGS__); \
275} while (0)
276
277static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
278 int rowsize, int groupsize, const void *buf,
279 size_t len, bool ascii)
280{
281 if (seq)
282 seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
283 buf, len, ascii);
284 else
285 print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
286 rowsize, groupsize, buf, len, ascii);
287}
288
289/*
290 * Printing of the object's hex dump to the seq file. The number of lines to be
291 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
292 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
293 * with the object->lock held.
294 */
295static void hex_dump_object(struct seq_file *seq,
296 struct kmemleak_object *object)
297{
298 const u8 *ptr = (const u8 *)object->pointer;
299 size_t len;
300
301 if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
302 return;
303
304 if (object->flags & OBJECT_PERCPU)
305 ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);
306
307 /* limit the number of lines to HEX_MAX_LINES */
308 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
309
310 if (object->flags & OBJECT_PERCPU)
311 warn_or_seq_printf(seq, " hex dump (first %zu bytes on cpu %d):\n",
312 len, raw_smp_processor_id());
313 else
314 warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
315 kasan_disable_current();
316 warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
317 HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
318 kasan_enable_current();
319}
320
321/*
322 * Object colors, encoded with count and min_count:
323 * - white - orphan object, not enough references to it (count < min_count)
324 * - gray - not orphan, not marked as false positive (min_count == 0) or
325 * sufficient references to it (count >= min_count)
326 * - black - ignore, it doesn't contain references (e.g. text section)
327 * (min_count == -1). No function defined for this color.
328 * Newly created objects are white (object->count starts at 0, below any
329 * positive min_count) until a memory scan updates their count.
330 */
331static bool color_white(const struct kmemleak_object *object)
332{
333 return object->count != KMEMLEAK_BLACK &&
334 object->count < object->min_count;
335}
336
337static bool color_gray(const struct kmemleak_object *object)
338{
339 return object->min_count != KMEMLEAK_BLACK &&
340 object->count >= object->min_count;
341}
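
/*
 * Worked example (illustrative): a slab object is typically registered with
 * min_count == 1. If a scan finds no pointer to it, count stays 0 and
 * color_white() is true, making it a leak candidate. A single found pointer
 * gives count >= min_count and color_gray() is true. kmemleak_ignore() sets
 * min_count to KMEMLEAK_BLACK (-1), so neither predicate fires and the
 * object is neither scanned nor reported.
 */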
342
343/*
344 * Objects are considered unreferenced only if their color is white, they have
345 * not been deleted and have a minimum age to avoid false positives caused by
346 * pointers temporarily stored in CPU registers.
347 */
348static bool unreferenced_object(struct kmemleak_object *object)
349{
350 return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
351 time_before_eq(object->jiffies + jiffies_min_age,
352 jiffies_last_scan);
353}
354
355/*
356 * Printing of an unreferenced object's information to the seq file. The
357 * print_unreferenced function must be called with the object->lock held.
358 */
359static void print_unreferenced(struct seq_file *seq,
360 struct kmemleak_object *object)
361{
362 int i;
363 unsigned long *entries;
364 unsigned int nr_entries;
365
366 nr_entries = stack_depot_fetch(object->trace_handle, &entries);
367 warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
368 object->pointer, object->size);
369 warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
370 object->comm, object->pid, object->jiffies);
371 hex_dump_object(seq, object);
372 warn_or_seq_printf(seq, " backtrace (crc %x):\n", object->checksum);
373
374 for (i = 0; i < nr_entries; i++) {
375 void *ptr = (void *)entries[i];
376 warn_or_seq_printf(seq, " %pS\n", ptr);
377 }
378}
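
/*
 * The resulting report looks roughly like this (all values and backtrace
 * frames hypothetical):
 *
 *	unreferenced object 0xffff888010203040 (size 64):
 *	  comm "modprobe", pid 123, jiffies 4294901234
 *	  hex dump (first 32 bytes):
 *	    ...
 *	  backtrace (crc 1a2b3c4d):
 *	    some_alloc_function+0x40/0x120
 *	    some_caller+0x24/0x80
 */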
379
380/*
381 * Print the kmemleak_object information. This function is used mainly for
382 * debugging special cases during kmemleak operations. It must be called with
383 * the object->lock held.
384 */
385static void dump_object_info(struct kmemleak_object *object)
386{
387 pr_notice("Object 0x%08lx (size %zu):\n",
388 object->pointer, object->size);
389 pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
390 object->comm, object->pid, object->jiffies);
391 pr_notice(" min_count = %d\n", object->min_count);
392 pr_notice(" count = %d\n", object->count);
393 pr_notice(" flags = 0x%x\n", object->flags);
394 pr_notice(" checksum = %u\n", object->checksum);
395 pr_notice(" backtrace:\n");
396 if (object->trace_handle)
397 stack_depot_print(object->trace_handle);
398}
399
400static struct rb_root *object_tree(unsigned long objflags)
401{
402 if (objflags & OBJECT_PHYS)
403 return &object_phys_tree_root;
404 if (objflags & OBJECT_PERCPU)
405 return &object_percpu_tree_root;
406 return &object_tree_root;
407}
408
409/*
410 * Look up the metadata (kmemleak_object) of a memory block in the object search
411 * tree based on a pointer value. If alias is 0, only values pointing to the
412 * beginning of the memory block are allowed. The kmemleak_lock must be held
413 * when calling this function.
414 */
415static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
416 unsigned int objflags)
417{
418 struct rb_node *rb = object_tree(objflags)->rb_node;
419 unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
420
421 while (rb) {
422 struct kmemleak_object *object;
423 unsigned long untagged_objp;
424
425 object = rb_entry(rb, struct kmemleak_object, rb_node);
426 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
427
428 if (untagged_ptr < untagged_objp)
429 rb = object->rb_node.rb_left;
430 else if (untagged_objp + object->size <= untagged_ptr)
431 rb = object->rb_node.rb_right;
432 else if (untagged_objp == untagged_ptr || alias)
433 return object;
434 else {
435 kmemleak_warn("Found object by alias at 0x%08lx\n",
436 ptr);
437 dump_object_info(object);
438 break;
439 }
440 }
441 return NULL;
442}
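
/*
 * Illustrative alias semantics: for an object covering [ptr, ptr + size),
 * __lookup_object(ptr, 0, 0) succeeds only for the exact start address,
 * while __lookup_object(ptr + 8, 1, 0) also resolves a pointer into the
 * middle of the block, as the scanner needs. With alias == 0, an interior
 * pointer hits the "Found object by alias" warning above instead.
 */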
443
444/* Look up a kmemleak object which was allocated with a virtual address. */
445static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
446{
447 return __lookup_object(ptr, alias, 0);
448}
449
450/*
451 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
452 * that once an object's use_count has reached 0, the RCU freeing has already
453 * been scheduled and the object should no longer be used. This function must be
454 * called under the protection of rcu_read_lock().
455 */
456static int get_object(struct kmemleak_object *object)
457{
458 return atomic_inc_not_zero(&object->use_count);
459}
460
461/*
462 * Memory pool allocation and freeing. kmemleak_lock must not be held.
463 */
464static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
465{
466 unsigned long flags;
467 struct kmemleak_object *object;
468
469 /* try the slab allocator first */
470 if (object_cache) {
471 object = kmem_cache_alloc_noprof(object_cache,
472 gfp_nested_mask(gfp));
473 if (object)
474 return object;
475 }
476
477 /* slab allocation failed, try the memory pool */
478 raw_spin_lock_irqsave(&kmemleak_lock, flags);
479 object = list_first_entry_or_null(&mem_pool_free_list,
480 typeof(*object), object_list);
481 if (object)
482 list_del(&object->object_list);
483 else if (mem_pool_free_count)
484 object = &mem_pool[--mem_pool_free_count];
485 else
486 pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
487 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
488
489 return object;
490}
491
492/*
493 * Return the object to either the slab allocator or the memory pool.
494 */
495static void mem_pool_free(struct kmemleak_object *object)
496{
497 unsigned long flags;
498
499 if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
500 kmem_cache_free(object_cache, object);
501 return;
502 }
503
504 /* add the object to the memory pool free list */
505 raw_spin_lock_irqsave(&kmemleak_lock, flags);
506 list_add(&object->object_list, &mem_pool_free_list);
507 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
508}
509
510/*
511 * RCU callback to free a kmemleak_object.
512 */
513static void free_object_rcu(struct rcu_head *rcu)
514{
515 struct hlist_node *tmp;
516 struct kmemleak_scan_area *area;
517 struct kmemleak_object *object =
518 container_of(rcu, struct kmemleak_object, rcu);
519
520 /*
521 * Once use_count is 0 (guaranteed by put_object), there is no other
522 * code accessing this object, hence no need for locking.
523 */
524 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
525 hlist_del(&area->node);
526 kmem_cache_free(scan_area_cache, area);
527 }
528 mem_pool_free(object);
529}
530
531/*
532 * Decrement the object use_count. Once the count is 0, free the object using
533 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
534 * delete_object() path, the delayed RCU freeing ensures that there is no
535 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
536 * is also possible.
537 */
538static void put_object(struct kmemleak_object *object)
539{
540 if (!atomic_dec_and_test(&object->use_count))
541 return;
542
543 /* should only get here after delete_object was called */
544 WARN_ON(object->flags & OBJECT_ALLOCATED);
545
546 /*
547 * It may be too early for the RCU callbacks, however, there is no
548 * concurrent object_list traversal when !object_cache and all objects
549 * came from the memory pool. Free the object directly.
550 */
551 if (object_cache)
552 call_rcu(&object->rcu, free_object_rcu);
553 else
554 free_object_rcu(&object->rcu);
555}
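
/*
 * Typical reference-counting pattern (editorial sketch mirroring
 * __find_and_get_object() below):
 *
 *	rcu_read_lock();
 *	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 *	object = __lookup_object(ptr, 0, 0);
 *	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 *	if (object && !get_object(object))
 *		object = NULL;	(already being freed)
 *	rcu_read_unlock();
 *	if (object) {
 *		(... use *object ...)
 *		put_object(object);
 *	}
 */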
556
557/*
558 * Look up an object in the object search tree and increase its use_count.
559 */
560static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
561 unsigned int objflags)
562{
563 unsigned long flags;
564 struct kmemleak_object *object;
565
566 rcu_read_lock();
567 raw_spin_lock_irqsave(&kmemleak_lock, flags);
568 object = __lookup_object(ptr, alias, objflags);
569 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
570
571 /* check whether the object is still available */
572 if (object && !get_object(object))
573 object = NULL;
574 rcu_read_unlock();
575
576 return object;
577}
578
579/* Look up and get an object which was allocated with a virtual address. */
580static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
581{
582 return __find_and_get_object(ptr, alias, 0);
583}
584
585/*
586 * Remove an object from its object tree and object_list. Must be called with
587 * the kmemleak_lock held _if_ kmemleak is still enabled.
588 */
589static void __remove_object(struct kmemleak_object *object)
590{
591 rb_erase(&object->rb_node, object_tree(object->flags));
592 if (!(object->del_state & DELSTATE_NO_DELETE))
593 list_del_rcu(&object->object_list);
594 object->del_state |= DELSTATE_REMOVED;
595}
596
597static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
598 int alias,
599 unsigned int objflags)
600{
601 struct kmemleak_object *object;
602
603 object = __lookup_object(ptr, alias, objflags);
604 if (object)
605 __remove_object(object);
606
607 return object;
608}
609
610/*
611 * Look up an object in the object search tree and remove it from both object
612 * tree root and object_list. The returned object's use_count should be at
613 * least 1, as initially set by create_object().
614 */
615static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
616 unsigned int objflags)
617{
618 unsigned long flags;
619 struct kmemleak_object *object;
620
621 raw_spin_lock_irqsave(&kmemleak_lock, flags);
622 object = __find_and_remove_object(ptr, alias, objflags);
623 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
624
625 return object;
626}
627
628static noinline depot_stack_handle_t set_track_prepare(void)
629{
630 depot_stack_handle_t trace_handle;
631 unsigned long entries[MAX_TRACE];
632 unsigned int nr_entries;
633
634 /*
635 * Use object_cache to determine whether kmemleak_init() has
636 * been invoked. stack_depot_early_init() is called before
637 * kmemleak_init() in mm_core_init().
638 */
639 if (!object_cache)
640 return 0;
641 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
642 trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
643
644 return trace_handle;
645}
646
647static struct kmemleak_object *__alloc_object(gfp_t gfp)
648{
649 struct kmemleak_object *object;
650
651 object = mem_pool_alloc(gfp);
652 if (!object) {
653 pr_warn("Cannot allocate a kmemleak_object structure\n");
654 kmemleak_disable();
655 return NULL;
656 }
657
658 INIT_LIST_HEAD(&object->object_list);
659 INIT_LIST_HEAD(&object->gray_list);
660 INIT_HLIST_HEAD(&object->area_list);
661 raw_spin_lock_init(&object->lock);
662 atomic_set(&object->use_count, 1);
663 object->excess_ref = 0;
664 object->count = 0; /* white color initially */
665 object->checksum = 0;
666 object->del_state = 0;
667
668 /* task information */
669 if (in_hardirq()) {
670 object->pid = 0;
671 strscpy(object->comm, "hardirq");
672 } else if (in_serving_softirq()) {
673 object->pid = 0;
674 strscpy(object->comm, "softirq");
675 } else {
676 object->pid = current->pid;
677 /*
678 * There is a small chance of a race with set_task_comm(),
679 * however using get_task_comm() here may cause locking
680 * dependency issues with current->alloc_lock. In the worst
681 * case, the command line is not correct.
682 */
683 strscpy(object->comm, current->comm);
684 }
685
686 /* kernel backtrace */
687 object->trace_handle = set_track_prepare();
688
689 return object;
690}
691
692static int __link_object(struct kmemleak_object *object, unsigned long ptr,
693 size_t size, int min_count, unsigned int objflags)
694{
696 struct kmemleak_object *parent;
697 struct rb_node **link, *rb_parent;
698 unsigned long untagged_ptr;
699 unsigned long untagged_objp;
700
701 object->flags = OBJECT_ALLOCATED | objflags;
702 object->pointer = ptr;
703 object->size = kfence_ksize((void *)ptr) ?: size;
704 object->min_count = min_count;
705 object->jiffies = jiffies;
706
707 untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
708 /*
709 * Only update min_addr and max_addr for objects storing a virtual
710 * address, and update min_percpu_addr/max_percpu_addr for per-CPU
711 * objects.
712 */
713 if (objflags & OBJECT_PERCPU) {
714 min_percpu_addr = min(min_percpu_addr, untagged_ptr);
715 max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
716 } else if (!(objflags & OBJECT_PHYS)) {
717 min_addr = min(min_addr, untagged_ptr);
718 max_addr = max(max_addr, untagged_ptr + size);
719 }
720 link = &object_tree(objflags)->rb_node;
721 rb_parent = NULL;
722 while (*link) {
723 rb_parent = *link;
724 parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
725 untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
726 if (untagged_ptr + size <= untagged_objp)
727 link = &parent->rb_node.rb_left;
728 else if (untagged_objp + parent->size <= untagged_ptr)
729 link = &parent->rb_node.rb_right;
730 else {
731 kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
732 ptr);
733 /*
734 * No need for parent->lock here since "parent" cannot
735 * be freed while the kmemleak_lock is held.
736 */
737 dump_object_info(parent);
738 return -EEXIST;
739 }
740 }
741 rb_link_node(&object->rb_node, rb_parent, link);
742 rb_insert_color(&object->rb_node, object_tree(objflags));
743 list_add_tail_rcu(&object->object_list, &object_list);
744
745 return 0;
746}
747
748/*
749 * Create the metadata (struct kmemleak_object) corresponding to an allocated
750 * memory block and add it to the object_list and object tree.
751 */
752static void __create_object(unsigned long ptr, size_t size,
753 int min_count, gfp_t gfp, unsigned int objflags)
754{
755 struct kmemleak_object *object;
756 unsigned long flags;
757 int ret;
758
759 object = __alloc_object(gfp);
760 if (!object)
761 return;
762
763 raw_spin_lock_irqsave(&kmemleak_lock, flags);
764 ret = __link_object(object, ptr, size, min_count, objflags);
765 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
766 if (ret)
767 mem_pool_free(object);
768}
769
770/* Create a kmemleak object for a block allocated with a virtual address. */
771static void create_object(unsigned long ptr, size_t size,
772 int min_count, gfp_t gfp)
773{
774 __create_object(ptr, size, min_count, gfp, 0);
775}
776
777/* Create a kmemleak object for a block allocated with a physical address. */
778static void create_object_phys(unsigned long ptr, size_t size,
779 int min_count, gfp_t gfp)
780{
781 __create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
782}
783
784/* Create kmemleak object corresponding to a per-CPU allocation. */
785static void create_object_percpu(unsigned long ptr, size_t size,
786 int min_count, gfp_t gfp)
787{
788 __create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
789}
790
791/*
792 * Mark the object as not allocated and schedule RCU freeing via put_object().
793 */
794static void __delete_object(struct kmemleak_object *object)
795{
796 unsigned long flags;
797
798 WARN_ON(!(object->flags & OBJECT_ALLOCATED));
799 WARN_ON(atomic_read(&object->use_count) < 1);
800
801 /*
802 * Locking here also ensures that the corresponding memory block
803 * cannot be freed when it is being scanned.
804 */
805 raw_spin_lock_irqsave(&object->lock, flags);
806 object->flags &= ~OBJECT_ALLOCATED;
807 raw_spin_unlock_irqrestore(&object->lock, flags);
808 put_object(object);
809}
810
811/*
812 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
813 * delete it.
814 */
815static void delete_object_full(unsigned long ptr, unsigned int objflags)
816{
817 struct kmemleak_object *object;
818
819 object = find_and_remove_object(ptr, 0, objflags);
820 if (!object) {
821#ifdef DEBUG
822 kmemleak_warn("Freeing unknown object at 0x%08lx\n",
823 ptr);
824#endif
825 return;
826 }
827 __delete_object(object);
828}
829
830/*
831 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
832 * delete it. If the memory block is partially freed, the function may create
833 * additional metadata for the remaining parts of the block.
834 */
835static void delete_object_part(unsigned long ptr, size_t size,
836 unsigned int objflags)
837{
838 struct kmemleak_object *object, *object_l, *object_r;
839 unsigned long start, end, flags;
840
841 object_l = __alloc_object(GFP_KERNEL);
842 if (!object_l)
843 return;
844
845 object_r = __alloc_object(GFP_KERNEL);
846 if (!object_r)
847 goto out;
848
849 raw_spin_lock_irqsave(&kmemleak_lock, flags);
850 object = __find_and_remove_object(ptr, 1, objflags);
851 if (!object) {
852#ifdef DEBUG
853 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
854 ptr, size);
855#endif
856 goto unlock;
857 }
858
859 /*
860 * Create one or two objects that may result from the memory block
861 * split. Note that partial freeing is only done by free_bootmem() and
862 * this happens before kmemleak_init() is called.
863 */
864 start = object->pointer;
865 end = object->pointer + object->size;
866 if ((ptr > start) &&
867 !__link_object(object_l, start, ptr - start,
868 object->min_count, objflags))
869 object_l = NULL;
870 if ((ptr + size < end) &&
871 !__link_object(object_r, ptr + size, end - ptr - size,
872 object->min_count, objflags))
873 object_r = NULL;
874
875unlock:
876 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
877 if (object)
878 __delete_object(object);
879
880out:
881 if (object_l)
882 mem_pool_free(object_l);
883 if (object_r)
884 mem_pool_free(object_r);
885}
886
887static void __paint_it(struct kmemleak_object *object, int color)
888{
889 object->min_count = color;
890 if (color == KMEMLEAK_BLACK)
891 object->flags |= OBJECT_NO_SCAN;
892}
893
894static void paint_it(struct kmemleak_object *object, int color)
895{
896 unsigned long flags;
897
898 raw_spin_lock_irqsave(&object->lock, flags);
899 __paint_it(object, color);
900 raw_spin_unlock_irqrestore(&object->lock, flags);
901}
902
903static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
904{
905 struct kmemleak_object *object;
906
907 object = __find_and_get_object(ptr, 0, objflags);
908 if (!object) {
909 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
910 ptr,
911 (color == KMEMLEAK_GREY) ? "Grey" :
912 (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
913 return;
914 }
915 paint_it(object, color);
916 put_object(object);
917}
918
919/*
920 * Mark an object permanently as gray-colored so that it can no longer be
921 * reported as a leak. This is used in general to mark a false positive.
922 */
923static void make_gray_object(unsigned long ptr)
924{
925 paint_ptr(ptr, KMEMLEAK_GREY, 0);
926}
927
928/*
929 * Mark the object as black-colored so that it is ignored from scans and
930 * reporting.
931 */
932static void make_black_object(unsigned long ptr, unsigned int objflags)
933{
934 paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
935}
936
937/*
938 * Reset the checksum of an object. The immediate effect is that it will not
939 * be reported as a leak during the next scan until its checksum is updated.
940 */
941static void reset_checksum(unsigned long ptr)
942{
943 unsigned long flags;
944 struct kmemleak_object *object;
945
946 object = find_and_get_object(ptr, 0);
947 if (!object) {
948 kmemleak_warn("Not resetting the checksum of an unknown object at 0x%08lx\n",
949 ptr);
950 return;
951 }
952
953 raw_spin_lock_irqsave(&object->lock, flags);
954 object->checksum = 0;
955 raw_spin_unlock_irqrestore(&object->lock, flags);
956 put_object(object);
957}
958
959/*
960 * Add a scanning area to the object. If at least one such area is added,
961 * kmemleak will only scan these ranges rather than the whole memory block.
962 */
963static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
964{
965 unsigned long flags;
966 struct kmemleak_object *object;
967 struct kmemleak_scan_area *area = NULL;
968 unsigned long untagged_ptr;
969 unsigned long untagged_objp;
970
971 object = find_and_get_object(ptr, 1);
972 if (!object) {
973 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
974 ptr);
975 return;
976 }
977
978 untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
979 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
980
981 if (scan_area_cache)
982 area = kmem_cache_alloc_noprof(scan_area_cache,
983 gfp_nested_mask(gfp));
984
985 raw_spin_lock_irqsave(&object->lock, flags);
986 if (!area) {
987 pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
988 /* mark the object for full scan to avoid false positives */
989 object->flags |= OBJECT_FULL_SCAN;
990 goto out_unlock;
991 }
992 if (size == SIZE_MAX) {
993 size = untagged_objp + object->size - untagged_ptr;
994 } else if (untagged_ptr + size > untagged_objp + object->size) {
995 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
996 dump_object_info(object);
997 kmem_cache_free(scan_area_cache, area);
998 goto out_unlock;
999 }
1000
1001 INIT_HLIST_NODE(&area->node);
1002 area->start = ptr;
1003 area->size = size;
1004
1005 hlist_add_head(&area->node, &object->area_list);
1006out_unlock:
1007 raw_spin_unlock_irqrestore(&object->lock, flags);
1008 put_object(object);
1009}
1010
1011/*
1012 * Any surplus references (object already gray) to 'ptr' are passed to
1013 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
1014 * vm_struct may be used as an alternative reference to the vmalloc'ed object
1015 * (see free_thread_stack()).
1016 */
1017static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
1018{
1019 unsigned long flags;
1020 struct kmemleak_object *object;
1021
1022 object = find_and_get_object(ptr, 0);
1023 if (!object) {
1024 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
1025 ptr);
1026 return;
1027 }
1028
1029 raw_spin_lock_irqsave(&object->lock, flags);
1030 object->excess_ref = excess_ref;
1031 raw_spin_unlock_irqrestore(&object->lock, flags);
1032 put_object(object);
1033}
1034
1035/*
1036 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
1037 * pointer. Such an object will not be scanned by kmemleak but references to
1038 * it are still searched for.
1039 */
1040static void object_no_scan(unsigned long ptr)
1041{
1042 unsigned long flags;
1043 struct kmemleak_object *object;
1044
1045 object = find_and_get_object(ptr, 0);
1046 if (!object) {
1047 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
1048 return;
1049 }
1050
1051 raw_spin_lock_irqsave(&object->lock, flags);
1052 object->flags |= OBJECT_NO_SCAN;
1053 raw_spin_unlock_irqrestore(&object->lock, flags);
1054 put_object(object);
1055}
1056
1057/**
1058 * kmemleak_alloc - register a newly allocated object
1059 * @ptr: pointer to beginning of the object
1060 * @size: size of the object
1061 * @min_count: minimum number of references to this object. If during memory
1062 * scanning a number of references less than @min_count is found,
1063 * the object is reported as a memory leak. If @min_count is 0,
1064 * the object is never reported as a leak. If @min_count is -1,
1065 * the object is ignored (not scanned and not reported as a leak)
1066 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1067 *
1068 * This function is called from the kernel allocators when a new object
1069 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
1070 */
1071void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
1072 gfp_t gfp)
1073{
1074 pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);
1075
1076 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1077 create_object((unsigned long)ptr, size, min_count, gfp);
1078}
1079EXPORT_SYMBOL_GPL(kmemleak_alloc);
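
/*
 * Illustrative caller (hypothetical driver code; my_pool_alloc() and
 * my_pool_free() are made-up names): a subsystem handing out memory that is
 * not slab-backed can keep kmemleak informed manually:
 *
 *	void *buf = my_pool_alloc(pool, len);
 *	kmemleak_alloc(buf, len, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(buf);
 *	my_pool_free(pool, buf);
 *
 * The slab and percpu allocators already call these hooks themselves.
 */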
1080
1081/**
1082 * kmemleak_alloc_percpu - register a newly allocated __percpu object
1083 * @ptr: __percpu pointer to beginning of the object
1084 * @size: size of the object
1085 * @gfp: flags used for kmemleak internal memory allocations
1086 *
1087 * This function is called from the kernel percpu allocator when a new object
1088 * (memory block) is allocated (alloc_percpu).
1089 */
1090void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
1091 gfp_t gfp)
1092{
1093 pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
1094
1095 if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
1096 create_object_percpu((__force unsigned long)ptr, size, 1, gfp);
1097}
1098EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
1099
1100/**
1101 * kmemleak_vmalloc - register a newly vmalloc'ed object
1102 * @area: pointer to vm_struct
1103 * @size: size of the object
1104 * @gfp: __vmalloc() flags used for kmemleak internal memory allocations
1105 *
1106 * This function is called from the vmalloc() kernel allocator when a new
1107 * object (memory block) is allocated.
1108 */
1109void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
1110{
1111 pr_debug("%s(0x%px, %zu)\n", __func__, area, size);
1112
1113 /*
1114 * A min_count = 2 is needed because vm_struct contains a reference to
1115 * the virtual address of the vmalloc'ed block.
1116 */
1117 if (kmemleak_enabled) {
1118 create_object((unsigned long)area->addr, size, 2, gfp);
1119 object_set_excess_ref((unsigned long)area,
1120 (unsigned long)area->addr);
1121 }
1122}
1123EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
1124
1125/**
1126 * kmemleak_free - unregister a previously registered object
1127 * @ptr: pointer to beginning of the object
1128 *
1129 * This function is called from the kernel allocators when an object (memory
1130 * block) is freed (kmem_cache_free, kfree, vfree etc.).
1131 */
1132void __ref kmemleak_free(const void *ptr)
1133{
1134 pr_debug("%s(0x%px)\n", __func__, ptr);
1135
1136 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1137 delete_object_full((unsigned long)ptr, 0);
1138}
1139EXPORT_SYMBOL_GPL(kmemleak_free);
1140
1141/**
1142 * kmemleak_free_part - partially unregister a previously registered object
1143 * @ptr: pointer to the beginning or inside the object. This also
1144 * represents the start of the range to be freed
1145 * @size: size to be unregistered
1146 *
1147 * This function is called when only a part of a memory block is freed
1148 * (usually from the bootmem allocator).
1149 */
1150void __ref kmemleak_free_part(const void *ptr, size_t size)
1151{
1152 pr_debug("%s(0x%px)\n", __func__, ptr);
1153
1154 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1155 delete_object_part((unsigned long)ptr, size, 0);
1156}
1157EXPORT_SYMBOL_GPL(kmemleak_free_part);
1158
1159/**
1160 * kmemleak_free_percpu - unregister a previously registered __percpu object
1161 * @ptr: __percpu pointer to beginning of the object
1162 *
1163 * This function is called from the kernel percpu allocator when an object
1164 * (memory block) is freed (free_percpu).
1165 */
1166void __ref kmemleak_free_percpu(const void __percpu *ptr)
1167{
1168 pr_debug("%s(0x%px)\n", __func__, ptr);
1169
1170 if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
1171 delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
1172}
1173EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1174
1175/**
1176 * kmemleak_update_trace - update object allocation stack trace
1177 * @ptr: pointer to beginning of the object
1178 *
1179 * Override the object allocation stack trace for cases where the actual
1180 * allocation place is not always useful.
1181 */
1182void __ref kmemleak_update_trace(const void *ptr)
1183{
1184 struct kmemleak_object *object;
1185 depot_stack_handle_t trace_handle;
1186 unsigned long flags;
1187
1188 pr_debug("%s(0x%px)\n", __func__, ptr);
1189
1190 if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1191 return;
1192
1193 object = find_and_get_object((unsigned long)ptr, 1);
1194 if (!object) {
1195#ifdef DEBUG
1196 kmemleak_warn("Updating stack trace for unknown object at %p\n",
1197 ptr);
1198#endif
1199 return;
1200 }
1201
1202 trace_handle = set_track_prepare();
1203 raw_spin_lock_irqsave(&object->lock, flags);
1204 object->trace_handle = trace_handle;
1205 raw_spin_unlock_irqrestore(&object->lock, flags);
1206
1207 put_object(object);
1208}
1209EXPORT_SYMBOL(kmemleak_update_trace);
1210
1211/**
1212 * kmemleak_not_leak - mark an allocated object as false positive
1213 * @ptr: pointer to beginning of the object
1214 *
1215 * Calling this function on an object will cause the memory block to no longer
1216 * be reported as a leak and always be scanned.
1217 */
1218void __ref kmemleak_not_leak(const void *ptr)
1219{
1220 pr_debug("%s(0x%px)\n", __func__, ptr);
1221
1222 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1223 make_gray_object((unsigned long)ptr);
1224}
1225EXPORT_SYMBOL(kmemleak_not_leak);
1226
1227/**
1228 * kmemleak_transient_leak - mark an allocated object as transient false positive
1229 * @ptr: pointer to beginning of the object
1230 *
1231 * Calling this function on an object will cause the memory block to not be
1232 * reported as a leak temporarily. This may happen, for example, if the object
1233 * is part of a singly linked list and the ->next reference to it is changed.
1234 */
1235void __ref kmemleak_transient_leak(const void *ptr)
1236{
1237 pr_debug("%s(0x%px)\n", __func__, ptr);
1238
1239 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1240 reset_checksum((unsigned long)ptr);
1241}
1242EXPORT_SYMBOL(kmemleak_transient_leak);
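
/*
 * Illustrative use (hypothetical list code): while unlinking a node from a
 * singly linked list, the only reference to its successor may briefly be
 * dropped and a concurrent scan could report it. Calling
 *
 *	kmemleak_transient_leak(node->next);
 *
 * before rewriting the ->next pointer resets the checksum, so the object is
 * only reported once a later scan sees it unchanged again.
 */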
1243
1244/**
1245 * kmemleak_ignore - ignore an allocated object
1246 * @ptr: pointer to beginning of the object
1247 *
1248 * Calling this function on an object will cause the memory block to be
1249 * ignored (not scanned and not reported as a leak). This is usually done when
1250 * it is known that the corresponding block is not a leak and does not contain
1251 * any references to other allocated memory blocks.
1252 */
1253void __ref kmemleak_ignore(const void *ptr)
1254{
1255 pr_debug("%s(0x%px)\n", __func__, ptr);
1256
1257 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1258 make_black_object((unsigned long)ptr, 0);
1259}
1260EXPORT_SYMBOL(kmemleak_ignore);
1261
1262/**
1263 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1264 * @ptr: pointer to beginning or inside the object. This also
1265 * represents the start of the scan area
1266 * @size: size of the scan area
1267 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1268 *
1269 * This function is used when it is known that only certain parts of an object
1270 * contain references to other objects. Kmemleak will only scan these areas
1271 * reducing the number of false negatives.
1272 */
1273void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1274{
1275 pr_debug("%s(0x%px)\n", __func__, ptr);
1276
1277 if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1278 add_scan_area((unsigned long)ptr, size, gfp);
1279}
1280EXPORT_SYMBOL(kmemleak_scan_area);
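
/*
 * Illustrative use (hypothetical structure): if only one field of a large
 * object can legitimately hold pointers, scanning can be narrowed to it:
 *
 *	struct big_buf {
 *		char data[PAGE_SIZE];	(no pointers in here)
 *		struct list_head list;
 *	};
 *
 *	kmemleak_scan_area(&buf->list, sizeof(buf->list), GFP_KERNEL);
 *
 * Random bytes in data[] can then no longer be mistaken for references
 * that would hide real leaks elsewhere.
 */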
1281
1282/**
1283 * kmemleak_no_scan - do not scan an allocated object
1284 * @ptr: pointer to beginning of the object
1285 *
1286 * This function notifies kmemleak not to scan the given memory block. Useful
1287 * in situations where it is known that the given object does not contain any
1288 * references to other objects. Kmemleak will not scan such objects, reducing
1289 * the number of false negatives.
1290 */
1291void __ref kmemleak_no_scan(const void *ptr)
1292{
1293 pr_debug("%s(0x%px)\n", __func__, ptr);
1294
1295 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1296 object_no_scan((unsigned long)ptr);
1297}
1298EXPORT_SYMBOL(kmemleak_no_scan);
1299
1300/**
1301 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1302 * address argument
1303 * @phys: physical address of the object
1304 * @size: size of the object
1305 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1306 */
1307void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
1308{
1309 pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);
1310
1311 if (kmemleak_enabled)
1312 /*
1313 * Create object with OBJECT_PHYS flag and
1314 * assume min_count 0.
1315 */
1316 create_object_phys((unsigned long)phys, size, 0, gfp);
1317}
1318EXPORT_SYMBOL(kmemleak_alloc_phys);
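
/*
 * Illustrative caller: boot-time allocators dealing in physical addresses
 * (e.g. memblock) use this hook instead of kmemleak_alloc(). A sketch:
 *
 *	phys_addr_t phys = memblock_phys_alloc(size, align);
 *	kmemleak_alloc_phys(phys, size, GFP_NOWAIT);
 *
 * Since min_count is 0 (see above), such objects are gray from the start
 * and are never reported as leaks.
 */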
1319
1320/**
1321 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1322 * physical address argument
1323 * @phys: physical address of the beginning or inside an object. This
1324 * also represents the start of the range to be freed
1325 * @size: size to be unregistered
1326 */
1327void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1328{
1329 pr_debug("%s(0x%px)\n", __func__, &phys);
1330
1331 if (kmemleak_enabled)
1332 delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
1333}
1334EXPORT_SYMBOL(kmemleak_free_part_phys);
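
/*
 * Illustrative use (hypothetical sizes): trimming the unused tail of a
 * previously registered physical range keeps only the used part tracked:
 *
 *	kmemleak_free_part_phys(phys + used_size, total_size - used_size);
 */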

/**
 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
 *			  address argument
 * @phys:	physical address of the object
 */
void __ref kmemleak_ignore_phys(phys_addr_t phys)
{
	pr_debug("%s(%pa)\n", __func__, &phys);

	if (kmemleak_enabled)
		make_black_object((unsigned long)phys, OBJECT_PHYS);
}
EXPORT_SYMBOL(kmemleak_ignore_phys);

/*
 * Update an object's checksum and return true if it was modified.
 */
static bool update_checksum(struct kmemleak_object *object)
{
	u32 old_csum = object->checksum;

	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
		return false;

	kasan_disable_current();
	kcsan_disable_current();
	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		object->checksum = 0;
		for_each_possible_cpu(cpu) {
			void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);

			object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
		}
	} else {
		object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
	}
	kasan_enable_current();
	kcsan_enable_current();

	return object->checksum != old_csum;
}

/*
 * Update an object's references. object->lock must be held by the caller.
 */
static void update_refs(struct kmemleak_object *object)
{
	if (!color_white(object)) {
		/* non-orphan, ignored or new */
		return;
	}

	/*
	 * Increase the object's reference count (number of pointers to the
	 * memory block). If this count reaches the required minimum, the
	 * object's color will become gray and it will be added to the
	 * gray_list.
	 */
	object->count++;
	if (color_gray(object)) {
		/* put_object() called when removing from gray_list */
		WARN_ON(!get_object(object));
		list_add_tail(&object->gray_list, &gray_list);
	}
}

static void pointer_update_refs(struct kmemleak_object *scanned,
				unsigned long pointer, unsigned int objflags)
{
	struct kmemleak_object *object;
	unsigned long untagged_ptr;
	unsigned long excess_ref;

	untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
	if (objflags & OBJECT_PERCPU) {
		if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
			return;
	} else {
		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
			return;
	}

	/*
	 * No need for get_object() here since we hold kmemleak_lock.
	 * object->use_count cannot be dropped to 0 while the object
	 * is still present in object_tree_root and object_list
	 * (with updates protected by kmemleak_lock).
	 */
	object = __lookup_object(pointer, 1, objflags);
	if (!object)
		return;
	if (object == scanned)
		/* self referenced, ignore */
		return;

	/*
	 * Avoid the lockdep recursive warning on object->lock being
	 * previously acquired in scan_object(). These locks are
	 * enclosed by scan_mutex.
	 */
	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
	/* only pass surplus references (object already gray) */
	if (color_gray(object)) {
		excess_ref = object->excess_ref;
		/* no need for update_refs() if object already gray */
	} else {
		excess_ref = 0;
		update_refs(object);
	}
	raw_spin_unlock(&object->lock);

	if (excess_ref) {
		object = lookup_object(excess_ref, 0);
		if (!object)
			return;
		if (object == scanned)
			/* circular reference, ignore */
			return;
		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
		update_refs(object);
		raw_spin_unlock(&object->lock);
	}
}

/*
 * Memory scanning is a long process and it needs to be interruptible. This
 * function checks whether such an interrupt condition has occurred.
 */
static int scan_should_stop(void)
{
	if (!kmemleak_enabled)
		return 1;

	/*
	 * This function may be called from either process or kthread context,
	 * hence the need to check for both stop conditions.
	 */
	if (current->mm)
		return signal_pending(current);

	return kthread_should_stop();
}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		unsigned long pointer;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		pointer_update_refs(scanned, pointer, 0);
		pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * ensure that object->use_count >= 1.
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;

	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
			void *end = start + object->size;

			scan_block(start, end, object);

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
			if (!(object->flags & OBJECT_ALLOCATED))
				break;
		}
	} else if (hlist_empty(&object->area_list) ||
		   object->flags & OBJECT_FULL_SCAN) {
		void *start = object->flags & OBJECT_PHYS ?
			      __va((phys_addr_t)object->pointer) :
			      (void *)object->pointer;
		void *end = start + object->size;
		void *next;

		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else {
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
	}
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Conditionally call cond_resched() in an object iteration loop while making
 * sure that the given object won't go away without the RCU read lock by
 * performing a get_object() if necessary.
 */
static void kmemleak_cond_resched(struct kmemleak_object *object)
{
	if (!get_object(object))
		return;	/* Try next object */

	raw_spin_lock_irq(&kmemleak_lock);
	if (object->del_state & DELSTATE_REMOVED)
		goto unlock_put;	/* Object removed */
	object->del_state |= DELSTATE_NO_DELETE;
	raw_spin_unlock_irq(&kmemleak_lock);

	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();

	raw_spin_lock_irq(&kmemleak_lock);
	if (object->del_state & DELSTATE_REMOVED)
		list_del_rcu(&object->object_list);
	object->del_state &= ~DELSTATE_NO_DELETE;
unlock_put:
	raw_spin_unlock_irq(&kmemleak_lock);
	put_object(object);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object's */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif

		/* ignore objects outside lowmem (paint them black) */
		if ((object->flags & OBJECT_PHYS) &&
		    !(object->flags & OBJECT_NO_SCAN)) {
			unsigned long phys = object->pointer;

			if (PHYS_PFN(phys) < min_low_pfn ||
			    PHYS_PFN(phys + object->size) >= max_low_pfn)
				__paint_it(object, KMEMLEAK_BLACK);
		}

		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irq(&object->lock);

		if (need_resched())
			kmemleak_cond_resched(object);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!(pfn & 63))
				cond_resched();

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);

			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (need_resched())
			kmemleak_cond_resched(object);

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED) &&
		    update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (need_resched())
			kmemleak_cond_resched(object);

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported but only the first time.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);

		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list, returning the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object required, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans, these black objects could
 * potentially contain references to newly allocated objects in the future and
 * we'd end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds (0 to
 *		  disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all current reported unreferenced kmemleak objects as
 *		  grey to ignore printing them, or free all kmemleak objects
 *		  if kmemleak has been disabled.
 *   dump=...	- dump information about the object found at the given address
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned int secs;
		unsigned long msecs;

		ret = kstrtouint(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;

		msecs = secs * MSEC_PER_SEC;
		if (msecs > UINT_MAX)
			msecs = UINT_MAX;

		stop_scan_thread();
		if (msecs) {
			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}
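
/*
 * Example interaction from user space, using only the commands listed
 * above (debugfs assumed mounted at /sys/kernel/debug):
 *
 *	echo scan > /sys/kernel/debug/kmemleak		# trigger a scan now
 *	echo scan=600 > /sys/kernel/debug/kmemleak	# rescan every 600s
 *	echo clear > /sys/kernel/debug/kmemleak		# grey out current reports
 */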

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, so there is no need for RCU
	 * list traversal or for holding kmemleak_lock.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks were found (otherwise, kmemleak may still hold useful
 * information on the reported leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
	 * longer track object freeing. Ordering of the scan thread stopping and
	 * the memory accesses below is guaranteed by the kthread_stop()
	 * function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_late_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0) {
		kmemleak_skip_disable = 1;
		stack_depot_request_early_init();
	} else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_late_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_late_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);