1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * mm/kmemleak.c
4 *
5 * Copyright (C) 2008 ARM Limited
6 * Written by Catalin Marinas <catalin.marinas@arm.com>
7 *
8 * For more information on the algorithm and kmemleak usage, please see
9 * Documentation/dev-tools/kmemleak.rst.
10 *
11 * Notes on locking
12 * ----------------
13 *
14 * The following locks and mutexes are used by kmemleak:
15 *
16 * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
17 * accesses to the object_tree_root. The object_list is the main list
18 * holding the metadata (struct kmemleak_object) for the allocated memory
19 * blocks. The object_tree_root is a red black tree used to look-up
20 * metadata based on a pointer to the corresponding memory block. The
21 * kmemleak_object structures are added to the object_list and
22 * object_tree_root in the create_object() function called from the
23 * kmemleak_alloc() callback and removed in delete_object() called from the
24 * kmemleak_free() callback
25 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
26 * Accesses to the metadata (e.g. count) are protected by this lock. Note
27 * that some members of this structure may be protected by other means
28 * (atomic or kmemleak_lock). This lock is also held when scanning the
29 * corresponding memory block to avoid the kernel freeing it via the
30 * kmemleak_free() callback. This is less heavyweight than holding a global
31 * lock like kmemleak_lock during scanning.
32 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
33 * unreferenced objects at a time. The gray_list contains the objects which
34 * are already referenced or marked as false positives and need to be
35 * scanned. This list is only modified during a scanning episode when the
36 * scan_mutex is held. At the end of a scan, the gray_list is always empty.
37 * Note that the kmemleak_object.use_count is incremented when an object is
38 * added to the gray_list and therefore cannot be freed. This mutex also
 39 *   serializes the users of the "kmemleak" debugfs file with each other and
 40 *   with modifications to the memory scanning parameters, including the
 41 *   scan_thread pointer
42 *
43 * Locks and mutexes are acquired/nested in the following order:
44 *
45 * scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
46 *
47 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
48 * regions.
49 *
50 * The kmemleak_object structures have a use_count incremented or decremented
51 * using the get_object()/put_object() functions. When the use_count becomes
52 * 0, this count can no longer be incremented and put_object() schedules the
53 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
54 * function must be protected by rcu_read_lock() to avoid accessing a freed
55 * structure.
56 */
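
/*
 * Illustrative sketch of the nesting order above, as it occurs while the
 * scan thread examines one object and looks up the values found inside it
 * ("obj" and "other" are hypothetical objects, not kmemleak variables):
 *
 *	mutex_lock(&scan_mutex);
 *	raw_spin_lock_irqsave(&obj->lock, flags);	// object being scanned
 *	raw_spin_lock(&kmemleak_lock);			// pointer look-ups
 *	raw_spin_lock_nested(&other->lock, SINGLE_DEPTH_NESTING);
 *	// ... unlock everything in the reverse order ...
 */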
57
58#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
59
60#include <linux/init.h>
61#include <linux/kernel.h>
62#include <linux/list.h>
63#include <linux/sched/signal.h>
64#include <linux/sched/task.h>
65#include <linux/sched/task_stack.h>
66#include <linux/jiffies.h>
67#include <linux/delay.h>
68#include <linux/export.h>
69#include <linux/kthread.h>
70#include <linux/rbtree.h>
71#include <linux/fs.h>
72#include <linux/debugfs.h>
73#include <linux/seq_file.h>
74#include <linux/cpumask.h>
75#include <linux/spinlock.h>
76#include <linux/module.h>
77#include <linux/mutex.h>
78#include <linux/rcupdate.h>
79#include <linux/stacktrace.h>
80#include <linux/cache.h>
81#include <linux/percpu.h>
82#include <linux/memblock.h>
83#include <linux/pfn.h>
84#include <linux/mmzone.h>
85#include <linux/slab.h>
86#include <linux/thread_info.h>
87#include <linux/err.h>
88#include <linux/uaccess.h>
89#include <linux/string.h>
90#include <linux/nodemask.h>
91#include <linux/mm.h>
92#include <linux/workqueue.h>
93#include <linux/crc32.h>
94
95#include <asm/sections.h>
96#include <asm/processor.h>
97#include <linux/atomic.h>
98
99#include <linux/kasan.h>
100#include <linux/kfence.h>
101#include <linux/kmemleak.h>
102#include <linux/memory_hotplug.h>
103
104/*
105 * Kmemleak configuration and common defines.
106 */
107#define MAX_TRACE 16 /* stack trace length */
108#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
109#define SECS_FIRST_SCAN 60 /* delay before the first scan */
110#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
111#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
112
113#define BYTES_PER_POINTER sizeof(void *)
114
115/* GFP bitmask for kmemleak internal allocations */
116#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
117 __GFP_NORETRY | __GFP_NOMEMALLOC | \
118 __GFP_NOWARN)
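
/*
 * For example (illustrative), tracking an allocation made with
 * GFP_KERNEL | __GFP_ZERO results in kmemleak's own metadata being
 * allocated as:
 *
 *	gfp_kmemleak_mask(GFP_KERNEL | __GFP_ZERO)
 *		== GFP_KERNEL | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *
 * i.e. only the GFP_KERNEL/GFP_ATOMIC bits of the caller's flags are kept.
 */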
119
120/* scanning area inside a memory block */
121struct kmemleak_scan_area {
122 struct hlist_node node;
123 unsigned long start;
124 size_t size;
125};
126
127#define KMEMLEAK_GREY 0
128#define KMEMLEAK_BLACK -1
129
130/*
131 * Structure holding the metadata for each allocated memory block.
132 * Modifications to such objects should be made while holding the
133 * object->lock. Insertions or deletions from object_list, gray_list or
134 * rb_node are already protected by the corresponding locks or mutex (see
135 * the notes on locking above). These objects are reference-counted
136 * (use_count) and freed using the RCU mechanism.
137 */
138struct kmemleak_object {
139 raw_spinlock_t lock;
140 unsigned int flags; /* object status flags */
141 struct list_head object_list;
142 struct list_head gray_list;
143 struct rb_node rb_node;
144 struct rcu_head rcu; /* object_list lockless traversal */
145 /* object usage count; object freed when use_count == 0 */
146 atomic_t use_count;
147 unsigned long pointer;
148 size_t size;
149 /* pass surplus references to this pointer */
150 unsigned long excess_ref;
 151 /* minimum number of pointers found before it is considered a leak */
152 int min_count;
153 /* the total number of pointers found pointing to this object */
154 int count;
155 /* checksum for detecting modified objects */
156 u32 checksum;
157 /* memory ranges to be scanned inside an object (empty for all) */
158 struct hlist_head area_list;
159 unsigned long trace[MAX_TRACE];
160 unsigned int trace_len;
161 unsigned long jiffies; /* creation timestamp */
162 pid_t pid; /* pid of the current task */
163 char comm[TASK_COMM_LEN]; /* executable name */
164};
165
166/* flag representing the memory block allocation status */
167#define OBJECT_ALLOCATED (1 << 0)
 168/* flag set after the first reporting of an unreferenced object */
169#define OBJECT_REPORTED (1 << 1)
170/* flag set to not scan the object */
171#define OBJECT_NO_SCAN (1 << 2)
172/* flag set to fully scan the object when scan_area allocation failed */
173#define OBJECT_FULL_SCAN (1 << 3)
174
175#define HEX_PREFIX " "
176/* number of bytes to print per line; must be 16 or 32 */
177#define HEX_ROW_SIZE 16
178/* number of bytes to print at a time (1, 2, 4, 8) */
179#define HEX_GROUP_SIZE 1
180/* include ASCII after the hex output */
181#define HEX_ASCII 1
182/* max number of lines to be printed */
183#define HEX_MAX_LINES 2
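
/*
 * With the values above a dump covers at most 2 rows of 16 bytes each,
 * e.g. (output below is illustrative only):
 *
 *	  hex dump (first 32 bytes):
 *	    6b 6d 65 6d 6c 65 61 6b 20 65 78 61 6d 70 6c 65  kmemleak example
 *	    00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00  ................
 */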
184
185/* the list of all allocated objects */
186static LIST_HEAD(object_list);
187/* the list of gray-colored objects (see color_gray comment below) */
188static LIST_HEAD(gray_list);
189/* memory pool allocation */
190static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
191static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
192static LIST_HEAD(mem_pool_free_list);
193/* search tree for object boundaries */
194static struct rb_root object_tree_root = RB_ROOT;
195/* protecting the access to object_list and object_tree_root */
196static DEFINE_RAW_SPINLOCK(kmemleak_lock);
197
198/* allocation caches for kmemleak internal data */
199static struct kmem_cache *object_cache;
200static struct kmem_cache *scan_area_cache;
201
202/* set if tracing memory operations is enabled */
203static int kmemleak_enabled = 1;
204/* same as above but only for the kmemleak_free() callback */
205static int kmemleak_free_enabled = 1;
206/* set in the late_initcall if there were no errors */
207static int kmemleak_initialized;
208/* set if a kmemleak warning was issued */
209static int kmemleak_warning;
210/* set if a fatal kmemleak error has occurred */
211static int kmemleak_error;
212
213/* minimum and maximum address that may be valid pointers */
214static unsigned long min_addr = ULONG_MAX;
215static unsigned long max_addr;
216
217static struct task_struct *scan_thread;
218/* used to avoid reporting of recently allocated objects */
219static unsigned long jiffies_min_age;
220static unsigned long jiffies_last_scan;
221/* delay between automatic memory scannings */
222static unsigned long jiffies_scan_wait;
223/* enables or disables the task stacks scanning */
224static int kmemleak_stack_scan = 1;
225/* protects the memory scanning, parameters and debug/kmemleak file access */
226static DEFINE_MUTEX(scan_mutex);
 227/* setting kmemleak=on will set this var, skipping the disable */
228static int kmemleak_skip_disable;
229/* If there are leaks that can be reported */
230static bool kmemleak_found_leaks;
231
232static bool kmemleak_verbose;
233module_param_named(verbose, kmemleak_verbose, bool, 0600);
234
235static void kmemleak_disable(void);
236
237/*
238 * Print a warning and dump the stack trace.
239 */
240#define kmemleak_warn(x...) do { \
241 pr_warn(x); \
242 dump_stack(); \
243 kmemleak_warning = 1; \
244} while (0)
245
246/*
 247 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 248 * recovered from. Kmemleak will be disabled and further allocation/freeing
 249 * tracing will no longer be available.
250 */
251#define kmemleak_stop(x...) do { \
252 kmemleak_warn(x); \
253 kmemleak_disable(); \
254} while (0)
255
256#define warn_or_seq_printf(seq, fmt, ...) do { \
257 if (seq) \
258 seq_printf(seq, fmt, ##__VA_ARGS__); \
259 else \
260 pr_warn(fmt, ##__VA_ARGS__); \
261} while (0)
262
263static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
264 int rowsize, int groupsize, const void *buf,
265 size_t len, bool ascii)
266{
267 if (seq)
268 seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
269 buf, len, ascii);
270 else
271 print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
272 rowsize, groupsize, buf, len, ascii);
273}
274
275/*
 276 * Printing of the object's hex dump to the seq file. The number of lines to be
277 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
278 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
279 * with the object->lock held.
280 */
281static void hex_dump_object(struct seq_file *seq,
282 struct kmemleak_object *object)
283{
284 const u8 *ptr = (const u8 *)object->pointer;
285 size_t len;
286
287 /* limit the number of lines to HEX_MAX_LINES */
288 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
289
290 warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
291 kasan_disable_current();
292 warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
293 HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
294 kasan_enable_current();
295}
296
297/*
298 * Object colors, encoded with count and min_count:
299 * - white - orphan object, not enough references to it (count < min_count)
 300 * - gray - not orphan: either marked as a false positive (min_count == 0)
 301 *         or with sufficient references to it (count >= min_count)
302 * - black - ignore, it doesn't contain references (e.g. text section)
303 * (min_count == -1). No function defined for this color.
 304 * Newly created objects start with count == 0: for a positive min_count they
 305 * are white until enough references to them are found by a memory scan.
306 */
307static bool color_white(const struct kmemleak_object *object)
308{
309 return object->count != KMEMLEAK_BLACK &&
310 object->count < object->min_count;
311}
312
313static bool color_gray(const struct kmemleak_object *object)
314{
315 return object->min_count != KMEMLEAK_BLACK &&
316 object->count >= object->min_count;
317}
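
/*
 * Worked example (hypothetical object): a kmalloc'ed block is registered
 * with min_count == 1, so right after creation (count == 0) it is white,
 * i.e. a leak candidate. As soon as a scan finds one pointer to it, count
 * becomes 1 and color_gray() is true. Calling kmemleak_not_leak() makes
 * it gray forever (min_count = 0), while kmemleak_ignore() paints it
 * black (min_count = KMEMLEAK_BLACK) so it is neither scanned nor
 * reported.
 */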
318
319/*
320 * Objects are considered unreferenced only if their color is white, they have
 321 * not been deleted and have a minimum age to avoid false positives caused by
322 * pointers temporarily stored in CPU registers.
323 */
324static bool unreferenced_object(struct kmemleak_object *object)
325{
326 return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
327 time_before_eq(object->jiffies + jiffies_min_age,
328 jiffies_last_scan);
329}
330
331/*
 332 * Printing of the unreferenced object's information to the seq file. The
333 * print_unreferenced function must be called with the object->lock held.
334 */
335static void print_unreferenced(struct seq_file *seq,
336 struct kmemleak_object *object)
337{
338 int i;
339 unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
340
341 warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
342 object->pointer, object->size);
343 warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
344 object->comm, object->pid, object->jiffies,
345 msecs_age / 1000, msecs_age % 1000);
346 hex_dump_object(seq, object);
347 warn_or_seq_printf(seq, " backtrace:\n");
348
349 for (i = 0; i < object->trace_len; i++) {
350 void *ptr = (void *)object->trace[i];
351 warn_or_seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
352 }
353}
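
/*
 * The resulting report has the following shape (all values illustrative,
 * including the made-up my_driver_probe() frame):
 *
 *	unreferenced object 0xffff8880b0520000 (size 64):
 *	  comm "insmod", pid 1320, jiffies 4294921600 (age 10.240s)
 *	  hex dump (first 32 bytes):
 *	    ...
 *	  backtrace:
 *	    [<00000000deadbeef>] kmem_cache_alloc+0x1a4/0x2f0
 *	    [<00000000cafebabe>] my_driver_probe+0x5c/0x120
 */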
354
355/*
356 * Print the kmemleak_object information. This function is used mainly for
 357 * debugging special cases during kmemleak operations. It must be called with
358 * the object->lock held.
359 */
360static void dump_object_info(struct kmemleak_object *object)
361{
362 pr_notice("Object 0x%08lx (size %zu):\n",
363 object->pointer, object->size);
364 pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
365 object->comm, object->pid, object->jiffies);
366 pr_notice(" min_count = %d\n", object->min_count);
367 pr_notice(" count = %d\n", object->count);
368 pr_notice(" flags = 0x%x\n", object->flags);
369 pr_notice(" checksum = %u\n", object->checksum);
370 pr_notice(" backtrace:\n");
371 stack_trace_print(object->trace, object->trace_len, 4);
372}
373
374/*
 375 * Look up the metadata (kmemleak_object) of a memory block in the object search
 376 * tree based on a pointer value. If alias is 0, only values pointing to the
377 * beginning of the memory block are allowed. The kmemleak_lock must be held
378 * when calling this function.
379 */
380static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
381{
382 struct rb_node *rb = object_tree_root.rb_node;
383
384 while (rb) {
385 struct kmemleak_object *object =
386 rb_entry(rb, struct kmemleak_object, rb_node);
387 if (ptr < object->pointer)
388 rb = object->rb_node.rb_left;
389 else if (object->pointer + object->size <= ptr)
390 rb = object->rb_node.rb_right;
391 else if (object->pointer == ptr || alias)
392 return object;
393 else {
394 kmemleak_warn("Found object by alias at 0x%08lx\n",
395 ptr);
396 dump_object_info(object);
397 break;
398 }
399 }
400 return NULL;
401}
402
403/*
404 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 405 * that once an object's use_count has reached 0, the RCU freeing has already
 406 * been scheduled and the object must no longer be used. This function must be
407 * called under the protection of rcu_read_lock().
408 */
409static int get_object(struct kmemleak_object *object)
410{
411 return atomic_inc_not_zero(&object->use_count);
412}
413
414/*
415 * Memory pool allocation and freeing. kmemleak_lock must not be held.
416 */
417static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
418{
419 unsigned long flags;
420 struct kmemleak_object *object;
421
422 /* try the slab allocator first */
423 if (object_cache) {
424 object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
425 if (object)
426 return object;
427 }
428
429 /* slab allocation failed, try the memory pool */
430 raw_spin_lock_irqsave(&kmemleak_lock, flags);
431 object = list_first_entry_or_null(&mem_pool_free_list,
432 typeof(*object), object_list);
433 if (object)
434 list_del(&object->object_list);
435 else if (mem_pool_free_count)
436 object = &mem_pool[--mem_pool_free_count];
437 else
438 pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
439 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
440
441 return object;
442}
443
444/*
445 * Return the object to either the slab allocator or the memory pool.
446 */
447static void mem_pool_free(struct kmemleak_object *object)
448{
449 unsigned long flags;
450
451 if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
452 kmem_cache_free(object_cache, object);
453 return;
454 }
455
456 /* add the object to the memory pool free list */
457 raw_spin_lock_irqsave(&kmemleak_lock, flags);
458 list_add(&object->object_list, &mem_pool_free_list);
459 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
460}
461
462/*
463 * RCU callback to free a kmemleak_object.
464 */
465static void free_object_rcu(struct rcu_head *rcu)
466{
467 struct hlist_node *tmp;
468 struct kmemleak_scan_area *area;
469 struct kmemleak_object *object =
470 container_of(rcu, struct kmemleak_object, rcu);
471
472 /*
473 * Once use_count is 0 (guaranteed by put_object), there is no other
474 * code accessing this object, hence no need for locking.
475 */
476 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
477 hlist_del(&area->node);
478 kmem_cache_free(scan_area_cache, area);
479 }
480 mem_pool_free(object);
481}
482
483/*
484 * Decrement the object use_count. Once the count is 0, free the object using
485 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
486 * delete_object() path, the delayed RCU freeing ensures that there is no
487 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
488 * is also possible.
489 */
490static void put_object(struct kmemleak_object *object)
491{
492 if (!atomic_dec_and_test(&object->use_count))
493 return;
494
495 /* should only get here after delete_object was called */
496 WARN_ON(object->flags & OBJECT_ALLOCATED);
497
498 /*
499 * It may be too early for the RCU callbacks, however, there is no
500 * concurrent object_list traversal when !object_cache and all objects
501 * came from the memory pool. Free the object directly.
502 */
503 if (object_cache)
504 call_rcu(&object->rcu, free_object_rcu);
505 else
506 free_object_rcu(&object->rcu);
507}
508
509/*
510 * Look up an object in the object search tree and increase its use_count.
511 */
512static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
513{
514 unsigned long flags;
515 struct kmemleak_object *object;
516
517 rcu_read_lock();
518 raw_spin_lock_irqsave(&kmemleak_lock, flags);
519 object = lookup_object(ptr, alias);
520 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
521
522 /* check whether the object is still available */
523 if (object && !get_object(object))
524 object = NULL;
525 rcu_read_unlock();
526
527 return object;
528}
529
530/*
531 * Remove an object from the object_tree_root and object_list. Must be called
532 * with the kmemleak_lock held _if_ kmemleak is still enabled.
533 */
534static void __remove_object(struct kmemleak_object *object)
535{
536 rb_erase(&object->rb_node, &object_tree_root);
537 list_del_rcu(&object->object_list);
538}
539
540/*
541 * Look up an object in the object search tree and remove it from both
542 * object_tree_root and object_list. The returned object's use_count should be
543 * at least 1, as initially set by create_object().
544 */
545static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
546{
547 unsigned long flags;
548 struct kmemleak_object *object;
549
550 raw_spin_lock_irqsave(&kmemleak_lock, flags);
551 object = lookup_object(ptr, alias);
552 if (object)
553 __remove_object(object);
554 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
555
556 return object;
557}
558
559/*
560 * Save stack trace to the given array of MAX_TRACE size.
561 */
562static int __save_stack_trace(unsigned long *trace)
563{
564 return stack_trace_save(trace, MAX_TRACE, 2);
565}
566
567/*
568 * Create the metadata (struct kmemleak_object) corresponding to an allocated
569 * memory block and add it to the object_list and object_tree_root.
570 */
571static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
572 int min_count, gfp_t gfp)
573{
574 unsigned long flags;
575 struct kmemleak_object *object, *parent;
576 struct rb_node **link, *rb_parent;
577 unsigned long untagged_ptr;
578
579 object = mem_pool_alloc(gfp);
580 if (!object) {
581 pr_warn("Cannot allocate a kmemleak_object structure\n");
582 kmemleak_disable();
583 return NULL;
584 }
585
586 INIT_LIST_HEAD(&object->object_list);
587 INIT_LIST_HEAD(&object->gray_list);
588 INIT_HLIST_HEAD(&object->area_list);
589 raw_spin_lock_init(&object->lock);
590 atomic_set(&object->use_count, 1);
591 object->flags = OBJECT_ALLOCATED;
592 object->pointer = ptr;
593 object->size = kfence_ksize((void *)ptr) ?: size;
594 object->excess_ref = 0;
595 object->min_count = min_count;
596 object->count = 0; /* white color initially */
597 object->jiffies = jiffies;
598 object->checksum = 0;
599
600 /* task information */
601 if (in_irq()) {
602 object->pid = 0;
603 strncpy(object->comm, "hardirq", sizeof(object->comm));
604 } else if (in_serving_softirq()) {
605 object->pid = 0;
606 strncpy(object->comm, "softirq", sizeof(object->comm));
607 } else {
608 object->pid = current->pid;
609 /*
610 * There is a small chance of a race with set_task_comm(),
611 * however using get_task_comm() here may cause locking
612 * dependency issues with current->alloc_lock. In the worst
613 * case, the command line is not correct.
614 */
615 strncpy(object->comm, current->comm, sizeof(object->comm));
616 }
617
618 /* kernel backtrace */
619 object->trace_len = __save_stack_trace(object->trace);
620
621 raw_spin_lock_irqsave(&kmemleak_lock, flags);
622
623 untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
624 min_addr = min(min_addr, untagged_ptr);
625 max_addr = max(max_addr, untagged_ptr + size);
626 link = &object_tree_root.rb_node;
627 rb_parent = NULL;
628 while (*link) {
629 rb_parent = *link;
630 parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
631 if (ptr + size <= parent->pointer)
632 link = &parent->rb_node.rb_left;
633 else if (parent->pointer + parent->size <= ptr)
634 link = &parent->rb_node.rb_right;
635 else {
636 kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
637 ptr);
638 /*
639 * No need for parent->lock here since "parent" cannot
640 * be freed while the kmemleak_lock is held.
641 */
642 dump_object_info(parent);
 643 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 644 mem_pool_free(object); /* may be from the mem_pool, not the slab cache */
 645 return NULL;
646 }
647 }
648 rb_link_node(&object->rb_node, rb_parent, link);
649 rb_insert_color(&object->rb_node, &object_tree_root);
650
651 list_add_tail_rcu(&object->object_list, &object_list);
653 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
654 return object;
655}
656
657/*
658 * Mark the object as not allocated and schedule RCU freeing via put_object().
659 */
660static void __delete_object(struct kmemleak_object *object)
661{
662 unsigned long flags;
663
664 WARN_ON(!(object->flags & OBJECT_ALLOCATED));
665 WARN_ON(atomic_read(&object->use_count) < 1);
666
667 /*
668 * Locking here also ensures that the corresponding memory block
669 * cannot be freed when it is being scanned.
670 */
671 raw_spin_lock_irqsave(&object->lock, flags);
672 object->flags &= ~OBJECT_ALLOCATED;
673 raw_spin_unlock_irqrestore(&object->lock, flags);
674 put_object(object);
675}
676
677/*
678 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
679 * delete it.
680 */
681static void delete_object_full(unsigned long ptr)
682{
683 struct kmemleak_object *object;
684
685 object = find_and_remove_object(ptr, 0);
686 if (!object) {
687#ifdef DEBUG
688 kmemleak_warn("Freeing unknown object at 0x%08lx\n",
689 ptr);
690#endif
691 return;
692 }
693 __delete_object(object);
694}
695
696/*
697 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
698 * delete it. If the memory block is partially freed, the function may create
699 * additional metadata for the remaining parts of the block.
700 */
701static void delete_object_part(unsigned long ptr, size_t size)
702{
703 struct kmemleak_object *object;
704 unsigned long start, end;
705
706 object = find_and_remove_object(ptr, 1);
707 if (!object) {
708#ifdef DEBUG
709 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
710 ptr, size);
711#endif
712 return;
713 }
714
715 /*
716 * Create one or two objects that may result from the memory block
717 * split. Note that partial freeing is only done by free_bootmem() and
718 * this happens before kmemleak_init() is called.
719 */
720 start = object->pointer;
721 end = object->pointer + object->size;
722 if (ptr > start)
723 create_object(start, ptr - start, object->min_count,
724 GFP_KERNEL);
725 if (ptr + size < end)
726 create_object(ptr + size, end - ptr - size, object->min_count,
727 GFP_KERNEL);
728
729 __delete_object(object);
730}
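
/*
 * Worked example (illustrative addresses): partially freeing [0x1040,
 * 0x1060) from an object covering [0x1000, 0x1100) deletes the original
 * object and creates two new ones, [0x1000, 0x1040) and [0x1060, 0x1100),
 * both inheriting the original min_count.
 */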
731
732static void __paint_it(struct kmemleak_object *object, int color)
733{
734 object->min_count = color;
735 if (color == KMEMLEAK_BLACK)
736 object->flags |= OBJECT_NO_SCAN;
737}
738
739static void paint_it(struct kmemleak_object *object, int color)
740{
741 unsigned long flags;
742
743 raw_spin_lock_irqsave(&object->lock, flags);
744 __paint_it(object, color);
745 raw_spin_unlock_irqrestore(&object->lock, flags);
746}
747
748static void paint_ptr(unsigned long ptr, int color)
749{
750 struct kmemleak_object *object;
751
752 object = find_and_get_object(ptr, 0);
753 if (!object) {
754 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
755 ptr,
756 (color == KMEMLEAK_GREY) ? "Grey" :
757 (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
758 return;
759 }
760 paint_it(object, color);
761 put_object(object);
762}
763
764/*
765 * Mark an object permanently as gray-colored so that it can no longer be
766 * reported as a leak. This is used in general to mark a false positive.
767 */
768static void make_gray_object(unsigned long ptr)
769{
770 paint_ptr(ptr, KMEMLEAK_GREY);
771}
772
773/*
774 * Mark the object as black-colored so that it is ignored from scans and
775 * reporting.
776 */
777static void make_black_object(unsigned long ptr)
778{
779 paint_ptr(ptr, KMEMLEAK_BLACK);
780}
781
782/*
783 * Add a scanning area to the object. If at least one such area is added,
784 * kmemleak will only scan these ranges rather than the whole memory block.
785 */
786static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
787{
788 unsigned long flags;
789 struct kmemleak_object *object;
790 struct kmemleak_scan_area *area = NULL;
791
792 object = find_and_get_object(ptr, 1);
793 if (!object) {
794 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
795 ptr);
796 return;
797 }
798
799 if (scan_area_cache)
800 area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
801
802 raw_spin_lock_irqsave(&object->lock, flags);
803 if (!area) {
804 pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
805 /* mark the object for full scan to avoid false positives */
806 object->flags |= OBJECT_FULL_SCAN;
807 goto out_unlock;
808 }
809 if (size == SIZE_MAX) {
810 size = object->pointer + object->size - ptr;
811 } else if (ptr + size > object->pointer + object->size) {
812 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
813 dump_object_info(object);
814 kmem_cache_free(scan_area_cache, area);
815 goto out_unlock;
816 }
817
818 INIT_HLIST_NODE(&area->node);
819 area->start = ptr;
820 area->size = size;
821
822 hlist_add_head(&area->node, &object->area_list);
823out_unlock:
824 raw_spin_unlock_irqrestore(&object->lock, flags);
825 put_object(object);
826}
827
828/*
829 * Any surplus references (object already gray) to 'ptr' are passed to
830 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
831 * vm_struct may be used as an alternative reference to the vmalloc'ed object
832 * (see free_thread_stack()).
833 */
834static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
835{
836 unsigned long flags;
837 struct kmemleak_object *object;
838
839 object = find_and_get_object(ptr, 0);
840 if (!object) {
841 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
842 ptr);
843 return;
844 }
845
846 raw_spin_lock_irqsave(&object->lock, flags);
847 object->excess_ref = excess_ref;
848 raw_spin_unlock_irqrestore(&object->lock, flags);
849 put_object(object);
850}
851
852/*
 853 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 854 * pointer. Such an object will not be scanned by kmemleak but references to it
855 * are searched.
856 */
857static void object_no_scan(unsigned long ptr)
858{
859 unsigned long flags;
860 struct kmemleak_object *object;
861
862 object = find_and_get_object(ptr, 0);
863 if (!object) {
864 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
865 return;
866 }
867
868 raw_spin_lock_irqsave(&object->lock, flags);
869 object->flags |= OBJECT_NO_SCAN;
870 raw_spin_unlock_irqrestore(&object->lock, flags);
871 put_object(object);
872}
873
874/**
875 * kmemleak_alloc - register a newly allocated object
876 * @ptr: pointer to beginning of the object
877 * @size: size of the object
878 * @min_count: minimum number of references to this object. If during memory
879 * scanning a number of references less than @min_count is found,
880 * the object is reported as a memory leak. If @min_count is 0,
881 * the object is never reported as a leak. If @min_count is -1,
882 * the object is ignored (not scanned and not reported as a leak)
883 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
884 *
885 * This function is called from the kernel allocators when a new object
886 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
887 */
888void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
889 gfp_t gfp)
890{
891 pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
892
893 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
894 create_object((unsigned long)ptr, size, min_count, gfp);
895}
896EXPORT_SYMBOL_GPL(kmemleak_alloc);
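
/*
 * The slab and vmalloc hooks call this automatically; a custom allocator
 * not covered by those hooks would pair the calls like this (sketch,
 * my_pool_alloc()/my_pool_free() are made up):
 *
 *	void *obj = my_pool_alloc(size);
 *	if (obj)
 *		kmemleak_alloc(obj, size, 1, GFP_KERNEL);
 *	...
 *	kmemleak_free(obj);
 *	my_pool_free(obj);
 */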
897
898/**
899 * kmemleak_alloc_percpu - register a newly allocated __percpu object
900 * @ptr: __percpu pointer to beginning of the object
901 * @size: size of the object
902 * @gfp: flags used for kmemleak internal memory allocations
903 *
904 * This function is called from the kernel percpu allocator when a new object
905 * (memory block) is allocated (alloc_percpu).
906 */
907void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
908 gfp_t gfp)
909{
910 unsigned int cpu;
911
912 pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
913
914 /*
915 * Percpu allocations are only scanned and not reported as leaks
916 * (min_count is set to 0).
917 */
918 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
919 for_each_possible_cpu(cpu)
920 create_object((unsigned long)per_cpu_ptr(ptr, cpu),
921 size, 0, gfp);
922}
923EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
924
925/**
926 * kmemleak_vmalloc - register a newly vmalloc'ed object
927 * @area: pointer to vm_struct
928 * @size: size of the object
929 * @gfp: __vmalloc() flags used for kmemleak internal memory allocations
930 *
931 * This function is called from the vmalloc() kernel allocator when a new
932 * object (memory block) is allocated.
933 */
934void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
935{
936 pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
937
938 /*
939 * A min_count = 2 is needed because vm_struct contains a reference to
940 * the virtual address of the vmalloc'ed block.
941 */
942 if (kmemleak_enabled) {
943 create_object((unsigned long)area->addr, size, 2, gfp);
944 object_set_excess_ref((unsigned long)area,
945 (unsigned long)area->addr);
946 }
947}
948EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
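
/*
 * Drivers do not call this directly; the vmalloc() core is expected to
 * invoke it once a new area has been set up, roughly (sketch):
 *
 *	area = get_vm_area(size, flags);
 *	...
 *	kmemleak_vmalloc(area, size, gfp_mask);
 */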
949
950/**
951 * kmemleak_free - unregister a previously registered object
952 * @ptr: pointer to beginning of the object
953 *
954 * This function is called from the kernel allocators when an object (memory
955 * block) is freed (kmem_cache_free, kfree, vfree etc.).
956 */
957void __ref kmemleak_free(const void *ptr)
958{
959 pr_debug("%s(0x%p)\n", __func__, ptr);
960
961 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
962 delete_object_full((unsigned long)ptr);
963}
964EXPORT_SYMBOL_GPL(kmemleak_free);
965
966/**
967 * kmemleak_free_part - partially unregister a previously registered object
968 * @ptr: pointer to the beginning or inside the object. This also
969 * represents the start of the range to be freed
970 * @size: size to be unregistered
971 *
972 * This function is called when only a part of a memory block is freed
973 * (usually from the bootmem allocator).
974 */
975void __ref kmemleak_free_part(const void *ptr, size_t size)
976{
977 pr_debug("%s(0x%p)\n", __func__, ptr);
978
979 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
980 delete_object_part((unsigned long)ptr, size);
981}
982EXPORT_SYMBOL_GPL(kmemleak_free_part);
983
984/**
985 * kmemleak_free_percpu - unregister a previously registered __percpu object
986 * @ptr: __percpu pointer to beginning of the object
987 *
988 * This function is called from the kernel percpu allocator when an object
989 * (memory block) is freed (free_percpu).
990 */
991void __ref kmemleak_free_percpu(const void __percpu *ptr)
992{
993 unsigned int cpu;
994
995 pr_debug("%s(0x%p)\n", __func__, ptr);
996
997 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
998 for_each_possible_cpu(cpu)
999 delete_object_full((unsigned long)per_cpu_ptr(ptr,
1000 cpu));
1001}
1002EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1003
1004/**
1005 * kmemleak_update_trace - update object allocation stack trace
1006 * @ptr: pointer to beginning of the object
1007 *
1008 * Override the object allocation stack trace for cases where the actual
1009 * allocation place is not always useful.
1010 */
1011void __ref kmemleak_update_trace(const void *ptr)
1012{
1013 struct kmemleak_object *object;
1014 unsigned long flags;
1015
1016 pr_debug("%s(0x%p)\n", __func__, ptr);
1017
1018 if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1019 return;
1020
1021 object = find_and_get_object((unsigned long)ptr, 1);
1022 if (!object) {
1023#ifdef DEBUG
1024 kmemleak_warn("Updating stack trace for unknown object at %p\n",
1025 ptr);
1026#endif
1027 return;
1028 }
1029
1030 raw_spin_lock_irqsave(&object->lock, flags);
1031 object->trace_len = __save_stack_trace(object->trace);
1032 raw_spin_unlock_irqrestore(&object->lock, flags);
1033
1034 put_object(object);
1035}
1036EXPORT_SYMBOL(kmemleak_update_trace);
1037
1038/**
1039 * kmemleak_not_leak - mark an allocated object as false positive
1040 * @ptr: pointer to beginning of the object
1041 *
1042 * Calling this function on an object will cause the memory block to no longer
1043 * be reported as leak and always be scanned.
1044 */
1045void __ref kmemleak_not_leak(const void *ptr)
1046{
1047 pr_debug("%s(0x%p)\n", __func__, ptr);
1048
1049 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1050 make_gray_object((unsigned long)ptr);
1051}
1052EXPORT_SYMBOL(kmemleak_not_leak);
1053
1054/**
1055 * kmemleak_ignore - ignore an allocated object
1056 * @ptr: pointer to beginning of the object
1057 *
1058 * Calling this function on an object will cause the memory block to be
1059 * ignored (not scanned and not reported as a leak). This is usually done when
1060 * it is known that the corresponding block is not a leak and does not contain
1061 * any references to other allocated memory blocks.
1062 */
1063void __ref kmemleak_ignore(const void *ptr)
1064{
1065 pr_debug("%s(0x%p)\n", __func__, ptr);
1066
1067 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1068 make_black_object((unsigned long)ptr);
1069}
1070EXPORT_SYMBOL(kmemleak_ignore);
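
/*
 * Choosing between the two annotations above (illustrative snippet;
 * "dev->dma_ring" and RING_SIZE are made up):
 *
 *	dev->dma_ring = kmalloc(RING_SIZE, GFP_KERNEL);
 *	// the only reference is programmed into a device register:
 *	kmemleak_not_leak(dev->dma_ring);
 *	// or, if the buffer can never hold kernel pointers either:
 *	// kmemleak_ignore(dev->dma_ring);
 */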
1071
1072/**
1073 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1074 * @ptr: pointer to beginning or inside the object. This also
1075 * represents the start of the scan area
1076 * @size: size of the scan area
1077 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1078 *
1079 * This function is used when it is known that only certain parts of an object
1080 * contain references to other objects. Kmemleak will only scan these areas
 1081 * reducing the number of false negatives.
1082 */
1083void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1084{
1085 pr_debug("%s(0x%p)\n", __func__, ptr);
1086
1087 if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1088 add_scan_area((unsigned long)ptr, size, gfp);
1089}
1090EXPORT_SYMBOL(kmemleak_scan_area);
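
/*
 * Example (illustrative type): if only one field of a large object may
 * hold pointers, restricting the scan prevents random bytes elsewhere in
 * the block from being mistaken for references (which would keep leaked
 * objects alive as false negatives):
 *
 *	struct mixed {
 *		u8 raw[4096];		// bulk data, never holds pointers
 *		struct list_head list;	// the only pointers in the object
 *	};
 *
 *	struct mixed *m = kmalloc(sizeof(*m), GFP_KERNEL);
 *	kmemleak_scan_area(&m->list, sizeof(m->list), GFP_KERNEL);
 */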
1091
1092/**
1093 * kmemleak_no_scan - do not scan an allocated object
1094 * @ptr: pointer to beginning of the object
1095 *
1096 * This function notifies kmemleak not to scan the given memory block. Useful
1097 * in situations where it is known that the given object does not contain any
 1098 * references to other objects. Kmemleak will not scan such objects, reducing
1099 * the number of false negatives.
1100 */
1101void __ref kmemleak_no_scan(const void *ptr)
1102{
1103 pr_debug("%s(0x%p)\n", __func__, ptr);
1104
1105 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1106 object_no_scan((unsigned long)ptr);
1107}
1108EXPORT_SYMBOL(kmemleak_no_scan);
1109
1110/**
1111 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1112 * address argument
1113 * @phys: physical address of the object
1114 * @size: size of the object
1115 * @min_count: minimum number of references to this object.
1116 * See kmemleak_alloc()
1117 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1118 */
1119void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
1120 gfp_t gfp)
1121{
1122 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1123 kmemleak_alloc(__va(phys), size, min_count, gfp);
1124}
1125EXPORT_SYMBOL(kmemleak_alloc_phys);
1126
1127/**
1128 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1129 * physical address argument
 1130 * @phys: physical address of the beginning or inside an object. This
1131 * also represents the start of the range to be freed
1132 * @size: size to be unregistered
1133 */
1134void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1135{
1136 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1137 kmemleak_free_part(__va(phys), size);
1138}
1139EXPORT_SYMBOL(kmemleak_free_part_phys);
1140
1141/**
1142 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
1143 * address argument
1144 * @phys: physical address of the object
1145 */
1146void __ref kmemleak_not_leak_phys(phys_addr_t phys)
1147{
1148 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1149 kmemleak_not_leak(__va(phys));
1150}
1151EXPORT_SYMBOL(kmemleak_not_leak_phys);
1152
1153/**
1154 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1155 * address argument
1156 * @phys: physical address of the object
1157 */
1158void __ref kmemleak_ignore_phys(phys_addr_t phys)
1159{
1160 if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1161 kmemleak_ignore(__va(phys));
1162}
1163EXPORT_SYMBOL(kmemleak_ignore_phys);
1164
1165/*
1166 * Update an object's checksum and return true if it was modified.
1167 */
1168static bool update_checksum(struct kmemleak_object *object)
1169{
1170 u32 old_csum = object->checksum;
1171
1172 kasan_disable_current();
1173 kcsan_disable_current();
1174 object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
1175 kasan_enable_current();
1176 kcsan_enable_current();
1177
1178 return object->checksum != old_csum;
1179}
1180
1181/*
1182 * Update an object's references. object->lock must be held by the caller.
1183 */
1184static void update_refs(struct kmemleak_object *object)
1185{
1186 if (!color_white(object)) {
1187 /* non-orphan, ignored or new */
1188 return;
1189 }
1190
1191 /*
1192 * Increase the object's reference count (number of pointers to the
1193 * memory block). If this count reaches the required minimum, the
1194 * object's color will become gray and it will be added to the
1195 * gray_list.
1196 */
1197 object->count++;
1198 if (color_gray(object)) {
1199 /* put_object() called when removing from gray_list */
1200 WARN_ON(!get_object(object));
1201 list_add_tail(&object->gray_list, &gray_list);
1202 }
1203}
1204
1205/*
1206 * Memory scanning is a long process and it needs to be interruptible. This
 1207 * function checks whether such an interrupt condition has occurred.
1208 */
1209static int scan_should_stop(void)
1210{
1211 if (!kmemleak_enabled)
1212 return 1;
1213
1214 /*
1215 * This function may be called from either process or kthread context,
1216 * hence the need to check for both stop conditions.
1217 */
1218 if (current->mm)
1219 return signal_pending(current);
1220 else
1221 return kthread_should_stop();
1224}
1225
1226/*
1227 * Scan a memory block (exclusive range) for valid pointers and add those
1228 * found to the gray list.
1229 */
1230static void scan_block(void *_start, void *_end,
1231 struct kmemleak_object *scanned)
1232{
1233 unsigned long *ptr;
1234 unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1235 unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1236 unsigned long flags;
1237 unsigned long untagged_ptr;
1238
1239 raw_spin_lock_irqsave(&kmemleak_lock, flags);
1240 for (ptr = start; ptr < end; ptr++) {
1241 struct kmemleak_object *object;
1242 unsigned long pointer;
1243 unsigned long excess_ref;
1244
1245 if (scan_should_stop())
1246 break;
1247
1248 kasan_disable_current();
1249 pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
1250 kasan_enable_current();
1251
1252 untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1253 if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1254 continue;
1255
1256 /*
1257 * No need for get_object() here since we hold kmemleak_lock.
1258 * object->use_count cannot be dropped to 0 while the object
1259 * is still present in object_tree_root and object_list
1260 * (with updates protected by kmemleak_lock).
1261 */
1262 object = lookup_object(pointer, 1);
1263 if (!object)
1264 continue;
1265 if (object == scanned)
1266 /* self referenced, ignore */
1267 continue;
1268
1269 /*
1270 * Avoid the lockdep recursive warning on object->lock being
1271 * previously acquired in scan_object(). These locks are
1272 * enclosed by scan_mutex.
1273 */
1274 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1275 /* only pass surplus references (object already gray) */
1276 if (color_gray(object)) {
1277 excess_ref = object->excess_ref;
1278 /* no need for update_refs() if object already gray */
1279 } else {
1280 excess_ref = 0;
1281 update_refs(object);
1282 }
1283 raw_spin_unlock(&object->lock);
1284
1285 if (excess_ref) {
1286 object = lookup_object(excess_ref, 0);
1287 if (!object)
1288 continue;
1289 if (object == scanned)
1290 /* circular reference, ignore */
1291 continue;
1292 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1293 update_refs(object);
1294 raw_spin_unlock(&object->lock);
1295 }
1296 }
1297 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1298}
1299
1300/*
1301 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1302 */
1303#ifdef CONFIG_SMP
1304static void scan_large_block(void *start, void *end)
1305{
1306 void *next;
1307
1308 while (start < end) {
1309 next = min(start + MAX_SCAN_SIZE, end);
1310 scan_block(start, next, NULL);
1311 start = next;
1312 cond_resched();
1313 }
1314}
1315#endif
1316
1317/*
 1318 * Scan a memory block corresponding to a kmemleak_object. A precondition is
 1319 * that object->use_count >= 1.
1320 */
1321static void scan_object(struct kmemleak_object *object)
1322{
1323 struct kmemleak_scan_area *area;
1324 unsigned long flags;
1325
1326 /*
1327 * Once the object->lock is acquired, the corresponding memory block
1328 * cannot be freed (the same lock is acquired in delete_object).
1329 */
1330 raw_spin_lock_irqsave(&object->lock, flags);
1331 if (object->flags & OBJECT_NO_SCAN)
1332 goto out;
1333 if (!(object->flags & OBJECT_ALLOCATED))
1334 /* already freed object */
1335 goto out;
1336 if (hlist_empty(&object->area_list) ||
1337 object->flags & OBJECT_FULL_SCAN) {
1338 void *start = (void *)object->pointer;
1339 void *end = (void *)(object->pointer + object->size);
1340 void *next;
1341
1342 do {
1343 next = min(start + MAX_SCAN_SIZE, end);
1344 scan_block(start, next, object);
1345
1346 start = next;
1347 if (start >= end)
1348 break;
1349
1350 raw_spin_unlock_irqrestore(&object->lock, flags);
1351 cond_resched();
1352 raw_spin_lock_irqsave(&object->lock, flags);
1353 } while (object->flags & OBJECT_ALLOCATED);
1354 } else
1355 hlist_for_each_entry(area, &object->area_list, node)
1356 scan_block((void *)area->start,
1357 (void *)(area->start + area->size),
1358 object);
1359out:
1360 raw_spin_unlock_irqrestore(&object->lock, flags);
1361}
1362
1363/*
1364 * Scan the objects already referenced (gray objects). More objects will be
1365 * referenced and, if there are no memory leaks, all the objects are scanned.
1366 */
1367static void scan_gray_list(void)
1368{
1369 struct kmemleak_object *object, *tmp;
1370
1371 /*
1372 * The list traversal is safe for both tail additions and removals
1373 * from inside the loop. The kmemleak objects cannot be freed from
1374 * outside the loop because their use_count was incremented.
1375 */
1376 object = list_entry(gray_list.next, typeof(*object), gray_list);
1377 while (&object->gray_list != &gray_list) {
1378 cond_resched();
1379
1380 /* may add new objects to the list */
1381 if (!scan_should_stop())
1382 scan_object(object);
1383
1384 tmp = list_entry(object->gray_list.next, typeof(*object),
1385 gray_list);
1386
1387 /* remove the object from the list and release it */
1388 list_del(&object->gray_list);
1389 put_object(object);
1390
1391 object = tmp;
1392 }
1393 WARN_ON(!list_empty(&gray_list));
1394}
1395
1396/*
1397 * Scan data sections and all the referenced memory blocks allocated via the
1398 * kernel's standard allocators. This function must be called with the
1399 * scan_mutex held.
1400 */
1401static void kmemleak_scan(void)
1402{
1403 unsigned long flags;
1404 struct kmemleak_object *object;
1405 int i;
1406 int new_leaks = 0;
1407
1408 jiffies_last_scan = jiffies;
1409
1410 /* prepare the kmemleak_object's */
1411 rcu_read_lock();
1412 list_for_each_entry_rcu(object, &object_list, object_list) {
1413 raw_spin_lock_irqsave(&object->lock, flags);
1414#ifdef DEBUG
1415 /*
1416 * With a few exceptions there should be a maximum of
1417 * 1 reference to any object at this point.
1418 */
1419 if (atomic_read(&object->use_count) > 1) {
1420 pr_debug("object->use_count = %d\n",
1421 atomic_read(&object->use_count));
1422 dump_object_info(object);
1423 }
1424#endif
1425 /* reset the reference count (whiten the object) */
1426 object->count = 0;
1427 if (color_gray(object) && get_object(object))
1428 list_add_tail(&object->gray_list, &gray_list);
1429
1430 raw_spin_unlock_irqrestore(&object->lock, flags);
1431 }
1432 rcu_read_unlock();
1433
1434#ifdef CONFIG_SMP
1435 /* per-cpu sections scanning */
1436 for_each_possible_cpu(i)
1437 scan_large_block(__per_cpu_start + per_cpu_offset(i),
1438 __per_cpu_end + per_cpu_offset(i));
1439#endif
1440
1441 /*
1442 * Struct page scanning for each node.
1443 */
1444 get_online_mems();
1445 for_each_online_node(i) {
1446 unsigned long start_pfn = node_start_pfn(i);
1447 unsigned long end_pfn = node_end_pfn(i);
1448 unsigned long pfn;
1449
1450 for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1451 struct page *page = pfn_to_online_page(pfn);
1452
1453 if (!page)
1454 continue;
1455
1456 /* only scan pages belonging to this node */
1457 if (page_to_nid(page) != i)
1458 continue;
1459 /* only scan if page is in use */
1460 if (page_count(page) == 0)
1461 continue;
1462 scan_block(page, page + 1, NULL);
1463 if (!(pfn & 63))
1464 cond_resched();
1465 }
1466 }
1467 put_online_mems();
1468
1469 /*
1470 * Scanning the task stacks (may introduce false negatives).
1471 */
1472 if (kmemleak_stack_scan) {
1473 struct task_struct *p, *g;
1474
1475 rcu_read_lock();
1476 for_each_process_thread(g, p) {
1477 void *stack = try_get_task_stack(p);
1478 if (stack) {
1479 scan_block(stack, stack + THREAD_SIZE, NULL);
1480 put_task_stack(p);
1481 }
1482 }
1483 rcu_read_unlock();
1484 }
1485
1486 /*
1487 * Scan the objects already referenced from the sections scanned
1488 * above.
1489 */
1490 scan_gray_list();
1491
1492 /*
1493 * Check for new or unreferenced objects modified since the previous
1494 * scan and color them gray until the next scan.
1495 */
1496 rcu_read_lock();
1497 list_for_each_entry_rcu(object, &object_list, object_list) {
1498 raw_spin_lock_irqsave(&object->lock, flags);
1499 if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1500 && update_checksum(object) && get_object(object)) {
1501 /* color it gray temporarily */
1502 object->count = object->min_count;
1503 list_add_tail(&object->gray_list, &gray_list);
1504 }
1505 raw_spin_unlock_irqrestore(&object->lock, flags);
1506 }
1507 rcu_read_unlock();
1508
1509 /*
1510 * Re-scan the gray list for modified unreferenced objects.
1511 */
1512 scan_gray_list();
1513
1514 /*
1515 * If scanning was stopped do not report any new unreferenced objects.
1516 */
1517 if (scan_should_stop())
1518 return;
1519
1520 /*
1521 * Scanning result reporting.
1522 */
1523 rcu_read_lock();
1524 list_for_each_entry_rcu(object, &object_list, object_list) {
1525 raw_spin_lock_irqsave(&object->lock, flags);
1526 if (unreferenced_object(object) &&
1527 !(object->flags & OBJECT_REPORTED)) {
1528 object->flags |= OBJECT_REPORTED;
1529
1530 if (kmemleak_verbose)
1531 print_unreferenced(NULL, object);
1532
1533 new_leaks++;
1534 }
1535 raw_spin_unlock_irqrestore(&object->lock, flags);
1536 }
1537 rcu_read_unlock();
1538
1539 if (new_leaks) {
1540 kmemleak_found_leaks = true;
1541
1542 pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1543 new_leaks);
1544 }
1545
1546}
1547
1548/*
1549 * Thread function performing automatic memory scanning. Unreferenced objects
 1550 * at the end of a memory scan are reported, but only the first time.
1551 */
1552static int kmemleak_scan_thread(void *arg)
1553{
1554 static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1555
1556 pr_info("Automatic memory scanning thread started\n");
1557 set_user_nice(current, 10);
1558
1559 /*
1560 * Wait before the first scan to allow the system to fully initialize.
1561 */
1562 if (first_run) {
1563 signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1564 first_run = 0;
1565 while (timeout && !kthread_should_stop())
1566 timeout = schedule_timeout_interruptible(timeout);
1567 }
1568
1569 while (!kthread_should_stop()) {
1570 signed long timeout = READ_ONCE(jiffies_scan_wait);
1571
1572 mutex_lock(&scan_mutex);
1573 kmemleak_scan();
1574 mutex_unlock(&scan_mutex);
1575
1576 /* wait before the next scan */
1577 while (timeout && !kthread_should_stop())
1578 timeout = schedule_timeout_interruptible(timeout);
1579 }
1580
1581 pr_info("Automatic memory scanning thread ended\n");
1582
1583 return 0;
1584}
1585
1586/*
1587 * Start the automatic memory scanning thread. This function must be called
1588 * with the scan_mutex held.
1589 */
1590static void start_scan_thread(void)
1591{
1592 if (scan_thread)
1593 return;
1594 scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1595 if (IS_ERR(scan_thread)) {
1596 pr_warn("Failed to create the scan thread\n");
1597 scan_thread = NULL;
1598 }
1599}
1600
1601/*
1602 * Stop the automatic memory scanning thread.
1603 */
1604static void stop_scan_thread(void)
1605{
1606 if (scan_thread) {
1607 kthread_stop(scan_thread);
1608 scan_thread = NULL;
1609 }
1610}
1611
1612/*
1613 * Iterate over the object_list and return the first valid object at or after
 1614 * the required position with its use_count incremented.
1616 */
1617static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1618{
1619 struct kmemleak_object *object;
1620 loff_t n = *pos;
1621 int err;
1622
1623 err = mutex_lock_interruptible(&scan_mutex);
1624 if (err < 0)
1625 return ERR_PTR(err);
1626
1627 rcu_read_lock();
1628 list_for_each_entry_rcu(object, &object_list, object_list) {
1629 if (n-- > 0)
1630 continue;
1631 if (get_object(object))
1632 goto out;
1633 }
1634 object = NULL;
1635out:
1636 return object;
1637}
1638
1639/*
1640 * Return the next object in the object_list. The function decrements the
1641 * use_count of the previous object and increases that of the next one.
1642 */
1643static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1644{
1645 struct kmemleak_object *prev_obj = v;
1646 struct kmemleak_object *next_obj = NULL;
1647 struct kmemleak_object *obj = prev_obj;
1648
1649 ++(*pos);
1650
1651 list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1652 if (get_object(obj)) {
1653 next_obj = obj;
1654 break;
1655 }
1656 }
1657
1658 put_object(prev_obj);
1659 return next_obj;
1660}
1661
1662/*
1663 * Decrement the use_count of the last object required, if any.
1664 */
1665static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1666{
1667 if (!IS_ERR(v)) {
1668 /*
1669 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1670 * waiting was interrupted, so only release it if !IS_ERR.
1671 */
1672 rcu_read_unlock();
1673 mutex_unlock(&scan_mutex);
1674 if (v)
1675 put_object(v);
1676 }
1677}
1678
1679/*
1680 * Print the information for an unreferenced object to the seq file.
1681 */
1682static int kmemleak_seq_show(struct seq_file *seq, void *v)
1683{
1684 struct kmemleak_object *object = v;
1685 unsigned long flags;
1686
1687 raw_spin_lock_irqsave(&object->lock, flags);
1688 if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1689 print_unreferenced(seq, object);
1690 raw_spin_unlock_irqrestore(&object->lock, flags);
1691 return 0;
1692}
1693
1694static const struct seq_operations kmemleak_seq_ops = {
1695 .start = kmemleak_seq_start,
1696 .next = kmemleak_seq_next,
1697 .stop = kmemleak_seq_stop,
1698 .show = kmemleak_seq_show,
1699};
1700
1701static int kmemleak_open(struct inode *inode, struct file *file)
1702{
1703 return seq_open(file, &kmemleak_seq_ops);
1704}
1705
1706static int dump_str_object_info(const char *str)
1707{
1708 unsigned long flags;
1709 struct kmemleak_object *object;
1710 unsigned long addr;
1711
1712 if (kstrtoul(str, 0, &addr))
1713 return -EINVAL;
1714 object = find_and_get_object(addr, 0);
1715 if (!object) {
1716 pr_info("Unknown object at 0x%08lx\n", addr);
1717 return -EINVAL;
1718 }
1719
1720 raw_spin_lock_irqsave(&object->lock, flags);
1721 dump_object_info(object);
1722 raw_spin_unlock_irqrestore(&object->lock, flags);
1723
1724 put_object(object);
1725 return 0;
1726}
1727
1728/*
1729 * We use grey instead of black to ensure we can do future scans on the same
 1730 * objects. If we did not do future scans, these black objects could
1731 * potentially contain references to newly allocated objects in the future and
1732 * we'd end up with false positives.
1733 */
1734static void kmemleak_clear(void)
1735{
1736 struct kmemleak_object *object;
1737 unsigned long flags;
1738
1739 rcu_read_lock();
1740 list_for_each_entry_rcu(object, &object_list, object_list) {
1741 raw_spin_lock_irqsave(&object->lock, flags);
1742 if ((object->flags & OBJECT_REPORTED) &&
1743 unreferenced_object(object))
1744 __paint_it(object, KMEMLEAK_GREY);
1745 raw_spin_unlock_irqrestore(&object->lock, flags);
1746 }
1747 rcu_read_unlock();
1748
1749 kmemleak_found_leaks = false;
1750}
1751
1752static void __kmemleak_do_cleanup(void);
1753
1754/*
1755 * File write operation to configure kmemleak at run-time. The following
1756 * commands can be written to the /sys/kernel/debug/kmemleak file:
1757 * off - disable kmemleak (irreversible)
1758 * stack=on - enable the task stacks scanning
 1759 * stack=off - disable the task stacks scanning
1760 * scan=on - start the automatic memory scanning thread
1761 * scan=off - stop the automatic memory scanning thread
1762 * scan=... - set the automatic memory scanning period in seconds (0 to
1763 * disable it)
1764 * scan - trigger a memory scan
1765 * clear - mark all current reported unreferenced kmemleak objects as
1766 * grey to ignore printing them, or free all kmemleak objects
1767 * if kmemleak has been disabled.
1768 * dump=... - dump information about the object found at the given address
1769 */
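/*
 * For illustration, typical shell usage (assuming debugfs is mounted at
 * /sys/kernel/debug):
 *
 *	echo scan > /sys/kernel/debug/kmemleak		# trigger a scan now
 *	echo scan=600 > /sys/kernel/debug/kmemleak	# rescan every 600 seconds
 *	echo clear > /sys/kernel/debug/kmemleak		# grey out current reports
 *	cat /sys/kernel/debug/kmemleak			# list unreferenced objects
 */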
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned int secs;
		unsigned long msecs;

		ret = kstrtouint(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;

		msecs = secs * MSEC_PER_SEC;
		if (msecs > UINT_MAX)
			msecs = UINT_MAX;

		stop_scan_thread();
		if (msecs) {
			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, so there is no need for RCU
	 * list traversal or for holding kmemleak_lock.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no leaks were found (otherwise, kmemleak may still hold useful information
 * on the memory leaks found so far).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once the scan thread is guaranteed to have stopped, it is safe to
	 * no longer track object freeing. The ordering between the scan
	 * thread stopping and the memory accesses below is guaranteed by
	 * kthread_stop().
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak disabling (enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0)
		kmemleak_skip_disable = 1;
	else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);
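
/*
 * For illustration, the matching kernel command line usage (documented in
 * Documentation/admin-guide/kernel-parameters.txt):
 *
 *	kmemleak=off	- disable kmemleak at boot
 *	kmemleak=on	- keep kmemleak enabled when
 *			  CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is set
 */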

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);
58
59#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
60
61#include <linux/init.h>
62#include <linux/kernel.h>
63#include <linux/list.h>
64#include <linux/sched/signal.h>
65#include <linux/sched/task.h>
66#include <linux/sched/task_stack.h>
67#include <linux/jiffies.h>
68#include <linux/delay.h>
69#include <linux/export.h>
70#include <linux/kthread.h>
71#include <linux/rbtree.h>
72#include <linux/fs.h>
73#include <linux/debugfs.h>
74#include <linux/seq_file.h>
75#include <linux/cpumask.h>
76#include <linux/spinlock.h>
77#include <linux/module.h>
78#include <linux/mutex.h>
79#include <linux/rcupdate.h>
80#include <linux/stacktrace.h>
81#include <linux/stackdepot.h>
82#include <linux/cache.h>
83#include <linux/percpu.h>
84#include <linux/memblock.h>
85#include <linux/pfn.h>
86#include <linux/mmzone.h>
87#include <linux/slab.h>
88#include <linux/thread_info.h>
89#include <linux/err.h>
90#include <linux/uaccess.h>
91#include <linux/string.h>
92#include <linux/nodemask.h>
93#include <linux/mm.h>
94#include <linux/workqueue.h>
95#include <linux/crc32.h>
96
97#include <asm/sections.h>
98#include <asm/processor.h>
99#include <linux/atomic.h>
100
101#include <linux/kasan.h>
102#include <linux/kfence.h>
103#include <linux/kmemleak.h>
104#include <linux/memory_hotplug.h>
105
106/*
107 * Kmemleak configuration and common defines.
108 */
109#define MAX_TRACE 16 /* stack trace length */
110#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */
111#define SECS_FIRST_SCAN 60 /* delay before the first scan */
112#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */
113#define MAX_SCAN_SIZE 4096 /* maximum size of a scanned block */
114
115#define BYTES_PER_POINTER sizeof(void *)
116
117/* scanning area inside a memory block */
118struct kmemleak_scan_area {
119 struct hlist_node node;
120 unsigned long start;
121 size_t size;
122};
123
124#define KMEMLEAK_GREY 0
125#define KMEMLEAK_BLACK -1
126
127/*
128 * Structure holding the metadata for each allocated memory block.
129 * Modifications to such objects should be made while holding the
130 * object->lock. Insertions or deletions from object_list, gray_list or
131 * rb_node are already protected by the corresponding locks or mutex (see
132 * the notes on locking above). These objects are reference-counted
133 * (use_count) and freed using the RCU mechanism.
134 */
135struct kmemleak_object {
136 raw_spinlock_t lock;
137 unsigned int flags; /* object status flags */
138 struct list_head object_list;
139 struct list_head gray_list;
140 struct rb_node rb_node;
141 struct rcu_head rcu; /* object_list lockless traversal */
142 /* object usage count; object freed when use_count == 0 */
143 atomic_t use_count;
144 unsigned int del_state; /* deletion state */
145 unsigned long pointer;
146 size_t size;
147 /* pass surplus references to this pointer */
148 unsigned long excess_ref;
149 /* minimum number of a pointers found before it is considered leak */
150 int min_count;
151 /* the total number of pointers found pointing to this object */
152 int count;
153 /* checksum for detecting modified objects */
154 u32 checksum;
155 depot_stack_handle_t trace_handle;
156 /* memory ranges to be scanned inside an object (empty for all) */
157 struct hlist_head area_list;
158 unsigned long jiffies; /* creation timestamp */
159 pid_t pid; /* pid of the current task */
160 char comm[TASK_COMM_LEN]; /* executable name */
161};
162
163/* flag representing the memory block allocation status */
164#define OBJECT_ALLOCATED (1 << 0)
165/* flag set after the first reporting of an unreference object */
166#define OBJECT_REPORTED (1 << 1)
167/* flag set to not scan the object */
168#define OBJECT_NO_SCAN (1 << 2)
169/* flag set to fully scan the object when scan_area allocation failed */
170#define OBJECT_FULL_SCAN (1 << 3)
171/* flag set for object allocated with physical address */
172#define OBJECT_PHYS (1 << 4)
173/* flag set for per-CPU pointers */
174#define OBJECT_PERCPU (1 << 5)
175
176/* set when __remove_object() called */
177#define DELSTATE_REMOVED (1 << 0)
178/* set to temporarily prevent deletion from object_list */
179#define DELSTATE_NO_DELETE (1 << 1)
180
181#define HEX_PREFIX " "
182/* number of bytes to print per line; must be 16 or 32 */
183#define HEX_ROW_SIZE 16
184/* number of bytes to print at a time (1, 2, 4, 8) */
185#define HEX_GROUP_SIZE 1
186/* include ASCII after the hex output */
187#define HEX_ASCII 1
188/* max number of lines to be printed */
189#define HEX_MAX_LINES 2
190
191/* the list of all allocated objects */
192static LIST_HEAD(object_list);
193/* the list of gray-colored objects (see color_gray comment below) */
194static LIST_HEAD(gray_list);
195/* memory pool allocation */
196static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
197static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
198static LIST_HEAD(mem_pool_free_list);
199/* search tree for object boundaries */
200static struct rb_root object_tree_root = RB_ROOT;
201/* search tree for object (with OBJECT_PHYS flag) boundaries */
202static struct rb_root object_phys_tree_root = RB_ROOT;
203/* search tree for object (with OBJECT_PERCPU flag) boundaries */
204static struct rb_root object_percpu_tree_root = RB_ROOT;
205/* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
206static DEFINE_RAW_SPINLOCK(kmemleak_lock);
207
208/* allocation caches for kmemleak internal data */
209static struct kmem_cache *object_cache;
210static struct kmem_cache *scan_area_cache;
211
212/* set if tracing memory operations is enabled */
213static int kmemleak_enabled = 1;
214/* same as above but only for the kmemleak_free() callback */
215static int kmemleak_free_enabled = 1;
216/* set in the late_initcall if there were no errors */
217static int kmemleak_late_initialized;
218/* set if a kmemleak warning was issued */
219static int kmemleak_warning;
220/* set if a fatal kmemleak error has occurred */
221static int kmemleak_error;
222
223/* minimum and maximum address that may be valid pointers */
224static unsigned long min_addr = ULONG_MAX;
225static unsigned long max_addr;
226
227/* minimum and maximum address that may be valid per-CPU pointers */
228static unsigned long min_percpu_addr = ULONG_MAX;
229static unsigned long max_percpu_addr;
230
231static struct task_struct *scan_thread;
232/* used to avoid reporting of recently allocated objects */
233static unsigned long jiffies_min_age;
234static unsigned long jiffies_last_scan;
235/* delay between automatic memory scannings */
236static unsigned long jiffies_scan_wait;
237/* enables or disables the task stacks scanning */
238static int kmemleak_stack_scan = 1;
239/* protects the memory scanning, parameters and debug/kmemleak file access */
240static DEFINE_MUTEX(scan_mutex);
241/* setting kmemleak=on, will set this var, skipping the disable */
242static int kmemleak_skip_disable;
243/* If there are leaks that can be reported */
244static bool kmemleak_found_leaks;
245
246static bool kmemleak_verbose;
247module_param_named(verbose, kmemleak_verbose, bool, 0600);
248
249static void kmemleak_disable(void);
250
251/*
252 * Print a warning and dump the stack trace.
253 */
254#define kmemleak_warn(x...) do { \
255 pr_warn(x); \
256 dump_stack(); \
257 kmemleak_warning = 1; \
258} while (0)
259
260/*
261 * Macro invoked when a serious kmemleak condition occurred and cannot be
262 * recovered from. Kmemleak will be disabled and further allocation/freeing
263 * tracing no longer available.
264 */
265#define kmemleak_stop(x...) do { \
266 kmemleak_warn(x); \
267 kmemleak_disable(); \
268} while (0)
269
270#define warn_or_seq_printf(seq, fmt, ...) do { \
271 if (seq) \
272 seq_printf(seq, fmt, ##__VA_ARGS__); \
273 else \
274 pr_warn(fmt, ##__VA_ARGS__); \
275} while (0)
276
277static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
278 int rowsize, int groupsize, const void *buf,
279 size_t len, bool ascii)
280{
281 if (seq)
282 seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
283 buf, len, ascii);
284 else
285 print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
286 rowsize, groupsize, buf, len, ascii);
287}
288
289/*
290 * Printing of the objects hex dump to the seq file. The number of lines to be
291 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
292 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
293 * with the object->lock held.
294 */
295static void hex_dump_object(struct seq_file *seq,
296 struct kmemleak_object *object)
297{
298 const u8 *ptr = (const u8 *)object->pointer;
299 size_t len;
300
301 if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
302 return;
303
304 if (object->flags & OBJECT_PERCPU)
305 ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);
306
307 /* limit the number of lines to HEX_MAX_LINES */
308 len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
309
310 if (object->flags & OBJECT_PERCPU)
311 warn_or_seq_printf(seq, " hex dump (first %zu bytes on cpu %d):\n",
312 len, raw_smp_processor_id());
313 else
314 warn_or_seq_printf(seq, " hex dump (first %zu bytes):\n", len);
315 kasan_disable_current();
316 warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
317 HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
318 kasan_enable_current();
319}
320
321/*
322 * Object colors, encoded with count and min_count:
323 * - white - orphan object, not enough references to it (count < min_count)
324 * - gray - not orphan, not marked as false positive (min_count == 0) or
325 * sufficient references to it (count >= min_count)
326 * - black - ignore, it doesn't contain references (e.g. text section)
327 * (min_count == -1). No function defined for this color.
328 * Newly created objects don't have any color assigned (object->count == -1)
329 * before the next memory scan when they become white.
330 */
331static bool color_white(const struct kmemleak_object *object)
332{
333 return object->count != KMEMLEAK_BLACK &&
334 object->count < object->min_count;
335}
336
337static bool color_gray(const struct kmemleak_object *object)
338{
339 return object->min_count != KMEMLEAK_BLACK &&
340 object->count >= object->min_count;
341}
342
343/*
344 * Objects are considered unreferenced only if their color is white, they have
345 * not be deleted and have a minimum age to avoid false positives caused by
346 * pointers temporarily stored in CPU registers.
347 */
348static bool unreferenced_object(struct kmemleak_object *object)
349{
350 return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
351 time_before_eq(object->jiffies + jiffies_min_age,
352 jiffies_last_scan);
353}
354
355/*
356 * Printing of the unreferenced objects information to the seq file. The
357 * print_unreferenced function must be called with the object->lock held.
358 */
359static void print_unreferenced(struct seq_file *seq,
360 struct kmemleak_object *object)
361{
362 int i;
363 unsigned long *entries;
364 unsigned int nr_entries;
365
366 nr_entries = stack_depot_fetch(object->trace_handle, &entries);
367 warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
368 object->pointer, object->size);
369 warn_or_seq_printf(seq, " comm \"%s\", pid %d, jiffies %lu\n",
370 object->comm, object->pid, object->jiffies);
371 hex_dump_object(seq, object);
372 warn_or_seq_printf(seq, " backtrace (crc %x):\n", object->checksum);
373
374 for (i = 0; i < nr_entries; i++) {
375 void *ptr = (void *)entries[i];
376 warn_or_seq_printf(seq, " %pS\n", ptr);
377 }
378}
379
380/*
381 * Print the kmemleak_object information. This function is used mainly for
382 * debugging special cases when kmemleak operations. It must be called with
383 * the object->lock held.
384 */
385static void dump_object_info(struct kmemleak_object *object)
386{
387 pr_notice("Object 0x%08lx (size %zu):\n",
388 object->pointer, object->size);
389 pr_notice(" comm \"%s\", pid %d, jiffies %lu\n",
390 object->comm, object->pid, object->jiffies);
391 pr_notice(" min_count = %d\n", object->min_count);
392 pr_notice(" count = %d\n", object->count);
393 pr_notice(" flags = 0x%x\n", object->flags);
394 pr_notice(" checksum = %u\n", object->checksum);
395 pr_notice(" backtrace:\n");
396 if (object->trace_handle)
397 stack_depot_print(object->trace_handle);
398}
399
400static struct rb_root *object_tree(unsigned long objflags)
401{
402 if (objflags & OBJECT_PHYS)
403 return &object_phys_tree_root;
404 if (objflags & OBJECT_PERCPU)
405 return &object_percpu_tree_root;
406 return &object_tree_root;
407}
408
409/*
410 * Look-up a memory block metadata (kmemleak_object) in the object search
411 * tree based on a pointer value. If alias is 0, only values pointing to the
412 * beginning of the memory block are allowed. The kmemleak_lock must be held
413 * when calling this function.
414 */
415static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
416 unsigned int objflags)
417{
418 struct rb_node *rb = object_tree(objflags)->rb_node;
419 unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
420
421 while (rb) {
422 struct kmemleak_object *object;
423 unsigned long untagged_objp;
424
425 object = rb_entry(rb, struct kmemleak_object, rb_node);
426 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
427
428 if (untagged_ptr < untagged_objp)
429 rb = object->rb_node.rb_left;
430 else if (untagged_objp + object->size <= untagged_ptr)
431 rb = object->rb_node.rb_right;
432 else if (untagged_objp == untagged_ptr || alias)
433 return object;
434 else {
435 kmemleak_warn("Found object by alias at 0x%08lx\n",
436 ptr);
437 dump_object_info(object);
438 break;
439 }
440 }
441 return NULL;
442}
443
444/* Look-up a kmemleak object which allocated with virtual address. */
445static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
446{
447 return __lookup_object(ptr, alias, 0);
448}
449
450/*
451 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
452 * that once an object's use_count reached 0, the RCU freeing was already
453 * registered and the object should no longer be used. This function must be
454 * called under the protection of rcu_read_lock().
455 */
456static int get_object(struct kmemleak_object *object)
457{
458 return atomic_inc_not_zero(&object->use_count);
459}
460
461/*
462 * Memory pool allocation and freeing. kmemleak_lock must not be held.
463 */
464static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
465{
466 unsigned long flags;
467 struct kmemleak_object *object;
468
469 /* try the slab allocator first */
470 if (object_cache) {
471 object = kmem_cache_alloc_noprof(object_cache,
472 gfp_nested_mask(gfp));
473 if (object)
474 return object;
475 }
476
477 /* slab allocation failed, try the memory pool */
478 raw_spin_lock_irqsave(&kmemleak_lock, flags);
479 object = list_first_entry_or_null(&mem_pool_free_list,
480 typeof(*object), object_list);
481 if (object)
482 list_del(&object->object_list);
483 else if (mem_pool_free_count)
484 object = &mem_pool[--mem_pool_free_count];
485 else
486 pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
487 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
488
489 return object;
490}
491
492/*
493 * Return the object to either the slab allocator or the memory pool.
494 */
495static void mem_pool_free(struct kmemleak_object *object)
496{
497 unsigned long flags;
498
499 if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
500 kmem_cache_free(object_cache, object);
501 return;
502 }
503
504 /* add the object to the memory pool free list */
505 raw_spin_lock_irqsave(&kmemleak_lock, flags);
506 list_add(&object->object_list, &mem_pool_free_list);
507 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
508}
509
510/*
511 * RCU callback to free a kmemleak_object.
512 */
513static void free_object_rcu(struct rcu_head *rcu)
514{
515 struct hlist_node *tmp;
516 struct kmemleak_scan_area *area;
517 struct kmemleak_object *object =
518 container_of(rcu, struct kmemleak_object, rcu);
519
520 /*
521 * Once use_count is 0 (guaranteed by put_object), there is no other
522 * code accessing this object, hence no need for locking.
523 */
524 hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
525 hlist_del(&area->node);
526 kmem_cache_free(scan_area_cache, area);
527 }
528 mem_pool_free(object);
529}
530
531/*
532 * Decrement the object use_count. Once the count is 0, free the object using
533 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
534 * delete_object() path, the delayed RCU freeing ensures that there is no
535 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
536 * is also possible.
537 */
538static void put_object(struct kmemleak_object *object)
539{
540 if (!atomic_dec_and_test(&object->use_count))
541 return;
542
543 /* should only get here after delete_object was called */
544 WARN_ON(object->flags & OBJECT_ALLOCATED);
545
546 /*
547 * It may be too early for the RCU callbacks, however, there is no
548 * concurrent object_list traversal when !object_cache and all objects
549 * came from the memory pool. Free the object directly.
550 */
551 if (object_cache)
552 call_rcu(&object->rcu, free_object_rcu);
553 else
554 free_object_rcu(&object->rcu);
555}
556
557/*
558 * Look up an object in the object search tree and increase its use_count.
559 */
560static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
561 unsigned int objflags)
562{
563 unsigned long flags;
564 struct kmemleak_object *object;
565
566 rcu_read_lock();
567 raw_spin_lock_irqsave(&kmemleak_lock, flags);
568 object = __lookup_object(ptr, alias, objflags);
569 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
570
571 /* check whether the object is still available */
572 if (object && !get_object(object))
573 object = NULL;
574 rcu_read_unlock();
575
576 return object;
577}
578
579/* Look up and get an object which allocated with virtual address. */
580static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
581{
582 return __find_and_get_object(ptr, alias, 0);
583}
584
585/*
586 * Remove an object from its object tree and object_list. Must be called with
587 * the kmemleak_lock held _if_ kmemleak is still enabled.
588 */
589static void __remove_object(struct kmemleak_object *object)
590{
591 rb_erase(&object->rb_node, object_tree(object->flags));
592 if (!(object->del_state & DELSTATE_NO_DELETE))
593 list_del_rcu(&object->object_list);
594 object->del_state |= DELSTATE_REMOVED;
595}
596
597static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
598 int alias,
599 unsigned int objflags)
600{
601 struct kmemleak_object *object;
602
603 object = __lookup_object(ptr, alias, objflags);
604 if (object)
605 __remove_object(object);
606
607 return object;
608}
609
610/*
611 * Look up an object in the object search tree and remove it from both object
612 * tree root and object_list. The returned object's use_count should be at
613 * least 1, as initially set by create_object().
614 */
615static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
616 unsigned int objflags)
617{
618 unsigned long flags;
619 struct kmemleak_object *object;
620
621 raw_spin_lock_irqsave(&kmemleak_lock, flags);
622 object = __find_and_remove_object(ptr, alias, objflags);
623 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
624
625 return object;
626}
627
628static noinline depot_stack_handle_t set_track_prepare(void)
629{
630 depot_stack_handle_t trace_handle;
631 unsigned long entries[MAX_TRACE];
632 unsigned int nr_entries;
633
634 /*
635 * Use object_cache to determine whether kmemleak_init() has
636 * been invoked. stack_depot_early_init() is called before
637 * kmemleak_init() in mm_core_init().
638 */
639 if (!object_cache)
640 return 0;
641 nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
642 trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
643
644 return trace_handle;
645}
646
647static struct kmemleak_object *__alloc_object(gfp_t gfp)
648{
649 struct kmemleak_object *object;
650
651 object = mem_pool_alloc(gfp);
652 if (!object) {
653 pr_warn("Cannot allocate a kmemleak_object structure\n");
654 kmemleak_disable();
655 return NULL;
656 }
657
658 INIT_LIST_HEAD(&object->object_list);
659 INIT_LIST_HEAD(&object->gray_list);
660 INIT_HLIST_HEAD(&object->area_list);
661 raw_spin_lock_init(&object->lock);
662 atomic_set(&object->use_count, 1);
663 object->excess_ref = 0;
664 object->count = 0; /* white color initially */
665 object->checksum = 0;
666 object->del_state = 0;
667
668 /* task information */
669 if (in_hardirq()) {
670 object->pid = 0;
671 strscpy(object->comm, "hardirq");
672 } else if (in_serving_softirq()) {
673 object->pid = 0;
674 strscpy(object->comm, "softirq");
675 } else {
676 object->pid = current->pid;
677 /*
678 * There is a small chance of a race with set_task_comm(),
679 * however using get_task_comm() here may cause locking
680 * dependency issues with current->alloc_lock. In the worst
681 * case, the command line is not correct.
682 */
683 strscpy(object->comm, current->comm);
684 }
685
686 /* kernel backtrace */
687 object->trace_handle = set_track_prepare();
688
689 return object;
690}
691
692static int __link_object(struct kmemleak_object *object, unsigned long ptr,
693 size_t size, int min_count, unsigned int objflags)
694{
695
696 struct kmemleak_object *parent;
697 struct rb_node **link, *rb_parent;
698 unsigned long untagged_ptr;
699 unsigned long untagged_objp;
700
701 object->flags = OBJECT_ALLOCATED | objflags;
702 object->pointer = ptr;
703 object->size = kfence_ksize((void *)ptr) ?: size;
704 object->min_count = min_count;
705 object->jiffies = jiffies;
706
707 untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
708 /*
709 * Only update min_addr and max_addr with object storing virtual
710 * address. And update min_percpu_addr max_percpu_addr for per-CPU
711 * objects.
712 */
713 if (objflags & OBJECT_PERCPU) {
714 min_percpu_addr = min(min_percpu_addr, untagged_ptr);
715 max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
716 } else if (!(objflags & OBJECT_PHYS)) {
717 min_addr = min(min_addr, untagged_ptr);
718 max_addr = max(max_addr, untagged_ptr + size);
719 }
720 link = &object_tree(objflags)->rb_node;
721 rb_parent = NULL;
722 while (*link) {
723 rb_parent = *link;
724 parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
725 untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
726 if (untagged_ptr + size <= untagged_objp)
727 link = &parent->rb_node.rb_left;
728 else if (untagged_objp + parent->size <= untagged_ptr)
729 link = &parent->rb_node.rb_right;
730 else {
731 kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
732 ptr);
733 /*
734 * No need for parent->lock here since "parent" cannot
735 * be freed while the kmemleak_lock is held.
736 */
737 dump_object_info(parent);
738 return -EEXIST;
739 }
740 }
741 rb_link_node(&object->rb_node, rb_parent, link);
742 rb_insert_color(&object->rb_node, object_tree(objflags));
743 list_add_tail_rcu(&object->object_list, &object_list);
744
745 return 0;
746}
747
748/*
749 * Create the metadata (struct kmemleak_object) corresponding to an allocated
750 * memory block and add it to the object_list and object tree.
751 */
752static void __create_object(unsigned long ptr, size_t size,
753 int min_count, gfp_t gfp, unsigned int objflags)
754{
755 struct kmemleak_object *object;
756 unsigned long flags;
757 int ret;
758
759 object = __alloc_object(gfp);
760 if (!object)
761 return;
762
763 raw_spin_lock_irqsave(&kmemleak_lock, flags);
764 ret = __link_object(object, ptr, size, min_count, objflags);
765 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
766 if (ret)
767 mem_pool_free(object);
768}
769
770/* Create kmemleak object which allocated with virtual address. */
771static void create_object(unsigned long ptr, size_t size,
772 int min_count, gfp_t gfp)
773{
774 __create_object(ptr, size, min_count, gfp, 0);
775}
776
777/* Create kmemleak object which allocated with physical address. */
778static void create_object_phys(unsigned long ptr, size_t size,
779 int min_count, gfp_t gfp)
780{
781 __create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
782}
783
784/* Create kmemleak object corresponding to a per-CPU allocation. */
785static void create_object_percpu(unsigned long ptr, size_t size,
786 int min_count, gfp_t gfp)
787{
788 __create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
789}
790
791/*
792 * Mark the object as not allocated and schedule RCU freeing via put_object().
793 */
794static void __delete_object(struct kmemleak_object *object)
795{
796 unsigned long flags;
797
798 WARN_ON(!(object->flags & OBJECT_ALLOCATED));
799 WARN_ON(atomic_read(&object->use_count) < 1);
800
801 /*
802 * Locking here also ensures that the corresponding memory block
803 * cannot be freed when it is being scanned.
804 */
805 raw_spin_lock_irqsave(&object->lock, flags);
806 object->flags &= ~OBJECT_ALLOCATED;
807 raw_spin_unlock_irqrestore(&object->lock, flags);
808 put_object(object);
809}
810
811/*
812 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
813 * delete it.
814 */
815static void delete_object_full(unsigned long ptr, unsigned int objflags)
816{
817 struct kmemleak_object *object;
818
819 object = find_and_remove_object(ptr, 0, objflags);
820 if (!object) {
821#ifdef DEBUG
822 kmemleak_warn("Freeing unknown object at 0x%08lx\n",
823 ptr);
824#endif
825 return;
826 }
827 __delete_object(object);
828}
829
830/*
831 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
832 * delete it. If the memory block is partially freed, the function may create
833 * additional metadata for the remaining parts of the block.
834 */
835static void delete_object_part(unsigned long ptr, size_t size,
836 unsigned int objflags)
837{
838 struct kmemleak_object *object, *object_l, *object_r;
839 unsigned long start, end, flags;
840
841 object_l = __alloc_object(GFP_KERNEL);
842 if (!object_l)
843 return;
844
845 object_r = __alloc_object(GFP_KERNEL);
846 if (!object_r)
847 goto out;
848
849 raw_spin_lock_irqsave(&kmemleak_lock, flags);
850 object = __find_and_remove_object(ptr, 1, objflags);
851 if (!object) {
852#ifdef DEBUG
853 kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
854 ptr, size);
855#endif
856 goto unlock;
857 }
858
859 /*
860 * Create one or two objects that may result from the memory block
861 * split. Note that partial freeing is only done by free_bootmem() and
862 * this happens before kmemleak_init() is called.
863 */
864 start = object->pointer;
865 end = object->pointer + object->size;
866 if ((ptr > start) &&
867 !__link_object(object_l, start, ptr - start,
868 object->min_count, objflags))
869 object_l = NULL;
870 if ((ptr + size < end) &&
871 !__link_object(object_r, ptr + size, end - ptr - size,
872 object->min_count, objflags))
873 object_r = NULL;
874
875unlock:
876 raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
877 if (object)
878 __delete_object(object);
879
880out:
881 if (object_l)
882 mem_pool_free(object_l);
883 if (object_r)
884 mem_pool_free(object_r);
885}
886
887static void __paint_it(struct kmemleak_object *object, int color)
888{
889 object->min_count = color;
890 if (color == KMEMLEAK_BLACK)
891 object->flags |= OBJECT_NO_SCAN;
892}
893
894static void paint_it(struct kmemleak_object *object, int color)
895{
896 unsigned long flags;
897
898 raw_spin_lock_irqsave(&object->lock, flags);
899 __paint_it(object, color);
900 raw_spin_unlock_irqrestore(&object->lock, flags);
901}
902
903static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
904{
905 struct kmemleak_object *object;
906
907 object = __find_and_get_object(ptr, 0, objflags);
908 if (!object) {
909 kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
910 ptr,
911 (color == KMEMLEAK_GREY) ? "Grey" :
912 (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
913 return;
914 }
915 paint_it(object, color);
916 put_object(object);
917}
918
919/*
920 * Mark an object permanently as gray-colored so that it can no longer be
921 * reported as a leak. This is used in general to mark a false positive.
922 */
923static void make_gray_object(unsigned long ptr)
924{
925 paint_ptr(ptr, KMEMLEAK_GREY, 0);
926}
927
928/*
929 * Mark the object as black-colored so that it is ignored from scans and
930 * reporting.
931 */
932static void make_black_object(unsigned long ptr, unsigned int objflags)
933{
934 paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
935}
936
937/*
938 * Reset the checksum of an object. The immediate effect is that it will not
939 * be reported as a leak during the next scan until its checksum is updated.
940 */
941static void reset_checksum(unsigned long ptr)
942{
943 unsigned long flags;
944 struct kmemleak_object *object;
945
946 object = find_and_get_object(ptr, 0);
947 if (!object) {
948 kmemleak_warn("Not resetting the checksum of an unknown object at 0x%08lx\n",
949 ptr);
950 return;
951 }
952
953 raw_spin_lock_irqsave(&object->lock, flags);
954 object->checksum = 0;
955 raw_spin_unlock_irqrestore(&object->lock, flags);
956 put_object(object);
957}
958
959/*
960 * Add a scanning area to the object. If at least one such area is added,
961 * kmemleak will only scan these ranges rather than the whole memory block.
962 */
963static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
964{
965 unsigned long flags;
966 struct kmemleak_object *object;
967 struct kmemleak_scan_area *area = NULL;
968 unsigned long untagged_ptr;
969 unsigned long untagged_objp;
970
971 object = find_and_get_object(ptr, 1);
972 if (!object) {
973 kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
974 ptr);
975 return;
976 }
977
978 untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
979 untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
980
981 if (scan_area_cache)
982 area = kmem_cache_alloc_noprof(scan_area_cache,
983 gfp_nested_mask(gfp));
984
985 raw_spin_lock_irqsave(&object->lock, flags);
986 if (!area) {
987 pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
988 /* mark the object for full scan to avoid false positives */
989 object->flags |= OBJECT_FULL_SCAN;
990 goto out_unlock;
991 }
992 if (size == SIZE_MAX) {
993 size = untagged_objp + object->size - untagged_ptr;
994 } else if (untagged_ptr + size > untagged_objp + object->size) {
995 kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
996 dump_object_info(object);
997 kmem_cache_free(scan_area_cache, area);
998 goto out_unlock;
999 }
1000
1001 INIT_HLIST_NODE(&area->node);
1002 area->start = ptr;
1003 area->size = size;
1004
1005 hlist_add_head(&area->node, &object->area_list);
1006out_unlock:
1007 raw_spin_unlock_irqrestore(&object->lock, flags);
1008 put_object(object);
1009}
1010
1011/*
1012 * Any surplus references (object already gray) to 'ptr' are passed to
1013 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
1014 * vm_struct may be used as an alternative reference to the vmalloc'ed object
1015 * (see free_thread_stack()).
1016 */
1017static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
1018{
1019 unsigned long flags;
1020 struct kmemleak_object *object;
1021
1022 object = find_and_get_object(ptr, 0);
1023 if (!object) {
1024 kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
1025 ptr);
1026 return;
1027 }
1028
1029 raw_spin_lock_irqsave(&object->lock, flags);
1030 object->excess_ref = excess_ref;
1031 raw_spin_unlock_irqrestore(&object->lock, flags);
1032 put_object(object);
1033}
1034
1035/*
1036 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
1037 * pointer. Such object will not be scanned by kmemleak but references to it
1038 * are searched.
1039 */
1040static void object_no_scan(unsigned long ptr)
1041{
1042 unsigned long flags;
1043 struct kmemleak_object *object;
1044
1045 object = find_and_get_object(ptr, 0);
1046 if (!object) {
1047 kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
1048 return;
1049 }
1050
1051 raw_spin_lock_irqsave(&object->lock, flags);
1052 object->flags |= OBJECT_NO_SCAN;
1053 raw_spin_unlock_irqrestore(&object->lock, flags);
1054 put_object(object);
1055}
1056
1057/**
1058 * kmemleak_alloc - register a newly allocated object
1059 * @ptr: pointer to beginning of the object
1060 * @size: size of the object
1061 * @min_count: minimum number of references to this object. If during memory
1062 * scanning a number of references less than @min_count is found,
1063 * the object is reported as a memory leak. If @min_count is 0,
1064 * the object is never reported as a leak. If @min_count is -1,
1065 * the object is ignored (not scanned and not reported as a leak)
1066 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1067 *
1068 * This function is called from the kernel allocators when a new object
1069 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
1070 */
1071void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
1072 gfp_t gfp)
1073{
1074 pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);
1075
1076 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1077 create_object((unsigned long)ptr, size, min_count, gfp);
1078}
1079EXPORT_SYMBOL_GPL(kmemleak_alloc);
1080
1081/**
1082 * kmemleak_alloc_percpu - register a newly allocated __percpu object
1083 * @ptr: __percpu pointer to beginning of the object
1084 * @size: size of the object
1085 * @gfp: flags used for kmemleak internal memory allocations
1086 *
1087 * This function is called from the kernel percpu allocator when a new object
1088 * (memory block) is allocated (alloc_percpu).
1089 */
1090void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
1091 gfp_t gfp)
1092{
1093 pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
1094
1095 if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
1096 create_object_percpu((__force unsigned long)ptr, size, 1, gfp);
1097}
1098EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
1099
1100/**
1101 * kmemleak_vmalloc - register a newly vmalloc'ed object
1102 * @area: pointer to vm_struct
1103 * @size: size of the object
1104 * @gfp: __vmalloc() flags used for kmemleak internal memory allocations
1105 *
1106 * This function is called from the vmalloc() kernel allocator when a new
1107 * object (memory block) is allocated.
1108 */
1109void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
1110{
1111 pr_debug("%s(0x%px, %zu)\n", __func__, area, size);
1112
1113 /*
1114 * A min_count = 2 is needed because vm_struct contains a reference to
1115 * the virtual address of the vmalloc'ed block.
1116 */
1117 if (kmemleak_enabled) {
1118 create_object((unsigned long)area->addr, size, 2, gfp);
1119 object_set_excess_ref((unsigned long)area,
1120 (unsigned long)area->addr);
1121 }
1122}
1123EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
1124
1125/**
1126 * kmemleak_free - unregister a previously registered object
1127 * @ptr: pointer to beginning of the object
1128 *
1129 * This function is called from the kernel allocators when an object (memory
1130 * block) is freed (kmem_cache_free, kfree, vfree etc.).
1131 */
1132void __ref kmemleak_free(const void *ptr)
1133{
1134 pr_debug("%s(0x%px)\n", __func__, ptr);
1135
1136 if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1137 delete_object_full((unsigned long)ptr, 0);
1138}
1139EXPORT_SYMBOL_GPL(kmemleak_free);
1140
1141/**
1142 * kmemleak_free_part - partially unregister a previously registered object
1143 * @ptr: pointer to the beginning or inside the object. This also
1144 * represents the start of the range to be freed
1145 * @size: size to be unregistered
1146 *
1147 * This function is called when only a part of a memory block is freed
1148 * (usually from the bootmem allocator).
1149 */
1150void __ref kmemleak_free_part(const void *ptr, size_t size)
1151{
1152 pr_debug("%s(0x%px)\n", __func__, ptr);
1153
1154 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1155 delete_object_part((unsigned long)ptr, size, 0);
1156}
1157EXPORT_SYMBOL_GPL(kmemleak_free_part);
1158
1159/**
1160 * kmemleak_free_percpu - unregister a previously registered __percpu object
1161 * @ptr: __percpu pointer to beginning of the object
1162 *
1163 * This function is called from the kernel percpu allocator when an object
1164 * (memory block) is freed (free_percpu).
1165 */
1166void __ref kmemleak_free_percpu(const void __percpu *ptr)
1167{
1168 pr_debug("%s(0x%px)\n", __func__, ptr);
1169
1170 if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
1171 delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
1172}
1173EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1174
1175/**
1176 * kmemleak_update_trace - update object allocation stack trace
1177 * @ptr: pointer to beginning of the object
1178 *
1179 * Override the object allocation stack trace for cases where the actual
1180 * allocation place is not always useful.
1181 */
1182void __ref kmemleak_update_trace(const void *ptr)
1183{
1184 struct kmemleak_object *object;
1185 depot_stack_handle_t trace_handle;
1186 unsigned long flags;
1187
1188 pr_debug("%s(0x%px)\n", __func__, ptr);
1189
1190 if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1191 return;
1192
1193 object = find_and_get_object((unsigned long)ptr, 1);
1194 if (!object) {
1195#ifdef DEBUG
1196 kmemleak_warn("Updating stack trace for unknown object at %p\n",
1197 ptr);
1198#endif
1199 return;
1200 }
1201
1202 trace_handle = set_track_prepare();
1203 raw_spin_lock_irqsave(&object->lock, flags);
1204 object->trace_handle = trace_handle;
1205 raw_spin_unlock_irqrestore(&object->lock, flags);
1206
1207 put_object(object);
1208}
1209EXPORT_SYMBOL(kmemleak_update_trace);
1210
1211/**
1212 * kmemleak_not_leak - mark an allocated object as false positive
1213 * @ptr: pointer to beginning of the object
1214 *
1215 * Calling this function on an object will cause the memory block to no longer
1216 * be reported as leak and always be scanned.
1217 */
1218void __ref kmemleak_not_leak(const void *ptr)
1219{
1220 pr_debug("%s(0x%px)\n", __func__, ptr);
1221
1222 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1223 make_gray_object((unsigned long)ptr);
1224}
1225EXPORT_SYMBOL(kmemleak_not_leak);
1226
1227/**
1228 * kmemleak_transient_leak - mark an allocated object as transient false positive
1229 * @ptr: pointer to beginning of the object
1230 *
1231 * Calling this function on an object will cause the memory block to not be
1232 * reported as a leak temporarily. This may happen, for example, if the object
1233 * is part of a singly linked list and the ->next reference to it is changed.
1234 */
1235void __ref kmemleak_transient_leak(const void *ptr)
1236{
1237 pr_debug("%s(0x%px)\n", __func__, ptr);
1238
1239 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1240 reset_checksum((unsigned long)ptr);
1241}
1242EXPORT_SYMBOL(kmemleak_transient_leak);
1243
1244/**
1245 * kmemleak_ignore - ignore an allocated object
1246 * @ptr: pointer to beginning of the object
1247 *
1248 * Calling this function on an object will cause the memory block to be
1249 * ignored (not scanned and not reported as a leak). This is usually done when
1250 * it is known that the corresponding block is not a leak and does not contain
1251 * any references to other allocated memory blocks.
1252 */
1253void __ref kmemleak_ignore(const void *ptr)
1254{
1255 pr_debug("%s(0x%px)\n", __func__, ptr);
1256
1257 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1258 make_black_object((unsigned long)ptr, 0);
1259}
1260EXPORT_SYMBOL(kmemleak_ignore);
1261
1262/**
1263 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1264 * @ptr: pointer to beginning or inside the object. This also
1265 * represents the start of the scan area
1266 * @size: size of the scan area
1267 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1268 *
1269 * This function is used when it is known that only certain parts of an object
1270 * contain references to other objects. Kmemleak will only scan these areas
1271 * reducing the number false negatives.
1272 */
1273void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1274{
1275 pr_debug("%s(0x%px)\n", __func__, ptr);
1276
1277 if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1278 add_scan_area((unsigned long)ptr, size, gfp);
1279}
1280EXPORT_SYMBOL(kmemleak_scan_area);
1281
1282/**
1283 * kmemleak_no_scan - do not scan an allocated object
1284 * @ptr: pointer to beginning of the object
1285 *
1286 * This function notifies kmemleak not to scan the given memory block. Useful
1287 * in situations where it is known that the given object does not contain any
1288 * references to other objects. Kmemleak will not scan such objects reducing
1289 * the number of false negatives.
1290 */
1291void __ref kmemleak_no_scan(const void *ptr)
1292{
1293 pr_debug("%s(0x%px)\n", __func__, ptr);
1294
1295 if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1296 object_no_scan((unsigned long)ptr);
1297}
1298EXPORT_SYMBOL(kmemleak_no_scan);
1299
1300/**
1301 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1302 * address argument
1303 * @phys: physical address of the object
1304 * @size: size of the object
1305 * @gfp: kmalloc() flags used for kmemleak internal memory allocations
1306 */
1307void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
1308{
1309 pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);
1310
1311 if (kmemleak_enabled)
1312 /*
1313 * Create object with OBJECT_PHYS flag and
1314 * assume min_count 0.
1315 */
1316 create_object_phys((unsigned long)phys, size, 0, gfp);
1317}
1318EXPORT_SYMBOL(kmemleak_alloc_phys);
1319
1320/**
1321 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1322 * physical address argument
1323 * @phys: physical address if the beginning or inside an object. This
1324 * also represents the start of the range to be freed
1325 * @size: size to be unregistered
1326 */
1327void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1328{
1329 pr_debug("%s(0x%px)\n", __func__, &phys);
1330
1331 if (kmemleak_enabled)
1332 delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
1333}
1334EXPORT_SYMBOL(kmemleak_free_part_phys);
1335
1336/**
1337 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1338 * address argument
1339 * @phys: physical address of the object
1340 */
1341void __ref kmemleak_ignore_phys(phys_addr_t phys)
1342{
1343 pr_debug("%s(0x%px)\n", __func__, &phys);
1344
1345 if (kmemleak_enabled)
1346 make_black_object((unsigned long)phys, OBJECT_PHYS);
1347}
1348EXPORT_SYMBOL(kmemleak_ignore_phys);
1349
1350/*
1351 * Update an object's checksum and return true if it was modified.
1352 */
1353static bool update_checksum(struct kmemleak_object *object)
1354{
1355 u32 old_csum = object->checksum;
1356
1357 if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
1358 return false;
1359
1360 kasan_disable_current();
1361 kcsan_disable_current();
1362 if (object->flags & OBJECT_PERCPU) {
1363 unsigned int cpu;
1364
1365 object->checksum = 0;
1366 for_each_possible_cpu(cpu) {
1367 void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);
1368
1369 object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
1370 }
1371 } else {
1372 object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
1373 }
1374 kasan_enable_current();
1375 kcsan_enable_current();
1376
1377 return object->checksum != old_csum;
1378}
1379
1380/*
1381 * Update an object's references. object->lock must be held by the caller.
1382 */
1383static void update_refs(struct kmemleak_object *object)
1384{
1385 if (!color_white(object)) {
1386 /* non-orphan, ignored or new */
1387 return;
1388 }
1389
1390 /*
1391 * Increase the object's reference count (number of pointers to the
1392 * memory block). If this count reaches the required minimum, the
1393 * object's color will become gray and it will be added to the
1394 * gray_list.
1395 */
1396 object->count++;
1397 if (color_gray(object)) {
1398 /* put_object() called when removing from gray_list */
1399 WARN_ON(!get_object(object));
1400 list_add_tail(&object->gray_list, &gray_list);
1401 }
1402}
1403
1404static void pointer_update_refs(struct kmemleak_object *scanned,
1405 unsigned long pointer, unsigned int objflags)
1406{
1407 struct kmemleak_object *object;
1408 unsigned long untagged_ptr;
1409 unsigned long excess_ref;
1410
1411 untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1412 if (objflags & OBJECT_PERCPU) {
1413 if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
1414 return;
1415 } else {
1416 if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1417 return;
1418 }
1419
1420 /*
1421 * No need for get_object() here since we hold kmemleak_lock.
1422 * object->use_count cannot be dropped to 0 while the object
1423 * is still present in object_tree_root and object_list
1424 * (with updates protected by kmemleak_lock).
1425 */
1426 object = __lookup_object(pointer, 1, objflags);
1427 if (!object)
1428 return;
1429 if (object == scanned)
1430 /* self referenced, ignore */
1431 return;
1432
1433 /*
1434 * Avoid the lockdep recursive warning on object->lock being
1435 * previously acquired in scan_object(). These locks are
1436 * enclosed by scan_mutex.
1437 */
1438 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1439 /* only pass surplus references (object already gray) */
1440 if (color_gray(object)) {
1441 excess_ref = object->excess_ref;
1442 /* no need for update_refs() if object already gray */
1443 } else {
1444 excess_ref = 0;
1445 update_refs(object);
1446 }
1447 raw_spin_unlock(&object->lock);
1448
1449 if (excess_ref) {
1450 object = lookup_object(excess_ref, 0);
1451 if (!object)
1452 return;
1453 if (object == scanned)
1454 /* circular reference, ignore */
1455 return;
1456 raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1457 update_refs(object);
1458 raw_spin_unlock(&object->lock);
1459 }
1460}
1461
1462/*
1463 * Memory scanning is a long process and it needs to be interruptible. This
1464 * function checks whether such interrupt condition occurred.
1465 */
1466static int scan_should_stop(void)
1467{
1468 if (!kmemleak_enabled)
1469 return 1;
1470
1471 /*
1472 * This function may be called from either process or kthread context,
1473 * hence the need to check for both stop conditions.
1474 */
1475 if (current->mm)
1476 return signal_pending(current);
1477 else
1478 return kthread_should_stop();
1479
1480 return 0;
1481}

/*
 * Scan a memory block (exclusive range) for valid pointers and add those
 * found to the gray list.
 */
static void scan_block(void *_start, void *_end,
		       struct kmemleak_object *scanned)
{
	unsigned long *ptr;
	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
	/* stop early enough that the last word read does not run past _end */
	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
	unsigned long flags;

	raw_spin_lock_irqsave(&kmemleak_lock, flags);
	for (ptr = start; ptr < end; ptr++) {
		unsigned long pointer;

		if (scan_should_stop())
			break;

		kasan_disable_current();
		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
		kasan_enable_current();

		pointer_update_refs(scanned, pointer, 0);
		pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
	}
	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
}

/*
 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
 */
#ifdef CONFIG_SMP
static void scan_large_block(void *start, void *end)
{
	void *next;

	while (start < end) {
		next = min(start + MAX_SCAN_SIZE, end);
		scan_block(start, next, NULL);
		start = next;
		cond_resched();
	}
}
#endif

/*
 * Scan a memory block corresponding to a kmemleak_object. The caller must
 * hold a reference (object->use_count >= 1).
 */
static void scan_object(struct kmemleak_object *object)
{
	struct kmemleak_scan_area *area;
	unsigned long flags;

	/*
	 * Once the object->lock is acquired, the corresponding memory block
	 * cannot be freed (the same lock is acquired in delete_object).
	 */
	raw_spin_lock_irqsave(&object->lock, flags);
	if (object->flags & OBJECT_NO_SCAN)
		goto out;
	if (!(object->flags & OBJECT_ALLOCATED))
		/* already freed object */
		goto out;

	if (object->flags & OBJECT_PERCPU) {
		unsigned int cpu;

		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
			void *end = start + object->size;

			scan_block(start, end, object);

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
			if (!(object->flags & OBJECT_ALLOCATED))
				break;
		}
	} else if (hlist_empty(&object->area_list) ||
		   object->flags & OBJECT_FULL_SCAN) {
		void *start = object->flags & OBJECT_PHYS ?
				__va((phys_addr_t)object->pointer) :
				(void *)object->pointer;
		void *end = start + object->size;
		void *next;

		/*
		 * Scan in MAX_SCAN_SIZE chunks, dropping object->lock between
		 * chunks so that rescheduling is possible, and re-check that
		 * the object was not freed in the meantime.
		 */
		do {
			next = min(start + MAX_SCAN_SIZE, end);
			scan_block(start, next, object);

			start = next;
			if (start >= end)
				break;

			raw_spin_unlock_irqrestore(&object->lock, flags);
			cond_resched();
			raw_spin_lock_irqsave(&object->lock, flags);
		} while (object->flags & OBJECT_ALLOCATED);
	} else {
		hlist_for_each_entry(area, &object->area_list, node)
			scan_block((void *)area->start,
				   (void *)(area->start + area->size),
				   object);
	}
out:
	raw_spin_unlock_irqrestore(&object->lock, flags);
}

/*
 * Scan the objects already referenced (gray objects). More objects will be
 * referenced and, if there are no memory leaks, all the objects are scanned.
 */
static void scan_gray_list(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * The list traversal is safe for both tail additions and removals
	 * from inside the loop. The kmemleak objects cannot be freed from
	 * outside the loop because their use_count was incremented.
	 */
	object = list_entry(gray_list.next, typeof(*object), gray_list);
	while (&object->gray_list != &gray_list) {
		cond_resched();

		/* may add new objects to the list */
		if (!scan_should_stop())
			scan_object(object);

		tmp = list_entry(object->gray_list.next, typeof(*object),
				 gray_list);

		/* remove the object from the list and release it */
		list_del(&object->gray_list);
		put_object(object);

		object = tmp;
	}
	WARN_ON(!list_empty(&gray_list));
}

/*
 * Conditionally call cond_resched() in an object iteration loop while making
 * sure that the given object won't go away while the RCU read lock is
 * temporarily dropped, by taking a reference with get_object() first.
 */
static void kmemleak_cond_resched(struct kmemleak_object *object)
{
	if (!get_object(object))
		return;	/* Try next object */

	raw_spin_lock_irq(&kmemleak_lock);
	if (object->del_state & DELSTATE_REMOVED)
		goto unlock_put;	/* Object removed */
	object->del_state |= DELSTATE_NO_DELETE;
	raw_spin_unlock_irq(&kmemleak_lock);

	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();

	raw_spin_lock_irq(&kmemleak_lock);
	if (object->del_state & DELSTATE_REMOVED)
		list_del_rcu(&object->object_list);
	object->del_state &= ~DELSTATE_NO_DELETE;
unlock_put:
	raw_spin_unlock_irq(&kmemleak_lock);
	put_object(object);
}

/*
 * Scan data sections and all the referenced memory blocks allocated via the
 * kernel's standard allocators. This function must be called with the
 * scan_mutex held.
 */
static void kmemleak_scan(void)
{
	struct kmemleak_object *object;
	struct zone *zone;
	int __maybe_unused i;
	int new_leaks = 0;

	jiffies_last_scan = jiffies;

	/* prepare the kmemleak_object structures */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
#ifdef DEBUG
		/*
		 * With a few exceptions there should be a maximum of
		 * 1 reference to any object at this point.
		 */
		if (atomic_read(&object->use_count) > 1) {
			pr_debug("object->use_count = %d\n",
				 atomic_read(&object->use_count));
			dump_object_info(object);
		}
#endif

		/* ignore objects outside lowmem (paint them black) */
		if ((object->flags & OBJECT_PHYS) &&
		    !(object->flags & OBJECT_NO_SCAN)) {
			unsigned long phys = object->pointer;

			if (PHYS_PFN(phys) < min_low_pfn ||
			    PHYS_PFN(phys + object->size) > max_low_pfn)
				__paint_it(object, KMEMLEAK_BLACK);
		}

		/* reset the reference count (whiten the object) */
		object->count = 0;
		if (color_gray(object) && get_object(object))
			list_add_tail(&object->gray_list, &gray_list);

		raw_spin_unlock_irq(&object->lock);

		if (need_resched())
			kmemleak_cond_resched(object);
	}
	rcu_read_unlock();

#ifdef CONFIG_SMP
	/* per-cpu sections scanning */
	for_each_possible_cpu(i)
		scan_large_block(__per_cpu_start + per_cpu_offset(i),
				 __per_cpu_end + per_cpu_offset(i));
#endif

	/*
	 * Struct page scanning for each node.
	 */
	get_online_mems();
	for_each_populated_zone(zone) {
		unsigned long start_pfn = zone->zone_start_pfn;
		unsigned long end_pfn = zone_end_pfn(zone);
		unsigned long pfn;

		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
			struct page *page = pfn_to_online_page(pfn);

			if (!(pfn & 63))
				cond_resched();

			if (!page)
				continue;

			/* only scan pages belonging to this zone */
			if (page_zone(page) != zone)
				continue;
			/* only scan if page is in use */
			if (page_count(page) == 0)
				continue;
			scan_block(page, page + 1, NULL);
		}
	}
	put_online_mems();

	/*
	 * Scanning the task stacks (may introduce false negatives).
	 */
	if (kmemleak_stack_scan) {
		struct task_struct *p, *g;

		rcu_read_lock();
		for_each_process_thread(g, p) {
			void *stack = try_get_task_stack(p);

			if (stack) {
				scan_block(stack, stack + THREAD_SIZE, NULL);
				put_task_stack(p);
			}
		}
		rcu_read_unlock();
	}

	/*
	 * Scan the objects already referenced from the sections scanned
	 * above.
	 */
	scan_gray_list();

	/*
	 * Check for new or unreferenced objects modified since the previous
	 * scan and color them gray until the next scan.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (need_resched())
			kmemleak_cond_resched(object);

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
		    && update_checksum(object) && get_object(object)) {
			/* color it gray temporarily */
			object->count = object->min_count;
			list_add_tail(&object->gray_list, &gray_list);
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	/*
	 * Re-scan the gray list for modified unreferenced objects.
	 */
	scan_gray_list();

	/*
	 * If scanning was stopped do not report any new unreferenced objects.
	 */
	if (scan_should_stop())
		return;

	/*
	 * Scanning result reporting.
	 */
	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (need_resched())
			kmemleak_cond_resched(object);

		/*
		 * This is racy but we can save the overhead of lock/unlock
		 * calls. The missed objects, if any, should be caught in
		 * the next scan.
		 */
		if (!color_white(object))
			continue;
		raw_spin_lock_irq(&object->lock);
		if (unreferenced_object(object) &&
		    !(object->flags & OBJECT_REPORTED)) {
			object->flags |= OBJECT_REPORTED;

			if (kmemleak_verbose)
				print_unreferenced(NULL, object);

			new_leaks++;
		}
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	if (new_leaks) {
		kmemleak_found_leaks = true;

		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
			new_leaks);
	}
}

/*
 * Thread function performing automatic memory scanning. Unreferenced objects
 * at the end of a memory scan are reported, but each only the first time it
 * is detected.
 */
static int kmemleak_scan_thread(void *arg)
{
	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);

	pr_info("Automatic memory scanning thread started\n");
	set_user_nice(current, 10);

	/*
	 * Wait before the first scan to allow the system to fully initialize.
	 */
	if (first_run) {
		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);

		first_run = 0;
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	while (!kthread_should_stop()) {
		signed long timeout = READ_ONCE(jiffies_scan_wait);

		mutex_lock(&scan_mutex);
		kmemleak_scan();
		mutex_unlock(&scan_mutex);

		/* wait before the next scan */
		while (timeout && !kthread_should_stop())
			timeout = schedule_timeout_interruptible(timeout);
	}

	pr_info("Automatic memory scanning thread ended\n");

	return 0;
}

/*
 * Start the automatic memory scanning thread. This function must be called
 * with the scan_mutex held.
 */
static void start_scan_thread(void)
{
	if (scan_thread)
		return;
	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
	if (IS_ERR(scan_thread)) {
		pr_warn("Failed to create the scan thread\n");
		scan_thread = NULL;
	}
}

/*
 * Stop the automatic memory scanning thread.
 */
static void stop_scan_thread(void)
{
	if (scan_thread) {
		kthread_stop(scan_thread);
		scan_thread = NULL;
	}
}

/*
 * Iterate over the object_list and return the first valid object at or after
 * the required position with its use_count incremented.
 */
static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct kmemleak_object *object;
	loff_t n = *pos;
	int err;

	err = mutex_lock_interruptible(&scan_mutex);
	if (err < 0)
		return ERR_PTR(err);

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (n-- > 0)
			continue;
		if (get_object(object))
			goto out;
	}
	object = NULL;
out:
	return object;
}

/*
 * Return the next object in the object_list. The function decrements the
 * use_count of the previous object and increases that of the next one.
 */
static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct kmemleak_object *prev_obj = v;
	struct kmemleak_object *next_obj = NULL;
	struct kmemleak_object *obj = prev_obj;

	++(*pos);

	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
		if (get_object(obj)) {
			next_obj = obj;
			break;
		}
	}

	put_object(prev_obj);
	return next_obj;
}

/*
 * Decrement the use_count of the last object returned, if any.
 */
static void kmemleak_seq_stop(struct seq_file *seq, void *v)
{
	if (!IS_ERR(v)) {
		/*
		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
		 * waiting was interrupted, so only release it if !IS_ERR.
		 */
		rcu_read_unlock();
		mutex_unlock(&scan_mutex);
		if (v)
			put_object(v);
	}
}

/*
 * Print the information for an unreferenced object to the seq file.
 */
static int kmemleak_seq_show(struct seq_file *seq, void *v)
{
	struct kmemleak_object *object = v;
	unsigned long flags;

	raw_spin_lock_irqsave(&object->lock, flags);
	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
		print_unreferenced(seq, object);
	raw_spin_unlock_irqrestore(&object->lock, flags);
	return 0;
}

static const struct seq_operations kmemleak_seq_ops = {
	.start = kmemleak_seq_start,
	.next  = kmemleak_seq_next,
	.stop  = kmemleak_seq_stop,
	.show  = kmemleak_seq_show,
};

static int kmemleak_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &kmemleak_seq_ops);
}

static int dump_str_object_info(const char *str)
{
	unsigned long flags;
	struct kmemleak_object *object;
	unsigned long addr;

	if (kstrtoul(str, 0, &addr))
		return -EINVAL;
	object = find_and_get_object(addr, 0);
	if (!object) {
		pr_info("Unknown object at 0x%08lx\n", addr);
		return -EINVAL;
	}

	raw_spin_lock_irqsave(&object->lock, flags);
	dump_object_info(object);
	raw_spin_unlock_irqrestore(&object->lock, flags);

	put_object(object);
	return 0;
}

/*
 * We use grey instead of black to ensure we can do future scans on the same
 * objects. If we did not do future scans, these black objects could
 * potentially contain references to newly allocated objects and we would
 * end up with false positives.
 */
static void kmemleak_clear(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		raw_spin_lock_irq(&object->lock);
		if ((object->flags & OBJECT_REPORTED) &&
		    unreferenced_object(object))
			__paint_it(object, KMEMLEAK_GREY);
		raw_spin_unlock_irq(&object->lock);
	}
	rcu_read_unlock();

	kmemleak_found_leaks = false;
}

static void __kmemleak_do_cleanup(void);

/*
 * File write operation to configure kmemleak at run-time. The following
 * commands can be written to the /sys/kernel/debug/kmemleak file:
 *   off	- disable kmemleak (irreversible)
 *   stack=on	- enable the task stacks scanning
 *   stack=off	- disable the task stacks scanning
 *   scan=on	- start the automatic memory scanning thread
 *   scan=off	- stop the automatic memory scanning thread
 *   scan=...	- set the automatic memory scanning period in seconds
 *		  (0 to disable it)
 *   scan	- trigger a memory scan
 *   clear	- mark all currently reported unreferenced kmemleak objects
 *		  as grey to suppress printing them, or free all kmemleak
 *		  objects if kmemleak has been disabled
 *   dump=...	- dump information about the object found at the given
 *		  address
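 *
 * For example (assuming debugfs is mounted at /sys/kernel/debug):
 *   echo scan > /sys/kernel/debug/kmemleak		- scan now
 *   echo scan=600 > /sys/kernel/debug/kmemleak	- scan every 600 seconds
 *   echo clear > /sys/kernel/debug/kmemleak		- ignore current suspects
 *   echo dump=<addr> > /sys/kernel/debug/kmemleak	- dump one object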
 */
static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
			      size_t size, loff_t *ppos)
{
	char buf[64];
	int buf_size;
	int ret;

	buf_size = min(size, (sizeof(buf) - 1));
	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
		return -EFAULT;
	buf[buf_size] = 0;

	ret = mutex_lock_interruptible(&scan_mutex);
	if (ret < 0)
		return ret;

	if (strncmp(buf, "clear", 5) == 0) {
		if (kmemleak_enabled)
			kmemleak_clear();
		else
			__kmemleak_do_cleanup();
		goto out;
	}

	if (!kmemleak_enabled) {
		ret = -EPERM;
		goto out;
	}

	if (strncmp(buf, "off", 3) == 0)
		kmemleak_disable();
	else if (strncmp(buf, "stack=on", 8) == 0)
		kmemleak_stack_scan = 1;
	else if (strncmp(buf, "stack=off", 9) == 0)
		kmemleak_stack_scan = 0;
	else if (strncmp(buf, "scan=on", 7) == 0)
		start_scan_thread();
	else if (strncmp(buf, "scan=off", 8) == 0)
		stop_scan_thread();
	else if (strncmp(buf, "scan=", 5) == 0) {
		unsigned int secs;
		unsigned long msecs;

		ret = kstrtouint(buf + 5, 0, &secs);
		if (ret < 0)
			goto out;

		msecs = secs * MSEC_PER_SEC;
		if (msecs > UINT_MAX)
			msecs = UINT_MAX;

		stop_scan_thread();
		if (msecs) {
			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
			start_scan_thread();
		}
	} else if (strncmp(buf, "scan", 4) == 0)
		kmemleak_scan();
	else if (strncmp(buf, "dump=", 5) == 0)
		ret = dump_str_object_info(buf + 5);
	else
		ret = -EINVAL;

out:
	mutex_unlock(&scan_mutex);
	if (ret < 0)
		return ret;

	/* ignore the rest of the buffer, only one command at a time */
	*ppos += size;
	return size;
}

static const struct file_operations kmemleak_fops = {
	.owner		= THIS_MODULE,
	.open		= kmemleak_open,
	.read		= seq_read,
	.write		= kmemleak_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static void __kmemleak_do_cleanup(void)
{
	struct kmemleak_object *object, *tmp;

	/*
	 * Kmemleak has already been disabled, no need for RCU list traversal
	 * or holding kmemleak_lock.
	 */
	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
		__remove_object(object);
		__delete_object(object);
	}
}

/*
 * Stop the memory scanning thread and free the kmemleak internal objects if
 * no memory leaks have been found (otherwise, kmemleak may still hold useful
 * information on the reported leaks).
 */
static void kmemleak_do_cleanup(struct work_struct *work)
{
	stop_scan_thread();

	mutex_lock(&scan_mutex);
	/*
	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
	 * longer track object freeing. Ordering of the scan thread stopping and
	 * the memory accesses below is guaranteed by the kthread_stop()
	 * function.
	 */
	kmemleak_free_enabled = 0;
	mutex_unlock(&scan_mutex);

	if (!kmemleak_found_leaks)
		__kmemleak_do_cleanup();
	else
		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
}

static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);

/*
 * Disable kmemleak. No memory allocation/freeing will be traced once this
 * function is called. Disabling kmemleak is an irreversible operation.
 */
static void kmemleak_disable(void)
{
	/* atomically check whether it was already invoked */
	if (cmpxchg(&kmemleak_error, 0, 1))
		return;

	/* stop any memory operation tracing */
	kmemleak_enabled = 0;

	/* check whether it is too early for a kernel thread */
	if (kmemleak_late_initialized)
		schedule_work(&cleanup_work);
	else
		kmemleak_free_enabled = 0;

	pr_info("Kernel memory leak detector disabled\n");
}

/*
 * Allow boot-time kmemleak configuration: "kmemleak=off" disables the
 * detector early, "kmemleak=on" overrides CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 * (kmemleak is otherwise enabled by default).
 */
static int __init kmemleak_boot_config(char *str)
{
	if (!str)
		return -EINVAL;
	if (strcmp(str, "off") == 0)
		kmemleak_disable();
	else if (strcmp(str, "on") == 0) {
		kmemleak_skip_disable = 1;
		stack_depot_request_early_init();
	} else
		return -EINVAL;
	return 0;
}
early_param("kmemleak", kmemleak_boot_config);

/*
 * Kmemleak initialization.
 */
void __init kmemleak_init(void)
{
#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
	if (!kmemleak_skip_disable) {
		kmemleak_disable();
		return;
	}
#endif

	if (kmemleak_error)
		return;

	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);

	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

	/* register the data/bss sections */
	create_object((unsigned long)_sdata, _edata - _sdata,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
		      KMEMLEAK_GREY, GFP_ATOMIC);
	/* only register .data..ro_after_init if not within .data */
	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
		create_object((unsigned long)__start_ro_after_init,
			      __end_ro_after_init - __start_ro_after_init,
			      KMEMLEAK_GREY, GFP_ATOMIC);
}

/*
 * Late initialization function.
 */
static int __init kmemleak_late_init(void)
{
	kmemleak_late_initialized = 1;

	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);

	if (kmemleak_error) {
		/*
		 * Some error occurred and kmemleak was disabled. There is a
		 * small chance that kmemleak_disable() was called immediately
		 * after setting kmemleak_late_initialized and we may end up with
		 * two clean-up threads but serialized by scan_mutex.
		 */
		schedule_work(&cleanup_work);
		return -ENOMEM;
	}

	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
		mutex_lock(&scan_mutex);
		start_scan_thread();
		mutex_unlock(&scan_mutex);
	}

	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
		mem_pool_free_count);

	return 0;
}
late_initcall(kmemleak_late_init);