   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * mm/kmemleak.c
   4 *
   5 * Copyright (C) 2008 ARM Limited
   6 * Written by Catalin Marinas <catalin.marinas@arm.com>
   7 *
   8 * For more information on the algorithm and kmemleak usage, please see
   9 * Documentation/dev-tools/kmemleak.rst.
  10 *
  11 * Notes on locking
  12 * ----------------
  13 *
  14 * The following locks and mutexes are used by kmemleak:
  15 *
  16 * - kmemleak_lock (raw_spinlock_t): protects the object_list modifications and
  17 *   accesses to the object_tree_root (or object_phys_tree_root). The
  18 *   object_list is the main list holding the metadata (struct kmemleak_object)
  19 *   for the allocated memory blocks. The object_tree_root and object_phys_tree_root
  20 *   are red black trees used to look-up metadata based on a pointer to the
  21 *   corresponding memory block. The object_phys_tree_root is for objects
  22 *   allocated with physical address. The kmemleak_object structures are
  23 *   added to the object_list and object_tree_root (or object_phys_tree_root)
  24 *   in the create_object() function called from the kmemleak_alloc() (or
  25 *   kmemleak_alloc_phys()) callback and removed in delete_object() called from
  26 *   the kmemleak_free() callback
  27 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
  28 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
  29 *   that some members of this structure may be protected by other means
  30 *   (atomic or kmemleak_lock). This lock is also held when scanning the
  31 *   corresponding memory block to avoid the kernel freeing it via the
  32 *   kmemleak_free() callback. This is less heavyweight than holding a global
  33 *   lock like kmemleak_lock during scanning.
  34 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
  35 *   unreferenced objects at a time. The gray_list contains the objects which
  36 *   are already referenced or marked as false positives and need to be
  37 *   scanned. This list is only modified during a scanning episode when the
  38 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
  39 *   Note that the kmemleak_object.use_count is incremented when an object is
  40 *   added to the gray_list and therefore cannot be freed. This mutex also
  41 *   prevents multiple users of the "kmemleak" debugfs file together with
  42 *   modifications to the memory scanning parameters including the scan_thread
  43 *   pointer
  44 *
  45 * Locks and mutexes are acquired/nested in the following order:
  46 *
  47 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
  48 *
  49 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
  50 * regions.
  51 *
  52 * The kmemleak_object structures have a use_count incremented or decremented
  53 * using the get_object()/put_object() functions. When the use_count becomes
  54 * 0, this count can no longer be incremented and put_object() schedules the
  55 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
  56 * function must be protected by rcu_read_lock() to avoid accessing a freed
  57 * structure.
  58 */
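     /*
      * To make the ordering above concrete, this is roughly the nesting
      * reached while scanning a single memory block (a sketch of what
      * scan_object()/scan_block() below do, not additional locking):
      *
      *   mutex_lock(&scan_mutex);
      *   raw_spin_lock_irqsave(&object->lock, flags);    - scan_object()
      *   raw_spin_lock_irqsave(&kmemleak_lock, flags2);  - scan_block()
      *   raw_spin_lock_nested(&other_object->lock, SINGLE_DEPTH_NESTING);
      */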
  59
  60#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  61
  62#include <linux/init.h>
  63#include <linux/kernel.h>
  64#include <linux/list.h>
  65#include <linux/sched/signal.h>
  66#include <linux/sched/task.h>
  67#include <linux/sched/task_stack.h>
  68#include <linux/jiffies.h>
  69#include <linux/delay.h>
  70#include <linux/export.h>
  71#include <linux/kthread.h>
  72#include <linux/rbtree.h>
  73#include <linux/fs.h>
  74#include <linux/debugfs.h>
  75#include <linux/seq_file.h>
  76#include <linux/cpumask.h>
  77#include <linux/spinlock.h>
  78#include <linux/module.h>
  79#include <linux/mutex.h>
  80#include <linux/rcupdate.h>
  81#include <linux/stacktrace.h>
  82#include <linux/stackdepot.h>
  83#include <linux/cache.h>
  84#include <linux/percpu.h>
  85#include <linux/memblock.h>
  86#include <linux/pfn.h>
  87#include <linux/mmzone.h>
  88#include <linux/slab.h>
  89#include <linux/thread_info.h>
  90#include <linux/err.h>
  91#include <linux/uaccess.h>
  92#include <linux/string.h>
  93#include <linux/nodemask.h>
  94#include <linux/mm.h>
  95#include <linux/workqueue.h>
  96#include <linux/crc32.h>
  97
  98#include <asm/sections.h>
  99#include <asm/processor.h>
 100#include <linux/atomic.h>
 101
 102#include <linux/kasan.h>
 103#include <linux/kfence.h>
 104#include <linux/kmemleak.h>
 105#include <linux/memory_hotplug.h>
 106
 107/*
 108 * Kmemleak configuration and common defines.
 109 */
 110#define MAX_TRACE		16	/* stack trace length */
 111#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 112#define SECS_FIRST_SCAN		60	/* delay before the first scan */
 113#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
 114#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
 115
 116#define BYTES_PER_POINTER	sizeof(void *)
 117
 118/* GFP bitmask for kmemleak internal allocations */
 119#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
 120					   __GFP_NOLOCKDEP)) | \
 121				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
 122				 __GFP_NOWARN)
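     /*
      * For example, metadata for a kmalloc(..., GFP_ATOMIC) allocation is
      * itself allocated with (GFP_ATOMIC | __GFP_NORETRY | __GFP_NOMEMALLOC |
      * __GFP_NOWARN): the caller's context bits are preserved while the
      * internal allocation is kept from retrying hard, dipping into the
      * memory reserves or warning on failure.
      */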
 123
 124/* scanning area inside a memory block */
 125struct kmemleak_scan_area {
 126	struct hlist_node node;
 127	unsigned long start;
 128	size_t size;
 129};
 130
 131#define KMEMLEAK_GREY	0
 132#define KMEMLEAK_BLACK	-1
 133
 134/*
 135 * Structure holding the metadata for each allocated memory block.
 136 * Modifications to such objects should be made while holding the
 137 * object->lock. Insertions or deletions from object_list, gray_list or
 138 * rb_node are already protected by the corresponding locks or mutex (see
 139 * the notes on locking above). These objects are reference-counted
 140 * (use_count) and freed using the RCU mechanism.
 141 */
 142struct kmemleak_object {
 143	raw_spinlock_t lock;
 144	unsigned int flags;		/* object status flags */
 145	struct list_head object_list;
 146	struct list_head gray_list;
 147	struct rb_node rb_node;
 148	struct rcu_head rcu;		/* object_list lockless traversal */
 149	/* object usage count; object freed when use_count == 0 */
 150	atomic_t use_count;
 151	unsigned long pointer;
 152	size_t size;
 153	/* pass surplus references to this pointer */
 154	unsigned long excess_ref;
  155	/* minimum number of pointers found before it is considered a leak */
 156	int min_count;
 157	/* the total number of pointers found pointing to this object */
 158	int count;
 159	/* checksum for detecting modified objects */
 160	u32 checksum;
 161	/* memory ranges to be scanned inside an object (empty for all) */
 162	struct hlist_head area_list;
 163	depot_stack_handle_t trace_handle;
 164	unsigned long jiffies;		/* creation timestamp */
 165	pid_t pid;			/* pid of the current task */
 166	char comm[TASK_COMM_LEN];	/* executable name */
 167};
 168
 169/* flag representing the memory block allocation status */
 170#define OBJECT_ALLOCATED	(1 << 0)
  171/* flag set after the first reporting of an unreferenced object */
 172#define OBJECT_REPORTED		(1 << 1)
 173/* flag set to not scan the object */
 174#define OBJECT_NO_SCAN		(1 << 2)
 175/* flag set to fully scan the object when scan_area allocation failed */
 176#define OBJECT_FULL_SCAN	(1 << 3)
 177/* flag set for object allocated with physical address */
 178#define OBJECT_PHYS		(1 << 4)
 179
 180#define HEX_PREFIX		"    "
 181/* number of bytes to print per line; must be 16 or 32 */
 182#define HEX_ROW_SIZE		16
 183/* number of bytes to print at a time (1, 2, 4, 8) */
 184#define HEX_GROUP_SIZE		1
 185/* include ASCII after the hex output */
 186#define HEX_ASCII		1
 187/* max number of lines to be printed */
 188#define HEX_MAX_LINES		2
 189
 190/* the list of all allocated objects */
 191static LIST_HEAD(object_list);
 192/* the list of gray-colored objects (see color_gray comment below) */
 193static LIST_HEAD(gray_list);
 194/* memory pool allocation */
 195static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
 196static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
 197static LIST_HEAD(mem_pool_free_list);
 198/* search tree for object boundaries */
 199static struct rb_root object_tree_root = RB_ROOT;
 200/* search tree for object (with OBJECT_PHYS flag) boundaries */
 201static struct rb_root object_phys_tree_root = RB_ROOT;
 202/* protecting the access to object_list, object_tree_root (or object_phys_tree_root) */
 203static DEFINE_RAW_SPINLOCK(kmemleak_lock);
 204
 205/* allocation caches for kmemleak internal data */
 206static struct kmem_cache *object_cache;
 207static struct kmem_cache *scan_area_cache;
 208
 209/* set if tracing memory operations is enabled */
 210static int kmemleak_enabled = 1;
 211/* same as above but only for the kmemleak_free() callback */
 212static int kmemleak_free_enabled = 1;
 213/* set in the late_initcall if there were no errors */
 214static int kmemleak_initialized;
 215/* set if a kmemleak warning was issued */
 216static int kmemleak_warning;
 217/* set if a fatal kmemleak error has occurred */
 218static int kmemleak_error;
 219
 220/* minimum and maximum address that may be valid pointers */
 221static unsigned long min_addr = ULONG_MAX;
 222static unsigned long max_addr;
 223
 224static struct task_struct *scan_thread;
 225/* used to avoid reporting of recently allocated objects */
 226static unsigned long jiffies_min_age;
 227static unsigned long jiffies_last_scan;
 228/* delay between automatic memory scannings */
 229static unsigned long jiffies_scan_wait;
 230/* enables or disables the task stacks scanning */
 231static int kmemleak_stack_scan = 1;
 232/* protects the memory scanning, parameters and debug/kmemleak file access */
 233static DEFINE_MUTEX(scan_mutex);
 234/* setting kmemleak=on, will set this var, skipping the disable */
 235static int kmemleak_skip_disable;
 236/* If there are leaks that can be reported */
 237static bool kmemleak_found_leaks;
 238
 239static bool kmemleak_verbose;
 240module_param_named(verbose, kmemleak_verbose, bool, 0600);
 241
 242static void kmemleak_disable(void);
 243
 244/*
 245 * Print a warning and dump the stack trace.
 246 */
 247#define kmemleak_warn(x...)	do {		\
 248	pr_warn(x);				\
 249	dump_stack();				\
 250	kmemleak_warning = 1;			\
 251} while (0)
 252
 253/*
 254 * Macro invoked when a serious kmemleak condition occurred and cannot be
 255 * recovered from. Kmemleak will be disabled and further allocation/freeing
 256 * tracing no longer available.
 257 */
 258#define kmemleak_stop(x...)	do {	\
 259	kmemleak_warn(x);		\
 260	kmemleak_disable();		\
 261} while (0)
 262
 263#define warn_or_seq_printf(seq, fmt, ...)	do {	\
 264	if (seq)					\
 265		seq_printf(seq, fmt, ##__VA_ARGS__);	\
 266	else						\
 267		pr_warn(fmt, ##__VA_ARGS__);		\
 268} while (0)
 269
 270static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
 271				 int rowsize, int groupsize, const void *buf,
 272				 size_t len, bool ascii)
 273{
 274	if (seq)
 275		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
 276			     buf, len, ascii);
 277	else
 278		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
 279			       rowsize, groupsize, buf, len, ascii);
 280}
 281
 282/*
 283 * Printing of the objects hex dump to the seq file. The number of lines to be
 284 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 285 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 286 * with the object->lock held.
 287 */
 288static void hex_dump_object(struct seq_file *seq,
 289			    struct kmemleak_object *object)
 290{
 291	const u8 *ptr = (const u8 *)object->pointer;
 292	size_t len;
 293
 294	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
 295		return;
 296
 297	/* limit the number of lines to HEX_MAX_LINES */
 298	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
 299
 300	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
 301	kasan_disable_current();
 302	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
 303			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
 304	kasan_enable_current();
 305}
 306
 307/*
 308 * Object colors, encoded with count and min_count:
 309 * - white - orphan object, not enough references to it (count < min_count)
 310 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 311 *		sufficient references to it (count >= min_count)
 312 * - black - ignore, it doesn't contain references (e.g. text section)
 313 *		(min_count == -1). No function defined for this color.
  314 * Newly created objects start with object->count == 0 and are therefore
  315 * white until the next memory scan establishes their real reference count.
 316 */
 317static bool color_white(const struct kmemleak_object *object)
 318{
 319	return object->count != KMEMLEAK_BLACK &&
 320		object->count < object->min_count;
 321}
 322
 323static bool color_gray(const struct kmemleak_object *object)
 324{
 325	return object->min_count != KMEMLEAK_BLACK &&
 326		object->count >= object->min_count;
 327}
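     /*
      * Worked example: an object created with min_count == 1 starts with
      * count == 0 and is therefore white. Each pointer to it found during a
      * scan increments count; once count >= min_count the object turns gray,
      * is added to the gray_list and is itself scanned for references. With
      * min_count == 0 the object is gray from the start, and with
      * min_count == KMEMLEAK_BLACK (-1) neither predicate above matches.
      */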
 328
 329/*
 330 * Objects are considered unreferenced only if their color is white, they have
  331 * not been deleted and have a minimum age to avoid false positives caused by
 332 * pointers temporarily stored in CPU registers.
 333 */
 334static bool unreferenced_object(struct kmemleak_object *object)
 335{
 336	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
 337		time_before_eq(object->jiffies + jiffies_min_age,
 338			       jiffies_last_scan);
 339}
 340
 341/*
 342 * Printing of the unreferenced objects information to the seq file. The
 343 * print_unreferenced function must be called with the object->lock held.
 344 */
 345static void print_unreferenced(struct seq_file *seq,
 346			       struct kmemleak_object *object)
 347{
 348	int i;
 349	unsigned long *entries;
 350	unsigned int nr_entries;
 351	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 352
 353	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
 354	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 355			  object->pointer, object->size);
 356	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
 357			   object->comm, object->pid, object->jiffies,
 358			   msecs_age / 1000, msecs_age % 1000);
 359	hex_dump_object(seq, object);
 360	warn_or_seq_printf(seq, "  backtrace:\n");
 361
 362	for (i = 0; i < nr_entries; i++) {
 363		void *ptr = (void *)entries[i];
 364		warn_or_seq_printf(seq, "    [<%pK>] %pS\n", ptr, ptr);
 365	}
 366}
 367
 368/*
 369 * Print the kmemleak_object information. This function is used mainly for
  370 * debugging special cases of kmemleak operations. It must be called with
 371 * the object->lock held.
 372 */
 373static void dump_object_info(struct kmemleak_object *object)
 374{
 375	pr_notice("Object 0x%08lx (size %zu):\n",
 376			object->pointer, object->size);
 377	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
 378			object->comm, object->pid, object->jiffies);
 379	pr_notice("  min_count = %d\n", object->min_count);
 380	pr_notice("  count = %d\n", object->count);
 381	pr_notice("  flags = 0x%x\n", object->flags);
 382	pr_notice("  checksum = %u\n", object->checksum);
 383	pr_notice("  backtrace:\n");
 384	if (object->trace_handle)
 385		stack_depot_print(object->trace_handle);
 386}
 387
 388/*
 389 * Look-up a memory block metadata (kmemleak_object) in the object search
 390 * tree based on a pointer value. If alias is 0, only values pointing to the
 391 * beginning of the memory block are allowed. The kmemleak_lock must be held
 392 * when calling this function.
 393 */
 394static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
 395					       bool is_phys)
 396{
 397	struct rb_node *rb = is_phys ? object_phys_tree_root.rb_node :
 398			     object_tree_root.rb_node;
 399	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 400
 401	while (rb) {
 402		struct kmemleak_object *object;
 403		unsigned long untagged_objp;
 404
 405		object = rb_entry(rb, struct kmemleak_object, rb_node);
 406		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
 407
 408		if (untagged_ptr < untagged_objp)
 409			rb = object->rb_node.rb_left;
 410		else if (untagged_objp + object->size <= untagged_ptr)
 411			rb = object->rb_node.rb_right;
 412		else if (untagged_objp == untagged_ptr || alias)
 413			return object;
 414		else {
 415			kmemleak_warn("Found object by alias at 0x%08lx\n",
 416				      ptr);
 417			dump_object_info(object);
 418			break;
 419		}
 420	}
 421	return NULL;
 422}
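     /*
      * Example: for an object registered over [0x1000, 0x1040), looking up
      * 0x1010 succeeds only when alias is non-zero; with alias == 0 the same
      * lookup warns ("Found object by alias") and returns NULL because the
      * pointer does not reference the start of the block.
      */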
 423
  424/* Look up a kmemleak object which was allocated with a virtual address. */
 425static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 426{
 427	return __lookup_object(ptr, alias, false);
 428}
 429
 430/*
 431 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
  432 * that once an object's use_count has reached 0, the RCU freeing has already
  433 * been registered and the object should no longer be used. This function must be
 434 * called under the protection of rcu_read_lock().
 435 */
 436static int get_object(struct kmemleak_object *object)
 437{
 438	return atomic_inc_not_zero(&object->use_count);
 439}
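     /*
      * Typical usage (see __find_and_get_object()): look the object up under
      * rcu_read_lock(), then try get_object(); a zero return means the final
      * put_object() has already run and the object is about to be freed.
      */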
 440
 441/*
 442 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 443 */
 444static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
 445{
 446	unsigned long flags;
 447	struct kmemleak_object *object;
 448
 449	/* try the slab allocator first */
 450	if (object_cache) {
 451		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 452		if (object)
 453			return object;
 454	}
 455
 456	/* slab allocation failed, try the memory pool */
 457	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 458	object = list_first_entry_or_null(&mem_pool_free_list,
 459					  typeof(*object), object_list);
 460	if (object)
 461		list_del(&object->object_list);
 462	else if (mem_pool_free_count)
 463		object = &mem_pool[--mem_pool_free_count];
 464	else
 465		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
 466	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 467
 468	return object;
 469}
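     /*
      * The static mem_pool[] mainly covers the window before kmemleak_init()
      * has created object_cache: early boot allocations still need metadata,
      * so entries are carved out of the pool until the slab allocator is
      * usable, and recycled via mem_pool_free_list afterwards.
      */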
 470
 471/*
 472 * Return the object to either the slab allocator or the memory pool.
 473 */
 474static void mem_pool_free(struct kmemleak_object *object)
 475{
 476	unsigned long flags;
 477
 478	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
 479		kmem_cache_free(object_cache, object);
 480		return;
 481	}
 482
 483	/* add the object to the memory pool free list */
 484	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 485	list_add(&object->object_list, &mem_pool_free_list);
 486	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 487}
 488
 489/*
 490 * RCU callback to free a kmemleak_object.
 491 */
 492static void free_object_rcu(struct rcu_head *rcu)
 493{
 494	struct hlist_node *tmp;
 495	struct kmemleak_scan_area *area;
 496	struct kmemleak_object *object =
 497		container_of(rcu, struct kmemleak_object, rcu);
 498
 499	/*
 500	 * Once use_count is 0 (guaranteed by put_object), there is no other
 501	 * code accessing this object, hence no need for locking.
 502	 */
 503	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
 504		hlist_del(&area->node);
 505		kmem_cache_free(scan_area_cache, area);
 506	}
 507	mem_pool_free(object);
 508}
 509
 510/*
 511 * Decrement the object use_count. Once the count is 0, free the object using
 512 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 513 * delete_object() path, the delayed RCU freeing ensures that there is no
 514 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 515 * is also possible.
 516 */
 517static void put_object(struct kmemleak_object *object)
 518{
 519	if (!atomic_dec_and_test(&object->use_count))
 520		return;
 521
 522	/* should only get here after delete_object was called */
 523	WARN_ON(object->flags & OBJECT_ALLOCATED);
 524
 525	/*
 526	 * It may be too early for the RCU callbacks, however, there is no
 527	 * concurrent object_list traversal when !object_cache and all objects
 528	 * came from the memory pool. Free the object directly.
 529	 */
 530	if (object_cache)
 531		call_rcu(&object->rcu, free_object_rcu);
 532	else
 533		free_object_rcu(&object->rcu);
 534}
 535
 536/*
 537 * Look up an object in the object search tree and increase its use_count.
 538 */
 539static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
 540						     bool is_phys)
 541{
 542	unsigned long flags;
 543	struct kmemleak_object *object;
 544
 545	rcu_read_lock();
 546	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 547	object = __lookup_object(ptr, alias, is_phys);
 548	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 549
 550	/* check whether the object is still available */
 551	if (object && !get_object(object))
 552		object = NULL;
 553	rcu_read_unlock();
 554
 555	return object;
 556}
 557
  558/* Look up and get an object which was allocated with a virtual address. */
 559static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 560{
 561	return __find_and_get_object(ptr, alias, false);
 562}
 563
 564/*
 565 * Remove an object from the object_tree_root (or object_phys_tree_root)
 566 * and object_list. Must be called with the kmemleak_lock held _if_ kmemleak
 567 * is still enabled.
 568 */
 569static void __remove_object(struct kmemleak_object *object)
 570{
 571	rb_erase(&object->rb_node, object->flags & OBJECT_PHYS ?
 572				   &object_phys_tree_root :
 573				   &object_tree_root);
 574	list_del_rcu(&object->object_list);
 575}
 576
 577/*
 578 * Look up an object in the object search tree and remove it from both
 579 * object_tree_root (or object_phys_tree_root) and object_list. The
 580 * returned object's use_count should be at least 1, as initially set
 581 * by create_object().
 582 */
 583static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
 584						      bool is_phys)
 585{
 586	unsigned long flags;
 587	struct kmemleak_object *object;
 588
 589	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 590	object = __lookup_object(ptr, alias, is_phys);
 591	if (object)
 592		__remove_object(object);
 593	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 594
 595	return object;
 596}
 597
 598static noinline depot_stack_handle_t set_track_prepare(void)
 599{
 600	depot_stack_handle_t trace_handle;
 601	unsigned long entries[MAX_TRACE];
 602	unsigned int nr_entries;
 603
 604	if (!kmemleak_initialized)
 605		return 0;
 606	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
 607	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 608
 609	return trace_handle;
 610}
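     /*
      * Note: the skipnr of 3 passed to stack_trace_save() drops the innermost
      * frames (set_track_prepare() and its kmemleak-internal callers) so that
      * the saved trace starts near the actual allocation site.
      */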
 611
 612/*
 613 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 614 * memory block and add it to the object_list and object_tree_root (or
 615 * object_phys_tree_root).
 616 */
 617static void __create_object(unsigned long ptr, size_t size,
 618			    int min_count, gfp_t gfp, bool is_phys)
 619{
 620	unsigned long flags;
 621	struct kmemleak_object *object, *parent;
 622	struct rb_node **link, *rb_parent;
 623	unsigned long untagged_ptr;
 624	unsigned long untagged_objp;
 625
 626	object = mem_pool_alloc(gfp);
 627	if (!object) {
 628		pr_warn("Cannot allocate a kmemleak_object structure\n");
 629		kmemleak_disable();
 630		return;
 631	}
 632
 633	INIT_LIST_HEAD(&object->object_list);
 634	INIT_LIST_HEAD(&object->gray_list);
 635	INIT_HLIST_HEAD(&object->area_list);
 636	raw_spin_lock_init(&object->lock);
 637	atomic_set(&object->use_count, 1);
 638	object->flags = OBJECT_ALLOCATED | (is_phys ? OBJECT_PHYS : 0);
 639	object->pointer = ptr;
 640	object->size = kfence_ksize((void *)ptr) ?: size;
 641	object->excess_ref = 0;
 642	object->min_count = min_count;
 643	object->count = 0;			/* white color initially */
 644	object->jiffies = jiffies;
 645	object->checksum = 0;
 646
 647	/* task information */
 648	if (in_hardirq()) {
 649		object->pid = 0;
 650		strncpy(object->comm, "hardirq", sizeof(object->comm));
 651	} else if (in_serving_softirq()) {
 652		object->pid = 0;
 653		strncpy(object->comm, "softirq", sizeof(object->comm));
 654	} else {
 655		object->pid = current->pid;
 656		/*
 657		 * There is a small chance of a race with set_task_comm(),
 658		 * however using get_task_comm() here may cause locking
 659		 * dependency issues with current->alloc_lock. In the worst
  660		 * case, the recorded task name may not be accurate.
 661		 */
 662		strncpy(object->comm, current->comm, sizeof(object->comm));
 663	}
 664
 665	/* kernel backtrace */
 666	object->trace_handle = set_track_prepare();
 667
 668	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 669
 670	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 671	/*
  672	 * Only update min_addr and max_addr for objects
  673	 * storing a virtual address.
 674	 */
 675	if (!is_phys) {
 676		min_addr = min(min_addr, untagged_ptr);
 677		max_addr = max(max_addr, untagged_ptr + size);
 678	}
 679	link = is_phys ? &object_phys_tree_root.rb_node :
 680		&object_tree_root.rb_node;
 681	rb_parent = NULL;
 682	while (*link) {
 683		rb_parent = *link;
 684		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
 685		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
 686		if (untagged_ptr + size <= untagged_objp)
 687			link = &parent->rb_node.rb_left;
 688		else if (untagged_objp + parent->size <= untagged_ptr)
 689			link = &parent->rb_node.rb_right;
 690		else {
 691			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
 692				      ptr);
 693			/*
 694			 * No need for parent->lock here since "parent" cannot
 695			 * be freed while the kmemleak_lock is held.
 696			 */
 697			dump_object_info(parent);
 698			kmem_cache_free(object_cache, object);
 699			goto out;
 700		}
 701	}
 702	rb_link_node(&object->rb_node, rb_parent, link);
 703	rb_insert_color(&object->rb_node, is_phys ? &object_phys_tree_root :
 704					  &object_tree_root);
 705	list_add_tail_rcu(&object->object_list, &object_list);
 706out:
 707	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 708}
 709
  710/* Create a kmemleak object for a block allocated with a virtual address. */
 711static void create_object(unsigned long ptr, size_t size,
 712			  int min_count, gfp_t gfp)
 713{
 714	__create_object(ptr, size, min_count, gfp, false);
 715}
 716
  717/* Create a kmemleak object for a block allocated with a physical address. */
 718static void create_object_phys(unsigned long ptr, size_t size,
 719			       int min_count, gfp_t gfp)
 720{
 721	__create_object(ptr, size, min_count, gfp, true);
 722}
 723
 724/*
 725 * Mark the object as not allocated and schedule RCU freeing via put_object().
 726 */
 727static void __delete_object(struct kmemleak_object *object)
 728{
 729	unsigned long flags;
 730
 731	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
 732	WARN_ON(atomic_read(&object->use_count) < 1);
 733
 734	/*
 735	 * Locking here also ensures that the corresponding memory block
 736	 * cannot be freed when it is being scanned.
 737	 */
 738	raw_spin_lock_irqsave(&object->lock, flags);
 739	object->flags &= ~OBJECT_ALLOCATED;
 740	raw_spin_unlock_irqrestore(&object->lock, flags);
 741	put_object(object);
 742}
 743
 744/*
 745 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 746 * delete it.
 747 */
 748static void delete_object_full(unsigned long ptr)
 749{
 750	struct kmemleak_object *object;
 751
 752	object = find_and_remove_object(ptr, 0, false);
 753	if (!object) {
 754#ifdef DEBUG
 755		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
 756			      ptr);
 757#endif
 758		return;
 759	}
 760	__delete_object(object);
 761}
 762
 763/*
 764 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 765 * delete it. If the memory block is partially freed, the function may create
 766 * additional metadata for the remaining parts of the block.
 767 */
 768static void delete_object_part(unsigned long ptr, size_t size, bool is_phys)
 769{
 770	struct kmemleak_object *object;
 771	unsigned long start, end;
 772
 773	object = find_and_remove_object(ptr, 1, is_phys);
 774	if (!object) {
 775#ifdef DEBUG
 776		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
 777			      ptr, size);
 778#endif
 779		return;
 780	}
 781
 782	/*
 783	 * Create one or two objects that may result from the memory block
 784	 * split. Note that partial freeing is only done by free_bootmem() and
 785	 * this happens before kmemleak_init() is called.
 786	 */
 787	start = object->pointer;
 788	end = object->pointer + object->size;
 789	if (ptr > start)
 790		__create_object(start, ptr - start, object->min_count,
 791			      GFP_KERNEL, is_phys);
 792	if (ptr + size < end)
 793		__create_object(ptr + size, end - ptr - size, object->min_count,
 794			      GFP_KERNEL, is_phys);
 795
 796	__delete_object(object);
 797}
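     /*
      * Example: partially freeing [0x1100, 0x1200) out of an object covering
      * [0x1000, 0x1400) deletes the original metadata and re-creates objects
      * for the surviving ranges [0x1000, 0x1100) and [0x1200, 0x1400), both
      * inheriting the old min_count.
      */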
 798
 799static void __paint_it(struct kmemleak_object *object, int color)
 800{
 801	object->min_count = color;
 802	if (color == KMEMLEAK_BLACK)
 803		object->flags |= OBJECT_NO_SCAN;
 804}
 805
 806static void paint_it(struct kmemleak_object *object, int color)
 807{
 808	unsigned long flags;
 809
 810	raw_spin_lock_irqsave(&object->lock, flags);
 811	__paint_it(object, color);
 812	raw_spin_unlock_irqrestore(&object->lock, flags);
 813}
 814
 815static void paint_ptr(unsigned long ptr, int color, bool is_phys)
 816{
 817	struct kmemleak_object *object;
 818
 819	object = __find_and_get_object(ptr, 0, is_phys);
 820	if (!object) {
 821		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
 822			      ptr,
 823			      (color == KMEMLEAK_GREY) ? "Grey" :
 824			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
 825		return;
 826	}
 827	paint_it(object, color);
 828	put_object(object);
 829}
 830
 831/*
 832 * Mark an object permanently as gray-colored so that it can no longer be
 833 * reported as a leak. This is used in general to mark a false positive.
 834 */
 835static void make_gray_object(unsigned long ptr)
 836{
 837	paint_ptr(ptr, KMEMLEAK_GREY, false);
 838}
 839
 840/*
 841 * Mark the object as black-colored so that it is ignored from scans and
 842 * reporting.
 843 */
 844static void make_black_object(unsigned long ptr, bool is_phys)
 845{
 846	paint_ptr(ptr, KMEMLEAK_BLACK, is_phys);
 847}
 848
 849/*
 850 * Add a scanning area to the object. If at least one such area is added,
 851 * kmemleak will only scan these ranges rather than the whole memory block.
 852 */
 853static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 854{
 855	unsigned long flags;
 856	struct kmemleak_object *object;
 857	struct kmemleak_scan_area *area = NULL;
 858	unsigned long untagged_ptr;
 859	unsigned long untagged_objp;
 860
 861	object = find_and_get_object(ptr, 1);
 862	if (!object) {
 863		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 864			      ptr);
 865		return;
 866	}
 867
 868	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 869	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
 870
 871	if (scan_area_cache)
 872		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 873
 874	raw_spin_lock_irqsave(&object->lock, flags);
 875	if (!area) {
 876		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
 877		/* mark the object for full scan to avoid false positives */
 878		object->flags |= OBJECT_FULL_SCAN;
 879		goto out_unlock;
 880	}
 881	if (size == SIZE_MAX) {
 882		size = untagged_objp + object->size - untagged_ptr;
 883	} else if (untagged_ptr + size > untagged_objp + object->size) {
 884		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 885		dump_object_info(object);
 886		kmem_cache_free(scan_area_cache, area);
 887		goto out_unlock;
 888	}
 889
 890	INIT_HLIST_NODE(&area->node);
 891	area->start = ptr;
 892	area->size = size;
 893
 894	hlist_add_head(&area->node, &object->area_list);
 895out_unlock:
 896	raw_spin_unlock_irqrestore(&object->lock, flags);
 897	put_object(object);
 898}
 899
 900/*
 901 * Any surplus references (object already gray) to 'ptr' are passed to
 902 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 903 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 904 * (see free_thread_stack()).
 905 */
 906static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
 907{
 908	unsigned long flags;
 909	struct kmemleak_object *object;
 910
 911	object = find_and_get_object(ptr, 0);
 912	if (!object) {
 913		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
 914			      ptr);
 915		return;
 916	}
 917
 918	raw_spin_lock_irqsave(&object->lock, flags);
 919	object->excess_ref = excess_ref;
 920	raw_spin_unlock_irqrestore(&object->lock, flags);
 921	put_object(object);
 922}
 923
 924/*
  925 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
  926 * pointer. Such an object will not be scanned by kmemleak but references to it
 927 * are searched.
 928 */
 929static void object_no_scan(unsigned long ptr)
 930{
 931	unsigned long flags;
 932	struct kmemleak_object *object;
 933
 934	object = find_and_get_object(ptr, 0);
 935	if (!object) {
 936		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
 937		return;
 938	}
 939
 940	raw_spin_lock_irqsave(&object->lock, flags);
 941	object->flags |= OBJECT_NO_SCAN;
 942	raw_spin_unlock_irqrestore(&object->lock, flags);
 943	put_object(object);
 944}
 945
 946/**
 947 * kmemleak_alloc - register a newly allocated object
 948 * @ptr:	pointer to beginning of the object
 949 * @size:	size of the object
 950 * @min_count:	minimum number of references to this object. If during memory
 951 *		scanning a number of references less than @min_count is found,
 952 *		the object is reported as a memory leak. If @min_count is 0,
 953 *		the object is never reported as a leak. If @min_count is -1,
 954 *		the object is ignored (not scanned and not reported as a leak)
 955 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 956 *
 957 * This function is called from the kernel allocators when a new object
 958 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 959 */
 960void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 961			  gfp_t gfp)
 962{
 963	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 964
 965	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 966		create_object((unsigned long)ptr, size, min_count, gfp);
 967}
 968EXPORT_SYMBOL_GPL(kmemleak_alloc);
 969
 970/**
 971 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 972 * @ptr:	__percpu pointer to beginning of the object
 973 * @size:	size of the object
 974 * @gfp:	flags used for kmemleak internal memory allocations
 975 *
 976 * This function is called from the kernel percpu allocator when a new object
 977 * (memory block) is allocated (alloc_percpu).
 978 */
 979void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
 980				 gfp_t gfp)
 981{
 982	unsigned int cpu;
 983
 984	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
 985
 986	/*
 987	 * Percpu allocations are only scanned and not reported as leaks
 988	 * (min_count is set to 0).
 989	 */
 990	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 991		for_each_possible_cpu(cpu)
 992			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
 993				      size, 0, gfp);
 994}
 995EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 996
 997/**
 998 * kmemleak_vmalloc - register a newly vmalloc'ed object
 999 * @area:	pointer to vm_struct
1000 * @size:	size of the object
1001 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
1002 *
1003 * This function is called from the vmalloc() kernel allocator when a new
1004 * object (memory block) is allocated.
1005 */
1006void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
1007{
1008	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
1009
1010	/*
1011	 * A min_count = 2 is needed because vm_struct contains a reference to
1012	 * the virtual address of the vmalloc'ed block.
1013	 */
1014	if (kmemleak_enabled) {
1015		create_object((unsigned long)area->addr, size, 2, gfp);
1016		object_set_excess_ref((unsigned long)area,
1017				      (unsigned long)area->addr);
1018	}
1019}
1020EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
1021
1022/**
1023 * kmemleak_free - unregister a previously registered object
1024 * @ptr:	pointer to beginning of the object
1025 *
1026 * This function is called from the kernel allocators when an object (memory
1027 * block) is freed (kmem_cache_free, kfree, vfree etc.).
1028 */
1029void __ref kmemleak_free(const void *ptr)
1030{
1031	pr_debug("%s(0x%p)\n", __func__, ptr);
1032
1033	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1034		delete_object_full((unsigned long)ptr);
1035}
1036EXPORT_SYMBOL_GPL(kmemleak_free);
1037
1038/**
1039 * kmemleak_free_part - partially unregister a previously registered object
 1040 * @ptr:	pointer to the beginning of or inside the object. This also
1041 *		represents the start of the range to be freed
1042 * @size:	size to be unregistered
1043 *
1044 * This function is called when only a part of a memory block is freed
1045 * (usually from the bootmem allocator).
1046 */
1047void __ref kmemleak_free_part(const void *ptr, size_t size)
1048{
1049	pr_debug("%s(0x%p)\n", __func__, ptr);
1050
1051	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1052		delete_object_part((unsigned long)ptr, size, false);
1053}
1054EXPORT_SYMBOL_GPL(kmemleak_free_part);
1055
1056/**
1057 * kmemleak_free_percpu - unregister a previously registered __percpu object
1058 * @ptr:	__percpu pointer to beginning of the object
1059 *
1060 * This function is called from the kernel percpu allocator when an object
1061 * (memory block) is freed (free_percpu).
1062 */
1063void __ref kmemleak_free_percpu(const void __percpu *ptr)
1064{
1065	unsigned int cpu;
1066
1067	pr_debug("%s(0x%p)\n", __func__, ptr);
1068
1069	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1070		for_each_possible_cpu(cpu)
1071			delete_object_full((unsigned long)per_cpu_ptr(ptr,
1072								      cpu));
1073}
1074EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1075
1076/**
1077 * kmemleak_update_trace - update object allocation stack trace
1078 * @ptr:	pointer to beginning of the object
1079 *
1080 * Override the object allocation stack trace for cases where the actual
1081 * allocation place is not always useful.
1082 */
1083void __ref kmemleak_update_trace(const void *ptr)
1084{
1085	struct kmemleak_object *object;
1086	unsigned long flags;
1087
1088	pr_debug("%s(0x%p)\n", __func__, ptr);
1089
1090	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1091		return;
1092
1093	object = find_and_get_object((unsigned long)ptr, 1);
1094	if (!object) {
1095#ifdef DEBUG
1096		kmemleak_warn("Updating stack trace for unknown object at %p\n",
1097			      ptr);
1098#endif
1099		return;
1100	}
1101
1102	raw_spin_lock_irqsave(&object->lock, flags);
1103	object->trace_handle = set_track_prepare();
1104	raw_spin_unlock_irqrestore(&object->lock, flags);
1105
1106	put_object(object);
1107}
1108EXPORT_SYMBOL(kmemleak_update_trace);
1109
1110/**
1111 * kmemleak_not_leak - mark an allocated object as false positive
1112 * @ptr:	pointer to beginning of the object
1113 *
1114 * Calling this function on an object will cause the memory block to no longer
 1115 * be reported as a leak and always be scanned.
1116 */
1117void __ref kmemleak_not_leak(const void *ptr)
1118{
1119	pr_debug("%s(0x%p)\n", __func__, ptr);
1120
1121	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1122		make_gray_object((unsigned long)ptr);
1123}
1124EXPORT_SYMBOL(kmemleak_not_leak);
1125
1126/**
1127 * kmemleak_ignore - ignore an allocated object
1128 * @ptr:	pointer to beginning of the object
1129 *
1130 * Calling this function on an object will cause the memory block to be
1131 * ignored (not scanned and not reported as a leak). This is usually done when
1132 * it is known that the corresponding block is not a leak and does not contain
1133 * any references to other allocated memory blocks.
1134 */
1135void __ref kmemleak_ignore(const void *ptr)
1136{
1137	pr_debug("%s(0x%p)\n", __func__, ptr);
1138
1139	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1140		make_black_object((unsigned long)ptr, false);
1141}
1142EXPORT_SYMBOL(kmemleak_ignore);
1143
1144/**
1145 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 1146 * @ptr:	pointer to the beginning of or inside the object. This also
1147 *		represents the start of the scan area
1148 * @size:	size of the scan area
1149 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1150 *
1151 * This function is used when it is known that only certain parts of an object
1152 * contain references to other objects. Kmemleak will only scan these areas
 1153 * reducing the number of false negatives.
1154 */
1155void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1156{
1157	pr_debug("%s(0x%p)\n", __func__, ptr);
1158
1159	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1160		add_scan_area((unsigned long)ptr, size, gfp);
1161}
1162EXPORT_SYMBOL(kmemleak_scan_area);
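     /*
      * A typical caller is the module loader, whose kmemleak_load_module()
      * restricts scanning of the module image to the data sections that can
      * actually hold pointers instead of the whole allocation.
      */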
1163
1164/**
1165 * kmemleak_no_scan - do not scan an allocated object
1166 * @ptr:	pointer to beginning of the object
1167 *
1168 * This function notifies kmemleak not to scan the given memory block. Useful
1169 * in situations where it is known that the given object does not contain any
1170 * references to other objects. Kmemleak will not scan such objects reducing
1171 * the number of false negatives.
1172 */
1173void __ref kmemleak_no_scan(const void *ptr)
1174{
1175	pr_debug("%s(0x%p)\n", __func__, ptr);
1176
1177	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1178		object_no_scan((unsigned long)ptr);
1179}
1180EXPORT_SYMBOL(kmemleak_no_scan);
1181
1182/**
1183 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1184 *			 address argument
1185 * @phys:	physical address of the object
1186 * @size:	size of the object
1187 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1188 */
1189void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
1190{
1191	pr_debug("%s(0x%pa, %zu)\n", __func__, &phys, size);
1192
1193	if (kmemleak_enabled)
1194		/*
1195		 * Create object with OBJECT_PHYS flag and
1196		 * assume min_count 0.
1197		 */
1198		create_object_phys((unsigned long)phys, size, 0, gfp);
1199}
1200EXPORT_SYMBOL(kmemleak_alloc_phys);
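     /*
      * Objects registered by physical address are kept in the separate
      * object_phys_tree_root and are only scanned when they fall inside
      * lowmem; kmemleak_scan() paints any OBJECT_PHYS object outside the
      * [min_low_pfn, max_low_pfn) range black.
      */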
1201
1202/**
1203 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1204 *			     physical address argument
 1205 * @phys:	physical address of the beginning or inside an object. This
1206 *		also represents the start of the range to be freed
1207 * @size:	size to be unregistered
1208 */
1209void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1210{
1211	pr_debug("%s(0x%pa)\n", __func__, &phys);
1212
1213	if (kmemleak_enabled)
1214		delete_object_part((unsigned long)phys, size, true);
1215}
1216EXPORT_SYMBOL(kmemleak_free_part_phys);
1217
1218/**
1219 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1220 *			  address argument
1221 * @phys:	physical address of the object
1222 */
1223void __ref kmemleak_ignore_phys(phys_addr_t phys)
1224{
1225	pr_debug("%s(0x%pa)\n", __func__, &phys);
1226
1227	if (kmemleak_enabled)
1228		make_black_object((unsigned long)phys, true);
1229}
1230EXPORT_SYMBOL(kmemleak_ignore_phys);
1231
1232/*
1233 * Update an object's checksum and return true if it was modified.
1234 */
1235static bool update_checksum(struct kmemleak_object *object)
1236{
1237	u32 old_csum = object->checksum;
1238
1239	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
1240		return false;
1241
1242	kasan_disable_current();
1243	kcsan_disable_current();
1244	object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
1245	kasan_enable_current();
1246	kcsan_enable_current();
1247
1248	return object->checksum != old_csum;
1249}
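     /*
      * The checksum is how kmemleak notices that a block was modified between
      * scans: kmemleak_scan() temporarily re-grays a still-white object whose
      * checksum changed, since the new contents may hold pointers that the
      * previous scan missed.
      */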
1250
1251/*
1252 * Update an object's references. object->lock must be held by the caller.
1253 */
1254static void update_refs(struct kmemleak_object *object)
1255{
1256	if (!color_white(object)) {
1257		/* non-orphan, ignored or new */
1258		return;
1259	}
1260
1261	/*
1262	 * Increase the object's reference count (number of pointers to the
1263	 * memory block). If this count reaches the required minimum, the
1264	 * object's color will become gray and it will be added to the
1265	 * gray_list.
1266	 */
1267	object->count++;
1268	if (color_gray(object)) {
1269		/* put_object() called when removing from gray_list */
1270		WARN_ON(!get_object(object));
1271		list_add_tail(&object->gray_list, &gray_list);
1272	}
1273}
1274
1275/*
1276 * Memory scanning is a long process and it needs to be interruptible. This
1277 * function checks whether such interrupt condition occurred.
1278 */
1279static int scan_should_stop(void)
1280{
1281	if (!kmemleak_enabled)
1282		return 1;
1283
1284	/*
1285	 * This function may be called from either process or kthread context,
1286	 * hence the need to check for both stop conditions.
1287	 */
1288	if (current->mm)
1289		return signal_pending(current);
1290	else
1291		return kthread_should_stop();
1292
1293	return 0;
1294}
1295
1296/*
1297 * Scan a memory block (exclusive range) for valid pointers and add those
1298 * found to the gray list.
1299 */
1300static void scan_block(void *_start, void *_end,
1301		       struct kmemleak_object *scanned)
1302{
1303	unsigned long *ptr;
1304	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1305	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1306	unsigned long flags;
1307	unsigned long untagged_ptr;
1308
1309	raw_spin_lock_irqsave(&kmemleak_lock, flags);
1310	for (ptr = start; ptr < end; ptr++) {
1311		struct kmemleak_object *object;
1312		unsigned long pointer;
1313		unsigned long excess_ref;
1314
1315		if (scan_should_stop())
1316			break;
1317
1318		kasan_disable_current();
1319		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
1320		kasan_enable_current();
1321
1322		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1323		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1324			continue;
1325
1326		/*
1327		 * No need for get_object() here since we hold kmemleak_lock.
1328		 * object->use_count cannot be dropped to 0 while the object
1329		 * is still present in object_tree_root and object_list
1330		 * (with updates protected by kmemleak_lock).
1331		 */
1332		object = lookup_object(pointer, 1);
1333		if (!object)
1334			continue;
1335		if (object == scanned)
1336			/* self referenced, ignore */
1337			continue;
1338
1339		/*
1340		 * Avoid the lockdep recursive warning on object->lock being
1341		 * previously acquired in scan_object(). These locks are
1342		 * enclosed by scan_mutex.
1343		 */
1344		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1345		/* only pass surplus references (object already gray) */
1346		if (color_gray(object)) {
1347			excess_ref = object->excess_ref;
1348			/* no need for update_refs() if object already gray */
1349		} else {
1350			excess_ref = 0;
1351			update_refs(object);
1352		}
1353		raw_spin_unlock(&object->lock);
1354
1355		if (excess_ref) {
1356			object = lookup_object(excess_ref, 0);
1357			if (!object)
1358				continue;
1359			if (object == scanned)
1360				/* circular reference, ignore */
1361				continue;
1362			raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1363			update_refs(object);
1364			raw_spin_unlock(&object->lock);
1365		}
1366	}
1367	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1368}
1369
1370/*
1371 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1372 */
1373#ifdef CONFIG_SMP
1374static void scan_large_block(void *start, void *end)
1375{
1376	void *next;
1377
1378	while (start < end) {
1379		next = min(start + MAX_SCAN_SIZE, end);
1380		scan_block(start, next, NULL);
1381		start = next;
1382		cond_resched();
1383	}
1384}
1385#endif
1386
1387/*
1388 * Scan a memory block corresponding to a kmemleak_object. A condition is
1389 * that object->use_count >= 1.
1390 */
1391static void scan_object(struct kmemleak_object *object)
1392{
1393	struct kmemleak_scan_area *area;
1394	unsigned long flags;
1395	void *obj_ptr;
1396
1397	/*
1398	 * Once the object->lock is acquired, the corresponding memory block
1399	 * cannot be freed (the same lock is acquired in delete_object).
1400	 */
1401	raw_spin_lock_irqsave(&object->lock, flags);
1402	if (object->flags & OBJECT_NO_SCAN)
1403		goto out;
1404	if (!(object->flags & OBJECT_ALLOCATED))
1405		/* already freed object */
1406		goto out;
1407
1408	obj_ptr = object->flags & OBJECT_PHYS ?
1409		  __va((phys_addr_t)object->pointer) :
1410		  (void *)object->pointer;
1411
1412	if (hlist_empty(&object->area_list) ||
1413	    object->flags & OBJECT_FULL_SCAN) {
1414		void *start = obj_ptr;
1415		void *end = obj_ptr + object->size;
1416		void *next;
1417
1418		do {
1419			next = min(start + MAX_SCAN_SIZE, end);
1420			scan_block(start, next, object);
1421
1422			start = next;
1423			if (start >= end)
1424				break;
1425
1426			raw_spin_unlock_irqrestore(&object->lock, flags);
1427			cond_resched();
1428			raw_spin_lock_irqsave(&object->lock, flags);
1429		} while (object->flags & OBJECT_ALLOCATED);
1430	} else
1431		hlist_for_each_entry(area, &object->area_list, node)
1432			scan_block((void *)area->start,
1433				   (void *)(area->start + area->size),
1434				   object);
1435out:
1436	raw_spin_unlock_irqrestore(&object->lock, flags);
1437}
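     /*
      * Note that the loop above drops object->lock and calls cond_resched()
      * between MAX_SCAN_SIZE chunks; OBJECT_ALLOCATED is re-checked because
      * the memory block may have been freed while the lock was released.
      */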
1438
1439/*
1440 * Scan the objects already referenced (gray objects). More objects will be
1441 * referenced and, if there are no memory leaks, all the objects are scanned.
1442 */
1443static void scan_gray_list(void)
1444{
1445	struct kmemleak_object *object, *tmp;
1446
1447	/*
1448	 * The list traversal is safe for both tail additions and removals
1449	 * from inside the loop. The kmemleak objects cannot be freed from
1450	 * outside the loop because their use_count was incremented.
1451	 */
1452	object = list_entry(gray_list.next, typeof(*object), gray_list);
1453	while (&object->gray_list != &gray_list) {
1454		cond_resched();
1455
1456		/* may add new objects to the list */
1457		if (!scan_should_stop())
1458			scan_object(object);
1459
1460		tmp = list_entry(object->gray_list.next, typeof(*object),
1461				 gray_list);
1462
1463		/* remove the object from the list and release it */
1464		list_del(&object->gray_list);
1465		put_object(object);
1466
1467		object = tmp;
1468	}
1469	WARN_ON(!list_empty(&gray_list));
1470}
1471
1472/*
 1473 * Conditionally call cond_resched() in an object iteration loop while making
 1474 * sure that the given object won't go away while the RCU read lock is dropped,
 1475 * by performing a get_object() if !pinned.
1476 *
1477 * Return: false if can't do a cond_resched() due to get_object() failure
1478 *	   true otherwise
1479 */
1480static bool kmemleak_cond_resched(struct kmemleak_object *object, bool pinned)
1481{
1482	if (!pinned && !get_object(object))
1483		return false;
1484
1485	rcu_read_unlock();
1486	cond_resched();
1487	rcu_read_lock();
1488	if (!pinned)
1489		put_object(object);
1490	return true;
1491}
1492
1493/*
1494 * Scan data sections and all the referenced memory blocks allocated via the
1495 * kernel's standard allocators. This function must be called with the
1496 * scan_mutex held.
1497 */
1498static void kmemleak_scan(void)
1499{
1500	struct kmemleak_object *object;
1501	struct zone *zone;
1502	int __maybe_unused i;
1503	int new_leaks = 0;
1504	int loop_cnt = 0;
1505
1506	jiffies_last_scan = jiffies;
1507
 1508	/* prepare the kmemleak_objects */
1509	rcu_read_lock();
1510	list_for_each_entry_rcu(object, &object_list, object_list) {
1511		bool obj_pinned = false;
1512
1513		raw_spin_lock_irq(&object->lock);
1514#ifdef DEBUG
1515		/*
1516		 * With a few exceptions there should be a maximum of
1517		 * 1 reference to any object at this point.
1518		 */
1519		if (atomic_read(&object->use_count) > 1) {
1520			pr_debug("object->use_count = %d\n",
1521				 atomic_read(&object->use_count));
1522			dump_object_info(object);
1523		}
1524#endif
1525
1526		/* ignore objects outside lowmem (paint them black) */
1527		if ((object->flags & OBJECT_PHYS) &&
1528		   !(object->flags & OBJECT_NO_SCAN)) {
1529			unsigned long phys = object->pointer;
1530
1531			if (PHYS_PFN(phys) < min_low_pfn ||
1532			    PHYS_PFN(phys + object->size) >= max_low_pfn)
1533				__paint_it(object, KMEMLEAK_BLACK);
1534		}
1535
1536		/* reset the reference count (whiten the object) */
1537		object->count = 0;
1538		if (color_gray(object) && get_object(object)) {
1539			list_add_tail(&object->gray_list, &gray_list);
1540			obj_pinned = true;
1541		}
1542
1543		raw_spin_unlock_irq(&object->lock);
1544
1545		/*
1546		 * Do a cond_resched() every 64k objects to avoid soft lockup.
1547		 */
1548		if (!(++loop_cnt & 0xffff) &&
1549		    !kmemleak_cond_resched(object, obj_pinned))
1550			loop_cnt--; /* Try again on next object */
1551	}
1552	rcu_read_unlock();
1553
1554#ifdef CONFIG_SMP
1555	/* per-cpu sections scanning */
1556	for_each_possible_cpu(i)
1557		scan_large_block(__per_cpu_start + per_cpu_offset(i),
1558				 __per_cpu_end + per_cpu_offset(i));
1559#endif
1560
1561	/*
1562	 * Struct page scanning for each node.
1563	 */
1564	get_online_mems();
1565	for_each_populated_zone(zone) {
1566		unsigned long start_pfn = zone->zone_start_pfn;
1567		unsigned long end_pfn = zone_end_pfn(zone);
1568		unsigned long pfn;
1569
1570		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1571			struct page *page = pfn_to_online_page(pfn);
1572
1573			if (!page)
1574				continue;
1575
1576			/* only scan pages belonging to this zone */
1577			if (page_zone(page) != zone)
1578				continue;
1579			/* only scan if page is in use */
1580			if (page_count(page) == 0)
1581				continue;
1582			scan_block(page, page + 1, NULL);
1583			if (!(pfn & 63))
1584				cond_resched();
1585		}
1586	}
1587	put_online_mems();
1588
1589	/*
1590	 * Scanning the task stacks (may introduce false negatives).
1591	 */
1592	if (kmemleak_stack_scan) {
1593		struct task_struct *p, *g;
1594
1595		rcu_read_lock();
1596		for_each_process_thread(g, p) {
1597			void *stack = try_get_task_stack(p);
1598			if (stack) {
1599				scan_block(stack, stack + THREAD_SIZE, NULL);
1600				put_task_stack(p);
1601			}
1602		}
1603		rcu_read_unlock();
1604	}
1605
1606	/*
1607	 * Scan the objects already referenced from the sections scanned
1608	 * above.
1609	 */
1610	scan_gray_list();
1611
1612	/*
1613	 * Check for new or unreferenced objects modified since the previous
1614	 * scan and color them gray until the next scan.
1615	 */
1616	rcu_read_lock();
1617	loop_cnt = 0;
1618	list_for_each_entry_rcu(object, &object_list, object_list) {
1619		/*
1620		 * Do a cond_resched() every 64k objects to avoid soft lockup.
1621		 */
1622		if (!(++loop_cnt & 0xffff) &&
1623		    !kmemleak_cond_resched(object, false))
1624			loop_cnt--;	/* Try again on next object */
1625
1626		/*
1627		 * This is racy but we can save the overhead of lock/unlock
1628		 * calls. The missed objects, if any, should be caught in
1629		 * the next scan.
1630		 */
1631		if (!color_white(object))
1632			continue;
1633		raw_spin_lock_irq(&object->lock);
1634		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1635		    && update_checksum(object) && get_object(object)) {
1636			/* color it gray temporarily */
1637			object->count = object->min_count;
1638			list_add_tail(&object->gray_list, &gray_list);
1639		}
1640		raw_spin_unlock_irq(&object->lock);
1641	}
1642	rcu_read_unlock();
1643
1644	/*
1645	 * Re-scan the gray list for modified unreferenced objects.
1646	 */
1647	scan_gray_list();
1648
1649	/*
1650	 * If scanning was stopped do not report any new unreferenced objects.
1651	 */
1652	if (scan_should_stop())
1653		return;
1654
1655	/*
1656	 * Scanning result reporting.
1657	 */
1658	rcu_read_lock();
1659	loop_cnt = 0;
1660	list_for_each_entry_rcu(object, &object_list, object_list) {
1661		/*
1662		 * Do a cond_resched() every 64k objects to avoid soft lockup.
1663		 */
1664		if (!(++loop_cnt & 0xffff) &&
1665		    !kmemleak_cond_resched(object, false))
1666			loop_cnt--;	/* Try again on next object */
1667
1668		/*
1669		 * This is racy but we can save the overhead of lock/unlock
1670		 * calls. The missed objects, if any, should be caught in
1671		 * the next scan.
1672		 */
1673		if (!color_white(object))
1674			continue;
1675		raw_spin_lock_irq(&object->lock);
1676		if (unreferenced_object(object) &&
1677		    !(object->flags & OBJECT_REPORTED)) {
1678			object->flags |= OBJECT_REPORTED;
1679
1680			if (kmemleak_verbose)
1681				print_unreferenced(NULL, object);
1682
1683			new_leaks++;
1684		}
1685		raw_spin_unlock_irq(&object->lock);
1686	}
1687	rcu_read_unlock();
1688
1689	if (new_leaks) {
1690		kmemleak_found_leaks = true;
1691
1692		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1693			new_leaks);
1694	}
1695
1696}
1697
1698/*
1699 * Thread function performing automatic memory scanning. Unreferenced
1700 * objects found at the end of a memory scan are reported, but only once.
1701 */
1702static int kmemleak_scan_thread(void *arg)
1703{
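	/*
	 * The SECS_FIRST_SCAN delay below is applied at most once, and only
	 * when the kernel was built with CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN=y.
	 */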
1704	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1705
1706	pr_info("Automatic memory scanning thread started\n");
1707	set_user_nice(current, 10);
1708
1709	/*
1710	 * Wait before the first scan to allow the system to fully initialize.
1711	 */
1712	if (first_run) {
1713		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1714		first_run = 0;
1715		while (timeout && !kthread_should_stop())
1716			timeout = schedule_timeout_interruptible(timeout);
1717	}
1718
1719	while (!kthread_should_stop()) {
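		/* jiffies_scan_wait may be updated at any time via "scan=<secs>" */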
1720		signed long timeout = READ_ONCE(jiffies_scan_wait);
1721
1722		mutex_lock(&scan_mutex);
1723		kmemleak_scan();
1724		mutex_unlock(&scan_mutex);
1725
1726		/* wait before the next scan */
1727		while (timeout && !kthread_should_stop())
1728			timeout = schedule_timeout_interruptible(timeout);
1729	}
1730
1731	pr_info("Automatic memory scanning thread ended\n");
1732
1733	return 0;
1734}
1735
1736/*
1737 * Start the automatic memory scanning thread. This function must be called
1738 * with the scan_mutex held.
1739 */
1740static void start_scan_thread(void)
1741{
1742	if (scan_thread)
1743		return;
1744	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1745	if (IS_ERR(scan_thread)) {
1746		pr_warn("Failed to create the scan thread\n");
1747		scan_thread = NULL;
1748	}
1749}
1750
1751/*
1752 * Stop the automatic memory scanning thread.
1753 */
1754static void stop_scan_thread(void)
1755{
1756	if (scan_thread) {
1757		kthread_stop(scan_thread);
1758		scan_thread = NULL;
1759	}
1760}
1761
1762/*
1763 * Iterate over the object_list and return the first valid object at or after
1764 * the required position with its use_count incremented. The use_count is
1765 * dropped later in kmemleak_seq_next() or kmemleak_seq_stop().
1766 */
1767static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1768{
1769	struct kmemleak_object *object;
1770	loff_t n = *pos;
1771	int err;
1772
1773	err = mutex_lock_interruptible(&scan_mutex);
1774	if (err < 0)
1775		return ERR_PTR(err);
1776
1777	rcu_read_lock();
1778	list_for_each_entry_rcu(object, &object_list, object_list) {
1779		if (n-- > 0)
1780			continue;
1781		if (get_object(object))
1782			goto out;
1783	}
1784	object = NULL;
1785out:
1786	return object;
1787}
1788
1789/*
1790 * Return the next object in the object_list. The function decrements the
1791 * use_count of the previous object and increases that of the next one.
1792 */
1793static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1794{
1795	struct kmemleak_object *prev_obj = v;
1796	struct kmemleak_object *next_obj = NULL;
1797	struct kmemleak_object *obj = prev_obj;
1798
1799	++(*pos);
1800
1801	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1802		if (get_object(obj)) {
1803			next_obj = obj;
1804			break;
1805		}
1806	}
1807
1808	put_object(prev_obj);
1809	return next_obj;
1810}
1811
1812/*
1813 * Decrement the use_count of the last object returned, if any.
1814 */
1815static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1816{
1817	if (!IS_ERR(v)) {
1818		/*
1819		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1820		 * waiting was interrupted, so only release it if !IS_ERR.
1821		 */
1822		rcu_read_unlock();
1823		mutex_unlock(&scan_mutex);
1824		if (v)
1825			put_object(v);
1826	}
1827}
1828
1829/*
1830 * Print the information for an unreferenced object to the seq file.
1831 */
1832static int kmemleak_seq_show(struct seq_file *seq, void *v)
1833{
1834	struct kmemleak_object *object = v;
1835	unsigned long flags;
1836
1837	raw_spin_lock_irqsave(&object->lock, flags);
1838	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1839		print_unreferenced(seq, object);
1840	raw_spin_unlock_irqrestore(&object->lock, flags);
1841	return 0;
1842}
1843
1844static const struct seq_operations kmemleak_seq_ops = {
1845	.start = kmemleak_seq_start,
1846	.next  = kmemleak_seq_next,
1847	.stop  = kmemleak_seq_stop,
1848	.show  = kmemleak_seq_show,
1849};
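/*
 * Reading the /sys/kernel/debug/kmemleak file walks the object_list via
 * the seq operations above and prints the leaks reported so far.
 */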
1850
1851static int kmemleak_open(struct inode *inode, struct file *file)
1852{
1853	return seq_open(file, &kmemleak_seq_ops);
1854}
1855
1856static int dump_str_object_info(const char *str)
1857{
1858	unsigned long flags;
1859	struct kmemleak_object *object;
1860	unsigned long addr;
1861
1862	if (kstrtoul(str, 0, &addr))
1863		return -EINVAL;
1864	object = find_and_get_object(addr, 0);
1865	if (!object) {
1866		pr_info("Unknown object at 0x%08lx\n", addr);
1867		return -EINVAL;
1868	}
1869
1870	raw_spin_lock_irqsave(&object->lock, flags);
1871	dump_object_info(object);
1872	raw_spin_unlock_irqrestore(&object->lock, flags);
1873
1874	put_object(object);
1875	return 0;
1876}
1877
1878/*
1879 * We use grey instead of black to ensure we can do future scans on the same
1880 * objects. If we did not do future scans, these black objects could
1881 * potentially contain references to newly allocated objects in the future and
1882 * we'd end up with false positives.
1883 */
1884static void kmemleak_clear(void)
1885{
1886	struct kmemleak_object *object;
1887
1888	rcu_read_lock();
1889	list_for_each_entry_rcu(object, &object_list, object_list) {
1890		raw_spin_lock_irq(&object->lock);
1891		if ((object->flags & OBJECT_REPORTED) &&
1892		    unreferenced_object(object))
1893			__paint_it(object, KMEMLEAK_GREY);
1894		raw_spin_unlock_irq(&object->lock);
1895	}
1896	rcu_read_unlock();
1897
1898	kmemleak_found_leaks = false;
1899}
1900
1901static void __kmemleak_do_cleanup(void);
1902
1903/*
1904 * File write operation to configure kmemleak at run-time. The following
1905 * commands can be written to the /sys/kernel/debug/kmemleak file:
1906 *   off	- disable kmemleak (irreversible)
1907 *   stack=on	- enable the task stacks scanning
1908 *   stack=off	- disable the task stacks scanning
1909 *   scan=on	- start the automatic memory scanning thread
1910 *   scan=off	- stop the automatic memory scanning thread
1911 *   scan=...	- set the automatic memory scanning period in seconds (0 to
1912 *		  disable it)
1913 *   scan	- trigger a memory scan
1914 *   clear	- mark all currently reported unreferenced kmemleak objects as
1915 *		  grey so they are not printed again, or free all kmemleak objects
1916 *		  if kmemleak has been disabled.
1917 *   dump=...	- dump information about the object found at the given address
1918 */
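/*
 * Illustrative shell usage, assuming debugfs is mounted at
 * /sys/kernel/debug (the "dump=" address is a made-up example; pass an
 * address previously reported by kmemleak):
 *
 *   echo scan > /sys/kernel/debug/kmemleak
 *   echo scan=600 > /sys/kernel/debug/kmemleak
 *   echo dump=0xffff888012345678 > /sys/kernel/debug/kmemleak
 *   echo clear > /sys/kernel/debug/kmemleak
 */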
1919static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1920			      size_t size, loff_t *ppos)
1921{
1922	char buf[64];
1923	int buf_size;
1924	int ret;
1925
1926	buf_size = min(size, (sizeof(buf) - 1));
1927	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1928		return -EFAULT;
1929	buf[buf_size] = 0;
1930
1931	ret = mutex_lock_interruptible(&scan_mutex);
1932	if (ret < 0)
1933		return ret;
1934
1935	if (strncmp(buf, "clear", 5) == 0) {
1936		if (kmemleak_enabled)
1937			kmemleak_clear();
1938		else
1939			__kmemleak_do_cleanup();
1940		goto out;
1941	}
1942
1943	if (!kmemleak_enabled) {
1944		ret = -EPERM;
1945		goto out;
1946	}
1947
1948	if (strncmp(buf, "off", 3) == 0)
1949		kmemleak_disable();
1950	else if (strncmp(buf, "stack=on", 8) == 0)
1951		kmemleak_stack_scan = 1;
1952	else if (strncmp(buf, "stack=off", 9) == 0)
1953		kmemleak_stack_scan = 0;
1954	else if (strncmp(buf, "scan=on", 7) == 0)
1955		start_scan_thread();
1956	else if (strncmp(buf, "scan=off", 8) == 0)
1957		stop_scan_thread();
1958	else if (strncmp(buf, "scan=", 5) == 0) {
1959		unsigned secs;
1960		unsigned long msecs;
1961
1962		ret = kstrtouint(buf + 5, 0, &secs);
1963		if (ret < 0)
1964			goto out;
1965
1966		msecs = secs * MSEC_PER_SEC;
1967		if (msecs > UINT_MAX)
1968			msecs = UINT_MAX;
1969
1970		stop_scan_thread();
1971		if (msecs) {
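			/* paired with the READ_ONCE() in kmemleak_scan_thread() */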
1972			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
1973			start_scan_thread();
1974		}
1975	} else if (strncmp(buf, "scan", 4) == 0)
1976		kmemleak_scan();
1977	else if (strncmp(buf, "dump=", 5) == 0)
1978		ret = dump_str_object_info(buf + 5);
1979	else
1980		ret = -EINVAL;
1981
1982out:
1983	mutex_unlock(&scan_mutex);
1984	if (ret < 0)
1985		return ret;
1986
1987	/* ignore the rest of the buffer, only one command at a time */
1988	*ppos += size;
1989	return size;
1990}
1991
1992static const struct file_operations kmemleak_fops = {
1993	.owner		= THIS_MODULE,
1994	.open		= kmemleak_open,
1995	.read		= seq_read,
1996	.write		= kmemleak_write,
1997	.llseek		= seq_lseek,
1998	.release	= seq_release,
1999};
2000
2001static void __kmemleak_do_cleanup(void)
2002{
2003	struct kmemleak_object *object, *tmp;
2004
2005	/*
2006	 * Kmemleak has already been disabled, so there is no need for RCU list
2007	 * traversal or for holding kmemleak_lock.
2008	 */
2009	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
2010		__remove_object(object);
2011		__delete_object(object);
2012	}
2013}
2014
2015/*
2016 * Stop the memory scanning thread and free the kmemleak internal objects
2017 * if no memory leaks were found (otherwise, kmemleak may still hold useful
2018 * information about the leaks).
2019 */
2020static void kmemleak_do_cleanup(struct work_struct *work)
2021{
2022	stop_scan_thread();
2023
2024	mutex_lock(&scan_mutex);
2025	/*
2026	 * Once the scan thread is guaranteed to have stopped, it is safe to
2027	 * stop tracking object freeing. Ordering between the scan thread
2028	 * stopping and the memory accesses below is provided by the
2029	 * kthread_stop() function.
2030	 */
2031	kmemleak_free_enabled = 0;
2032	mutex_unlock(&scan_mutex);
2033
2034	if (!kmemleak_found_leaks)
2035		__kmemleak_do_cleanup();
2036	else
2037		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
2038}
2039
2040static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
2041
2042/*
2043 * Disable kmemleak. No memory allocation/freeing will be traced once this
2044 * function is called. Disabling kmemleak is an irreversible operation.
2045 */
2046static void kmemleak_disable(void)
2047{
2048	/* atomically check whether it was already invoked */
2049	if (cmpxchg(&kmemleak_error, 0, 1))
2050		return;
2051
2052	/* stop any memory operation tracing */
2053	kmemleak_enabled = 0;
2054
2055	/* check whether it is too early for a kernel thread */
2056	if (kmemleak_initialized)
2057		schedule_work(&cleanup_work);
2058	else
2059		kmemleak_free_enabled = 0;
2060
2061	pr_info("Kernel memory leak detector disabled\n");
2062}
2063
2064/*
2065 * Allow boot-time kmemleak disabling (enabled by default).
2066 */
2067static int __init kmemleak_boot_config(char *str)
2068{
2069	if (!str)
2070		return -EINVAL;
2071	if (strcmp(str, "off") == 0)
2072		kmemleak_disable();
2073	else if (strcmp(str, "on") == 0) {
2074		kmemleak_skip_disable = 1;
2075		stack_depot_want_early_init();
2076	}
2077	else
2078		return -EINVAL;
2079	return 0;
2080}
2081early_param("kmemleak", kmemleak_boot_config);
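/*
 * Illustrative kernel command-line settings:
 *   kmemleak=off	- boot with kmemleak disabled
 *   kmemleak=on	- keep kmemleak enabled even when the kernel is built
 *			  with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
 */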
2082
2083/*
2084 * Kmemleak initialization.
2085 */
2086void __init kmemleak_init(void)
2087{
2088#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2089	if (!kmemleak_skip_disable) {
2090		kmemleak_disable();
2091		return;
2092	}
2093#endif
2094
2095	if (kmemleak_error)
2096		return;
2097
2098	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2099	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
2100
2101	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2102	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2103
2104	/* register the data/bss sections */
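	/* grey objects are scanned for references but never reported as leaks */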
2105	create_object((unsigned long)_sdata, _edata - _sdata,
2106		      KMEMLEAK_GREY, GFP_ATOMIC);
2107	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
2108		      KMEMLEAK_GREY, GFP_ATOMIC);
2109	/* only register .data..ro_after_init if not within .data */
2110	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
2111		create_object((unsigned long)__start_ro_after_init,
2112			      __end_ro_after_init - __start_ro_after_init,
2113			      KMEMLEAK_GREY, GFP_ATOMIC);
2114}
2115
2116/*
2117 * Late initialization function.
2118 */
2119static int __init kmemleak_late_init(void)
2120{
2121	kmemleak_initialized = 1;
2122
2123	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
2124
2125	if (kmemleak_error) {
2126		/*
2127		 * Some error occurred and kmemleak was disabled. There is a
2128		 * small chance that kmemleak_disable() was called immediately
2129		 * after setting kmemleak_initialized and we may end up with
2130		 * two clean-up threads, though serialized by scan_mutex.
2131		 */
2132		schedule_work(&cleanup_work);
2133		return -ENOMEM;
2134	}
2135
2136	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2137		mutex_lock(&scan_mutex);
2138		start_scan_thread();
2139		mutex_unlock(&scan_mutex);
2140	}
2141
2142	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
2143		mem_pool_free_count);
2144
2145	return 0;
2146}
2147late_initcall(kmemleak_late_init);
v3.5.6
 
   1/*
   2 * mm/kmemleak.c
   3 *
   4 * Copyright (C) 2008 ARM Limited
   5 * Written by Catalin Marinas <catalin.marinas@arm.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19 *
  20 *
  21 * For more information on the algorithm and kmemleak usage, please see
  22 * Documentation/kmemleak.txt.
  23 *
  24 * Notes on locking
  25 * ----------------
  26 *
  27 * The following locks and mutexes are used by kmemleak:
  28 *
  29 * - kmemleak_lock (rwlock): protects the object_list modifications and
  30 *   accesses to the object_tree_root. The object_list is the main list
  31 *   holding the metadata (struct kmemleak_object) for the allocated memory
  32 *   blocks. The object_tree_root is a priority search tree used to look-up
  33 *   metadata based on a pointer to the corresponding memory block.  The
  34 *   kmemleak_object structures are added to the object_list and
  35 *   object_tree_root in the create_object() function called from the
  36 *   kmemleak_alloc() callback and removed in delete_object() called from the
  37 *   kmemleak_free() callback
  38 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
  39 *   the metadata (e.g. count) are protected by this lock. Note that some
  40 *   members of this structure may be protected by other means (atomic or
  41 *   kmemleak_lock). This lock is also held when scanning the corresponding
  42 *   memory block to avoid the kernel freeing it via the kmemleak_free()
  43 *   callback. This is less heavyweight than holding a global lock like
  44 *   kmemleak_lock during scanning
 
 
  45 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
  46 *   unreferenced objects at a time. The gray_list contains the objects which
  47 *   are already referenced or marked as false positives and need to be
  48 *   scanned. This list is only modified during a scanning episode when the
  49 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
  50 *   Note that the kmemleak_object.use_count is incremented when an object is
  51 *   added to the gray_list and therefore cannot be freed. This mutex also
  52 *   prevents multiple users of the "kmemleak" debugfs file together with
  53 *   modifications to the memory scanning parameters including the scan_thread
  54 *   pointer
  55 *
 
 
 
 
 
 
 
  56 * The kmemleak_object structures have a use_count incremented or decremented
  57 * using the get_object()/put_object() functions. When the use_count becomes
  58 * 0, this count can no longer be incremented and put_object() schedules the
  59 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
  60 * function must be protected by rcu_read_lock() to avoid accessing a freed
  61 * structure.
  62 */
  63
  64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  65
  66#include <linux/init.h>
  67#include <linux/kernel.h>
  68#include <linux/list.h>
  69#include <linux/sched.h>
 
 
  70#include <linux/jiffies.h>
  71#include <linux/delay.h>
  72#include <linux/export.h>
  73#include <linux/kthread.h>
  74#include <linux/prio_tree.h>
  75#include <linux/fs.h>
  76#include <linux/debugfs.h>
  77#include <linux/seq_file.h>
  78#include <linux/cpumask.h>
  79#include <linux/spinlock.h>
 
  80#include <linux/mutex.h>
  81#include <linux/rcupdate.h>
  82#include <linux/stacktrace.h>
 
  83#include <linux/cache.h>
  84#include <linux/percpu.h>
  85#include <linux/hardirq.h>
 
  86#include <linux/mmzone.h>
  87#include <linux/slab.h>
  88#include <linux/thread_info.h>
  89#include <linux/err.h>
  90#include <linux/uaccess.h>
  91#include <linux/string.h>
  92#include <linux/nodemask.h>
  93#include <linux/mm.h>
  94#include <linux/workqueue.h>
  95#include <linux/crc32.h>
  96
  97#include <asm/sections.h>
  98#include <asm/processor.h>
  99#include <linux/atomic.h>
 100
 101#include <linux/kmemcheck.h>
 
 102#include <linux/kmemleak.h>
 103#include <linux/memory_hotplug.h>
 104
 105/*
 106 * Kmemleak configuration and common defines.
 107 */
 108#define MAX_TRACE		16	/* stack trace length */
 109#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 110#define SECS_FIRST_SCAN		60	/* delay before the first scan */
 111#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
 112#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
 113
 114#define BYTES_PER_POINTER	sizeof(void *)
 115
 116/* GFP bitmask for kmemleak internal allocations */
 117#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 
 118				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
 119				 __GFP_NOWARN)
 120
 121/* scanning area inside a memory block */
 122struct kmemleak_scan_area {
 123	struct hlist_node node;
 124	unsigned long start;
 125	size_t size;
 126};
 127
 128#define KMEMLEAK_GREY	0
 129#define KMEMLEAK_BLACK	-1
 130
 131/*
 132 * Structure holding the metadata for each allocated memory block.
 133 * Modifications to such objects should be made while holding the
 134 * object->lock. Insertions or deletions from object_list, gray_list or
 135 * tree_node are already protected by the corresponding locks or mutex (see
 136 * the notes on locking above). These objects are reference-counted
 137 * (use_count) and freed using the RCU mechanism.
 138 */
 139struct kmemleak_object {
 140	spinlock_t lock;
 141	unsigned long flags;		/* object status flags */
 142	struct list_head object_list;
 143	struct list_head gray_list;
 144	struct prio_tree_node tree_node;
 145	struct rcu_head rcu;		/* object_list lockless traversal */
 146	/* object usage count; object freed when use_count == 0 */
 147	atomic_t use_count;
 148	unsigned long pointer;
 149	size_t size;
 
 
 150	/* minimum number of a pointers found before it is considered leak */
 151	int min_count;
 152	/* the total number of pointers found pointing to this object */
 153	int count;
 154	/* checksum for detecting modified objects */
 155	u32 checksum;
 156	/* memory ranges to be scanned inside an object (empty for all) */
 157	struct hlist_head area_list;
 158	unsigned long trace[MAX_TRACE];
 159	unsigned int trace_len;
 160	unsigned long jiffies;		/* creation timestamp */
 161	pid_t pid;			/* pid of the current task */
 162	char comm[TASK_COMM_LEN];	/* executable name */
 163};
 164
 165/* flag representing the memory block allocation status */
 166#define OBJECT_ALLOCATED	(1 << 0)
 167/* flag set after the first reporting of an unreference object */
 168#define OBJECT_REPORTED		(1 << 1)
 169/* flag set to not scan the object */
 170#define OBJECT_NO_SCAN		(1 << 2)
 
 
 
 
 171
 
 172/* number of bytes to print per line; must be 16 or 32 */
 173#define HEX_ROW_SIZE		16
 174/* number of bytes to print at a time (1, 2, 4, 8) */
 175#define HEX_GROUP_SIZE		1
 176/* include ASCII after the hex output */
 177#define HEX_ASCII		1
 178/* max number of lines to be printed */
 179#define HEX_MAX_LINES		2
 180
 181/* the list of all allocated objects */
 182static LIST_HEAD(object_list);
 183/* the list of gray-colored objects (see color_gray comment below) */
 184static LIST_HEAD(gray_list);
 185/* prio search tree for object boundaries */
 186static struct prio_tree_root object_tree_root;
 187/* rw_lock protecting the access to object_list and prio_tree_root */
 188static DEFINE_RWLOCK(kmemleak_lock);
 
 
 
 
 
 
 189
 190/* allocation caches for kmemleak internal data */
 191static struct kmem_cache *object_cache;
 192static struct kmem_cache *scan_area_cache;
 193
 194/* set if tracing memory operations is enabled */
 195static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
 
 
 196/* set in the late_initcall if there were no errors */
 197static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
 198/* enables or disables early logging of the memory operations */
 199static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
 200/* set if a kmemleak warning was issued */
 201static atomic_t kmemleak_warning = ATOMIC_INIT(0);
 202/* set if a fatal kmemleak error has occurred */
 203static atomic_t kmemleak_error = ATOMIC_INIT(0);
 204
 205/* minimum and maximum address that may be valid pointers */
 206static unsigned long min_addr = ULONG_MAX;
 207static unsigned long max_addr;
 208
 209static struct task_struct *scan_thread;
 210/* used to avoid reporting of recently allocated objects */
 211static unsigned long jiffies_min_age;
 212static unsigned long jiffies_last_scan;
 213/* delay between automatic memory scannings */
 214static signed long jiffies_scan_wait;
 215/* enables or disables the task stacks scanning */
 216static int kmemleak_stack_scan = 1;
 217/* protects the memory scanning, parameters and debug/kmemleak file access */
 218static DEFINE_MUTEX(scan_mutex);
 219/* setting kmemleak=on, will set this var, skipping the disable */
 220static int kmemleak_skip_disable;
 
 
 221
 222
 223/*
 224 * Early object allocation/freeing logging. Kmemleak is initialized after the
 225 * kernel allocator. However, both the kernel allocator and kmemleak may
 226 * allocate memory blocks which need to be tracked. Kmemleak defines an
 227 * arbitrary buffer to hold the allocation/freeing information before it is
 228 * fully initialized.
 229 */
 230
 231/* kmemleak operation type for early logging */
 232enum {
 233	KMEMLEAK_ALLOC,
 234	KMEMLEAK_ALLOC_PERCPU,
 235	KMEMLEAK_FREE,
 236	KMEMLEAK_FREE_PART,
 237	KMEMLEAK_FREE_PERCPU,
 238	KMEMLEAK_NOT_LEAK,
 239	KMEMLEAK_IGNORE,
 240	KMEMLEAK_SCAN_AREA,
 241	KMEMLEAK_NO_SCAN
 242};
 243
 244/*
 245 * Structure holding the information passed to kmemleak callbacks during the
 246 * early logging.
 247 */
 248struct early_log {
 249	int op_type;			/* kmemleak operation type */
 250	const void *ptr;		/* allocated/freed memory block */
 251	size_t size;			/* memory block size */
 252	int min_count;			/* minimum reference count */
 253	unsigned long trace[MAX_TRACE];	/* stack trace */
 254	unsigned int trace_len;		/* stack trace length */
 255};
 256
 257/* early logging buffer and current position */
 258static struct early_log
 259	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
 260static int crt_early_log __initdata;
 261
 262static void kmemleak_disable(void);
 263
 264/*
 265 * Print a warning and dump the stack trace.
 266 */
 267#define kmemleak_warn(x...)	do {		\
 268	pr_warning(x);				\
 269	dump_stack();				\
 270	atomic_set(&kmemleak_warning, 1);	\
 271} while (0)
 272
 273/*
 274 * Macro invoked when a serious kmemleak condition occurred and cannot be
 275 * recovered from. Kmemleak will be disabled and further allocation/freeing
 276 * tracing no longer available.
 277 */
 278#define kmemleak_stop(x...)	do {	\
 279	kmemleak_warn(x);		\
 280	kmemleak_disable();		\
 281} while (0)
 282
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 283/*
 284 * Printing of the objects hex dump to the seq file. The number of lines to be
 285 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 286 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 287 * with the object->lock held.
 288 */
 289static void hex_dump_object(struct seq_file *seq,
 290			    struct kmemleak_object *object)
 291{
 292	const u8 *ptr = (const u8 *)object->pointer;
 293	int i, len, remaining;
 294	unsigned char linebuf[HEX_ROW_SIZE * 5];
 
 
 295
 296	/* limit the number of lines to HEX_MAX_LINES */
 297	remaining = len =
 298		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
 299
 300	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
 301	for (i = 0; i < len; i += HEX_ROW_SIZE) {
 302		int linelen = min(remaining, HEX_ROW_SIZE);
 303
 304		remaining -= HEX_ROW_SIZE;
 305		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
 306				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
 307				   HEX_ASCII);
 308		seq_printf(seq, "    %s\n", linebuf);
 309	}
 310}
 311
 312/*
 313 * Object colors, encoded with count and min_count:
 314 * - white - orphan object, not enough references to it (count < min_count)
 315 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 316 *		sufficient references to it (count >= min_count)
 317 * - black - ignore, it doesn't contain references (e.g. text section)
 318 *		(min_count == -1). No function defined for this color.
 319 * Newly created objects don't have any color assigned (object->count == -1)
 320 * before the next memory scan when they become white.
 321 */
 322static bool color_white(const struct kmemleak_object *object)
 323{
 324	return object->count != KMEMLEAK_BLACK &&
 325		object->count < object->min_count;
 326}
 327
 328static bool color_gray(const struct kmemleak_object *object)
 329{
 330	return object->min_count != KMEMLEAK_BLACK &&
 331		object->count >= object->min_count;
 332}
 333
 334/*
 335 * Objects are considered unreferenced only if their color is white, they have
 336 * not be deleted and have a minimum age to avoid false positives caused by
 337 * pointers temporarily stored in CPU registers.
 338 */
 339static bool unreferenced_object(struct kmemleak_object *object)
 340{
 341	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
 342		time_before_eq(object->jiffies + jiffies_min_age,
 343			       jiffies_last_scan);
 344}
 345
 346/*
 347 * Printing of the unreferenced objects information to the seq file. The
 348 * print_unreferenced function must be called with the object->lock held.
 349 */
 350static void print_unreferenced(struct seq_file *seq,
 351			       struct kmemleak_object *object)
 352{
 353	int i;
 
 
 354	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 355
 356	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 357		   object->pointer, object->size);
 358	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
 359		   object->comm, object->pid, object->jiffies,
 360		   msecs_age / 1000, msecs_age % 1000);
 
 361	hex_dump_object(seq, object);
 362	seq_printf(seq, "  backtrace:\n");
 363
 364	for (i = 0; i < object->trace_len; i++) {
 365		void *ptr = (void *)object->trace[i];
 366		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
 367	}
 368}
 369
 370/*
 371 * Print the kmemleak_object information. This function is used mainly for
 372 * debugging special cases when kmemleak operations. It must be called with
 373 * the object->lock held.
 374 */
 375static void dump_object_info(struct kmemleak_object *object)
 376{
 377	struct stack_trace trace;
 378
 379	trace.nr_entries = object->trace_len;
 380	trace.entries = object->trace;
 381
 382	pr_notice("Object 0x%08lx (size %zu):\n",
 383		  object->tree_node.start, object->size);
 384	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
 385		  object->comm, object->pid, object->jiffies);
 386	pr_notice("  min_count = %d\n", object->min_count);
 387	pr_notice("  count = %d\n", object->count);
 388	pr_notice("  flags = 0x%lx\n", object->flags);
 389	pr_notice("  checksum = %d\n", object->checksum);
 390	pr_notice("  backtrace:\n");
 391	print_stack_trace(&trace, 4);
 
 392}
 393
 394/*
 395 * Look-up a memory block metadata (kmemleak_object) in the priority search
 396 * tree based on a pointer value. If alias is 0, only values pointing to the
 397 * beginning of the memory block are allowed. The kmemleak_lock must be held
 398 * when calling this function.
 399 */
 400static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 
 401{
 402	struct prio_tree_node *node;
 403	struct prio_tree_iter iter;
 404	struct kmemleak_object *object;
 
 
 
 
 
 
 
 405
 406	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
 407	node = prio_tree_next(&iter);
 408	if (node) {
 409		object = prio_tree_entry(node, struct kmemleak_object,
 410					 tree_node);
 411		if (!alias && object->pointer != ptr) {
 
 412			kmemleak_warn("Found object by alias at 0x%08lx\n",
 413				      ptr);
 414			dump_object_info(object);
 415			object = NULL;
 416		}
 417	} else
 418		object = NULL;
 
 419
 420	return object;
 
 
 
 421}
 422
 423/*
 424 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 425 * that once an object's use_count reached 0, the RCU freeing was already
 426 * registered and the object should no longer be used. This function must be
 427 * called under the protection of rcu_read_lock().
 428 */
 429static int get_object(struct kmemleak_object *object)
 430{
 431	return atomic_inc_not_zero(&object->use_count);
 432}
 433
 434/*
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 435 * RCU callback to free a kmemleak_object.
 436 */
 437static void free_object_rcu(struct rcu_head *rcu)
 438{
 439	struct hlist_node *elem, *tmp;
 440	struct kmemleak_scan_area *area;
 441	struct kmemleak_object *object =
 442		container_of(rcu, struct kmemleak_object, rcu);
 443
 444	/*
 445	 * Once use_count is 0 (guaranteed by put_object), there is no other
 446	 * code accessing this object, hence no need for locking.
 447	 */
 448	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
 449		hlist_del(elem);
 450		kmem_cache_free(scan_area_cache, area);
 451	}
 452	kmem_cache_free(object_cache, object);
 453}
 454
 455/*
 456 * Decrement the object use_count. Once the count is 0, free the object using
 457 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 458 * delete_object() path, the delayed RCU freeing ensures that there is no
 459 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 460 * is also possible.
 461 */
 462static void put_object(struct kmemleak_object *object)
 463{
 464	if (!atomic_dec_and_test(&object->use_count))
 465		return;
 466
 467	/* should only get here after delete_object was called */
 468	WARN_ON(object->flags & OBJECT_ALLOCATED);
 469
 470	call_rcu(&object->rcu, free_object_rcu);
 
 
 
 
 
 
 
 
 471}
 472
 473/*
 474 * Look up an object in the prio search tree and increase its use_count.
 475 */
 476static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 
 477{
 478	unsigned long flags;
 479	struct kmemleak_object *object = NULL;
 480
 481	rcu_read_lock();
 482	read_lock_irqsave(&kmemleak_lock, flags);
 483	if (ptr >= min_addr && ptr < max_addr)
 484		object = lookup_object(ptr, alias);
 485	read_unlock_irqrestore(&kmemleak_lock, flags);
 486
 487	/* check whether the object is still available */
 488	if (object && !get_object(object))
 489		object = NULL;
 490	rcu_read_unlock();
 491
 492	return object;
 493}
 494
 
 
 
 
 
 
 495/*
 496 * Save stack trace to the given array of MAX_TRACE size.
 
 
 497 */
 498static int __save_stack_trace(unsigned long *trace)
 499{
 500	struct stack_trace stack_trace;
 
 
 
 
 501
 502	stack_trace.max_entries = MAX_TRACE;
 503	stack_trace.nr_entries = 0;
 504	stack_trace.entries = trace;
 505	stack_trace.skip = 2;
 506	save_stack_trace(&stack_trace);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 507
 508	return stack_trace.nr_entries;
 509}
 510
 511/*
 512 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 513 * memory block and add it to the object_list and object_tree_root.
 
 514 */
 515static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 516					     int min_count, gfp_t gfp)
 517{
 518	unsigned long flags;
 519	struct kmemleak_object *object;
 520	struct prio_tree_node *node;
 
 
 521
 522	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 523	if (!object) {
 524		pr_warning("Cannot allocate a kmemleak_object structure\n");
 525		kmemleak_disable();
 526		return NULL;
 527	}
 528
 529	INIT_LIST_HEAD(&object->object_list);
 530	INIT_LIST_HEAD(&object->gray_list);
 531	INIT_HLIST_HEAD(&object->area_list);
 532	spin_lock_init(&object->lock);
 533	atomic_set(&object->use_count, 1);
 534	object->flags = OBJECT_ALLOCATED;
 535	object->pointer = ptr;
 536	object->size = size;
 
 537	object->min_count = min_count;
 538	object->count = 0;			/* white color initially */
 539	object->jiffies = jiffies;
 540	object->checksum = 0;
 541
 542	/* task information */
 543	if (in_irq()) {
 544		object->pid = 0;
 545		strncpy(object->comm, "hardirq", sizeof(object->comm));
 546	} else if (in_softirq()) {
 547		object->pid = 0;
 548		strncpy(object->comm, "softirq", sizeof(object->comm));
 549	} else {
 550		object->pid = current->pid;
 551		/*
 552		 * There is a small chance of a race with set_task_comm(),
 553		 * however using get_task_comm() here may cause locking
 554		 * dependency issues with current->alloc_lock. In the worst
 555		 * case, the command line is not correct.
 556		 */
 557		strncpy(object->comm, current->comm, sizeof(object->comm));
 558	}
 559
 560	/* kernel backtrace */
 561	object->trace_len = __save_stack_trace(object->trace);
 562
 563	INIT_PRIO_TREE_NODE(&object->tree_node);
 564	object->tree_node.start = ptr;
 565	object->tree_node.last = ptr + size - 1;
 566
 567	write_lock_irqsave(&kmemleak_lock, flags);
 568
 569	min_addr = min(min_addr, ptr);
 570	max_addr = max(max_addr, ptr + size);
 571	node = prio_tree_insert(&object_tree_root, &object->tree_node);
 572	/*
 573	 * The code calling the kernel does not yet have the pointer to the
 574	 * memory block to be able to free it.  However, we still hold the
 575	 * kmemleak_lock here in case parts of the kernel started freeing
 576	 * random memory blocks.
 577	 */
 578	if (node != &object->tree_node) {
 579		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
 580			      "(already existing)\n", ptr);
 581		object = lookup_object(ptr, 1);
 582		spin_lock(&object->lock);
 583		dump_object_info(object);
 584		spin_unlock(&object->lock);
 585
 586		goto out;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 587	}
 
 
 
 588	list_add_tail_rcu(&object->object_list, &object_list);
 589out:
 590	write_unlock_irqrestore(&kmemleak_lock, flags);
 591	return object;
 
 
 
 
 
 
 
 
 
 
 
 
 
 592}
 593
 594/*
 595 * Remove the metadata (struct kmemleak_object) for a memory block from the
 596 * object_list and object_tree_root and decrement its use_count.
 597 */
 598static void __delete_object(struct kmemleak_object *object)
 599{
 600	unsigned long flags;
 601
 602	write_lock_irqsave(&kmemleak_lock, flags);
 603	prio_tree_remove(&object_tree_root, &object->tree_node);
 604	list_del_rcu(&object->object_list);
 605	write_unlock_irqrestore(&kmemleak_lock, flags);
 606
 607	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
 608	WARN_ON(atomic_read(&object->use_count) < 2);
 609
 610	/*
 611	 * Locking here also ensures that the corresponding memory block
 612	 * cannot be freed when it is being scanned.
 613	 */
 614	spin_lock_irqsave(&object->lock, flags);
 615	object->flags &= ~OBJECT_ALLOCATED;
 616	spin_unlock_irqrestore(&object->lock, flags);
 617	put_object(object);
 618}
 619
 620/*
 621 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 622 * delete it.
 623 */
 624static void delete_object_full(unsigned long ptr)
 625{
 626	struct kmemleak_object *object;
 627
 628	object = find_and_get_object(ptr, 0);
 629	if (!object) {
 630#ifdef DEBUG
 631		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
 632			      ptr);
 633#endif
 634		return;
 635	}
 636	__delete_object(object);
 637	put_object(object);
 638}
 639
 640/*
 641 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 642 * delete it. If the memory block is partially freed, the function may create
 643 * additional metadata for the remaining parts of the block.
 644 */
 645static void delete_object_part(unsigned long ptr, size_t size)
 646{
 647	struct kmemleak_object *object;
 648	unsigned long start, end;
 649
 650	object = find_and_get_object(ptr, 1);
 651	if (!object) {
 652#ifdef DEBUG
 653		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
 654			      "(size %zu)\n", ptr, size);
 655#endif
 656		return;
 657	}
 658	__delete_object(object);
 659
 660	/*
 661	 * Create one or two objects that may result from the memory block
 662	 * split. Note that partial freeing is only done by free_bootmem() and
 663	 * this happens before kmemleak_init() is called. The path below is
 664	 * only executed during early log recording in kmemleak_init(), so
 665	 * GFP_KERNEL is enough.
 666	 */
 667	start = object->pointer;
 668	end = object->pointer + object->size;
 669	if (ptr > start)
 670		create_object(start, ptr - start, object->min_count,
 671			      GFP_KERNEL);
 672	if (ptr + size < end)
 673		create_object(ptr + size, end - ptr - size, object->min_count,
 674			      GFP_KERNEL);
 675
 676	put_object(object);
 677}
 678
 679static void __paint_it(struct kmemleak_object *object, int color)
 680{
 681	object->min_count = color;
 682	if (color == KMEMLEAK_BLACK)
 683		object->flags |= OBJECT_NO_SCAN;
 684}
 685
 686static void paint_it(struct kmemleak_object *object, int color)
 687{
 688	unsigned long flags;
 689
 690	spin_lock_irqsave(&object->lock, flags);
 691	__paint_it(object, color);
 692	spin_unlock_irqrestore(&object->lock, flags);
 693}
 694
 695static void paint_ptr(unsigned long ptr, int color)
 696{
 697	struct kmemleak_object *object;
 698
 699	object = find_and_get_object(ptr, 0);
 700	if (!object) {
 701		kmemleak_warn("Trying to color unknown object "
 702			      "at 0x%08lx as %s\n", ptr,
 703			      (color == KMEMLEAK_GREY) ? "Grey" :
 704			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
 705		return;
 706	}
 707	paint_it(object, color);
 708	put_object(object);
 709}
 710
 711/*
 712 * Mark an object permanently as gray-colored so that it can no longer be
 713 * reported as a leak. This is used in general to mark a false positive.
 714 */
 715static void make_gray_object(unsigned long ptr)
 716{
 717	paint_ptr(ptr, KMEMLEAK_GREY);
 718}
 719
 720/*
 721 * Mark the object as black-colored so that it is ignored from scans and
 722 * reporting.
 723 */
 724static void make_black_object(unsigned long ptr)
 725{
 726	paint_ptr(ptr, KMEMLEAK_BLACK);
 727}
 728
 729/*
 730 * Add a scanning area to the object. If at least one such area is added,
 731 * kmemleak will only scan these ranges rather than the whole memory block.
 732 */
 733static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 734{
 735	unsigned long flags;
 736	struct kmemleak_object *object;
 737	struct kmemleak_scan_area *area;
 
 
 738
 739	object = find_and_get_object(ptr, 1);
 740	if (!object) {
 741		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 742			      ptr);
 743		return;
 744	}
 745
 746	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 
 
 
 
 
 
 747	if (!area) {
 748		pr_warning("Cannot allocate a scan area\n");
 749		goto out;
 
 
 750	}
 751
 752	spin_lock_irqsave(&object->lock, flags);
 753	if (ptr + size > object->pointer + object->size) {
 754		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 755		dump_object_info(object);
 756		kmem_cache_free(scan_area_cache, area);
 757		goto out_unlock;
 758	}
 759
 760	INIT_HLIST_NODE(&area->node);
 761	area->start = ptr;
 762	area->size = size;
 763
 764	hlist_add_head(&area->node, &object->area_list);
 765out_unlock:
 766	spin_unlock_irqrestore(&object->lock, flags);
 767out:
 768	put_object(object);
 769}
 770
 771/*
 772 * Set the OBJECT_NO_SCAN flag for the object corresponding to the give
 773 * pointer. Such object will not be scanned by kmemleak but references to it
 774 * are searched.
 
 775 */
 776static void object_no_scan(unsigned long ptr)
 777{
 778	unsigned long flags;
 779	struct kmemleak_object *object;
 780
 781	object = find_and_get_object(ptr, 0);
 782	if (!object) {
 783		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
 
 784		return;
 785	}
 786
 787	spin_lock_irqsave(&object->lock, flags);
 788	object->flags |= OBJECT_NO_SCAN;
 789	spin_unlock_irqrestore(&object->lock, flags);
 790	put_object(object);
 791}
 792
 793/*
 794 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 795 * processed later once kmemleak is fully initialized.
 
 796 */
 797static void __init log_early(int op_type, const void *ptr, size_t size,
 798			     int min_count)
 799{
 800	unsigned long flags;
 801	struct early_log *log;
 802
 803	if (atomic_read(&kmemleak_error)) {
 804		/* kmemleak stopped recording, just count the requests */
 805		crt_early_log++;
 806		return;
 807	}
 808
 809	if (crt_early_log >= ARRAY_SIZE(early_log)) {
 810		kmemleak_disable();
 811		return;
 812	}
 813
 814	/*
 815	 * There is no need for locking since the kernel is still in UP mode
 816	 * at this stage. Disabling the IRQs is enough.
 817	 */
 818	local_irq_save(flags);
 819	log = &early_log[crt_early_log];
 820	log->op_type = op_type;
 821	log->ptr = ptr;
 822	log->size = size;
 823	log->min_count = min_count;
 824	log->trace_len = __save_stack_trace(log->trace);
 825	crt_early_log++;
 826	local_irq_restore(flags);
 827}
 828
 829/*
 830 * Log an early allocated block and populate the stack trace.
 831 */
 832static void early_alloc(struct early_log *log)
 833{
 834	struct kmemleak_object *object;
 835	unsigned long flags;
 836	int i;
 837
 838	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
 839		return;
 840
 841	/*
 842	 * RCU locking needed to ensure object is not freed via put_object().
 843	 */
 844	rcu_read_lock();
 845	object = create_object((unsigned long)log->ptr, log->size,
 846			       log->min_count, GFP_ATOMIC);
 847	if (!object)
 848		goto out;
 849	spin_lock_irqsave(&object->lock, flags);
 850	for (i = 0; i < log->trace_len; i++)
 851		object->trace[i] = log->trace[i];
 852	object->trace_len = log->trace_len;
 853	spin_unlock_irqrestore(&object->lock, flags);
 854out:
 855	rcu_read_unlock();
 856}
 857
 858/*
 859 * Log an early allocated block and populate the stack trace.
 860 */
 861static void early_alloc_percpu(struct early_log *log)
 862{
 863	unsigned int cpu;
 864	const void __percpu *ptr = log->ptr;
 865
 866	for_each_possible_cpu(cpu) {
 867		log->ptr = per_cpu_ptr(ptr, cpu);
 868		early_alloc(log);
 869	}
 870}
 871
 872/**
 873 * kmemleak_alloc - register a newly allocated object
 874 * @ptr:	pointer to beginning of the object
 875 * @size:	size of the object
 876 * @min_count:	minimum number of references to this object. If during memory
 877 *		scanning a number of references less than @min_count is found,
 878 *		the object is reported as a memory leak. If @min_count is 0,
 879 *		the object is never reported as a leak. If @min_count is -1,
 880 *		the object is ignored (not scanned and not reported as a leak)
 881 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 882 *
 883 * This function is called from the kernel allocators when a new object
 884 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 885 */
 886void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 887			  gfp_t gfp)
 888{
 889	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 890
 891	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 892		create_object((unsigned long)ptr, size, min_count, gfp);
 893	else if (atomic_read(&kmemleak_early_log))
 894		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 895}
 896EXPORT_SYMBOL_GPL(kmemleak_alloc);
 897
 898/**
 899 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 900 * @ptr:	__percpu pointer to beginning of the object
 901 * @size:	size of the object
 
 902 *
 903 * This function is called from the kernel percpu allocator when a new object
 904 * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
 905 * allocation.
 906 */
 907void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
 
 908{
 909	unsigned int cpu;
 910
 911	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
 912
 913	/*
 914	 * Percpu allocations are only scanned and not reported as leaks
 915	 * (min_count is set to 0).
 916	 */
 917	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 918		for_each_possible_cpu(cpu)
 919			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
 920				      size, 0, GFP_KERNEL);
 921	else if (atomic_read(&kmemleak_early_log))
 922		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
 923}
 924EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 925
 926/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 927 * kmemleak_free - unregister a previously registered object
 928 * @ptr:	pointer to beginning of the object
 929 *
 930 * This function is called from the kernel allocators when an object (memory
 931 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 932 */
 933void __ref kmemleak_free(const void *ptr)
 934{
 935	pr_debug("%s(0x%p)\n", __func__, ptr);
 936
 937	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 938		delete_object_full((unsigned long)ptr);
 939	else if (atomic_read(&kmemleak_early_log))
 940		log_early(KMEMLEAK_FREE, ptr, 0, 0);
 941}
 942EXPORT_SYMBOL_GPL(kmemleak_free);
 943
 944/**
 945 * kmemleak_free_part - partially unregister a previously registered object
 946 * @ptr:	pointer to the beginning or inside the object. This also
 947 *		represents the start of the range to be freed
 948 * @size:	size to be unregistered
 949 *
 950 * This function is called when only a part of a memory block is freed
 951 * (usually from the bootmem allocator).
 952 */
 953void __ref kmemleak_free_part(const void *ptr, size_t size)
 954{
 955	pr_debug("%s(0x%p)\n", __func__, ptr);
 956
 957	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 958		delete_object_part((unsigned long)ptr, size);
 959	else if (atomic_read(&kmemleak_early_log))
 960		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 961}
 962EXPORT_SYMBOL_GPL(kmemleak_free_part);
 963
 964/**
 965 * kmemleak_free_percpu - unregister a previously registered __percpu object
 966 * @ptr:	__percpu pointer to beginning of the object
 967 *
 968 * This function is called from the kernel percpu allocator when an object
 969 * (memory block) is freed (free_percpu).
 970 */
 971void __ref kmemleak_free_percpu(const void __percpu *ptr)
 972{
 973	unsigned int cpu;
 974
 975	pr_debug("%s(0x%p)\n", __func__, ptr);
 976
 977	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 978		for_each_possible_cpu(cpu)
 979			delete_object_full((unsigned long)per_cpu_ptr(ptr,
 980								      cpu));
 981	else if (atomic_read(&kmemleak_early_log))
 982		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
 983}
 984EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 985
 986/**
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 987 * kmemleak_not_leak - mark an allocated object as false positive
 988 * @ptr:	pointer to beginning of the object
 989 *
 990 * Calling this function on an object will cause the memory block to no longer
 991 * be reported as leak and always be scanned.
 992 */
 993void __ref kmemleak_not_leak(const void *ptr)
 994{
 995	pr_debug("%s(0x%p)\n", __func__, ptr);
 996
 997	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 998		make_gray_object((unsigned long)ptr);
 999	else if (atomic_read(&kmemleak_early_log))
1000		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
1001}
1002EXPORT_SYMBOL(kmemleak_not_leak);
1003
1004/**
1005 * kmemleak_ignore - ignore an allocated object
1006 * @ptr:	pointer to beginning of the object
1007 *
1008 * Calling this function on an object will cause the memory block to be
1009 * ignored (not scanned and not reported as a leak). This is usually done when
1010 * it is known that the corresponding block is not a leak and does not contain
1011 * any references to other allocated memory blocks.
1012 */
1013void __ref kmemleak_ignore(const void *ptr)
1014{
1015	pr_debug("%s(0x%p)\n", __func__, ptr);
1016
1017	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
1018		make_black_object((unsigned long)ptr);
1019	else if (atomic_read(&kmemleak_early_log))
1020		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
1021}
1022EXPORT_SYMBOL(kmemleak_ignore);
1023
1024/**
1025 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1026 * @ptr:	pointer to beginning or inside the object. This also
1027 *		represents the start of the scan area
1028 * @size:	size of the scan area
1029 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1030 *
1031 * This function is used when it is known that only certain parts of an object
1032 * contain references to other objects. Kmemleak will only scan these areas
1033 * reducing the number false negatives.
1034 */
1035void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1036{
1037	pr_debug("%s(0x%p)\n", __func__, ptr);
1038
1039	if (atomic_read(&kmemleak_enabled) && ptr && size && !IS_ERR(ptr))
1040		add_scan_area((unsigned long)ptr, size, gfp);
1041	else if (atomic_read(&kmemleak_early_log))
1042		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
1043}
1044EXPORT_SYMBOL(kmemleak_scan_area);
1045
1046/**
1047 * kmemleak_no_scan - do not scan an allocated object
1048 * @ptr:	pointer to beginning of the object
1049 *
1050 * This function notifies kmemleak not to scan the given memory block. Useful
1051 * in situations where it is known that the given object does not contain any
1052 * references to other objects. Kmemleak will not scan such objects reducing
1053 * the number of false negatives.
1054 */
1055void __ref kmemleak_no_scan(const void *ptr)
1056{
1057	pr_debug("%s(0x%p)\n", __func__, ptr);
1058
1059	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
1060		object_no_scan((unsigned long)ptr);
1061	else if (atomic_read(&kmemleak_early_log))
1062		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
1063}
1064EXPORT_SYMBOL(kmemleak_no_scan);
1065
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1066/*
1067 * Update an object's checksum and return true if it was modified.
1068 */
1069static bool update_checksum(struct kmemleak_object *object)
1070{
1071	u32 old_csum = object->checksum;
1072
1073	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
1074		return false;
1075
1076	object->checksum = crc32(0, (void *)object->pointer, object->size);
 
 
 
 
 
1077	return object->checksum != old_csum;
1078}
1079
1080/*
1081 * Memory scanning is a long process and it needs to be interruptable. This
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1082 * function checks whether such interrupt condition occurred.
1083 */
1084static int scan_should_stop(void)
1085{
1086	if (!atomic_read(&kmemleak_enabled))
1087		return 1;
1088
1089	/*
1090	 * This function may be called from either process or kthread context,
1091	 * hence the need to check for both stop conditions.
1092	 */
1093	if (current->mm)
1094		return signal_pending(current);
1095	else
1096		return kthread_should_stop();
1097
1098	return 0;
1099}
1100
1101/*
1102 * Scan a memory block (exclusive range) for valid pointers and add those
1103 * found to the gray list.
1104 */
1105static void scan_block(void *_start, void *_end,
1106		       struct kmemleak_object *scanned, int allow_resched)
1107{
1108	unsigned long *ptr;
1109	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1110	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
 
 
1111
 
1112	for (ptr = start; ptr < end; ptr++) {
1113		struct kmemleak_object *object;
1114		unsigned long flags;
1115		unsigned long pointer;
 
1116
1117		if (allow_resched)
1118			cond_resched();
1119		if (scan_should_stop())
1120			break;
1121
1122		/* don't scan uninitialized memory */
1123		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
1124						  BYTES_PER_POINTER))
 
 
 
1125			continue;
1126
1127		pointer = *ptr;
1128
1129		object = find_and_get_object(pointer, 1);
 
 
 
 
1130		if (!object)
1131			continue;
1132		if (object == scanned) {
1133			/* self referenced, ignore */
1134			put_object(object);
1135			continue;
1136		}
1137
1138		/*
1139		 * Avoid the lockdep recursive warning on object->lock being
1140		 * previously acquired in scan_object(). These locks are
1141		 * enclosed by scan_mutex.
1142		 */
1143		spin_lock_irqsave_nested(&object->lock, flags,
1144					 SINGLE_DEPTH_NESTING);
1145		if (!color_white(object)) {
1146			/* non-orphan, ignored or new */
1147			spin_unlock_irqrestore(&object->lock, flags);
1148			put_object(object);
1149			continue;
 
1150		}
 
1151
1152		/*
1153		 * Increase the object's reference count (number of pointers
1154		 * to the memory block). If this count reaches the required
1155		 * minimum, the object's color will become gray and it will be
1156		 * added to the gray_list.
1157		 */
1158		object->count++;
1159		if (color_gray(object)) {
1160			list_add_tail(&object->gray_list, &gray_list);
1161			spin_unlock_irqrestore(&object->lock, flags);
1162			continue;
1163		}
 
 
 
 
 
 
 
 
 
 
 
1164
1165		spin_unlock_irqrestore(&object->lock, flags);
1166		put_object(object);
 
 
 
1167	}
1168}
 
1169
1170/*
1171 * Scan a memory block corresponding to a kmemleak_object. A condition is
1172 * that object->use_count >= 1.
1173 */
1174static void scan_object(struct kmemleak_object *object)
1175{
1176	struct kmemleak_scan_area *area;
1177	struct hlist_node *elem;
1178	unsigned long flags;
 
1179
1180	/*
1181	 * Once the object->lock is acquired, the corresponding memory block
1182	 * cannot be freed (the same lock is acquired in delete_object).
1183	 */
1184	spin_lock_irqsave(&object->lock, flags);
1185	if (object->flags & OBJECT_NO_SCAN)
1186		goto out;
1187	if (!(object->flags & OBJECT_ALLOCATED))
1188		/* already freed object */
1189		goto out;
1190	if (hlist_empty(&object->area_list)) {
1191		void *start = (void *)object->pointer;
1192		void *end = (void *)(object->pointer + object->size);
1193
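		/*
		 * Scan in MAX_SCAN_SIZE chunks, dropping object->lock between
		 * chunks so that a large object does not keep interrupts
		 * disabled for too long; the flag re-checks in the loop
		 * condition catch a concurrent free or kmemleak_no_scan().
		 */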
1194		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
1195		       !(object->flags & OBJECT_NO_SCAN)) {
1196			scan_block(start, min(start + MAX_SCAN_SIZE, end),
1197				   object, 0);
1198			start += MAX_SCAN_SIZE;
1199
1200			spin_unlock_irqrestore(&object->lock, flags);
1201			cond_resched();
1202			spin_lock_irqsave(&object->lock, flags);
1203		}
1204	} else
1205		hlist_for_each_entry(area, elem, &object->area_list, node)
1206			scan_block((void *)area->start,
1207				   (void *)(area->start + area->size),
1208				   object, 0);
1209out:
1210	spin_unlock_irqrestore(&object->lock, flags);
1211}
1212
1213/*
1214 * Scan the objects already referenced (gray objects). More objects will be
1215 * referenced and, if there are no memory leaks, all the objects are scanned.
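 *
 * This is effectively a breadth-first traversal: scanning a gray object may
 * append newly grayed objects to the tail of the list and the same loop
 * picks them up until the list is empty.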
1216 */
1217static void scan_gray_list(void)
1218{
1219	struct kmemleak_object *object, *tmp;
1220
1221	/*
1222	 * The list traversal is safe for both tail additions and removals
1223	 * from inside the loop. The kmemleak objects cannot be freed from
1224	 * outside the loop because their use_count was incremented.
1225	 */
1226	object = list_entry(gray_list.next, typeof(*object), gray_list);
1227	while (&object->gray_list != &gray_list) {
1228		cond_resched();
1229
1230		/* may add new objects to the list */
1231		if (!scan_should_stop())
1232			scan_object(object);
1233
1234		tmp = list_entry(object->gray_list.next, typeof(*object),
1235				 gray_list);
1236
1237		/* remove the object from the list and release it */
1238		list_del(&object->gray_list);
1239		put_object(object);
1240
1241		object = tmp;
1242	}
1243	WARN_ON(!list_empty(&gray_list));
1244}
1245
1246/*
1247 * Scan data sections and all the referenced memory blocks allocated via the
1248 * kernel's standard allocators. This function must be called with the
1249 * scan_mutex held.
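 *
 * The scan proceeds in phases:
 *  1. whiten all tracked objects (count = 0) and seed the gray_list with the
 *     objects that are gray by definition (min_count == 0);
 *  2. scan the root set: the data/bss sections, the per-CPU areas, the
 *     struct page descriptors and, optionally, the task stacks;
 *  3. scan_gray_list() follows the discovered references transitively;
 *  4. white objects whose checksum changed since the last scan are re-grayed
 *     and scanned once more;
 *  5. any object still white (unreferenced) is reported as a suspected leak.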
1250 */
1251static void kmemleak_scan(void)
1252{
1253	unsigned long flags;
1254	struct kmemleak_object *object;
1255	int i;
1256	int new_leaks = 0;
1257
1258	jiffies_last_scan = jiffies;
1259
1260	/* prepare the kmemleak_object's */
1261	rcu_read_lock();
1262	list_for_each_entry_rcu(object, &object_list, object_list) {
1263		spin_lock_irqsave(&object->lock, flags);
1264#ifdef DEBUG
1265		/*
1266		 * With a few exceptions there should be a maximum of
1267		 * 1 reference to any object at this point.
1268		 */
1269		if (atomic_read(&object->use_count) > 1) {
1270			pr_debug("object->use_count = %d\n",
1271				 atomic_read(&object->use_count));
1272			dump_object_info(object);
1273		}
1274#endif
1275		/* reset the reference count (whiten the object) */
1276		object->count = 0;
1277		if (color_gray(object) && get_object(object))
1278			list_add_tail(&object->gray_list, &gray_list);
1279
1280		spin_unlock_irqrestore(&object->lock, flags);
1281	}
1282	rcu_read_unlock();
1283
1284	/* data/bss scanning */
1285	scan_block(_sdata, _edata, NULL, 1);
1286	scan_block(__bss_start, __bss_stop, NULL, 1);
1287
1288#ifdef CONFIG_SMP
1289	/* per-cpu sections scanning */
1290	for_each_possible_cpu(i)
1291		scan_block(__per_cpu_start + per_cpu_offset(i),
1292			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
1293#endif
1294
1295	/*
1296	 * Struct page scanning for each node.
1297	 */
1298	lock_memory_hotplug();
1299	for_each_online_node(i) {
1300		pg_data_t *pgdat = NODE_DATA(i);
1301		unsigned long start_pfn = pgdat->node_start_pfn;
1302		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1303		unsigned long pfn;
1304
1305		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1306			struct page *page;
1307
1308			if (!pfn_valid(pfn))
1309				continue;
1310			page = pfn_to_page(pfn);
1311			/* only scan if page is in use */
1312			if (page_count(page) == 0)
1313				continue;
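			/* scan the struct page itself, not the page frame it describes */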
1314			scan_block(page, page + 1, NULL, 1);
1315		}
1316	}
1317	unlock_memory_hotplug();
1318
1319	/*
1320	 * Scanning the task stacks (may introduce false negatives).
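	 * A stale pointer value left on a stack can make a leaked object look
	 * referenced. This pass can be toggled at run time with the "stack=on"
	 * and "stack=off" debugfs commands.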
1321	 */
1322	if (kmemleak_stack_scan) {
1323		struct task_struct *p, *g;
1324
1325		read_lock(&tasklist_lock);
1326		do_each_thread(g, p) {
1327			scan_block(task_stack_page(p), task_stack_page(p) +
1328				   THREAD_SIZE, NULL, 0);
1329		} while_each_thread(g, p);
1330		read_unlock(&tasklist_lock);
1331	}
1332
1333	/*
1334	 * Scan the objects already referenced from the sections scanned
1335	 * above.
1336	 */
1337	scan_gray_list();
1338
1339	/*
1340	 * Check for new or unreferenced objects modified since the previous
1341	 * scan and color them gray until the next scan.
1342	 */
1343	rcu_read_lock();
1344	list_for_each_entry_rcu(object, &object_list, object_list) {
1345		spin_lock_irqsave(&object->lock, flags);
1346		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1347		    && update_checksum(object) && get_object(object)) {
1348			/* color it gray temporarily */
1349			object->count = object->min_count;
1350			list_add_tail(&object->gray_list, &gray_list);
1351		}
1352		spin_unlock_irqrestore(&object->lock, flags);
1353	}
1354	rcu_read_unlock();
1355
1356	/*
1357	 * Re-scan the gray list for modified unreferenced objects.
1358	 */
1359	scan_gray_list();
1360
1361	/*
1362	 * If scanning was stopped do not report any new unreferenced objects.
1363	 */
1364	if (scan_should_stop())
1365		return;
1366
1367	/*
1368	 * Scanning result reporting.
1369	 */
1370	rcu_read_lock();
1371	list_for_each_entry_rcu(object, &object_list, object_list) {
1372		spin_lock_irqsave(&object->lock, flags);
1373		if (unreferenced_object(object) &&
1374		    !(object->flags & OBJECT_REPORTED)) {
1375			object->flags |= OBJECT_REPORTED;
1376			new_leaks++;
1377		}
1378		spin_unlock_irqrestore(&object->lock, flags);
1379	}
1380	rcu_read_unlock();
1381
1382	if (new_leaks)
1383		pr_info("%d new suspected memory leaks (see "
1384			"/sys/kernel/debug/kmemleak)\n", new_leaks);
1385
1386}
1387
1388/*
1389 * Thread function performing automatic memory scanning. Unreferenced objects
1390 * at the end of a memory scan are reported, but only the first time they are seen.
1391 */
1392static int kmemleak_scan_thread(void *arg)
1393{
1394	static int first_run = 1;
1395
1396	pr_info("Automatic memory scanning thread started\n");
1397	set_user_nice(current, 10);
1398
1399	/*
1400	 * Wait before the first scan to allow the system to fully initialize.
1401	 */
1402	if (first_run) {
1403		first_run = 0;
1404		ssleep(SECS_FIRST_SCAN);
1405	}
1406
1407	while (!kthread_should_stop()) {
1408		signed long timeout = jiffies_scan_wait;
1409
1410		mutex_lock(&scan_mutex);
1411		kmemleak_scan();
1412		mutex_unlock(&scan_mutex);
1413
1414		/* wait before the next scan */
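		/*
		 * schedule_timeout_interruptible() may return early when the
		 * thread is woken (e.g. by kthread_stop()), hence the loop
		 * until the full interval has elapsed or stopping is required.
		 */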
1415		while (timeout && !kthread_should_stop())
1416			timeout = schedule_timeout_interruptible(timeout);
1417	}
1418
1419	pr_info("Automatic memory scanning thread ended\n");
1420
1421	return 0;
1422}
1423
1424/*
1425 * Start the automatic memory scanning thread. This function must be called
1426 * with the scan_mutex held.
1427 */
1428static void start_scan_thread(void)
1429{
1430	if (scan_thread)
1431		return;
1432	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1433	if (IS_ERR(scan_thread)) {
1434		pr_warning("Failed to create the scan thread\n");
1435		scan_thread = NULL;
1436	}
1437}
1438
1439/*
1440 * Stop the automatic memory scanning thread. This function must be called
1441 * with the scan_mutex held.
1442 */
1443static void stop_scan_thread(void)
1444{
1445	if (scan_thread) {
1446		kthread_stop(scan_thread);
1447		scan_thread = NULL;
1448	}
1449}
1450
1451/*
1452 * Iterate over the object_list and return the first valid object at or after
1453 * the required position with its use_count incremented. The scan_mutex is
1454 * acquired here and only released in kmemleak_seq_stop().
1455 */
1456static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1457{
1458	struct kmemleak_object *object;
1459	loff_t n = *pos;
1460	int err;
1461
1462	err = mutex_lock_interruptible(&scan_mutex);
1463	if (err < 0)
1464		return ERR_PTR(err);
1465
1466	rcu_read_lock();
1467	list_for_each_entry_rcu(object, &object_list, object_list) {
1468		if (n-- > 0)
1469			continue;
1470		if (get_object(object))
1471			goto out;
1472	}
1473	object = NULL;
1474out:
1475	return object;
1476}
1477
1478/*
1479 * Return the next object in the object_list. The function decrements the
1480 * use_count of the previous object and increases that of the next one.
1481 */
1482static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1483{
1484	struct kmemleak_object *prev_obj = v;
1485	struct kmemleak_object *next_obj = NULL;
1486	struct list_head *n = &prev_obj->object_list;
1487
1488	++(*pos);
1489
1490	list_for_each_continue_rcu(n, &object_list) {
1491		struct kmemleak_object *obj =
1492			list_entry(n, struct kmemleak_object, object_list);
1493		if (get_object(obj)) {
1494			next_obj = obj;
1495			break;
1496		}
1497	}
1498
1499	put_object(prev_obj);
1500	return next_obj;
1501}
1502
1503/*
1504 * Decrement the use_count of the last object returned, if any.
1505 */
1506static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1507{
1508	if (!IS_ERR(v)) {
1509		/*
1510		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1511		 * waiting was interrupted, so only release it if !IS_ERR.
1512		 */
1513		rcu_read_unlock();
1514		mutex_unlock(&scan_mutex);
1515		if (v)
1516			put_object(v);
1517	}
1518}
1519
1520/*
1521 * Print the information for an unreferenced object to the seq file.
1522 */
1523static int kmemleak_seq_show(struct seq_file *seq, void *v)
1524{
1525	struct kmemleak_object *object = v;
1526	unsigned long flags;
1527
1528	spin_lock_irqsave(&object->lock, flags);
1529	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1530		print_unreferenced(seq, object);
1531	spin_unlock_irqrestore(&object->lock, flags);
1532	return 0;
1533}
1534
1535static const struct seq_operations kmemleak_seq_ops = {
1536	.start = kmemleak_seq_start,
1537	.next  = kmemleak_seq_next,
1538	.stop  = kmemleak_seq_stop,
1539	.show  = kmemleak_seq_show,
1540};
1541
1542static int kmemleak_open(struct inode *inode, struct file *file)
1543{
1544	return seq_open(file, &kmemleak_seq_ops);
1545}
1546
1547static int kmemleak_release(struct inode *inode, struct file *file)
1548{
1549	return seq_release(inode, file);
1550}
1551
1552static int dump_str_object_info(const char *str)
1553{
1554	unsigned long flags;
1555	struct kmemleak_object *object;
1556	unsigned long addr;
1557
1558	addr = simple_strtoul(str, NULL, 0);
1559	object = find_and_get_object(addr, 0);
1560	if (!object) {
1561		pr_info("Unknown object at 0x%08lx\n", addr);
1562		return -EINVAL;
1563	}
1564
1565	spin_lock_irqsave(&object->lock, flags);
1566	dump_object_info(object);
1567	spin_unlock_irqrestore(&object->lock, flags);
1568
1569	put_object(object);
1570	return 0;
1571}
1572
1573/*
1574 * We use grey instead of black to ensure we can do future scans on the same
1575 * objects. If we did not do future scans, these black objects could
1576 * potentially contain references to newly allocated objects in the future and
1577 * we'd end up with false positives.
1578 */
1579static void kmemleak_clear(void)
1580{
1581	struct kmemleak_object *object;
1582	unsigned long flags;
1583
1584	rcu_read_lock();
1585	list_for_each_entry_rcu(object, &object_list, object_list) {
1586		spin_lock_irqsave(&object->lock, flags);
1587		if ((object->flags & OBJECT_REPORTED) &&
1588		    unreferenced_object(object))
1589			__paint_it(object, KMEMLEAK_GREY);
1590		spin_unlock_irqrestore(&object->lock, flags);
1591	}
1592	rcu_read_unlock();
1593}
1594
1595/*
1596 * File write operation to configure kmemleak at run-time. The following
1597 * commands can be written to the /sys/kernel/debug/kmemleak file:
1598 *   off	- disable kmemleak (irreversible)
1599 *   stack=on	- enable the task stacks scanning
1600 *   stack=off	- disable the tasks stacks scanning
1601 *   scan=on	- start the automatic memory scanning thread
1602 *   scan=off	- stop the automatic memory scanning thread
1603 *   scan=...	- set the automatic memory scanning period in seconds (0 to
1604 *		  disable it)
1605 *   scan	- trigger a memory scan
1606 *   clear	- mark all current reported unreferenced kmemleak objects as
1607 *		  grey to ignore printing them
1608 *   dump=...	- dump information about the object found at the given address
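 *
 * Example usage from a shell (assuming debugfs is mounted on
 * /sys/kernel/debug):
 *   echo scan=600 > /sys/kernel/debug/kmemleak
 *   cat /sys/kernel/debug/kmemleak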
1609 */
1610static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1611			      size_t size, loff_t *ppos)
1612{
1613	char buf[64];
1614	int buf_size;
1615	int ret;
1616
1617	if (!atomic_read(&kmemleak_enabled))
1618		return -EBUSY;
1619
1620	buf_size = min(size, (sizeof(buf) - 1));
1621	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1622		return -EFAULT;
1623	buf[buf_size] = 0;
1624
1625	ret = mutex_lock_interruptible(&scan_mutex);
1626	if (ret < 0)
1627		return ret;
1628
1629	if (strncmp(buf, "off", 3) == 0)
1630		kmemleak_disable();
1631	else if (strncmp(buf, "stack=on", 8) == 0)
1632		kmemleak_stack_scan = 1;
1633	else if (strncmp(buf, "stack=off", 9) == 0)
1634		kmemleak_stack_scan = 0;
1635	else if (strncmp(buf, "scan=on", 7) == 0)
1636		start_scan_thread();
1637	else if (strncmp(buf, "scan=off", 8) == 0)
1638		stop_scan_thread();
1639	else if (strncmp(buf, "scan=", 5) == 0) {
1640		unsigned long secs;
1641
1642		ret = strict_strtoul(buf + 5, 0, &secs);
1643		if (ret < 0)
1644			goto out;
1645		stop_scan_thread();
1646		if (secs) {
1647			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1648			start_scan_thread();
1649		}
1650	} else if (strncmp(buf, "scan", 4) == 0)
1651		kmemleak_scan();
1652	else if (strncmp(buf, "clear", 5) == 0)
1653		kmemleak_clear();
1654	else if (strncmp(buf, "dump=", 5) == 0)
1655		ret = dump_str_object_info(buf + 5);
1656	else
1657		ret = -EINVAL;
1658
1659out:
1660	mutex_unlock(&scan_mutex);
1661	if (ret < 0)
1662		return ret;
1663
1664	/* ignore the rest of the buffer, only one command at a time */
1665	*ppos += size;
1666	return size;
1667}
1668
1669static const struct file_operations kmemleak_fops = {
1670	.owner		= THIS_MODULE,
1671	.open		= kmemleak_open,
1672	.read		= seq_read,
1673	.write		= kmemleak_write,
1674	.llseek		= seq_lseek,
1675	.release	= kmemleak_release,
1676};
1677
1678/*
1679 * Stop the memory scanning thread and free the kmemleak internal objects if
1680 * no scan thread was previously running (otherwise, kmemleak may still hold
1681 * useful information on memory leaks).
1682 */
1683static void kmemleak_do_cleanup(struct work_struct *work)
1684{
1685	struct kmemleak_object *object;
1686	bool cleanup = scan_thread == NULL;
1687
1688	mutex_lock(&scan_mutex);
1689	stop_scan_thread();
1690
1691	if (cleanup) {
1692		rcu_read_lock();
1693		list_for_each_entry_rcu(object, &object_list, object_list)
1694			delete_object_full(object->pointer);
1695		rcu_read_unlock();
1696	}
1697	mutex_unlock(&scan_mutex);
1698}
1699
1700static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1701
1702/*
1703 * Disable kmemleak. No memory allocation/freeing will be traced once this
1704 * function is called. Disabling kmemleak is an irreversible operation.
1705 */
1706static void kmemleak_disable(void)
1707{
1708	/* atomically check whether it was already invoked */
1709	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
1710		return;
1711
1712	/* stop any memory operation tracing */
1713	atomic_set(&kmemleak_enabled, 0);
1714
1715	/* check whether it is too early for a kernel thread */
1716	if (atomic_read(&kmemleak_initialized))
1717		schedule_work(&cleanup_work);
1718
1719	pr_info("Kernel memory leak detector disabled\n");
1720}
1721
1722/*
1723 * Allow boot-time kmemleak disabling (enabled by default).
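 * Usage: pass "kmemleak=off" on the kernel command line (or "kmemleak=on"
 * to override CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF).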
1724 */
1725static int kmemleak_boot_config(char *str)
1726{
1727	if (!str)
1728		return -EINVAL;
1729	if (strcmp(str, "off") == 0)
1730		kmemleak_disable();
1731	else if (strcmp(str, "on") == 0)
1732		kmemleak_skip_disable = 1;
1733	else
1734		return -EINVAL;
1735	return 0;
1736}
1737early_param("kmemleak", kmemleak_boot_config);
1738
1739static void __init print_log_trace(struct early_log *log)
1740{
1741	struct stack_trace trace;
1742
1743	trace.nr_entries = log->trace_len;
1744	trace.entries = log->trace;
1745
1746	pr_notice("Early log backtrace:\n");
1747	print_stack_trace(&trace, 2);
1748}
1749
1750/*
1751 * Kmemleak initialization.
1752 */
1753void __init kmemleak_init(void)
1754{
1755	int i;
1756	unsigned long flags;
1757
1758#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1759	if (!kmemleak_skip_disable) {
1760		atomic_set(&kmemleak_early_log, 0);
1761		kmemleak_disable();
1762		return;
1763	}
1764#endif
1765
1766	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1767	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1768
1769	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1770	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1771	INIT_PRIO_TREE_ROOT(&object_tree_root);
1772
1773	if (crt_early_log >= ARRAY_SIZE(early_log))
1774		pr_warning("Early log buffer exceeded (%d), please increase "
1775			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);
1776
1777	/* the kernel is still in UP mode, so disabling the IRQs is enough */
1778	local_irq_save(flags);
1779	atomic_set(&kmemleak_early_log, 0);
1780	if (atomic_read(&kmemleak_error)) {
1781		local_irq_restore(flags);
1782		return;
1783	} else
1784		atomic_set(&kmemleak_enabled, 1);
1785	local_irq_restore(flags);
1786
1787	/*
1788	 * This is the point where tracking allocations is safe. Automatic
1789	 * scanning is started during the late initcall. Add the early logged
1790	 * callbacks to the kmemleak infrastructure.
1791	 */
1792	for (i = 0; i < crt_early_log; i++) {
1793		struct early_log *log = &early_log[i];
1794
1795		switch (log->op_type) {
1796		case KMEMLEAK_ALLOC:
1797			early_alloc(log);
1798			break;
1799		case KMEMLEAK_ALLOC_PERCPU:
1800			early_alloc_percpu(log);
1801			break;
1802		case KMEMLEAK_FREE:
1803			kmemleak_free(log->ptr);
1804			break;
1805		case KMEMLEAK_FREE_PART:
1806			kmemleak_free_part(log->ptr, log->size);
1807			break;
1808		case KMEMLEAK_FREE_PERCPU:
1809			kmemleak_free_percpu(log->ptr);
1810			break;
1811		case KMEMLEAK_NOT_LEAK:
1812			kmemleak_not_leak(log->ptr);
1813			break;
1814		case KMEMLEAK_IGNORE:
1815			kmemleak_ignore(log->ptr);
1816			break;
1817		case KMEMLEAK_SCAN_AREA:
1818			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
1819			break;
1820		case KMEMLEAK_NO_SCAN:
1821			kmemleak_no_scan(log->ptr);
1822			break;
1823		default:
1824			kmemleak_warn("Unknown early log operation: %d\n",
1825				      log->op_type);
1826		}
1827
1828		if (atomic_read(&kmemleak_warning)) {
1829			print_log_trace(log);
1830			atomic_set(&kmemleak_warning, 0);
1831		}
1832	}
1833}
1834
1835/*
1836 * Late initialization function.
1837 */
1838static int __init kmemleak_late_init(void)
1839{
1840	struct dentry *dentry;
1841
1842	atomic_set(&kmemleak_initialized, 1);
1843
1844	if (atomic_read(&kmemleak_error)) {
1845		/*
1846		 * Some error occurred and kmemleak was disabled. There is a
1847		 * small chance that kmemleak_disable() was called immediately
1848		 * after setting kmemleak_initialized and we may end up with
1849		 * two clean-up threads but serialized by scan_mutex.
1850		 */
1851		schedule_work(&cleanup_work);
1852		return -ENOMEM;
1853	}
1854
1855	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
1856				     &kmemleak_fops);
1857	if (!dentry)
1858		pr_warning("Failed to create the debugfs kmemleak file\n");
1859	mutex_lock(&scan_mutex);
1860	start_scan_thread();
1861	mutex_unlock(&scan_mutex);
1862
1863	pr_info("Kernel memory leak detector initialized\n");
1864
1865	return 0;
1866}
1867late_initcall(kmemleak_late_init);