mm/kmemleak.c (Linux v5.4)
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * mm/kmemleak.c
   4 *
   5 * Copyright (C) 2008 ARM Limited
   6 * Written by Catalin Marinas <catalin.marinas@arm.com>
   7 *
   8 * For more information on the algorithm and kmemleak usage, please see
   9 * Documentation/dev-tools/kmemleak.rst.
  10 *
  11 * Notes on locking
  12 * ----------------
  13 *
  14 * The following locks and mutexes are used by kmemleak:
  15 *
  16 * - kmemleak_lock (rwlock): protects the object_list modifications and
  17 *   accesses to the object_tree_root. The object_list is the main list
  18 *   holding the metadata (struct kmemleak_object) for the allocated memory
  19 *   blocks. The object_tree_root is a red black tree used to look-up
  20 *   metadata based on a pointer to the corresponding memory block.  The
  21 *   kmemleak_object structures are added to the object_list and
  22 *   object_tree_root in the create_object() function called from the
  23 *   kmemleak_alloc() callback and removed in delete_object() called from the
  24 *   kmemleak_free() callback
  25 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
  26 *   the metadata (e.g. count) are protected by this lock. Note that some
  27 *   members of this structure may be protected by other means (atomic or
  28 *   kmemleak_lock). This lock is also held when scanning the corresponding
  29 *   memory block to avoid the kernel freeing it via the kmemleak_free()
  30 *   callback. This is less heavyweight than holding a global lock like
  31 *   kmemleak_lock during scanning
  32 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
  33 *   unreferenced objects at a time. The gray_list contains the objects which
  34 *   are already referenced or marked as false positives and need to be
  35 *   scanned. This list is only modified during a scanning episode when the
  36 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
  37 *   Note that the kmemleak_object.use_count is incremented when an object is
  38 *   added to the gray_list and therefore cannot be freed. This mutex also
  39 *   prevents multiple users of the "kmemleak" debugfs file together with
  40 *   modifications to the memory scanning parameters including the scan_thread
  41 *   pointer
  42 *
  43 * Locks and mutexes are acquired/nested in the following order:
  44 *
  45 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
  46 *
  47 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
  48 * regions.
  49 *
  50 * The kmemleak_object structures have a use_count incremented or decremented
  51 * using the get_object()/put_object() functions. When the use_count becomes
  52 * 0, this count can no longer be incremented and put_object() schedules the
  53 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
  54 * function must be protected by rcu_read_lock() to avoid accessing a freed
  55 * structure.
  56 */
  57
  58#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  59
  60#include <linux/init.h>
  61#include <linux/kernel.h>
  62#include <linux/list.h>
  63#include <linux/sched/signal.h>
  64#include <linux/sched/task.h>
  65#include <linux/sched/task_stack.h>
  66#include <linux/jiffies.h>
  67#include <linux/delay.h>
  68#include <linux/export.h>
  69#include <linux/kthread.h>
  70#include <linux/rbtree.h>
  71#include <linux/fs.h>
  72#include <linux/debugfs.h>
  73#include <linux/seq_file.h>
  74#include <linux/cpumask.h>
  75#include <linux/spinlock.h>
  76#include <linux/module.h>
  77#include <linux/mutex.h>
  78#include <linux/rcupdate.h>
  79#include <linux/stacktrace.h>
  80#include <linux/cache.h>
  81#include <linux/percpu.h>
  82#include <linux/memblock.h>
  83#include <linux/pfn.h>
  84#include <linux/mmzone.h>
  85#include <linux/slab.h>
  86#include <linux/thread_info.h>
  87#include <linux/err.h>
  88#include <linux/uaccess.h>
  89#include <linux/string.h>
  90#include <linux/nodemask.h>
  91#include <linux/mm.h>
  92#include <linux/workqueue.h>
  93#include <linux/crc32.h>
  94
  95#include <asm/sections.h>
  96#include <asm/processor.h>
  97#include <linux/atomic.h>
  98
  99#include <linux/kasan.h>
 100#include <linux/kmemleak.h>
 101#include <linux/memory_hotplug.h>
 102
 103/*
 104 * Kmemleak configuration and common defines.
 105 */
 106#define MAX_TRACE		16	/* stack trace length */
 107#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 108#define SECS_FIRST_SCAN		60	/* delay before the first scan */
 109#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
 110#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
 111
 112#define BYTES_PER_POINTER	sizeof(void *)
 113
 114/* GFP bitmask for kmemleak internal allocations */
 115#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 116				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
 117				 __GFP_NOWARN)
 118
 119/* scanning area inside a memory block */
 120struct kmemleak_scan_area {
 121	struct hlist_node node;
 122	unsigned long start;
 123	size_t size;
 124};
 125
 126#define KMEMLEAK_GREY	0
 127#define KMEMLEAK_BLACK	-1
 128
 129/*
 130 * Structure holding the metadata for each allocated memory block.
 131 * Modifications to such objects should be made while holding the
 132 * object->lock. Insertions or deletions from object_list, gray_list or
 133 * rb_node are already protected by the corresponding locks or mutex (see
 134 * the notes on locking above). These objects are reference-counted
 135 * (use_count) and freed using the RCU mechanism.
 136 */
 137struct kmemleak_object {
 138	spinlock_t lock;
 139	unsigned int flags;		/* object status flags */
 140	struct list_head object_list;
 141	struct list_head gray_list;
 142	struct rb_node rb_node;
 143	struct rcu_head rcu;		/* object_list lockless traversal */
 144	/* object usage count; object freed when use_count == 0 */
 145	atomic_t use_count;
 146	unsigned long pointer;
 147	size_t size;
 148	/* pass surplus references to this pointer */
 149	unsigned long excess_ref;
 150	/* minimum number of pointers found before it is considered a leak */
 151	int min_count;
 152	/* the total number of pointers found pointing to this object */
 153	int count;
 154	/* checksum for detecting modified objects */
 155	u32 checksum;
 156	/* memory ranges to be scanned inside an object (empty for all) */
 157	struct hlist_head area_list;
 158	unsigned long trace[MAX_TRACE];
 159	unsigned int trace_len;
 160	unsigned long jiffies;		/* creation timestamp */
 161	pid_t pid;			/* pid of the current task */
 162	char comm[TASK_COMM_LEN];	/* executable name */
 163};
 164
 165/* flag representing the memory block allocation status */
 166#define OBJECT_ALLOCATED	(1 << 0)
 167/* flag set after the first reporting of an unreferenced object */
 168#define OBJECT_REPORTED		(1 << 1)
 169/* flag set to not scan the object */
 170#define OBJECT_NO_SCAN		(1 << 2)
 171/* flag set to fully scan the object when scan_area allocation failed */
 172#define OBJECT_FULL_SCAN	(1 << 3)
 173
 174#define HEX_PREFIX		"    "
 175/* number of bytes to print per line; must be 16 or 32 */
 176#define HEX_ROW_SIZE		16
 177/* number of bytes to print at a time (1, 2, 4, 8) */
 178#define HEX_GROUP_SIZE		1
 179/* include ASCII after the hex output */
 180#define HEX_ASCII		1
 181/* max number of lines to be printed */
 182#define HEX_MAX_LINES		2
 183
 184/* the list of all allocated objects */
 185static LIST_HEAD(object_list);
 186/* the list of gray-colored objects (see color_gray comment below) */
 187static LIST_HEAD(gray_list);
 188/* memory pool allocation */
 189static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
 190static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
 191static LIST_HEAD(mem_pool_free_list);
 192/* search tree for object boundaries */
 193static struct rb_root object_tree_root = RB_ROOT;
 194/* rw_lock protecting the access to object_list and object_tree_root */
 195static DEFINE_RWLOCK(kmemleak_lock);
 196
 197/* allocation caches for kmemleak internal data */
 198static struct kmem_cache *object_cache;
 199static struct kmem_cache *scan_area_cache;
 200
 201/* set if tracing memory operations is enabled */
 202static int kmemleak_enabled = 1;
 203/* same as above but only for the kmemleak_free() callback */
 204static int kmemleak_free_enabled = 1;
 205/* set in the late_initcall if there were no errors */
 206static int kmemleak_initialized;
 207/* set if a kmemleak warning was issued */
 208static int kmemleak_warning;
 209/* set if a fatal kmemleak error has occurred */
 210static int kmemleak_error;
 211
 212/* minimum and maximum address that may be valid pointers */
 213static unsigned long min_addr = ULONG_MAX;
 214static unsigned long max_addr;
 215
 216static struct task_struct *scan_thread;
 217/* used to avoid reporting of recently allocated objects */
 218static unsigned long jiffies_min_age;
 219static unsigned long jiffies_last_scan;
 220/* delay between automatic memory scannings */
 221static signed long jiffies_scan_wait;
 222/* enables or disables the task stacks scanning */
 223static int kmemleak_stack_scan = 1;
 224/* protects the memory scanning, parameters and debug/kmemleak file access */
 225static DEFINE_MUTEX(scan_mutex);
 226/* setting kmemleak=on, will set this var, skipping the disable */
 227static int kmemleak_skip_disable;
 228/* If there are leaks that can be reported */
 229static bool kmemleak_found_leaks;
 230
 231static bool kmemleak_verbose;
 232module_param_named(verbose, kmemleak_verbose, bool, 0600);
 233
 234static void kmemleak_disable(void);
 235
 236/*
 237 * Print a warning and dump the stack trace.
 238 */
 239#define kmemleak_warn(x...)	do {		\
 240	pr_warn(x);				\
 241	dump_stack();				\
 242	kmemleak_warning = 1;			\
 243} while (0)
 244
 245/*
 246 * Macro invoked when a serious kmemleak condition has occurred and cannot be
 247 * recovered from. Kmemleak will be disabled and further allocation/freeing
 248 * tracing is no longer available.
 249 */
 250#define kmemleak_stop(x...)	do {	\
 251	kmemleak_warn(x);		\
 252	kmemleak_disable();		\
 253} while (0)
 254
 255#define warn_or_seq_printf(seq, fmt, ...)	do {	\
 256	if (seq)					\
 257		seq_printf(seq, fmt, ##__VA_ARGS__);	\
 258	else						\
 259		pr_warn(fmt, ##__VA_ARGS__);		\
 260} while (0)
 261
 262static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
 263				 int rowsize, int groupsize, const void *buf,
 264				 size_t len, bool ascii)
 265{
 266	if (seq)
 267		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
 268			     buf, len, ascii);
 269	else
 270		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
 271			       rowsize, groupsize, buf, len, ascii);
 272}
 273
 274/*
 275 * Printing of the object's hex dump to the seq file. The number of lines to be
 276 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 277 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 278 * with the object->lock held.
 279 */
 280static void hex_dump_object(struct seq_file *seq,
 281			    struct kmemleak_object *object)
 282{
 283	const u8 *ptr = (const u8 *)object->pointer;
 284	size_t len;
 285
 286	/* limit the number of lines to HEX_MAX_LINES */
 287	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
 288
 289	warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
 290	kasan_disable_current();
 291	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
 292			     HEX_GROUP_SIZE, ptr, len, HEX_ASCII);
 293	kasan_enable_current();
 294}
 295
 296/*
 297 * Object colors, encoded with count and min_count:
 298 * - white - orphan object, not enough references to it (count < min_count)
 299 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 300 *		sufficient references to it (count >= min_count)
 301 * - black - ignore, it doesn't contain references (e.g. text section)
 302 *		(min_count == -1). No function defined for this color.
 303 * Newly created objects don't have any color assigned (object->count == -1)
 304 * before the next memory scan when they become white.
 305 */
 306static bool color_white(const struct kmemleak_object *object)
 307{
 308	return object->count != KMEMLEAK_BLACK &&
 309		object->count < object->min_count;
 310}
 311
 312static bool color_gray(const struct kmemleak_object *object)
 313{
 314	return object->min_count != KMEMLEAK_BLACK &&
 315		object->count >= object->min_count;
 316}
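/*
 * A worked illustration of the count/min_count encoding described above, as
 * a standalone user-space program (a sketch, not part of this file; names
 * prefixed ex_ are invented):
 */
#include <stdio.h>

#define EX_GREY		0
#define EX_BLACK	-1

/* same predicates as color_white()/color_gray() above, on plain ints */
static int ex_white(int count, int min_count)
{
	return count != EX_BLACK && count < min_count;
}

static int ex_gray(int count, int min_count)
{
	return min_count != EX_BLACK && count >= min_count;
}

int main(void)
{
	/* new object, referenced object, false positive (grey), ignored (black) */
	int sample[][2] = { { 0, 1 }, { 1, 1 }, { 0, EX_GREY }, { 0, EX_BLACK } };

	for (int i = 0; i < 4; i++)
		printf("count=%d min_count=%d white=%d gray=%d\n",
		       sample[i][0], sample[i][1],
		       ex_white(sample[i][0], sample[i][1]),
		       ex_gray(sample[i][0], sample[i][1]));
	return 0;
}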
 317
 318/*
 319 * Objects are considered unreferenced only if their color is white, they have
 320 * not been deleted and have a minimum age to avoid false positives caused by
 321 * pointers temporarily stored in CPU registers.
 322 */
 323static bool unreferenced_object(struct kmemleak_object *object)
 324{
 325	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
 326		time_before_eq(object->jiffies + jiffies_min_age,
 327			       jiffies_last_scan);
 328}
 329
 330/*
 331 * Printing of the unreferenced object's information to the seq file. The
 332 * print_unreferenced function must be called with the object->lock held.
 333 */
 334static void print_unreferenced(struct seq_file *seq,
 335			       struct kmemleak_object *object)
 336{
 337	int i;
 338	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 339
 340	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 341		   object->pointer, object->size);
 342	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
 343		   object->comm, object->pid, object->jiffies,
 344		   msecs_age / 1000, msecs_age % 1000);
 345	hex_dump_object(seq, object);
 346	warn_or_seq_printf(seq, "  backtrace:\n");
 347
 348	for (i = 0; i < object->trace_len; i++) {
 349		void *ptr = (void *)object->trace[i];
 350		warn_or_seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
 351	}
 352}
 353
 354/*
 355 * Print the kmemleak_object information. This function is used mainly for
 356 * debugging special cases of kmemleak operations. It must be called with
 357 * the object->lock held.
 358 */
 359static void dump_object_info(struct kmemleak_object *object)
 360{
 361	pr_notice("Object 0x%08lx (size %zu):\n",
 362		  object->pointer, object->size);
 363	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
 364		  object->comm, object->pid, object->jiffies);
 365	pr_notice("  min_count = %d\n", object->min_count);
 366	pr_notice("  count = %d\n", object->count);
 367	pr_notice("  flags = 0x%x\n", object->flags);
 368	pr_notice("  checksum = %u\n", object->checksum);
 369	pr_notice("  backtrace:\n");
 370	stack_trace_print(object->trace, object->trace_len, 4);
 371}
 372
 373/*
 374 * Look-up a memory block metadata (kmemleak_object) in the object search
 375 * tree based on a pointer value. If alias is 0, only values pointing to the
 376 * beginning of the memory block are allowed. The kmemleak_lock must be held
 377 * when calling this function.
 378 */
 379static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 380{
 381	struct rb_node *rb = object_tree_root.rb_node;
 382
 383	while (rb) {
 384		struct kmemleak_object *object =
 385			rb_entry(rb, struct kmemleak_object, rb_node);
 386		if (ptr < object->pointer)
 387			rb = object->rb_node.rb_left;
 388		else if (object->pointer + object->size <= ptr)
 389			rb = object->rb_node.rb_right;
 390		else if (object->pointer == ptr || alias)
 391			return object;
 392		else {
 393			kmemleak_warn("Found object by alias at 0x%08lx\n",
 394				      ptr);
 395			dump_object_info(object);
 396			break;
 397		}
 398	}
 399	return NULL;
 400}
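/*
 * The tree is keyed by address range, so an interior pointer (an "alias")
 * also matches its object; alias == 0 additionally demands an exact start
 * match, and an interior hit is then only warned about. The interval test
 * above reduces to the following user-space sketch (addresses hypothetical):
 */
#include <stdio.h>

static int in_object(unsigned long ptr, unsigned long pointer, unsigned long size)
{
	return ptr >= pointer && ptr < pointer + size;
}

int main(void)
{
	unsigned long pointer = 0x1000, size = 0x40;	/* hypothetical object */

	printf("%d\n", in_object(0x1000, pointer, size));	/* 1: start of block */
	printf("%d\n", in_object(0x1008, pointer, size));	/* 1: interior, needs alias */
	printf("%d\n", in_object(0x1040, pointer, size));	/* 0: one past the end */
	return 0;
}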
 401
 402/*
 403 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 404 * that once an object's use_count reached 0, the RCU freeing was already
 405 * registered and the object should no longer be used. This function must be
 406 * called under the protection of rcu_read_lock().
 407 */
 408static int get_object(struct kmemleak_object *object)
 409{
 410	return atomic_inc_not_zero(&object->use_count);
 411}
 412
 413/*
 414 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 415 */
 416static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
 417{
 418	unsigned long flags;
 419	struct kmemleak_object *object;
 420
 421	/* try the slab allocator first */
 422	if (object_cache) {
 423		object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 424		if (object)
 425			return object;
 426	}
 427
 428	/* slab allocation failed, try the memory pool */
 429	write_lock_irqsave(&kmemleak_lock, flags);
 430	object = list_first_entry_or_null(&mem_pool_free_list,
 431					  typeof(*object), object_list);
 432	if (object)
 433		list_del(&object->object_list);
 434	else if (mem_pool_free_count)
 435		object = &mem_pool[--mem_pool_free_count];
 436	else
 437		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
 438	write_unlock_irqrestore(&kmemleak_lock, flags);
 439
 440	return object;
 441}
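/*
 * The fallback order above is: the slab cache if it exists, then objects
 * previously returned to the pool's free list, then untouched slots carved
 * from the tail of the static array. A minimal user-space model of the same
 * two-tier pool (a sketch; ex_ names are invented):
 */
#include <stddef.h>

#define EX_POOL_SIZE 16

struct ex_obj { struct ex_obj *next; };

static struct ex_obj ex_pool[EX_POOL_SIZE];
static int ex_pool_free_count = EX_POOL_SIZE;	/* untouched tail slots */
static struct ex_obj *ex_free_list;		/* objects given back */

static struct ex_obj *ex_pool_alloc(void)
{
	struct ex_obj *o = ex_free_list;

	if (o) {				/* recycled object first */
		ex_free_list = o->next;
		return o;
	}
	if (ex_pool_free_count)			/* then carve from the tail */
		return &ex_pool[--ex_pool_free_count];
	return NULL;				/* pool exhausted */
}

static void ex_pool_free(struct ex_obj *o)
{
	o->next = ex_free_list;
	ex_free_list = o;
}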
 442
 443/*
 444 * Return the object to either the slab allocator or the memory pool.
 445 */
 446static void mem_pool_free(struct kmemleak_object *object)
 447{
 448	unsigned long flags;
 449
 450	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
 451		kmem_cache_free(object_cache, object);
 452		return;
 453	}
 454
 455	/* add the object to the memory pool free list */
 456	write_lock_irqsave(&kmemleak_lock, flags);
 457	list_add(&object->object_list, &mem_pool_free_list);
 458	write_unlock_irqrestore(&kmemleak_lock, flags);
 459}
 460
 461/*
 462 * RCU callback to free a kmemleak_object.
 463 */
 464static void free_object_rcu(struct rcu_head *rcu)
 465{
 466	struct hlist_node *tmp;
 467	struct kmemleak_scan_area *area;
 468	struct kmemleak_object *object =
 469		container_of(rcu, struct kmemleak_object, rcu);
 470
 471	/*
 472	 * Once use_count is 0 (guaranteed by put_object), there is no other
 473	 * code accessing this object, hence no need for locking.
 474	 */
 475	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
 476		hlist_del(&area->node);
 477		kmem_cache_free(scan_area_cache, area);
 478	}
 479	mem_pool_free(object);
 480}
 481
 482/*
 483 * Decrement the object use_count. Once the count is 0, free the object using
 484 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 485 * delete_object() path, the delayed RCU freeing ensures that there is no
 486 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 487 * is also possible.
 488 */
 489static void put_object(struct kmemleak_object *object)
 490{
 491	if (!atomic_dec_and_test(&object->use_count))
 492		return;
 493
 494	/* should only get here after delete_object was called */
 495	WARN_ON(object->flags & OBJECT_ALLOCATED);
 496
 497	/*
 498	 * It may be too early for the RCU callbacks, however, there is no
 499	 * concurrent object_list traversal when !object_cache and all objects
 500	 * came from the memory pool. Free the object directly.
 501	 */
 502	if (object_cache)
 503		call_rcu(&object->rcu, free_object_rcu);
 504	else
 505		free_object_rcu(&object->rcu);
 506}
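/*
 * Together, get_object()/put_object() and the RCU freeing allow the
 * canonical lockless traversal used elsewhere in this file (kmemleak_scan()
 * and the seq_file callbacks). A simplified sketch, not part of the original
 * file:
 */
static void __maybe_unused object_list_walk_sketch(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (!get_object(object))	/* use_count hit 0, being freed */
			continue;
		/* ... inspect object, taking object->lock if needed ... */
		put_object(object);
	}
	rcu_read_unlock();
}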
 507
 508/*
 509 * Look up an object in the object search tree and increase its use_count.
 510 */
 511static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 512{
 513	unsigned long flags;
 514	struct kmemleak_object *object;
 515
 516	rcu_read_lock();
 517	read_lock_irqsave(&kmemleak_lock, flags);
 518	object = lookup_object(ptr, alias);
 519	read_unlock_irqrestore(&kmemleak_lock, flags);
 520
 521	/* check whether the object is still available */
 522	if (object && !get_object(object))
 523		object = NULL;
 524	rcu_read_unlock();
 525
 526	return object;
 527}
 528
 529/*
 530 * Remove an object from the object_tree_root and object_list. Must be called
 531 * with the kmemleak_lock held _if_ kmemleak is still enabled.
 532 */
 533static void __remove_object(struct kmemleak_object *object)
 534{
 535	rb_erase(&object->rb_node, &object_tree_root);
 536	list_del_rcu(&object->object_list);
 537}
 538
 539/*
 540 * Look up an object in the object search tree and remove it from both
 541 * object_tree_root and object_list. The returned object's use_count should be
 542 * at least 1, as initially set by create_object().
 543 */
 544static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias)
 545{
 546	unsigned long flags;
 547	struct kmemleak_object *object;
 548
 549	write_lock_irqsave(&kmemleak_lock, flags);
 550	object = lookup_object(ptr, alias);
 551	if (object)
 552		__remove_object(object);
 553	write_unlock_irqrestore(&kmemleak_lock, flags);
 554
 555	return object;
 556}
 557
 558/*
 559 * Save stack trace to the given array of MAX_TRACE size.
 560 */
 561static int __save_stack_trace(unsigned long *trace)
 562{
 563	return stack_trace_save(trace, MAX_TRACE, 2);
 564}
 565
 566/*
 567 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 568 * memory block and add it to the object_list and object_tree_root.
 569 */
 570static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 571					     int min_count, gfp_t gfp)
 572{
 573	unsigned long flags;
 574	struct kmemleak_object *object, *parent;
 575	struct rb_node **link, *rb_parent;
 576	unsigned long untagged_ptr;
 577
 578	object = mem_pool_alloc(gfp);
 579	if (!object) {
 580		pr_warn("Cannot allocate a kmemleak_object structure\n");
 581		kmemleak_disable();
 582		return NULL;
 583	}
 584
 585	INIT_LIST_HEAD(&object->object_list);
 586	INIT_LIST_HEAD(&object->gray_list);
 587	INIT_HLIST_HEAD(&object->area_list);
 588	spin_lock_init(&object->lock);
 589	atomic_set(&object->use_count, 1);
 590	object->flags = OBJECT_ALLOCATED;
 591	object->pointer = ptr;
 592	object->size = size;
 593	object->excess_ref = 0;
 594	object->min_count = min_count;
 595	object->count = 0;			/* white color initially */
 596	object->jiffies = jiffies;
 597	object->checksum = 0;
 598
 599	/* task information */
 600	if (in_irq()) {
 601		object->pid = 0;
 602		strncpy(object->comm, "hardirq", sizeof(object->comm));
 603	} else if (in_serving_softirq()) {
 604		object->pid = 0;
 605		strncpy(object->comm, "softirq", sizeof(object->comm));
 606	} else {
 607		object->pid = current->pid;
 608		/*
 609		 * There is a small chance of a race with set_task_comm(),
 610		 * however using get_task_comm() here may cause locking
 611		 * dependency issues with current->alloc_lock. In the worst
 612		 * case, the command line is not correct.
 613		 */
 614		strncpy(object->comm, current->comm, sizeof(object->comm));
 615	}
 616
 617	/* kernel backtrace */
 618	object->trace_len = __save_stack_trace(object->trace);
 619
 620	write_lock_irqsave(&kmemleak_lock, flags);
 621
 622	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 623	min_addr = min(min_addr, untagged_ptr);
 624	max_addr = max(max_addr, untagged_ptr + size);
 625	link = &object_tree_root.rb_node;
 626	rb_parent = NULL;
 627	while (*link) {
 628		rb_parent = *link;
 629		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
 630		if (ptr + size <= parent->pointer)
 631			link = &parent->rb_node.rb_left;
 632		else if (parent->pointer + parent->size <= ptr)
 633			link = &parent->rb_node.rb_right;
 634		else {
 635			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
 636				      ptr);
 637			/*
 638			 * No need for parent->lock here since "parent" cannot
 639			 * be freed while the kmemleak_lock is held.
 640			 */
 641			dump_object_info(parent);
 642			kmem_cache_free(object_cache, object);
 643			object = NULL;
 644			goto out;
 645		}
 646	}
 647	rb_link_node(&object->rb_node, rb_parent, link);
 648	rb_insert_color(&object->rb_node, &object_tree_root);
 649
 650	list_add_tail_rcu(&object->object_list, &object_list);
 651out:
 652	write_unlock_irqrestore(&kmemleak_lock, flags);
 653	return object;
 654}
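/*
 * Besides inserting into the tree, create_object() widens the global
 * [min_addr, max_addr) window that scan_block() later uses to reject values
 * that cannot point into any tracked block. User-space sketch of that
 * bookkeeping (addresses hypothetical, ex_ names invented):
 */
#include <limits.h>
#include <stdio.h>

static unsigned long ex_min_addr = ULONG_MAX;
static unsigned long ex_max_addr;

static void ex_track(unsigned long ptr, unsigned long size)
{
	if (ptr < ex_min_addr)
		ex_min_addr = ptr;
	if (ptr + size > ex_max_addr)
		ex_max_addr = ptr + size;
}

int main(void)
{
	ex_track(0x1000, 0x40);
	ex_track(0x8000, 0x100);
	/* any scanned word outside [0x1000, 0x8100) is skipped immediately */
	printf("window: [%#lx, %#lx)\n", ex_min_addr, ex_max_addr);
	return 0;
}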
 655
 656/*
 657 * Mark the object as not allocated and schedule RCU freeing via put_object().
 658 */
 659static void __delete_object(struct kmemleak_object *object)
 660{
 661	unsigned long flags;
 662
 663	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
 664	WARN_ON(atomic_read(&object->use_count) < 1);
 665
 666	/*
 667	 * Locking here also ensures that the corresponding memory block
 668	 * cannot be freed when it is being scanned.
 669	 */
 670	spin_lock_irqsave(&object->lock, flags);
 671	object->flags &= ~OBJECT_ALLOCATED;
 672	spin_unlock_irqrestore(&object->lock, flags);
 673	put_object(object);
 674}
 675
 676/*
 677 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 678 * delete it.
 679 */
 680static void delete_object_full(unsigned long ptr)
 681{
 682	struct kmemleak_object *object;
 683
 684	object = find_and_remove_object(ptr, 0);
 685	if (!object) {
 686#ifdef DEBUG
 687		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
 688			      ptr);
 689#endif
 690		return;
 691	}
 692	__delete_object(object);
 693}
 694
 695/*
 696 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 697 * delete it. If the memory block is partially freed, the function may create
 698 * additional metadata for the remaining parts of the block.
 699 */
 700static void delete_object_part(unsigned long ptr, size_t size)
 701{
 702	struct kmemleak_object *object;
 703	unsigned long start, end;
 704
 705	object = find_and_remove_object(ptr, 1);
 706	if (!object) {
 707#ifdef DEBUG
 708		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
 709			      ptr, size);
 710#endif
 711		return;
 712	}
 713
 714	/*
 715	 * Create one or two objects that may result from the memory block
 716	 * split. Note that partial freeing is only done by free_bootmem() and
 717	 * this happens before kmemleak_init() is called.
 718	 */
 719	start = object->pointer;
 720	end = object->pointer + object->size;
 721	if (ptr > start)
 722		create_object(start, ptr - start, object->min_count,
 723			      GFP_KERNEL);
 724	if (ptr + size < end)
 725		create_object(ptr + size, end - ptr - size, object->min_count,
 726			      GFP_KERNEL);
 727
 728	__delete_object(object);
 729}
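/*
 * Worked example of the split above: freeing [ptr, ptr + size) out of a
 * tracked object [start, end) re-registers up to two remainders.
 * User-space sketch with hypothetical addresses:
 */
#include <stdio.h>

int main(void)
{
	unsigned long start = 0x1000, end = 0x2000;	/* tracked object */
	unsigned long ptr = 0x1400, size = 0x200;	/* range being freed */

	if (ptr > start)		/* leading remainder survives */
		printf("keep [%#lx, %#lx)\n", start, ptr);
	if (ptr + size < end)		/* trailing remainder survives */
		printf("keep [%#lx, %#lx)\n", ptr + size, end);
	return 0;
}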
 730
 731static void __paint_it(struct kmemleak_object *object, int color)
 732{
 733	object->min_count = color;
 734	if (color == KMEMLEAK_BLACK)
 735		object->flags |= OBJECT_NO_SCAN;
 736}
 737
 738static void paint_it(struct kmemleak_object *object, int color)
 739{
 740	unsigned long flags;
 741
 742	spin_lock_irqsave(&object->lock, flags);
 743	__paint_it(object, color);
 744	spin_unlock_irqrestore(&object->lock, flags);
 745}
 746
 747static void paint_ptr(unsigned long ptr, int color)
 748{
 749	struct kmemleak_object *object;
 750
 751	object = find_and_get_object(ptr, 0);
 752	if (!object) {
 753		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
 754			      ptr,
 755			      (color == KMEMLEAK_GREY) ? "Grey" :
 756			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
 757		return;
 758	}
 759	paint_it(object, color);
 760	put_object(object);
 761}
 762
 763/*
 764 * Mark an object permanently as gray-colored so that it can no longer be
 765 * reported as a leak. This is used in general to mark a false positive.
 766 */
 767static void make_gray_object(unsigned long ptr)
 768{
 769	paint_ptr(ptr, KMEMLEAK_GREY);
 770}
 771
 772/*
 773 * Mark the object as black-colored so that it is ignored from scans and
 774 * reporting.
 775 */
 776static void make_black_object(unsigned long ptr)
 777{
 778	paint_ptr(ptr, KMEMLEAK_BLACK);
 779}
 780
 781/*
 782 * Add a scanning area to the object. If at least one such area is added,
 783 * kmemleak will only scan these ranges rather than the whole memory block.
 784 */
 785static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 786{
 787	unsigned long flags;
 788	struct kmemleak_object *object;
 789	struct kmemleak_scan_area *area = NULL;
 790
 791	object = find_and_get_object(ptr, 1);
 792	if (!object) {
 793		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 794			      ptr);
 795		return;
 796	}
 797
 798	if (scan_area_cache)
 799		area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 800
 801	spin_lock_irqsave(&object->lock, flags);
 802	if (!area) {
 803		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
 804		/* mark the object for full scan to avoid false positives */
 805		object->flags |= OBJECT_FULL_SCAN;
 806		goto out_unlock;
 807	}
 808	if (size == SIZE_MAX) {
 809		size = object->pointer + object->size - ptr;
 810	} else if (ptr + size > object->pointer + object->size) {
 811		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 812		dump_object_info(object);
 813		kmem_cache_free(scan_area_cache, area);
 814		goto out_unlock;
 815	}
 816
 817	INIT_HLIST_NODE(&area->node);
 818	area->start = ptr;
 819	area->size = size;
 820
 821	hlist_add_head(&area->node, &object->area_list);
 822out_unlock:
 823	spin_unlock_irqrestore(&object->lock, flags);
 824	put_object(object);
 825}
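/*
 * A hypothetical user of the kmemleak_scan_area() interface exported further
 * below: only one member of a large object can hold pointers, so restrict
 * the scan to it. A sketch; the struct and function names are invented:
 */
struct big_buf {
	char data[4096];
	void *refs[16];
};

static struct big_buf *big_buf_alloc(void)
{
	struct big_buf *b = kmalloc(sizeof(*b), GFP_KERNEL);

	if (b)	/* scan only the pointer-bearing member */
		kmemleak_scan_area(b->refs, sizeof(b->refs), GFP_KERNEL);
	return b;
}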
 826
 827/*
 828 * Any surplus references (object already gray) to 'ptr' are passed to
 829 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
 830 * vm_struct may be used as an alternative reference to the vmalloc'ed object
 831 * (see free_thread_stack()).
 832 */
 833static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
 834{
 835	unsigned long flags;
 836	struct kmemleak_object *object;
 837
 838	object = find_and_get_object(ptr, 0);
 839	if (!object) {
 840		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
 841			      ptr);
 842		return;
 843	}
 844
 845	spin_lock_irqsave(&object->lock, flags);
 846	object->excess_ref = excess_ref;
 847	spin_unlock_irqrestore(&object->lock, flags);
 848	put_object(object);
 849}
 850
 851/*
 852 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 853 * pointer. Such an object will not be scanned by kmemleak but references to it
 854 * are still searched.
 855 */
 856static void object_no_scan(unsigned long ptr)
 857{
 858	unsigned long flags;
 859	struct kmemleak_object *object;
 860
 861	object = find_and_get_object(ptr, 0);
 862	if (!object) {
 863		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
 864		return;
 865	}
 866
 867	spin_lock_irqsave(&object->lock, flags);
 868	object->flags |= OBJECT_NO_SCAN;
 869	spin_unlock_irqrestore(&object->lock, flags);
 870	put_object(object);
 871}
 872
 873/**
 874 * kmemleak_alloc - register a newly allocated object
 875 * @ptr:	pointer to beginning of the object
 876 * @size:	size of the object
 877 * @min_count:	minimum number of references to this object. If during memory
 878 *		scanning a number of references less than @min_count is found,
 879 *		the object is reported as a memory leak. If @min_count is 0,
 880 *		the object is never reported as a leak. If @min_count is -1,
 881 *		the object is ignored (not scanned and not reported as a leak)
 882 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 883 *
 884 * This function is called from the kernel allocators when a new object
 885 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
 886 */
 887void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 888			  gfp_t gfp)
 889{
 890	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 891
 892	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 893		create_object((unsigned long)ptr, size, min_count, gfp);
 894}
 895EXPORT_SYMBOL_GPL(kmemleak_alloc);
 896
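/*
 * A hypothetical custom allocator pairing kmemleak_alloc() with the
 * kmemleak_free() callback defined below, so that its blocks are tracked
 * like slab allocations; my_pool_carve()/my_pool_return() are invented
 * back-ends, shown only as a sketch:
 */
static void *my_pool_alloc(size_t size)
{
	void *p = my_pool_carve(size);

	if (p)	/* min_count == 1: report if no reference is found */
		kmemleak_alloc(p, size, 1, GFP_KERNEL);
	return p;
}

static void my_pool_free(void *p, size_t size)
{
	kmemleak_free(p);	/* unregister before the block is recycled */
	my_pool_return(p, size);
}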
 897/**
 898 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 899 * @ptr:	__percpu pointer to beginning of the object
 900 * @size:	size of the object
 901 * @gfp:	flags used for kmemleak internal memory allocations
 902 *
 903 * This function is called from the kernel percpu allocator when a new object
 904 * (memory block) is allocated (alloc_percpu).
 905 */
 906void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
 907				 gfp_t gfp)
 908{
 909	unsigned int cpu;
 910
 911	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
 912
 913	/*
 914	 * Percpu allocations are only scanned and not reported as leaks
 915	 * (min_count is set to 0).
 916	 */
 917	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 918		for_each_possible_cpu(cpu)
 919			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
 920				      size, 0, gfp);
 921}
 922EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 923
 924/**
 925 * kmemleak_vmalloc - register a newly vmalloc'ed object
 926 * @area:	pointer to vm_struct
 927 * @size:	size of the object
 928 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
 929 *
 930 * This function is called from the vmalloc() kernel allocator when a new
 931 * object (memory block) is allocated.
 932 */
 933void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
 934{
 935	pr_debug("%s(0x%p, %zu)\n", __func__, area, size);
 936
 937	/*
 938	 * A min_count = 2 is needed because vm_struct contains a reference to
 939	 * the virtual address of the vmalloc'ed block.
 940	 */
 941	if (kmemleak_enabled) {
 942		create_object((unsigned long)area->addr, size, 2, gfp);
 943		object_set_excess_ref((unsigned long)area,
 944				      (unsigned long)area->addr);
 945	}
 946}
 947EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
 948
 949/**
 950 * kmemleak_free - unregister a previously registered object
 951 * @ptr:	pointer to beginning of the object
 952 *
 953 * This function is called from the kernel allocators when an object (memory
 954 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 955 */
 956void __ref kmemleak_free(const void *ptr)
 957{
 958	pr_debug("%s(0x%p)\n", __func__, ptr);
 959
 960	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
 961		delete_object_full((unsigned long)ptr);
 962}
 963EXPORT_SYMBOL_GPL(kmemleak_free);
 964
 965/**
 966 * kmemleak_free_part - partially unregister a previously registered object
 967 * @ptr:	pointer to the beginning or inside the object. This also
 968 *		represents the start of the range to be freed
 969 * @size:	size to be unregistered
 970 *
 971 * This function is called when only a part of a memory block is freed
 972 * (usually from the bootmem allocator).
 973 */
 974void __ref kmemleak_free_part(const void *ptr, size_t size)
 975{
 976	pr_debug("%s(0x%p)\n", __func__, ptr);
 977
 978	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 979		delete_object_part((unsigned long)ptr, size);
 980}
 981EXPORT_SYMBOL_GPL(kmemleak_free_part);
 982
 983/**
 984 * kmemleak_free_percpu - unregister a previously registered __percpu object
 985 * @ptr:	__percpu pointer to beginning of the object
 986 *
 987 * This function is called from the kernel percpu allocator when an object
 988 * (memory block) is freed (free_percpu).
 989 */
 990void __ref kmemleak_free_percpu(const void __percpu *ptr)
 991{
 992	unsigned int cpu;
 993
 994	pr_debug("%s(0x%p)\n", __func__, ptr);
 995
 996	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
 997		for_each_possible_cpu(cpu)
 998			delete_object_full((unsigned long)per_cpu_ptr(ptr,
 999								      cpu));
1000}
1001EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1002
1003/**
1004 * kmemleak_update_trace - update object allocation stack trace
1005 * @ptr:	pointer to beginning of the object
1006 *
1007 * Override the object allocation stack trace for cases where the actual
1008 * allocation place is not always useful.
1009 */
1010void __ref kmemleak_update_trace(const void *ptr)
1011{
1012	struct kmemleak_object *object;
1013	unsigned long flags;
1014
1015	pr_debug("%s(0x%p)\n", __func__, ptr);
1016
1017	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1018		return;
1019
1020	object = find_and_get_object((unsigned long)ptr, 1);
1021	if (!object) {
1022#ifdef DEBUG
1023		kmemleak_warn("Updating stack trace for unknown object at %p\n",
1024			      ptr);
1025#endif
1026		return;
1027	}
1028
1029	spin_lock_irqsave(&object->lock, flags);
1030	object->trace_len = __save_stack_trace(object->trace);
1031	spin_unlock_irqrestore(&object->lock, flags);
1032
1033	put_object(object);
1034}
1035EXPORT_SYMBOL(kmemleak_update_trace);
1036
1037/**
1038 * kmemleak_not_leak - mark an allocated object as false positive
1039 * @ptr:	pointer to beginning of the object
1040 *
1041 * Calling this function on an object will cause the memory block to no longer
1042 * be reported as leak and always be scanned.
1043 */
1044void __ref kmemleak_not_leak(const void *ptr)
1045{
1046	pr_debug("%s(0x%p)\n", __func__, ptr);
1047
1048	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1049		make_gray_object((unsigned long)ptr);
1050}
1051EXPORT_SYMBOL(kmemleak_not_leak);
1052
1053/**
1054 * kmemleak_ignore - ignore an allocated object
1055 * @ptr:	pointer to beginning of the object
1056 *
1057 * Calling this function on an object will cause the memory block to be
1058 * ignored (not scanned and not reported as a leak). This is usually done when
1059 * it is known that the corresponding block is not a leak and does not contain
1060 * any references to other allocated memory blocks.
1061 */
1062void __ref kmemleak_ignore(const void *ptr)
1063{
1064	pr_debug("%s(0x%p)\n", __func__, ptr);
1065
1066	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1067		make_black_object((unsigned long)ptr);
1068}
1069EXPORT_SYMBOL(kmemleak_ignore);
1070
1071/**
1072 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1073 * @ptr:	pointer to beginning or inside the object. This also
1074 *		represents the start of the scan area
1075 * @size:	size of the scan area
1076 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1077 *
1078 * This function is used when it is known that only certain parts of an object
1079 * contain references to other objects. Kmemleak will only scan these areas
1080 * reducing the number of false negatives.
1081 */
1082void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1083{
1084	pr_debug("%s(0x%p)\n", __func__, ptr);
1085
1086	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1087		add_scan_area((unsigned long)ptr, size, gfp);
1088}
1089EXPORT_SYMBOL(kmemleak_scan_area);
1090
1091/**
1092 * kmemleak_no_scan - do not scan an allocated object
1093 * @ptr:	pointer to beginning of the object
1094 *
1095 * This function notifies kmemleak not to scan the given memory block. Useful
1096 * in situations where it is known that the given object does not contain any
1097 * references to other objects. Kmemleak will not scan such objects reducing
1098 * the number of false negatives.
1099 */
1100void __ref kmemleak_no_scan(const void *ptr)
1101{
1102	pr_debug("%s(0x%p)\n", __func__, ptr);
1103
1104	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1105		object_no_scan((unsigned long)ptr);
1106}
1107EXPORT_SYMBOL(kmemleak_no_scan);
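/*
 * Hypothetical examples contrasting the three annotations exported above:
 * kmemleak_not_leak() keeps scanning the object but never reports it,
 * kmemleak_ignore() neither scans nor reports it, and kmemleak_no_scan()
 * skips scanning while still leak-checking it. A sketch only (error
 * unwinding omitted):
 */
static int annotation_examples(void)
{
	void *cfg = kmalloc(128, GFP_KERNEL);		/* referenced only via hardware */
	void *blob = kmalloc(4096, GFP_KERNEL);		/* pure data, never a leak */
	void *bitmap = kmalloc(512, GFP_KERNEL);	/* no pointers, but may leak */

	if (!cfg || !blob || !bitmap)
		return -ENOMEM;

	kmemleak_not_leak(cfg);
	kmemleak_ignore(blob);
	kmemleak_no_scan(bitmap);
	return 0;
}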
1108
1109/**
1110 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1111 *			 address argument
1112 * @phys:	physical address of the object
1113 * @size:	size of the object
1114 * @min_count:	minimum number of references to this object.
1115 *              See kmemleak_alloc()
1116 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1117 */
1118void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, int min_count,
1119			       gfp_t gfp)
1120{
1121	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1122		kmemleak_alloc(__va(phys), size, min_count, gfp);
1123}
1124EXPORT_SYMBOL(kmemleak_alloc_phys);
1125
1126/**
1127 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1128 *			     physical address argument
1129 * @phys:	physical address of the beginning of or inside an object. This
1130 *		also represents the start of the range to be freed
1131 * @size:	size to be unregistered
1132 */
1133void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1134{
1135	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1136		kmemleak_free_part(__va(phys), size);
1137}
1138EXPORT_SYMBOL(kmemleak_free_part_phys);
1139
1140/**
1141 * kmemleak_not_leak_phys - similar to kmemleak_not_leak but taking a physical
1142 *			    address argument
1143 * @phys:	physical address of the object
1144 */
1145void __ref kmemleak_not_leak_phys(phys_addr_t phys)
1146{
1147	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1148		kmemleak_not_leak(__va(phys));
1149}
1150EXPORT_SYMBOL(kmemleak_not_leak_phys);
1151
1152/**
1153 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1154 *			  address argument
1155 * @phys:	physical address of the object
1156 */
1157void __ref kmemleak_ignore_phys(phys_addr_t phys)
1158{
1159	if (!IS_ENABLED(CONFIG_HIGHMEM) || PHYS_PFN(phys) < max_low_pfn)
1160		kmemleak_ignore(__va(phys));
1161}
1162EXPORT_SYMBOL(kmemleak_ignore_phys);
1163
1164/*
1165 * Update an object's checksum and return true if it was modified.
1166 */
1167static bool update_checksum(struct kmemleak_object *object)
1168{
1169	u32 old_csum = object->checksum;
1170
1171	kasan_disable_current();
1172	object->checksum = crc32(0, (void *)object->pointer, object->size);
1173	kasan_enable_current();
1174
1175	return object->checksum != old_csum;
1176}
1177
1178/*
1179 * Update an object's references. object->lock must be held by the caller.
1180 */
1181static void update_refs(struct kmemleak_object *object)
1182{
1183	if (!color_white(object)) {
1184		/* non-orphan, ignored or new */
1185		return;
1186	}
1187
1188	/*
1189	 * Increase the object's reference count (number of pointers to the
1190	 * memory block). If this count reaches the required minimum, the
1191	 * object's color will become gray and it will be added to the
1192	 * gray_list.
1193	 */
1194	object->count++;
1195	if (color_gray(object)) {
1196		/* put_object() called when removing from gray_list */
1197		WARN_ON(!get_object(object));
1198		list_add_tail(&object->gray_list, &gray_list);
1199	}
1200}
1201
1202/*
1203 * Memory scanning is a long process and it needs to be interruptible. This
1204 * function checks whether such an interrupt condition has occurred.
1205 */
1206static int scan_should_stop(void)
1207{
1208	if (!kmemleak_enabled)
1209		return 1;
1210
1211	/*
1212	 * This function may be called from either process or kthread context,
1213	 * hence the need to check for both stop conditions.
1214	 */
1215	if (current->mm)
1216		return signal_pending(current);
1217	else
1218		return kthread_should_stop();
1219
1220	return 0;
1221}
1222
1223/*
1224 * Scan a memory block (exclusive range) for valid pointers and add those
1225 * found to the gray list.
1226 */
1227static void scan_block(void *_start, void *_end,
1228		       struct kmemleak_object *scanned)
1229{
1230	unsigned long *ptr;
1231	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1232	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1233	unsigned long flags;
1234	unsigned long untagged_ptr;
1235
1236	read_lock_irqsave(&kmemleak_lock, flags);
1237	for (ptr = start; ptr < end; ptr++) {
1238		struct kmemleak_object *object;
1239		unsigned long pointer;
1240		unsigned long excess_ref;
1241
1242		if (scan_should_stop())
1243			break;
1244
1245		kasan_disable_current();
1246		pointer = *ptr;
1247		kasan_enable_current();
1248
1249		untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1250		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1251			continue;
1252
1253		/*
1254		 * No need for get_object() here since we hold kmemleak_lock.
1255		 * object->use_count cannot be dropped to 0 while the object
1256		 * is still present in object_tree_root and object_list
1257		 * (with updates protected by kmemleak_lock).
1258		 */
1259		object = lookup_object(pointer, 1);
1260		if (!object)
1261			continue;
1262		if (object == scanned)
1263			/* self referenced, ignore */
1264			continue;
1265
1266		/*
1267		 * Avoid the lockdep recursive warning on object->lock being
1268		 * previously acquired in scan_object(). These locks are
1269		 * enclosed by scan_mutex.
1270		 */
1271		spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1272		/* only pass surplus references (object already gray) */
1273		if (color_gray(object)) {
1274			excess_ref = object->excess_ref;
1275			/* no need for update_refs() if object already gray */
1276		} else {
1277			excess_ref = 0;
1278			update_refs(object);
1279		}
1280		spin_unlock(&object->lock);
1281
1282		if (excess_ref) {
1283			object = lookup_object(excess_ref, 0);
1284			if (!object)
1285				continue;
1286			if (object == scanned)
1287				/* circular reference, ignore */
1288				continue;
1289			spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1290			update_refs(object);
1291			spin_unlock(&object->lock);
1292		}
1293	}
1294	read_unlock_irqrestore(&kmemleak_lock, flags);
1295}
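/*
 * The boundary arithmetic above, in isolation: _start is rounded up to a
 * pointer boundary and _end is pulled in so that every word read stays
 * inside the block. A standalone user-space sketch (not part of this file):
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[37] = { 0 };	/* deliberately odd-sized block */
	uintptr_t s = (uintptr_t)buf, e = s + sizeof(buf);
	/* PTR_ALIGN(_start, BYTES_PER_POINTER) equivalent */
	uintptr_t start = (s + sizeof(void *) - 1) & ~(uintptr_t)(sizeof(void *) - 1);
	/* _end - (BYTES_PER_POINTER - 1): the last full word must fit */
	uintptr_t end = e - (sizeof(void *) - 1);
	size_t words = 0;

	for (uintptr_t p = start; p < end; p += sizeof(void *)) {
		unsigned long candidate;

		memcpy(&candidate, (void *)p, sizeof(candidate));
		(void)candidate;	/* would be range-checked against min/max_addr */
		words++;
	}
	printf("%zu-byte block yields %zu candidate words\n", sizeof(buf), words);
	return 0;
}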
1296
1297/*
1298 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1299 */
1300#ifdef CONFIG_SMP
1301static void scan_large_block(void *start, void *end)
1302{
1303	void *next;
1304
1305	while (start < end) {
1306		next = min(start + MAX_SCAN_SIZE, end);
1307		scan_block(start, next, NULL);
1308		start = next;
1309		cond_resched();
1310	}
1311}
1312#endif
1313
1314/*
1315 * Scan a memory block corresponding to a kmemleak_object. A precondition is
1316 * that object->use_count >= 1.
1317 */
1318static void scan_object(struct kmemleak_object *object)
1319{
1320	struct kmemleak_scan_area *area;
1321	unsigned long flags;
1322
1323	/*
1324	 * Once the object->lock is acquired, the corresponding memory block
1325	 * cannot be freed (the same lock is acquired in delete_object).
1326	 */
1327	spin_lock_irqsave(&object->lock, flags);
1328	if (object->flags & OBJECT_NO_SCAN)
1329		goto out;
1330	if (!(object->flags & OBJECT_ALLOCATED))
1331		/* already freed object */
1332		goto out;
1333	if (hlist_empty(&object->area_list) ||
1334	    object->flags & OBJECT_FULL_SCAN) {
1335		void *start = (void *)object->pointer;
1336		void *end = (void *)(object->pointer + object->size);
1337		void *next;
1338
1339		do {
1340			next = min(start + MAX_SCAN_SIZE, end);
1341			scan_block(start, next, object);
1342
1343			start = next;
1344			if (start >= end)
1345				break;
1346
1347			spin_unlock_irqrestore(&object->lock, flags);
1348			cond_resched();
1349			spin_lock_irqsave(&object->lock, flags);
1350		} while (object->flags & OBJECT_ALLOCATED);
1351	} else
1352		hlist_for_each_entry(area, &object->area_list, node)
1353			scan_block((void *)area->start,
1354				   (void *)(area->start + area->size),
1355				   object);
1356out:
1357	spin_unlock_irqrestore(&object->lock, flags);
1358}
1359
1360/*
1361 * Scan the objects already referenced (gray objects). More objects will be
1362 * referenced and, if there are no memory leaks, all the objects are scanned.
1363 */
1364static void scan_gray_list(void)
1365{
1366	struct kmemleak_object *object, *tmp;
1367
1368	/*
1369	 * The list traversal is safe for both tail additions and removals
1370	 * from inside the loop. The kmemleak objects cannot be freed from
1371	 * outside the loop because their use_count was incremented.
1372	 */
1373	object = list_entry(gray_list.next, typeof(*object), gray_list);
1374	while (&object->gray_list != &gray_list) {
1375		cond_resched();
1376
1377		/* may add new objects to the list */
1378		if (!scan_should_stop())
1379			scan_object(object);
1380
1381		tmp = list_entry(object->gray_list.next, typeof(*object),
1382				 gray_list);
1383
1384		/* remove the object from the list and release it */
1385		list_del(&object->gray_list);
1386		put_object(object);
1387
1388		object = tmp;
1389	}
1390	WARN_ON(!list_empty(&gray_list));
1391}
1392
1393/*
1394 * Scan data sections and all the referenced memory blocks allocated via the
1395 * kernel's standard allocators. This function must be called with the
1396 * scan_mutex held.
1397 */
1398static void kmemleak_scan(void)
1399{
1400	unsigned long flags;
1401	struct kmemleak_object *object;
1402	int i;
1403	int new_leaks = 0;
1404
1405	jiffies_last_scan = jiffies;
1406
1407	/* prepare the kmemleak_object's */
1408	rcu_read_lock();
1409	list_for_each_entry_rcu(object, &object_list, object_list) {
1410		spin_lock_irqsave(&object->lock, flags);
1411#ifdef DEBUG
1412		/*
1413		 * With a few exceptions there should be a maximum of
1414		 * 1 reference to any object at this point.
1415		 */
1416		if (atomic_read(&object->use_count) > 1) {
1417			pr_debug("object->use_count = %d\n",
1418				 atomic_read(&object->use_count));
1419			dump_object_info(object);
1420		}
1421#endif
1422		/* reset the reference count (whiten the object) */
1423		object->count = 0;
1424		if (color_gray(object) && get_object(object))
1425			list_add_tail(&object->gray_list, &gray_list);
1426
1427		spin_unlock_irqrestore(&object->lock, flags);
1428	}
1429	rcu_read_unlock();
1430
1431#ifdef CONFIG_SMP
1432	/* per-cpu sections scanning */
1433	for_each_possible_cpu(i)
1434		scan_large_block(__per_cpu_start + per_cpu_offset(i),
1435				 __per_cpu_end + per_cpu_offset(i));
1436#endif
1437
1438	/*
1439	 * Struct page scanning for each node.
1440	 */
1441	get_online_mems();
1442	for_each_online_node(i) {
1443		unsigned long start_pfn = node_start_pfn(i);
1444		unsigned long end_pfn = node_end_pfn(i);
1445		unsigned long pfn;
1446
1447		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1448			struct page *page = pfn_to_online_page(pfn);
1449
1450			if (!page)
1451				continue;
1452
1453			/* only scan pages belonging to this node */
1454			if (page_to_nid(page) != i)
1455				continue;
1456			/* only scan if page is in use */
1457			if (page_count(page) == 0)
1458				continue;
1459			scan_block(page, page + 1, NULL);
1460			if (!(pfn & 63))
1461				cond_resched();
1462		}
1463	}
1464	put_online_mems();
1465
1466	/*
1467	 * Scanning the task stacks (may introduce false negatives).
1468	 */
1469	if (kmemleak_stack_scan) {
1470		struct task_struct *p, *g;
1471
1472		read_lock(&tasklist_lock);
1473		do_each_thread(g, p) {
1474			void *stack = try_get_task_stack(p);
1475			if (stack) {
1476				scan_block(stack, stack + THREAD_SIZE, NULL);
1477				put_task_stack(p);
1478			}
1479		} while_each_thread(g, p);
1480		read_unlock(&tasklist_lock);
1481	}
1482
1483	/*
1484	 * Scan the objects already referenced from the sections scanned
1485	 * above.
1486	 */
1487	scan_gray_list();
1488
1489	/*
1490	 * Check for new or unreferenced objects modified since the previous
1491	 * scan and color them gray until the next scan.
1492	 */
1493	rcu_read_lock();
1494	list_for_each_entry_rcu(object, &object_list, object_list) {
1495		spin_lock_irqsave(&object->lock, flags);
1496		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1497		    && update_checksum(object) && get_object(object)) {
1498			/* color it gray temporarily */
1499			object->count = object->min_count;
1500			list_add_tail(&object->gray_list, &gray_list);
1501		}
1502		spin_unlock_irqrestore(&object->lock, flags);
1503	}
1504	rcu_read_unlock();
1505
1506	/*
1507	 * Re-scan the gray list for modified unreferenced objects.
1508	 */
1509	scan_gray_list();
1510
1511	/*
1512	 * If scanning was stopped do not report any new unreferenced objects.
1513	 */
1514	if (scan_should_stop())
1515		return;
1516
1517	/*
1518	 * Scanning result reporting.
1519	 */
1520	rcu_read_lock();
1521	list_for_each_entry_rcu(object, &object_list, object_list) {
1522		spin_lock_irqsave(&object->lock, flags);
1523		if (unreferenced_object(object) &&
1524		    !(object->flags & OBJECT_REPORTED)) {
1525			object->flags |= OBJECT_REPORTED;
1526
1527			if (kmemleak_verbose)
1528				print_unreferenced(NULL, object);
1529
1530			new_leaks++;
1531		}
1532		spin_unlock_irqrestore(&object->lock, flags);
1533	}
1534	rcu_read_unlock();
1535
1536	if (new_leaks) {
1537		kmemleak_found_leaks = true;
1538
1539		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1540			new_leaks);
1541	}
1542
1543}
1544
1545/*
1546 * Thread function performing automatic memory scanning. Unreferenced objects
1547 * at the end of a memory scan are reported but only the first time.
1548 */
1549static int kmemleak_scan_thread(void *arg)
1550{
1551	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1552
1553	pr_info("Automatic memory scanning thread started\n");
1554	set_user_nice(current, 10);
1555
1556	/*
1557	 * Wait before the first scan to allow the system to fully initialize.
1558	 */
1559	if (first_run) {
1560		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1561		first_run = 0;
1562		while (timeout && !kthread_should_stop())
1563			timeout = schedule_timeout_interruptible(timeout);
1564	}
1565
1566	while (!kthread_should_stop()) {
1567		signed long timeout = jiffies_scan_wait;
1568
1569		mutex_lock(&scan_mutex);
1570		kmemleak_scan();
1571		mutex_unlock(&scan_mutex);
1572
1573		/* wait before the next scan */
1574		while (timeout && !kthread_should_stop())
1575			timeout = schedule_timeout_interruptible(timeout);
1576	}
1577
1578	pr_info("Automatic memory scanning thread ended\n");
1579
1580	return 0;
1581}
1582
1583/*
1584 * Start the automatic memory scanning thread. This function must be called
1585 * with the scan_mutex held.
1586 */
1587static void start_scan_thread(void)
1588{
1589	if (scan_thread)
1590		return;
1591	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1592	if (IS_ERR(scan_thread)) {
1593		pr_warn("Failed to create the scan thread\n");
1594		scan_thread = NULL;
1595	}
1596}
1597
1598/*
1599 * Stop the automatic memory scanning thread.
1600 */
1601static void stop_scan_thread(void)
1602{
1603	if (scan_thread) {
1604		kthread_stop(scan_thread);
1605		scan_thread = NULL;
1606	}
1607}
1608
1609/*
1610 * Iterate over the object_list and return the first valid object at or after
1611 * the required position with its use_count incremented. The function triggers
1612 * a memory scan when the pos argument points to the first position.
1613 */
1614static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1615{
1616	struct kmemleak_object *object;
1617	loff_t n = *pos;
1618	int err;
1619
1620	err = mutex_lock_interruptible(&scan_mutex);
1621	if (err < 0)
1622		return ERR_PTR(err);
1623
1624	rcu_read_lock();
1625	list_for_each_entry_rcu(object, &object_list, object_list) {
1626		if (n-- > 0)
1627			continue;
1628		if (get_object(object))
1629			goto out;
1630	}
1631	object = NULL;
1632out:
1633	return object;
1634}
1635
1636/*
1637 * Return the next object in the object_list. The function decrements the
1638 * use_count of the previous object and increases that of the next one.
1639 */
1640static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1641{
1642	struct kmemleak_object *prev_obj = v;
1643	struct kmemleak_object *next_obj = NULL;
1644	struct kmemleak_object *obj = prev_obj;
1645
1646	++(*pos);
1647
1648	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1649		if (get_object(obj)) {
1650			next_obj = obj;
1651			break;
1652		}
1653	}
1654
1655	put_object(prev_obj);
1656	return next_obj;
1657}
1658
1659/*
1660 * Decrement the use_count of the last object required, if any.
1661 */
1662static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1663{
1664	if (!IS_ERR(v)) {
1665		/*
1666		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1667		 * waiting was interrupted, so only release it if !IS_ERR.
1668		 */
1669		rcu_read_unlock();
1670		mutex_unlock(&scan_mutex);
1671		if (v)
1672			put_object(v);
1673	}
1674}
1675
1676/*
1677 * Print the information for an unreferenced object to the seq file.
1678 */
1679static int kmemleak_seq_show(struct seq_file *seq, void *v)
1680{
1681	struct kmemleak_object *object = v;
1682	unsigned long flags;
1683
1684	spin_lock_irqsave(&object->lock, flags);
1685	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1686		print_unreferenced(seq, object);
1687	spin_unlock_irqrestore(&object->lock, flags);
1688	return 0;
1689}
1690
1691static const struct seq_operations kmemleak_seq_ops = {
1692	.start = kmemleak_seq_start,
1693	.next  = kmemleak_seq_next,
1694	.stop  = kmemleak_seq_stop,
1695	.show  = kmemleak_seq_show,
1696};
1697
1698static int kmemleak_open(struct inode *inode, struct file *file)
1699{
1700	return seq_open(file, &kmemleak_seq_ops);
1701}
1702
1703static int dump_str_object_info(const char *str)
1704{
1705	unsigned long flags;
1706	struct kmemleak_object *object;
1707	unsigned long addr;
1708
1709	if (kstrtoul(str, 0, &addr))
1710		return -EINVAL;
1711	object = find_and_get_object(addr, 0);
1712	if (!object) {
1713		pr_info("Unknown object at 0x%08lx\n", addr);
1714		return -EINVAL;
1715	}
1716
1717	spin_lock_irqsave(&object->lock, flags);
1718	dump_object_info(object);
1719	spin_unlock_irqrestore(&object->lock, flags);
1720
1721	put_object(object);
1722	return 0;
1723}
1724
1725/*
1726 * We use grey instead of black to ensure we can do future scans on the same
1727 * objects. If we did not do future scans, these black objects could
1728 * potentially contain references to newly allocated objects in the future and
1729 * we'd end up with false positives.
1730 */
1731static void kmemleak_clear(void)
1732{
1733	struct kmemleak_object *object;
1734	unsigned long flags;
1735
1736	rcu_read_lock();
1737	list_for_each_entry_rcu(object, &object_list, object_list) {
1738		spin_lock_irqsave(&object->lock, flags);
1739		if ((object->flags & OBJECT_REPORTED) &&
1740		    unreferenced_object(object))
1741			__paint_it(object, KMEMLEAK_GREY);
1742		spin_unlock_irqrestore(&object->lock, flags);
1743	}
1744	rcu_read_unlock();
1745
1746	kmemleak_found_leaks = false;
1747}
1748
1749static void __kmemleak_do_cleanup(void);
1750
1751/*
1752 * File write operation to configure kmemleak at run-time. The following
1753 * commands can be written to the /sys/kernel/debug/kmemleak file:
1754 *   off	- disable kmemleak (irreversible)
1755 *   stack=on	- enable the task stacks scanning
1756 *   stack=off	- disable the task stacks scanning
1757 *   scan=on	- start the automatic memory scanning thread
1758 *   scan=off	- stop the automatic memory scanning thread
1759 *   scan=...	- set the automatic memory scanning period in seconds (0 to
1760 *		  disable it)
1761 *   scan	- trigger a memory scan
1762 *   clear	- mark all current reported unreferenced kmemleak objects as
1763 *		  grey to ignore printing them, or free all kmemleak objects
1764 *		  if kmemleak has been disabled.
1765 *   dump=...	- dump information about the object found at the given address
1766 */
1767static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1768			      size_t size, loff_t *ppos)
1769{
1770	char buf[64];
1771	int buf_size;
1772	int ret;
1773
1774	buf_size = min(size, (sizeof(buf) - 1));
1775	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1776		return -EFAULT;
1777	buf[buf_size] = 0;
1778
1779	ret = mutex_lock_interruptible(&scan_mutex);
1780	if (ret < 0)
1781		return ret;
1782
1783	if (strncmp(buf, "clear", 5) == 0) {
1784		if (kmemleak_enabled)
1785			kmemleak_clear();
1786		else
1787			__kmemleak_do_cleanup();
1788		goto out;
1789	}
1790
1791	if (!kmemleak_enabled) {
1792		ret = -EPERM;
1793		goto out;
1794	}
1795
1796	if (strncmp(buf, "off", 3) == 0)
1797		kmemleak_disable();
1798	else if (strncmp(buf, "stack=on", 8) == 0)
1799		kmemleak_stack_scan = 1;
1800	else if (strncmp(buf, "stack=off", 9) == 0)
1801		kmemleak_stack_scan = 0;
1802	else if (strncmp(buf, "scan=on", 7) == 0)
1803		start_scan_thread();
1804	else if (strncmp(buf, "scan=off", 8) == 0)
1805		stop_scan_thread();
1806	else if (strncmp(buf, "scan=", 5) == 0) {
1807		unsigned long secs;
1808
1809		ret = kstrtoul(buf + 5, 0, &secs);
1810		if (ret < 0)
1811			goto out;
1812		stop_scan_thread();
1813		if (secs) {
1814			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1815			start_scan_thread();
1816		}
1817	} else if (strncmp(buf, "scan", 4) == 0)
1818		kmemleak_scan();
1819	else if (strncmp(buf, "dump=", 5) == 0)
1820		ret = dump_str_object_info(buf + 5);
1821	else
1822		ret = -EINVAL;
1823
1824out:
1825	mutex_unlock(&scan_mutex);
1826	if (ret < 0)
1827		return ret;
1828
1829	/* ignore the rest of the buffer, only one command at a time */
1830	*ppos += size;
1831	return size;
1832}
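/*
 * Editor's note: an illustrative user-space session exercising the commands
 * parsed above (command names and the debugfs path are taken from the
 * comment before kmemleak_write(); this block is not part of the original
 * file):
 *
 *	# echo scan > /sys/kernel/debug/kmemleak	trigger one scan now
 *	# cat /sys/kernel/debug/kmemleak		list unreferenced objects
 *	# echo clear > /sys/kernel/debug/kmemleak	grey out current reports
 *	# echo scan=600 > /sys/kernel/debug/kmemleak	re-scan every 600 seconds
 *	# echo off > /sys/kernel/debug/kmemleak		disable kmemleak (irreversible)
 */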
1833
1834static const struct file_operations kmemleak_fops = {
1835	.owner		= THIS_MODULE,
1836	.open		= kmemleak_open,
1837	.read		= seq_read,
1838	.write		= kmemleak_write,
1839	.llseek		= seq_lseek,
1840	.release	= seq_release,
1841};
1842
1843static void __kmemleak_do_cleanup(void)
1844{
1845	struct kmemleak_object *object, *tmp;
1846
1847	/*
1848	 * Kmemleak has already been disabled, no need for RCU list traversal
1849	 * or kmemleak_lock held.
1850	 */
1851	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
1852		__remove_object(object);
1853		__delete_object(object);
1854	}
1855}
1856
1857/*
1858 * Stop the memory scanning thread and free the kmemleak internal objects if
1859 * no memory leaks were found (otherwise, kmemleak may still have some useful
1860 * information on memory leaks).
1861 */
1862static void kmemleak_do_cleanup(struct work_struct *work)
1863{
1864	stop_scan_thread();
1865
1866	mutex_lock(&scan_mutex);
1867	/*
1868	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
1869	 * longer track object freeing. Ordering of the scan thread stopping and
1870	 * the memory accesses below is guaranteed by the kthread_stop()
1871	 * function.
1872	 */
1873	kmemleak_free_enabled = 0;
1874	mutex_unlock(&scan_mutex);
1875
1876	if (!kmemleak_found_leaks)
1877		__kmemleak_do_cleanup();
1878	else
1879		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
1880}
1881
1882static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1883
1884/*
1885 * Disable kmemleak. No memory allocation/freeing will be traced once this
1886 * function is called. Disabling kmemleak is an irreversible operation.
1887 */
1888static void kmemleak_disable(void)
1889{
1890	/* atomically check whether it was already invoked */
1891	if (cmpxchg(&kmemleak_error, 0, 1))
1892		return;
1893
1894	/* stop any memory operation tracing */
1895	kmemleak_enabled = 0;
1896
1897	/* check whether it is too early for a kernel thread */
1898	if (kmemleak_initialized)
1899		schedule_work(&cleanup_work);
1900	else
1901		kmemleak_free_enabled = 0;
1902
1903	pr_info("Kernel memory leak detector disabled\n");
1904}
1905
1906/*
1907 * Allow boot-time kmemleak disabling (enabled by default).
1908 */
1909static int __init kmemleak_boot_config(char *str)
1910{
1911	if (!str)
1912		return -EINVAL;
1913	if (strcmp(str, "off") == 0)
1914		kmemleak_disable();
1915	else if (strcmp(str, "on") == 0)
1916		kmemleak_skip_disable = 1;
1917	else
1918		return -EINVAL;
1919	return 0;
1920}
1921early_param("kmemleak", kmemleak_boot_config);
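/*
 * Editor's note: an illustrative kernel command line matching the handler
 * above (not part of the original file). With CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 * set, "kmemleak=on" sets kmemleak_skip_disable so that kmemleak_init() does
 * not turn the detector off; "kmemleak=off" disables it unconditionally:
 *
 *	console=ttyS0 root=/dev/vda kmemleak=on
 */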
1922
1923/*
1924 * Kmemleak initialization.
1925 */
1926void __init kmemleak_init(void)
1927{
1928#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1929	if (!kmemleak_skip_disable) {
1930		kmemleak_disable();
1931		return;
1932	}
1933#endif
1934
1935	if (kmemleak_error)
1936		return;
1937
1938	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1939	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1940
1941	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1942	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1943
1944	/* register the data/bss sections */
1945	create_object((unsigned long)_sdata, _edata - _sdata,
1946		      KMEMLEAK_GREY, GFP_ATOMIC);
1947	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
1948		      KMEMLEAK_GREY, GFP_ATOMIC);
1949	/* only register .data..ro_after_init if not within .data */
1950	if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
1951		create_object((unsigned long)__start_ro_after_init,
1952			      __end_ro_after_init - __start_ro_after_init,
1953			      KMEMLEAK_GREY, GFP_ATOMIC);
1954}
1955
1956/*
1957 * Late initialization function.
1958 */
1959static int __init kmemleak_late_init(void)
1960{
1961	kmemleak_initialized = 1;
1962
1963	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
1964
1965	if (kmemleak_error) {
1966		/*
1967		 * Some error occurred and kmemleak was disabled. There is a
1968		 * small chance that kmemleak_disable() was called immediately
1969		 * after setting kmemleak_initialized and we may end up with
 1970		 * two clean-up threads, though they are serialized by scan_mutex.
1971		 */
1972		schedule_work(&cleanup_work);
1973		return -ENOMEM;
1974	}
1975
1976	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
1977		mutex_lock(&scan_mutex);
1978		start_scan_thread();
1979		mutex_unlock(&scan_mutex);
1980	}
1981
1982	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
1983		mem_pool_free_count);
1984
1985	return 0;
1986}
1987late_initcall(kmemleak_late_init);
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * mm/kmemleak.c
   4 *
   5 * Copyright (C) 2008 ARM Limited
   6 * Written by Catalin Marinas <catalin.marinas@arm.com>
   7 *
   8 * For more information on the algorithm and kmemleak usage, please see
   9 * Documentation/dev-tools/kmemleak.rst.
  10 *
  11 * Notes on locking
  12 * ----------------
  13 *
  14 * The following locks and mutexes are used by kmemleak:
  15 *
  16 * - kmemleak_lock (raw_spinlock_t): protects the object_list as well as
  17 *   del_state modifications and accesses to the object trees
  18 *   (object_tree_root, object_phys_tree_root, object_percpu_tree_root). The
  19 *   object_list is the main list holding the metadata (struct
  20 *   kmemleak_object) for the allocated memory blocks. The object trees are
  21 *   red black trees used to look-up metadata based on a pointer to the
  22 *   corresponding memory block. The kmemleak_object structures are added to
  23 *   the object_list and the object tree root in the create_object() function
  24 *   called from the kmemleak_alloc{,_phys,_percpu}() callback and removed in
  25 *   delete_object() called from the kmemleak_free{,_phys,_percpu}() callback
  26 * - kmemleak_object.lock (raw_spinlock_t): protects a kmemleak_object.
  27 *   Accesses to the metadata (e.g. count) are protected by this lock. Note
  28 *   that some members of this structure may be protected by other means
  29 *   (atomic or kmemleak_lock). This lock is also held when scanning the
  30 *   corresponding memory block to avoid the kernel freeing it via the
  31 *   kmemleak_free() callback. This is less heavyweight than holding a global
  32 *   lock like kmemleak_lock during scanning.
  33 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
  34 *   unreferenced objects at a time. The gray_list contains the objects which
  35 *   are already referenced or marked as false positives and need to be
  36 *   scanned. This list is only modified during a scanning episode when the
  37 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
  38 *   Note that the kmemleak_object.use_count is incremented when an object is
  39 *   added to the gray_list and therefore cannot be freed. This mutex also
  40 *   prevents multiple users of the "kmemleak" debugfs file together with
  41 *   modifications to the memory scanning parameters including the scan_thread
  42 *   pointer
  43 *
  44 * Locks and mutexes are acquired/nested in the following order:
  45 *
  46 *   scan_mutex [-> object->lock] -> kmemleak_lock -> other_object->lock (SINGLE_DEPTH_NESTING)
  47 *
  48 * No kmemleak_lock and object->lock nesting is allowed outside scan_mutex
  49 * regions.
  50 *
  51 * The kmemleak_object structures have a use_count incremented or decremented
  52 * using the get_object()/put_object() functions. When the use_count becomes
  53 * 0, this count can no longer be incremented and put_object() schedules the
  54 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
  55 * function must be protected by rcu_read_lock() to avoid accessing a freed
  56 * structure.
  57 */
  58
  59#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  60
  61#include <linux/init.h>
  62#include <linux/kernel.h>
  63#include <linux/list.h>
  64#include <linux/sched/signal.h>
  65#include <linux/sched/task.h>
  66#include <linux/sched/task_stack.h>
  67#include <linux/jiffies.h>
  68#include <linux/delay.h>
  69#include <linux/export.h>
  70#include <linux/kthread.h>
  71#include <linux/rbtree.h>
  72#include <linux/fs.h>
  73#include <linux/debugfs.h>
  74#include <linux/seq_file.h>
  75#include <linux/cpumask.h>
  76#include <linux/spinlock.h>
  77#include <linux/module.h>
  78#include <linux/mutex.h>
  79#include <linux/rcupdate.h>
  80#include <linux/stacktrace.h>
  81#include <linux/stackdepot.h>
  82#include <linux/cache.h>
  83#include <linux/percpu.h>
  84#include <linux/memblock.h>
  85#include <linux/pfn.h>
  86#include <linux/mmzone.h>
  87#include <linux/slab.h>
  88#include <linux/thread_info.h>
  89#include <linux/err.h>
  90#include <linux/uaccess.h>
  91#include <linux/string.h>
  92#include <linux/nodemask.h>
  93#include <linux/mm.h>
  94#include <linux/workqueue.h>
  95#include <linux/crc32.h>
  96
  97#include <asm/sections.h>
  98#include <asm/processor.h>
  99#include <linux/atomic.h>
 100
 101#include <linux/kasan.h>
 102#include <linux/kfence.h>
 103#include <linux/kmemleak.h>
 104#include <linux/memory_hotplug.h>
 105
 106/*
 107 * Kmemleak configuration and common defines.
 108 */
 109#define MAX_TRACE		16	/* stack trace length */
 110#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 111#define SECS_FIRST_SCAN		60	/* delay before the first scan */
 112#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
 113#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
 114
 115#define BYTES_PER_POINTER	sizeof(void *)
 116
 117/* scanning area inside a memory block */
 118struct kmemleak_scan_area {
 119	struct hlist_node node;
 120	unsigned long start;
 121	size_t size;
 122};
 123
 124#define KMEMLEAK_GREY	0
 125#define KMEMLEAK_BLACK	-1
 126
 127/*
 128 * Structure holding the metadata for each allocated memory block.
 129 * Modifications to such objects should be made while holding the
 130 * object->lock. Insertions or deletions from object_list, gray_list or
 131 * rb_node are already protected by the corresponding locks or mutex (see
 132 * the notes on locking above). These objects are reference-counted
 133 * (use_count) and freed using the RCU mechanism.
 134 */
 135struct kmemleak_object {
 136	raw_spinlock_t lock;
 137	unsigned int flags;		/* object status flags */
 138	struct list_head object_list;
 139	struct list_head gray_list;
 140	struct rb_node rb_node;
 141	struct rcu_head rcu;		/* object_list lockless traversal */
 142	/* object usage count; object freed when use_count == 0 */
 143	atomic_t use_count;
 144	unsigned int del_state;		/* deletion state */
 145	unsigned long pointer;
 146	size_t size;
 147	/* pass surplus references to this pointer */
 148	unsigned long excess_ref;
  149	/* minimum number of pointers found before it is considered a leak */
 150	int min_count;
 151	/* the total number of pointers found pointing to this object */
 152	int count;
 153	/* checksum for detecting modified objects */
 154	u32 checksum;
 155	depot_stack_handle_t trace_handle;
 156	/* memory ranges to be scanned inside an object (empty for all) */
 157	struct hlist_head area_list;
 158	unsigned long jiffies;		/* creation timestamp */
 159	pid_t pid;			/* pid of the current task */
 160	char comm[TASK_COMM_LEN];	/* executable name */
 161};
 162
 163/* flag representing the memory block allocation status */
 164#define OBJECT_ALLOCATED	(1 << 0)
  166/* flag set after the first reporting of an unreferenced object */
 166#define OBJECT_REPORTED		(1 << 1)
 167/* flag set to not scan the object */
 168#define OBJECT_NO_SCAN		(1 << 2)
 169/* flag set to fully scan the object when scan_area allocation failed */
 170#define OBJECT_FULL_SCAN	(1 << 3)
 171/* flag set for object allocated with physical address */
 172#define OBJECT_PHYS		(1 << 4)
 173/* flag set for per-CPU pointers */
 174#define OBJECT_PERCPU		(1 << 5)
 175
 176/* set when __remove_object() called */
 177#define DELSTATE_REMOVED	(1 << 0)
 178/* set to temporarily prevent deletion from object_list */
 179#define DELSTATE_NO_DELETE	(1 << 1)
 180
 181#define HEX_PREFIX		"    "
 182/* number of bytes to print per line; must be 16 or 32 */
 183#define HEX_ROW_SIZE		16
 184/* number of bytes to print at a time (1, 2, 4, 8) */
 185#define HEX_GROUP_SIZE		1
 186/* include ASCII after the hex output */
 187#define HEX_ASCII		1
 188/* max number of lines to be printed */
 189#define HEX_MAX_LINES		2
 190
 191/* the list of all allocated objects */
 192static LIST_HEAD(object_list);
 193/* the list of gray-colored objects (see color_gray comment below) */
 194static LIST_HEAD(gray_list);
 195/* memory pool allocation */
 196static struct kmemleak_object mem_pool[CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE];
 197static int mem_pool_free_count = ARRAY_SIZE(mem_pool);
 198static LIST_HEAD(mem_pool_free_list);
 199/* search tree for object boundaries */
 200static struct rb_root object_tree_root = RB_ROOT;
 201/* search tree for object (with OBJECT_PHYS flag) boundaries */
 202static struct rb_root object_phys_tree_root = RB_ROOT;
 203/* search tree for object (with OBJECT_PERCPU flag) boundaries */
 204static struct rb_root object_percpu_tree_root = RB_ROOT;
  205/* protecting the access to object_list and the object trees (virtual, phys, per-CPU) */
 206static DEFINE_RAW_SPINLOCK(kmemleak_lock);
 207
 208/* allocation caches for kmemleak internal data */
 209static struct kmem_cache *object_cache;
 210static struct kmem_cache *scan_area_cache;
 211
 212/* set if tracing memory operations is enabled */
 213static int kmemleak_enabled = 1;
 214/* same as above but only for the kmemleak_free() callback */
 215static int kmemleak_free_enabled = 1;
 216/* set in the late_initcall if there were no errors */
 217static int kmemleak_late_initialized;
 218/* set if a kmemleak warning was issued */
 219static int kmemleak_warning;
 220/* set if a fatal kmemleak error has occurred */
 221static int kmemleak_error;
 222
 223/* minimum and maximum address that may be valid pointers */
 224static unsigned long min_addr = ULONG_MAX;
 225static unsigned long max_addr;
 226
 227/* minimum and maximum address that may be valid per-CPU pointers */
 228static unsigned long min_percpu_addr = ULONG_MAX;
 229static unsigned long max_percpu_addr;
 230
 231static struct task_struct *scan_thread;
 232/* used to avoid reporting of recently allocated objects */
 233static unsigned long jiffies_min_age;
 234static unsigned long jiffies_last_scan;
 235/* delay between automatic memory scannings */
 236static unsigned long jiffies_scan_wait;
 237/* enables or disables the task stacks scanning */
 238static int kmemleak_stack_scan = 1;
 239/* protects the memory scanning, parameters and debug/kmemleak file access */
 240static DEFINE_MUTEX(scan_mutex);
  241/* setting kmemleak=on will set this var, skipping the disable */
 242static int kmemleak_skip_disable;
 243/* If there are leaks that can be reported */
 244static bool kmemleak_found_leaks;
 245
 246static bool kmemleak_verbose;
 247module_param_named(verbose, kmemleak_verbose, bool, 0600);
 248
 249static void kmemleak_disable(void);
 250
 251/*
 252 * Print a warning and dump the stack trace.
 253 */
 254#define kmemleak_warn(x...)	do {		\
 255	pr_warn(x);				\
 256	dump_stack();				\
 257	kmemleak_warning = 1;			\
 258} while (0)
 259
 260/*
 261 * Macro invoked when a serious kmemleak condition occurred and cannot be
 262 * recovered from. Kmemleak will be disabled and further allocation/freeing
 263 * tracing no longer available.
 264 */
 265#define kmemleak_stop(x...)	do {	\
 266	kmemleak_warn(x);		\
 267	kmemleak_disable();		\
 268} while (0)
 269
 270#define warn_or_seq_printf(seq, fmt, ...)	do {	\
 271	if (seq)					\
 272		seq_printf(seq, fmt, ##__VA_ARGS__);	\
 273	else						\
 274		pr_warn(fmt, ##__VA_ARGS__);		\
 275} while (0)
 276
 277static void warn_or_seq_hex_dump(struct seq_file *seq, int prefix_type,
 278				 int rowsize, int groupsize, const void *buf,
 279				 size_t len, bool ascii)
 280{
 281	if (seq)
 282		seq_hex_dump(seq, HEX_PREFIX, prefix_type, rowsize, groupsize,
 283			     buf, len, ascii);
 284	else
 285		print_hex_dump(KERN_WARNING, pr_fmt(HEX_PREFIX), prefix_type,
 286			       rowsize, groupsize, buf, len, ascii);
 287}
 288
 289/*
 290 * Printing of the objects hex dump to the seq file. The number of lines to be
 291 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 292 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 293 * with the object->lock held.
 294 */
 295static void hex_dump_object(struct seq_file *seq,
 296			    struct kmemleak_object *object)
 297{
 298	const u8 *ptr = (const u8 *)object->pointer;
 299	size_t len;
 300
 301	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
 302		return;
 303
 304	if (object->flags & OBJECT_PERCPU)
 305		ptr = (const u8 *)this_cpu_ptr((void __percpu *)object->pointer);
 306
 307	/* limit the number of lines to HEX_MAX_LINES */
 308	len = min_t(size_t, object->size, HEX_MAX_LINES * HEX_ROW_SIZE);
 309
 310	if (object->flags & OBJECT_PERCPU)
 311		warn_or_seq_printf(seq, "  hex dump (first %zu bytes on cpu %d):\n",
 312				   len, raw_smp_processor_id());
 313	else
 314		warn_or_seq_printf(seq, "  hex dump (first %zu bytes):\n", len);
 315	kasan_disable_current();
 316	warn_or_seq_hex_dump(seq, DUMP_PREFIX_NONE, HEX_ROW_SIZE,
 317			     HEX_GROUP_SIZE, kasan_reset_tag((void *)ptr), len, HEX_ASCII);
 318	kasan_enable_current();
 319}
 320
 321/*
 322 * Object colors, encoded with count and min_count:
 323 * - white - orphan object, not enough references to it (count < min_count)
 324 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 325 *		sufficient references to it (count >= min_count)
 326 * - black - ignore, it doesn't contain references (e.g. text section)
 327 *		(min_count == -1). No function defined for this color.
  328 * Newly created objects start out white (object->count == 0, see
  329 * __alloc_object()) and may only be reported after the next memory scan.
 330 */
 331static bool color_white(const struct kmemleak_object *object)
 332{
 333	return object->count != KMEMLEAK_BLACK &&
 334		object->count < object->min_count;
 335}
 336
 337static bool color_gray(const struct kmemleak_object *object)
 338{
 339	return object->min_count != KMEMLEAK_BLACK &&
 340		object->count >= object->min_count;
 341}
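#if 0
/*
 * Editor's illustrative sketch (not part of the original file): how the two
 * helpers above classify an object as it gains references during a scan and
 * after it is painted black.
 */
static void color_example(void)
{
	struct kmemleak_object obj = { .min_count = 1, .count = 0 };

	WARN_ON(!color_white(&obj));	/* 0 < 1: white, potential leak */
	obj.count++;			/* one pointer to the block was found */
	WARN_ON(!color_gray(&obj));	/* 1 >= 1: gray, sufficiently referenced */
	obj.min_count = KMEMLEAK_BLACK;	/* painted black: ignored */
	WARN_ON(color_white(&obj) || color_gray(&obj));
}
#endif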
 342
 343/*
 344 * Objects are considered unreferenced only if their color is white, they have
  345 * not been deleted and have a minimum age to avoid false positives caused by
 346 * pointers temporarily stored in CPU registers.
 347 */
 348static bool unreferenced_object(struct kmemleak_object *object)
 349{
 350	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
 351		time_before_eq(object->jiffies + jiffies_min_age,
 352			       jiffies_last_scan);
 353}
 354
 355/*
 356 * Printing of the unreferenced objects information to the seq file. The
 357 * print_unreferenced function must be called with the object->lock held.
 358 */
 359static void print_unreferenced(struct seq_file *seq,
 360			       struct kmemleak_object *object)
 361{
 362	int i;
 363	unsigned long *entries;
 364	unsigned int nr_entries;
 365
 366	nr_entries = stack_depot_fetch(object->trace_handle, &entries);
 367	warn_or_seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 368			  object->pointer, object->size);
 369	warn_or_seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu\n",
 370			   object->comm, object->pid, object->jiffies);
 371	hex_dump_object(seq, object);
 372	warn_or_seq_printf(seq, "  backtrace (crc %x):\n", object->checksum);
 373
 374	for (i = 0; i < nr_entries; i++) {
 375		void *ptr = (void *)entries[i];
 376		warn_or_seq_printf(seq, "    %pS\n", ptr);
 377	}
 378}
 379
 380/*
 381 * Print the kmemleak_object information. This function is used mainly for
  382 * debugging special cases of kmemleak operations. It must be called with
 383 * the object->lock held.
 384 */
 385static void dump_object_info(struct kmemleak_object *object)
 386{
 387	pr_notice("Object 0x%08lx (size %zu):\n",
 388			object->pointer, object->size);
 389	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
 390			object->comm, object->pid, object->jiffies);
 391	pr_notice("  min_count = %d\n", object->min_count);
 392	pr_notice("  count = %d\n", object->count);
 393	pr_notice("  flags = 0x%x\n", object->flags);
 394	pr_notice("  checksum = %u\n", object->checksum);
 395	pr_notice("  backtrace:\n");
 396	if (object->trace_handle)
 397		stack_depot_print(object->trace_handle);
 398}
 399
 400static struct rb_root *object_tree(unsigned long objflags)
 401{
 402	if (objflags & OBJECT_PHYS)
 403		return &object_phys_tree_root;
 404	if (objflags & OBJECT_PERCPU)
 405		return &object_percpu_tree_root;
 406	return &object_tree_root;
 407}
 408
 409/*
  410 * Look up a memory block's metadata (kmemleak_object) in the object search
 411 * tree based on a pointer value. If alias is 0, only values pointing to the
 412 * beginning of the memory block are allowed. The kmemleak_lock must be held
 413 * when calling this function.
 414 */
 415static struct kmemleak_object *__lookup_object(unsigned long ptr, int alias,
 416					       unsigned int objflags)
 417{
 418	struct rb_node *rb = object_tree(objflags)->rb_node;
 419	unsigned long untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 420
 421	while (rb) {
 422		struct kmemleak_object *object;
 423		unsigned long untagged_objp;
 424
 425		object = rb_entry(rb, struct kmemleak_object, rb_node);
 426		untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
 427
 428		if (untagged_ptr < untagged_objp)
 429			rb = object->rb_node.rb_left;
 430		else if (untagged_objp + object->size <= untagged_ptr)
 431			rb = object->rb_node.rb_right;
 432		else if (untagged_objp == untagged_ptr || alias)
 433			return object;
 434		else {
 435			kmemleak_warn("Found object by alias at 0x%08lx\n",
 436				      ptr);
 437			dump_object_info(object);
 438			break;
 439		}
 440	}
 441	return NULL;
 442}
 443
  444/* Look up a kmemleak object which was allocated with a virtual address. */
 445static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 446{
 447	return __lookup_object(ptr, alias, 0);
 448}
 449
 450/*
 451 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
  452 * that once an object's use_count has reached 0, the RCU freeing was already
 453 * registered and the object should no longer be used. This function must be
 454 * called under the protection of rcu_read_lock().
 455 */
 456static int get_object(struct kmemleak_object *object)
 457{
 458	return atomic_inc_not_zero(&object->use_count);
 459}
 460
 461/*
 462 * Memory pool allocation and freeing. kmemleak_lock must not be held.
 463 */
 464static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
 465{
 466	unsigned long flags;
 467	struct kmemleak_object *object;
 468
 469	/* try the slab allocator first */
 470	if (object_cache) {
 471		object = kmem_cache_alloc_noprof(object_cache,
 472						 gfp_nested_mask(gfp));
 473		if (object)
 474			return object;
 475	}
 476
 477	/* slab allocation failed, try the memory pool */
 478	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 479	object = list_first_entry_or_null(&mem_pool_free_list,
 480					  typeof(*object), object_list);
 481	if (object)
 482		list_del(&object->object_list);
 483	else if (mem_pool_free_count)
 484		object = &mem_pool[--mem_pool_free_count];
 485	else
 486		pr_warn_once("Memory pool empty, consider increasing CONFIG_DEBUG_KMEMLEAK_MEM_POOL_SIZE\n");
 487	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 488
 489	return object;
 490}
 491
 492/*
 493 * Return the object to either the slab allocator or the memory pool.
 494 */
 495static void mem_pool_free(struct kmemleak_object *object)
 496{
 497	unsigned long flags;
 498
 499	if (object < mem_pool || object >= mem_pool + ARRAY_SIZE(mem_pool)) {
 500		kmem_cache_free(object_cache, object);
 501		return;
 502	}
 503
 504	/* add the object to the memory pool free list */
 505	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 506	list_add(&object->object_list, &mem_pool_free_list);
 507	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 508}
 509
 510/*
 511 * RCU callback to free a kmemleak_object.
 512 */
 513static void free_object_rcu(struct rcu_head *rcu)
 514{
 515	struct hlist_node *tmp;
 516	struct kmemleak_scan_area *area;
 517	struct kmemleak_object *object =
 518		container_of(rcu, struct kmemleak_object, rcu);
 519
 520	/*
 521	 * Once use_count is 0 (guaranteed by put_object), there is no other
 522	 * code accessing this object, hence no need for locking.
 523	 */
 524	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
 525		hlist_del(&area->node);
 526		kmem_cache_free(scan_area_cache, area);
 527	}
 528	mem_pool_free(object);
 529}
 530
 531/*
 532 * Decrement the object use_count. Once the count is 0, free the object using
 533 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 534 * delete_object() path, the delayed RCU freeing ensures that there is no
 535 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 536 * is also possible.
 537 */
 538static void put_object(struct kmemleak_object *object)
 539{
 540	if (!atomic_dec_and_test(&object->use_count))
 541		return;
 542
 543	/* should only get here after delete_object was called */
 544	WARN_ON(object->flags & OBJECT_ALLOCATED);
 545
 546	/*
 547	 * It may be too early for the RCU callbacks, however, there is no
 548	 * concurrent object_list traversal when !object_cache and all objects
 549	 * came from the memory pool. Free the object directly.
 550	 */
 551	if (object_cache)
 552		call_rcu(&object->rcu, free_object_rcu);
 553	else
 554		free_object_rcu(&object->rcu);
 555}
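#if 0
/*
 * Editor's illustrative sketch (not part of the original file): the lookup
 * pattern implied by the comments above. get_object() may only be called
 * under rcu_read_lock() and every successful get must be balanced by a
 * put_object(), which defers the actual freeing to an RCU callback.
 */
static void walk_objects_example(void)
{
	struct kmemleak_object *object;

	rcu_read_lock();
	list_for_each_entry_rcu(object, &object_list, object_list) {
		if (!get_object(object))
			continue;	/* already queued for RCU freeing */
		/* ... inspect *object, taking object->lock as needed ... */
		put_object(object);
	}
	rcu_read_unlock();
}
#endif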
 556
 557/*
 558 * Look up an object in the object search tree and increase its use_count.
 559 */
 560static struct kmemleak_object *__find_and_get_object(unsigned long ptr, int alias,
 561						     unsigned int objflags)
 562{
 563	unsigned long flags;
 564	struct kmemleak_object *object;
 565
 566	rcu_read_lock();
 567	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 568	object = __lookup_object(ptr, alias, objflags);
 569	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 570
 571	/* check whether the object is still available */
 572	if (object && !get_object(object))
 573		object = NULL;
 574	rcu_read_unlock();
 575
 576	return object;
 577}
 578
  579/* Look up and get an object which was allocated with a virtual address. */
 580static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 581{
 582	return __find_and_get_object(ptr, alias, 0);
 583}
 584
 585/*
 586 * Remove an object from its object tree and object_list. Must be called with
 587 * the kmemleak_lock held _if_ kmemleak is still enabled.
 588 */
 589static void __remove_object(struct kmemleak_object *object)
 590{
 591	rb_erase(&object->rb_node, object_tree(object->flags));
 592	if (!(object->del_state & DELSTATE_NO_DELETE))
 593		list_del_rcu(&object->object_list);
 594	object->del_state |= DELSTATE_REMOVED;
 595}
 596
 597static struct kmemleak_object *__find_and_remove_object(unsigned long ptr,
 598							int alias,
 599							unsigned int objflags)
 600{
 601	struct kmemleak_object *object;
 602
 603	object = __lookup_object(ptr, alias, objflags);
 604	if (object)
 605		__remove_object(object);
 606
 607	return object;
 608}
 609
 610/*
 611 * Look up an object in the object search tree and remove it from both object
 612 * tree root and object_list. The returned object's use_count should be at
 613 * least 1, as initially set by create_object().
 614 */
 615static struct kmemleak_object *find_and_remove_object(unsigned long ptr, int alias,
 616						      unsigned int objflags)
 617{
 618	unsigned long flags;
 619	struct kmemleak_object *object;
 620
 621	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 622	object = __find_and_remove_object(ptr, alias, objflags);
 623	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 624
 625	return object;
 626}
 627
 628static noinline depot_stack_handle_t set_track_prepare(void)
 629{
 630	depot_stack_handle_t trace_handle;
 631	unsigned long entries[MAX_TRACE];
 632	unsigned int nr_entries;
 633
 634	/*
 635	 * Use object_cache to determine whether kmemleak_init() has
 636	 * been invoked. stack_depot_early_init() is called before
 637	 * kmemleak_init() in mm_core_init().
 638	 */
 639	if (!object_cache)
 640		return 0;
 641	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 3);
 642	trace_handle = stack_depot_save(entries, nr_entries, GFP_NOWAIT);
 643
 644	return trace_handle;
 645}
 646
 647static struct kmemleak_object *__alloc_object(gfp_t gfp)
 648{
 649	struct kmemleak_object *object;
 650
 651	object = mem_pool_alloc(gfp);
 652	if (!object) {
 653		pr_warn("Cannot allocate a kmemleak_object structure\n");
 654		kmemleak_disable();
 655		return NULL;
 656	}
 657
 658	INIT_LIST_HEAD(&object->object_list);
 659	INIT_LIST_HEAD(&object->gray_list);
 660	INIT_HLIST_HEAD(&object->area_list);
 661	raw_spin_lock_init(&object->lock);
 662	atomic_set(&object->use_count, 1);
 663	object->excess_ref = 0;
 664	object->count = 0;			/* white color initially */
 665	object->checksum = 0;
 666	object->del_state = 0;
 667
 668	/* task information */
 669	if (in_hardirq()) {
 670		object->pid = 0;
 671		strscpy(object->comm, "hardirq");
 672	} else if (in_serving_softirq()) {
 673		object->pid = 0;
 674		strscpy(object->comm, "softirq");
 675	} else {
 676		object->pid = current->pid;
 677		/*
 678		 * There is a small chance of a race with set_task_comm(),
 679		 * however using get_task_comm() here may cause locking
 680		 * dependency issues with current->alloc_lock. In the worst
 681		 * case, the command line is not correct.
 682		 */
 683		strscpy(object->comm, current->comm);
 684	}
 685
 686	/* kernel backtrace */
 687	object->trace_handle = set_track_prepare();
 688
 689	return object;
 690}
 691
 692static int __link_object(struct kmemleak_object *object, unsigned long ptr,
 693			 size_t size, int min_count, unsigned int objflags)
 694{
 695
 696	struct kmemleak_object *parent;
 697	struct rb_node **link, *rb_parent;
 698	unsigned long untagged_ptr;
 699	unsigned long untagged_objp;
 700
 701	object->flags = OBJECT_ALLOCATED | objflags;
 702	object->pointer = ptr;
 703	object->size = kfence_ksize((void *)ptr) ?: size;
 704	object->min_count = min_count;
 705	object->jiffies = jiffies;
 706
 707	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 708	/*
  709	 * Only update min_addr and max_addr for objects storing a virtual
  710	 * address, and update min_percpu_addr/max_percpu_addr for per-CPU
  711	 * objects.
 712	 */
 713	if (objflags & OBJECT_PERCPU) {
 714		min_percpu_addr = min(min_percpu_addr, untagged_ptr);
 715		max_percpu_addr = max(max_percpu_addr, untagged_ptr + size);
 716	} else if (!(objflags & OBJECT_PHYS)) {
 717		min_addr = min(min_addr, untagged_ptr);
 718		max_addr = max(max_addr, untagged_ptr + size);
 719	}
 720	link = &object_tree(objflags)->rb_node;
 721	rb_parent = NULL;
 722	while (*link) {
 723		rb_parent = *link;
 724		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
 725		untagged_objp = (unsigned long)kasan_reset_tag((void *)parent->pointer);
 726		if (untagged_ptr + size <= untagged_objp)
 727			link = &parent->rb_node.rb_left;
 728		else if (untagged_objp + parent->size <= untagged_ptr)
 729			link = &parent->rb_node.rb_right;
 730		else {
 731			kmemleak_stop("Cannot insert 0x%lx into the object search tree (overlaps existing)\n",
 732				      ptr);
 733			/*
 734			 * No need for parent->lock here since "parent" cannot
 735			 * be freed while the kmemleak_lock is held.
 736			 */
 737			dump_object_info(parent);
 738			return -EEXIST;
 739		}
 740	}
 741	rb_link_node(&object->rb_node, rb_parent, link);
 742	rb_insert_color(&object->rb_node, object_tree(objflags));
 743	list_add_tail_rcu(&object->object_list, &object_list);
 744
 745	return 0;
 746}
 747
 748/*
 749 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 750 * memory block and add it to the object_list and object tree.
 751 */
 752static void __create_object(unsigned long ptr, size_t size,
 753				int min_count, gfp_t gfp, unsigned int objflags)
 754{
 755	struct kmemleak_object *object;
 756	unsigned long flags;
 757	int ret;
 758
 759	object = __alloc_object(gfp);
 760	if (!object)
 761		return;
 762
 763	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 764	ret = __link_object(object, ptr, size, min_count, objflags);
 765	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 766	if (ret)
 767		mem_pool_free(object);
 768}
 769
  770/* Create a kmemleak object for memory allocated with a virtual address. */
 771static void create_object(unsigned long ptr, size_t size,
 772			  int min_count, gfp_t gfp)
 773{
 774	__create_object(ptr, size, min_count, gfp, 0);
 775}
 776
  777/* Create a kmemleak object for memory allocated with a physical address. */
 778static void create_object_phys(unsigned long ptr, size_t size,
 779			       int min_count, gfp_t gfp)
 780{
 781	__create_object(ptr, size, min_count, gfp, OBJECT_PHYS);
 782}
 783
 784/* Create kmemleak object corresponding to a per-CPU allocation. */
 785static void create_object_percpu(unsigned long ptr, size_t size,
 786				 int min_count, gfp_t gfp)
 787{
 788	__create_object(ptr, size, min_count, gfp, OBJECT_PERCPU);
 789}
 790
 791/*
 792 * Mark the object as not allocated and schedule RCU freeing via put_object().
 793 */
 794static void __delete_object(struct kmemleak_object *object)
 795{
 796	unsigned long flags;
 797
 798	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
 799	WARN_ON(atomic_read(&object->use_count) < 1);
 800
 801	/*
 802	 * Locking here also ensures that the corresponding memory block
 803	 * cannot be freed when it is being scanned.
 804	 */
 805	raw_spin_lock_irqsave(&object->lock, flags);
 806	object->flags &= ~OBJECT_ALLOCATED;
 807	raw_spin_unlock_irqrestore(&object->lock, flags);
 808	put_object(object);
 809}
 810
 811/*
 812 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 813 * delete it.
 814 */
 815static void delete_object_full(unsigned long ptr, unsigned int objflags)
 816{
 817	struct kmemleak_object *object;
 818
 819	object = find_and_remove_object(ptr, 0, objflags);
 820	if (!object) {
 821#ifdef DEBUG
 822		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
 823			      ptr);
 824#endif
 825		return;
 826	}
 827	__delete_object(object);
 828}
 829
 830/*
 831 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 832 * delete it. If the memory block is partially freed, the function may create
 833 * additional metadata for the remaining parts of the block.
 834 */
 835static void delete_object_part(unsigned long ptr, size_t size,
 836			       unsigned int objflags)
 837{
 838	struct kmemleak_object *object, *object_l, *object_r;
 839	unsigned long start, end, flags;
 840
 841	object_l = __alloc_object(GFP_KERNEL);
 842	if (!object_l)
 843		return;
 844
 845	object_r = __alloc_object(GFP_KERNEL);
 846	if (!object_r)
 847		goto out;
 848
 849	raw_spin_lock_irqsave(&kmemleak_lock, flags);
 850	object = __find_and_remove_object(ptr, 1, objflags);
 851	if (!object) {
 852#ifdef DEBUG
 853		kmemleak_warn("Partially freeing unknown object at 0x%08lx (size %zu)\n",
 854			      ptr, size);
 855#endif
 856		goto unlock;
 857	}
 858
 859	/*
 860	 * Create one or two objects that may result from the memory block
 861	 * split. Note that partial freeing is only done by free_bootmem() and
 862	 * this happens before kmemleak_init() is called.
 863	 */
 864	start = object->pointer;
 865	end = object->pointer + object->size;
 866	if ((ptr > start) &&
 867	    !__link_object(object_l, start, ptr - start,
 868			   object->min_count, objflags))
 869		object_l = NULL;
 870	if ((ptr + size < end) &&
 871	    !__link_object(object_r, ptr + size, end - ptr - size,
 872			   object->min_count, objflags))
 873		object_r = NULL;
 874
 875unlock:
 876	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
 877	if (object)
 878		__delete_object(object);
 879
 880out:
 881	if (object_l)
 882		mem_pool_free(object_l);
 883	if (object_r)
 884		mem_pool_free(object_r);
 885}
 886
 887static void __paint_it(struct kmemleak_object *object, int color)
 888{
 889	object->min_count = color;
 890	if (color == KMEMLEAK_BLACK)
 891		object->flags |= OBJECT_NO_SCAN;
 892}
 893
 894static void paint_it(struct kmemleak_object *object, int color)
 895{
 896	unsigned long flags;
 897
 898	raw_spin_lock_irqsave(&object->lock, flags);
 899	__paint_it(object, color);
 900	raw_spin_unlock_irqrestore(&object->lock, flags);
 901}
 902
 903static void paint_ptr(unsigned long ptr, int color, unsigned int objflags)
 904{
 905	struct kmemleak_object *object;
 906
 907	object = __find_and_get_object(ptr, 0, objflags);
 908	if (!object) {
 909		kmemleak_warn("Trying to color unknown object at 0x%08lx as %s\n",
 910			      ptr,
 911			      (color == KMEMLEAK_GREY) ? "Grey" :
 912			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
 913		return;
 914	}
 915	paint_it(object, color);
 916	put_object(object);
 917}
 918
 919/*
 920 * Mark an object permanently as gray-colored so that it can no longer be
 921 * reported as a leak. This is used in general to mark a false positive.
 922 */
 923static void make_gray_object(unsigned long ptr)
 924{
 925	paint_ptr(ptr, KMEMLEAK_GREY, 0);
 926}
 927
 928/*
 929 * Mark the object as black-colored so that it is ignored from scans and
 930 * reporting.
 931 */
 932static void make_black_object(unsigned long ptr, unsigned int objflags)
 933{
 934	paint_ptr(ptr, KMEMLEAK_BLACK, objflags);
 935}
 936
 937/*
 938 * Reset the checksum of an object. The immediate effect is that it will not
 939 * be reported as a leak during the next scan until its checksum is updated.
 940 */
 941static void reset_checksum(unsigned long ptr)
 942{
 943	unsigned long flags;
 944	struct kmemleak_object *object;
 945
 946	object = find_and_get_object(ptr, 0);
 947	if (!object) {
 948		kmemleak_warn("Not resetting the checksum of an unknown object at 0x%08lx\n",
 949			      ptr);
 950		return;
 951	}
 952
 953	raw_spin_lock_irqsave(&object->lock, flags);
 954	object->checksum = 0;
 955	raw_spin_unlock_irqrestore(&object->lock, flags);
 956	put_object(object);
 957}
 958
 959/*
 960 * Add a scanning area to the object. If at least one such area is added,
 961 * kmemleak will only scan these ranges rather than the whole memory block.
 962 */
 963static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 964{
 965	unsigned long flags;
 966	struct kmemleak_object *object;
 967	struct kmemleak_scan_area *area = NULL;
 968	unsigned long untagged_ptr;
 969	unsigned long untagged_objp;
 970
 971	object = find_and_get_object(ptr, 1);
 972	if (!object) {
 973		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 974			      ptr);
 975		return;
 976	}
 977
 978	untagged_ptr = (unsigned long)kasan_reset_tag((void *)ptr);
 979	untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
 980
 981	if (scan_area_cache)
 982		area = kmem_cache_alloc_noprof(scan_area_cache,
 983					       gfp_nested_mask(gfp));
 984
 985	raw_spin_lock_irqsave(&object->lock, flags);
 986	if (!area) {
 987		pr_warn_once("Cannot allocate a scan area, scanning the full object\n");
 988		/* mark the object for full scan to avoid false positives */
 989		object->flags |= OBJECT_FULL_SCAN;
 990		goto out_unlock;
 991	}
 992	if (size == SIZE_MAX) {
 993		size = untagged_objp + object->size - untagged_ptr;
 994	} else if (untagged_ptr + size > untagged_objp + object->size) {
 995		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 996		dump_object_info(object);
 997		kmem_cache_free(scan_area_cache, area);
 998		goto out_unlock;
 999	}
1000
1001	INIT_HLIST_NODE(&area->node);
1002	area->start = ptr;
1003	area->size = size;
1004
1005	hlist_add_head(&area->node, &object->area_list);
1006out_unlock:
1007	raw_spin_unlock_irqrestore(&object->lock, flags);
1008	put_object(object);
1009}
1010
1011/*
1012 * Any surplus references (object already gray) to 'ptr' are passed to
1013 * 'excess_ref'. This is used in the vmalloc() case where a pointer to
1014 * vm_struct may be used as an alternative reference to the vmalloc'ed object
1015 * (see free_thread_stack()).
1016 */
1017static void object_set_excess_ref(unsigned long ptr, unsigned long excess_ref)
1018{
1019	unsigned long flags;
1020	struct kmemleak_object *object;
1021
1022	object = find_and_get_object(ptr, 0);
1023	if (!object) {
1024		kmemleak_warn("Setting excess_ref on unknown object at 0x%08lx\n",
1025			      ptr);
1026		return;
1027	}
1028
1029	raw_spin_lock_irqsave(&object->lock, flags);
1030	object->excess_ref = excess_ref;
1031	raw_spin_unlock_irqrestore(&object->lock, flags);
1032	put_object(object);
1033}
1034
1035/*
1036 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
 1037 * pointer. Such an object will not be scanned by kmemleak but references to it
1038 * are searched.
1039 */
1040static void object_no_scan(unsigned long ptr)
1041{
1042	unsigned long flags;
1043	struct kmemleak_object *object;
1044
1045	object = find_and_get_object(ptr, 0);
1046	if (!object) {
1047		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
1048		return;
1049	}
1050
1051	raw_spin_lock_irqsave(&object->lock, flags);
1052	object->flags |= OBJECT_NO_SCAN;
1053	raw_spin_unlock_irqrestore(&object->lock, flags);
1054	put_object(object);
1055}
1056
1057/**
1058 * kmemleak_alloc - register a newly allocated object
1059 * @ptr:	pointer to beginning of the object
1060 * @size:	size of the object
1061 * @min_count:	minimum number of references to this object. If during memory
1062 *		scanning a number of references less than @min_count is found,
1063 *		the object is reported as a memory leak. If @min_count is 0,
1064 *		the object is never reported as a leak. If @min_count is -1,
1065 *		the object is ignored (not scanned and not reported as a leak)
1066 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1067 *
1068 * This function is called from the kernel allocators when a new object
1069 * (memory block) is allocated (kmem_cache_alloc, kmalloc etc.).
1070 */
1071void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
1072			  gfp_t gfp)
1073{
1074	pr_debug("%s(0x%px, %zu, %d)\n", __func__, ptr, size, min_count);
1075
1076	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1077		create_object((unsigned long)ptr, size, min_count, gfp);
1078}
1079EXPORT_SYMBOL_GPL(kmemleak_alloc);
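#if 0
/*
 * Editor's illustrative sketch (not part of the original file): a
 * hypothetical allocator wrapper registering its blocks with the callback
 * above, the same way the slab allocators do, paired with kmemleak_free()
 * on release. my_low_level_alloc()/my_low_level_free() are assumed helpers.
 */
static void *my_pool_alloc(size_t size)
{
	void *ptr = my_low_level_alloc(size);	/* hypothetical backend */

	if (ptr)
		kmemleak_alloc(ptr, size, 1, GFP_KERNEL);
	return ptr;
}

static void my_pool_free(void *ptr)
{
	kmemleak_free(ptr);			/* unregister before freeing */
	my_low_level_free(ptr);			/* hypothetical backend */
}
#endif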
1080
1081/**
1082 * kmemleak_alloc_percpu - register a newly allocated __percpu object
1083 * @ptr:	__percpu pointer to beginning of the object
1084 * @size:	size of the object
1085 * @gfp:	flags used for kmemleak internal memory allocations
1086 *
1087 * This function is called from the kernel percpu allocator when a new object
1088 * (memory block) is allocated (alloc_percpu).
1089 */
1090void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size,
1091				 gfp_t gfp)
1092{
1093	pr_debug("%s(0x%px, %zu)\n", __func__, ptr, size);
1094
1095	if (kmemleak_enabled && ptr && !IS_ERR_PCPU(ptr))
1096		create_object_percpu((__force unsigned long)ptr, size, 1, gfp);
1097}
1098EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
1099
1100/**
1101 * kmemleak_vmalloc - register a newly vmalloc'ed object
1102 * @area:	pointer to vm_struct
1103 * @size:	size of the object
1104 * @gfp:	__vmalloc() flags used for kmemleak internal memory allocations
1105 *
1106 * This function is called from the vmalloc() kernel allocator when a new
1107 * object (memory block) is allocated.
1108 */
1109void __ref kmemleak_vmalloc(const struct vm_struct *area, size_t size, gfp_t gfp)
1110{
1111	pr_debug("%s(0x%px, %zu)\n", __func__, area, size);
1112
1113	/*
1114	 * A min_count = 2 is needed because vm_struct contains a reference to
1115	 * the virtual address of the vmalloc'ed block.
1116	 */
1117	if (kmemleak_enabled) {
1118		create_object((unsigned long)area->addr, size, 2, gfp);
1119		object_set_excess_ref((unsigned long)area,
1120				      (unsigned long)area->addr);
1121	}
1122}
1123EXPORT_SYMBOL_GPL(kmemleak_vmalloc);
1124
1125/**
1126 * kmemleak_free - unregister a previously registered object
1127 * @ptr:	pointer to beginning of the object
1128 *
1129 * This function is called from the kernel allocators when an object (memory
1130 * block) is freed (kmem_cache_free, kfree, vfree etc.).
1131 */
1132void __ref kmemleak_free(const void *ptr)
1133{
1134	pr_debug("%s(0x%px)\n", __func__, ptr);
1135
1136	if (kmemleak_free_enabled && ptr && !IS_ERR(ptr))
1137		delete_object_full((unsigned long)ptr, 0);
1138}
1139EXPORT_SYMBOL_GPL(kmemleak_free);
1140
1141/**
1142 * kmemleak_free_part - partially unregister a previously registered object
1143 * @ptr:	pointer to the beginning or inside the object. This also
1144 *		represents the start of the range to be freed
1145 * @size:	size to be unregistered
1146 *
1147 * This function is called when only a part of a memory block is freed
1148 * (usually from the bootmem allocator).
1149 */
1150void __ref kmemleak_free_part(const void *ptr, size_t size)
1151{
1152	pr_debug("%s(0x%px)\n", __func__, ptr);
1153
1154	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1155		delete_object_part((unsigned long)ptr, size, 0);
1156}
1157EXPORT_SYMBOL_GPL(kmemleak_free_part);
1158
1159/**
1160 * kmemleak_free_percpu - unregister a previously registered __percpu object
1161 * @ptr:	__percpu pointer to beginning of the object
1162 *
1163 * This function is called from the kernel percpu allocator when an object
1164 * (memory block) is freed (free_percpu).
1165 */
1166void __ref kmemleak_free_percpu(const void __percpu *ptr)
1167{
1168	pr_debug("%s(0x%px)\n", __func__, ptr);
1169
1170	if (kmemleak_free_enabled && ptr && !IS_ERR_PCPU(ptr))
1171		delete_object_full((__force unsigned long)ptr, OBJECT_PERCPU);
1172}
1173EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
1174
1175/**
1176 * kmemleak_update_trace - update object allocation stack trace
1177 * @ptr:	pointer to beginning of the object
1178 *
1179 * Override the object allocation stack trace for cases where the actual
1180 * allocation place is not always useful.
1181 */
1182void __ref kmemleak_update_trace(const void *ptr)
1183{
1184	struct kmemleak_object *object;
1185	depot_stack_handle_t trace_handle;
1186	unsigned long flags;
1187
1188	pr_debug("%s(0x%px)\n", __func__, ptr);
1189
1190	if (!kmemleak_enabled || IS_ERR_OR_NULL(ptr))
1191		return;
1192
1193	object = find_and_get_object((unsigned long)ptr, 1);
1194	if (!object) {
1195#ifdef DEBUG
1196		kmemleak_warn("Updating stack trace for unknown object at %p\n",
1197			      ptr);
1198#endif
1199		return;
1200	}
1201
1202	trace_handle = set_track_prepare();
1203	raw_spin_lock_irqsave(&object->lock, flags);
1204	object->trace_handle = trace_handle;
1205	raw_spin_unlock_irqrestore(&object->lock, flags);
1206
1207	put_object(object);
1208}
1209EXPORT_SYMBOL(kmemleak_update_trace);
1210
1211/**
1212 * kmemleak_not_leak - mark an allocated object as false positive
1213 * @ptr:	pointer to beginning of the object
1214 *
1215 * Calling this function on an object will cause the memory block to no longer
1216 * be reported as leak and always be scanned.
1217 */
1218void __ref kmemleak_not_leak(const void *ptr)
1219{
1220	pr_debug("%s(0x%px)\n", __func__, ptr);
1221
1222	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1223		make_gray_object((unsigned long)ptr);
1224}
1225EXPORT_SYMBOL(kmemleak_not_leak);
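#if 0
/*
 * Editor's illustrative sketch (not part of the original file): a typical
 * false-positive annotation. The only reference to the buffer is handed to
 * hardware (my_hw_set_descriptor() is a hypothetical helper), so kmemleak
 * cannot see it; marking the object gray stops it from being reported while
 * it is still scanned for references to other objects.
 */
static int my_init(void)
{
	void *desc = kmalloc(128, GFP_KERNEL);

	if (!desc)
		return -ENOMEM;
	my_hw_set_descriptor(virt_to_phys(desc));
	kmemleak_not_leak(desc);
	return 0;
}
#endif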
1226
1227/**
1228 * kmemleak_transient_leak - mark an allocated object as transient false positive
1229 * @ptr:	pointer to beginning of the object
1230 *
1231 * Calling this function on an object will cause the memory block to not be
1232 * reported as a leak temporarily. This may happen, for example, if the object
1233 * is part of a singly linked list and the ->next reference to it is changed.
1234 */
1235void __ref kmemleak_transient_leak(const void *ptr)
1236{
1237	pr_debug("%s(0x%px)\n", __func__, ptr);
1238
1239	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1240		reset_checksum((unsigned long)ptr);
1241}
1242EXPORT_SYMBOL(kmemleak_transient_leak);
1243
1244/**
1245 * kmemleak_ignore - ignore an allocated object
1246 * @ptr:	pointer to beginning of the object
1247 *
1248 * Calling this function on an object will cause the memory block to be
1249 * ignored (not scanned and not reported as a leak). This is usually done when
1250 * it is known that the corresponding block is not a leak and does not contain
1251 * any references to other allocated memory blocks.
1252 */
1253void __ref kmemleak_ignore(const void *ptr)
1254{
1255	pr_debug("%s(0x%px)\n", __func__, ptr);
1256
1257	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1258		make_black_object((unsigned long)ptr, 0);
1259}
1260EXPORT_SYMBOL(kmemleak_ignore);
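#if 0
/*
 * Editor's illustrative sketch (not part of the original file): a block
 * known not to leak and known to hold no pointers (e.g. a hypothetical
 * precomputed lookup table) is painted black, so it is neither scanned nor
 * ever reported.
 */
	u8 *table = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (table)
		kmemleak_ignore(table);
#endif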
1261
1262/**
1263 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1264 * @ptr:	pointer to beginning or inside the object. This also
1265 *		represents the start of the scan area
1266 * @size:	size of the scan area
1267 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1268 *
1269 * This function is used when it is known that only certain parts of an object
1270 * contain references to other objects. Kmemleak will only scan these areas
 1271 * reducing the number of false negatives.
1272 */
1273void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1274{
1275	pr_debug("%s(0x%px)\n", __func__, ptr);
1276
1277	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1278		add_scan_area((unsigned long)ptr, size, gfp);
1279}
1280EXPORT_SYMBOL(kmemleak_scan_area);
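#if 0
/*
 * Editor's illustrative sketch (not part of the original file): for a
 * hypothetical object whose payload is raw data, restrict scanning to the
 * pointer-bearing header so that payload bytes are not misread as
 * references.
 */
	struct my_desc {			/* hypothetical layout */
		struct list_head link;		/* the only real pointers */
		u8 payload[4096];		/* raw data, no pointers */
	} *d = kmalloc(sizeof(*d), GFP_KERNEL);

	if (d)
		kmemleak_scan_area(&d->link, sizeof(d->link), GFP_KERNEL);
#endif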
1281
1282/**
1283 * kmemleak_no_scan - do not scan an allocated object
1284 * @ptr:	pointer to beginning of the object
1285 *
1286 * This function notifies kmemleak not to scan the given memory block. Useful
1287 * in situations where it is known that the given object does not contain any
1288 * references to other objects. Kmemleak will not scan such objects reducing
1289 * the number of false negatives.
1290 */
1291void __ref kmemleak_no_scan(const void *ptr)
1292{
1293	pr_debug("%s(0x%px)\n", __func__, ptr);
1294
1295	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1296		object_no_scan((unsigned long)ptr);
1297}
1298EXPORT_SYMBOL(kmemleak_no_scan);
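#if 0
/*
 * Editor's illustrative sketch (not part of the original file): a
 * hypothetical pointer-free buffer (e.g. raw device data) is excluded from
 * scanning entirely while still being tracked as an allocation.
 */
	void *raw = kmalloc(PAGE_SIZE, GFP_KERNEL);

	if (raw)
		kmemleak_no_scan(raw);
#endif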
1299
1300/**
1301 * kmemleak_alloc_phys - similar to kmemleak_alloc but taking a physical
1302 *			 address argument
1303 * @phys:	physical address of the object
1304 * @size:	size of the object
1305 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1306 */
1307void __ref kmemleak_alloc_phys(phys_addr_t phys, size_t size, gfp_t gfp)
1308{
1309	pr_debug("%s(0x%px, %zu)\n", __func__, &phys, size);
1310
1311	if (kmemleak_enabled)
1312		/*
1313		 * Create object with OBJECT_PHYS flag and
1314		 * assume min_count 0.
1315		 */
1316		create_object_phys((unsigned long)phys, size, 0, gfp);
1317}
1318EXPORT_SYMBOL(kmemleak_alloc_phys);
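#if 0
/*
 * Editor's illustrative sketch (not part of the original file): an early
 * boot-time user registering a physically addressed range with the variant
 * above; memblock performs a similar registration internally for its own
 * allocations.
 */
	phys_addr_t pa = memblock_phys_alloc(SZ_1M, SZ_1M);

	if (pa)
		kmemleak_alloc_phys(pa, SZ_1M, GFP_NOWAIT);
#endif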
1319
1320/**
1321 * kmemleak_free_part_phys - similar to kmemleak_free_part but taking a
1322 *			     physical address argument
 1323 * @phys:	physical address of the beginning or inside an object. This
1324 *		also represents the start of the range to be freed
1325 * @size:	size to be unregistered
1326 */
1327void __ref kmemleak_free_part_phys(phys_addr_t phys, size_t size)
1328{
1329	pr_debug("%s(0x%px)\n", __func__, &phys);
1330
1331	if (kmemleak_enabled)
1332		delete_object_part((unsigned long)phys, size, OBJECT_PHYS);
1333}
1334EXPORT_SYMBOL(kmemleak_free_part_phys);
1335
1336/**
1337 * kmemleak_ignore_phys - similar to kmemleak_ignore but taking a physical
1338 *			  address argument
1339 * @phys:	physical address of the object
1340 */
1341void __ref kmemleak_ignore_phys(phys_addr_t phys)
1342{
1343	pr_debug("%s(0x%px)\n", __func__, &phys);
1344
1345	if (kmemleak_enabled)
1346		make_black_object((unsigned long)phys, OBJECT_PHYS);
1347}
1348EXPORT_SYMBOL(kmemleak_ignore_phys);
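
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * reserved memory region tracked by physical address but known to contain
 * no kernel pointers can be painted black. "rmem" stands in for a
 * hypothetical struct reserved_mem pointer.
 *
 *	kmemleak_ignore_phys(rmem->base);
 */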
1349
1350/*
1351 * Update an object's checksum and return true if it was modified.
1352 */
1353static bool update_checksum(struct kmemleak_object *object)
1354{
1355	u32 old_csum = object->checksum;
1356
1357	if (WARN_ON_ONCE(object->flags & OBJECT_PHYS))
1358		return false;
1359
1360	kasan_disable_current();
1361	kcsan_disable_current();
1362	if (object->flags & OBJECT_PERCPU) {
1363		unsigned int cpu;
1364
1365		object->checksum = 0;
1366		for_each_possible_cpu(cpu) {
1367			void *ptr = per_cpu_ptr((void __percpu *)object->pointer, cpu);
1368
1369			object->checksum ^= crc32(0, kasan_reset_tag((void *)ptr), object->size);
1370		}
1371	} else {
1372		object->checksum = crc32(0, kasan_reset_tag((void *)object->pointer), object->size);
1373	}
1374	kasan_enable_current();
1375	kcsan_enable_current();
1376
1377	return object->checksum != old_csum;
1378}
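
/*
 * Illustrative note (not in the original source): for an OBJECT_PERCPU
 * object the stored checksum is the XOR of the crc32 of every CPU's copy,
 * so a write to any single copy changes the combined value. E.g. with two
 * CPUs whose copies hash to 0x1234 and 0xabcd, the checksum is
 * 0x1234 ^ 0xabcd = 0xb9f9, and modifying either copy alters it.
 */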
1379
1380/*
1381 * Update an object's references. object->lock must be held by the caller.
1382 */
1383static void update_refs(struct kmemleak_object *object)
1384{
1385	if (!color_white(object)) {
1386		/* non-orphan, ignored or new */
1387		return;
1388	}
1389
1390	/*
1391	 * Increase the object's reference count (number of pointers to the
1392	 * memory block). If this count reaches the required minimum, the
1393	 * object's color will become gray and it will be added to the
1394	 * gray_list.
1395	 */
1396	object->count++;
1397	if (color_gray(object)) {
1398		/* put_object() called when removing from gray_list */
1399		WARN_ON(!get_object(object));
1400		list_add_tail(&object->gray_list, &gray_list);
1401	}
1402}
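
/*
 * Illustrative note (not in the original source): an object created with
 * min_count 1 starts each scan white with count 0. The first pointer found
 * to it brings count to 1 == min_count, so color_gray() holds, the object
 * is queued on gray_list and its own memory gets scanned in turn. Objects
 * still white at the end of a scan are the leak candidates.
 */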
1403
1404static void pointer_update_refs(struct kmemleak_object *scanned,
1405			 unsigned long pointer, unsigned int objflags)
1406{
1407	struct kmemleak_object *object;
1408	unsigned long untagged_ptr;
1409	unsigned long excess_ref;
1410
1411	untagged_ptr = (unsigned long)kasan_reset_tag((void *)pointer);
1412	if (objflags & OBJECT_PERCPU) {
1413		if (untagged_ptr < min_percpu_addr || untagged_ptr >= max_percpu_addr)
1414			return;
1415	} else {
1416		if (untagged_ptr < min_addr || untagged_ptr >= max_addr)
1417			return;
1418	}
1419
1420	/*
1421	 * No need for get_object() here since we hold kmemleak_lock.
1422	 * object->use_count cannot be dropped to 0 while the object
1423	 * is still present in object_tree_root and object_list
1424	 * (with updates protected by kmemleak_lock).
1425	 */
1426	object = __lookup_object(pointer, 1, objflags);
1427	if (!object)
1428		return;
1429	if (object == scanned)
1430		/* self referenced, ignore */
1431		return;
1432
1433	/*
1434	 * Avoid the lockdep recursive warning on object->lock being
1435	 * previously acquired in scan_object(). These locks are
1436	 * enclosed by scan_mutex.
1437	 */
1438	raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1439	/* only pass surplus references (object already gray) */
1440	if (color_gray(object)) {
1441		excess_ref = object->excess_ref;
1442		/* no need for update_refs() if object already gray */
1443	} else {
1444		excess_ref = 0;
1445		update_refs(object);
1446	}
1447	raw_spin_unlock(&object->lock);
1448
1449	if (excess_ref) {
1450		object = lookup_object(excess_ref, 0);
1451		if (!object)
1452			return;
1453		if (object == scanned)
1454			/* circular reference, ignore */
1455			return;
1456		raw_spin_lock_nested(&object->lock, SINGLE_DEPTH_NESTING);
1457		update_refs(object);
1458		raw_spin_unlock(&object->lock);
1459	}
1460}
1461
1462/*
1463 * Memory scanning is a long process and it needs to be interruptible. This
1464 * function checks whether such an interrupt condition has occurred.
1465 */
1466static int scan_should_stop(void)
1467{
1468	if (!kmemleak_enabled)
1469		return 1;
1470
1471	/*
1472	 * This function may be called from either process or kthread context,
1473	 * hence the need to check for both stop conditions.
1474	 */
1475	if (current->mm)
1476		return signal_pending(current);
1477	else
1478		return kthread_should_stop();
1481}
1482
1483/*
1484 * Scan a memory block (exclusive range) for valid pointers and add those
1485 * found to the gray list.
1486 */
1487static void scan_block(void *_start, void *_end,
1488		       struct kmemleak_object *scanned)
1489{
1490	unsigned long *ptr;
1491	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1492	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1493	unsigned long flags;
1494
1495	raw_spin_lock_irqsave(&kmemleak_lock, flags);
1496	for (ptr = start; ptr < end; ptr++) {
1497		unsigned long pointer;
1498
1499		if (scan_should_stop())
1500			break;
1501
1502		kasan_disable_current();
1503		pointer = *(unsigned long *)kasan_reset_tag((void *)ptr);
1504		kasan_enable_current();
1505
1506		pointer_update_refs(scanned, pointer, 0);
1507		pointer_update_refs(scanned, pointer, OBJECT_PERCPU);
1508	}
1509	raw_spin_unlock_irqrestore(&kmemleak_lock, flags);
1510}
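
/*
 * Illustrative note (not in the original source): on a 64-bit kernel
 * (BYTES_PER_POINTER == 8), scanning the range [0x1003, 0x1020) aligns
 * start up to 0x1008 and clips end to 0x1019, so whole words are read at
 * 0x1008, 0x1010 and 0x1018 and no load extends past _end.
 */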
1511
1512/*
1513 * Scan a large memory block in MAX_SCAN_SIZE chunks to reduce the latency.
1514 */
1515#ifdef CONFIG_SMP
1516static void scan_large_block(void *start, void *end)
1517{
1518	void *next;
1519
1520	while (start < end) {
1521		next = min(start + MAX_SCAN_SIZE, end);
1522		scan_block(start, next, NULL);
1523		start = next;
1524		cond_resched();
1525	}
1526}
1527#endif
1528
1529/*
1530 * Scan a memory block corresponding to a kmemleak_object. The caller must
1531 * ensure that object->use_count >= 1.
1532 */
1533static void scan_object(struct kmemleak_object *object)
1534{
1535	struct kmemleak_scan_area *area;
1536	unsigned long flags;
1537
1538	/*
1539	 * Once the object->lock is acquired, the corresponding memory block
1540	 * cannot be freed (the same lock is acquired in delete_object).
1541	 */
1542	raw_spin_lock_irqsave(&object->lock, flags);
1543	if (object->flags & OBJECT_NO_SCAN)
1544		goto out;
1545	if (!(object->flags & OBJECT_ALLOCATED))
1546		/* already freed object */
1547		goto out;
1548
1549	if (object->flags & OBJECT_PERCPU) {
1550		unsigned int cpu;
1551
1552		for_each_possible_cpu(cpu) {
1553			void *start = per_cpu_ptr((void __percpu *)object->pointer, cpu);
1554			void *end = start + object->size;
1555
1556			scan_block(start, end, object);
1557
1558			raw_spin_unlock_irqrestore(&object->lock, flags);
1559			cond_resched();
1560			raw_spin_lock_irqsave(&object->lock, flags);
1561			if (!(object->flags & OBJECT_ALLOCATED))
1562				break;
1563		}
1564	} else if (hlist_empty(&object->area_list) ||
1565	    object->flags & OBJECT_FULL_SCAN) {
1566		void *start = object->flags & OBJECT_PHYS ?
1567				__va((phys_addr_t)object->pointer) :
1568				(void *)object->pointer;
1569		void *end = start + object->size;
1570		void *next;
1571
1572		do {
1573			next = min(start + MAX_SCAN_SIZE, end);
1574			scan_block(start, next, object);
1575
1576			start = next;
1577			if (start >= end)
1578				break;
1579
1580			raw_spin_unlock_irqrestore(&object->lock, flags);
1581			cond_resched();
1582			raw_spin_lock_irqsave(&object->lock, flags);
1583		} while (object->flags & OBJECT_ALLOCATED);
1584	} else {
1585		hlist_for_each_entry(area, &object->area_list, node)
1586			scan_block((void *)area->start,
1587				   (void *)(area->start + area->size),
1588				   object);
1589	}
1590out:
1591	raw_spin_unlock_irqrestore(&object->lock, flags);
1592}
1593
1594/*
1595 * Scan the objects already referenced (gray objects). More objects will be
1596 * referenced and, if there are no memory leaks, all the objects are scanned.
1597 */
1598static void scan_gray_list(void)
1599{
1600	struct kmemleak_object *object, *tmp;
1601
1602	/*
1603	 * The list traversal is safe for both tail additions and removals
1604	 * from inside the loop. The kmemleak objects cannot be freed from
1605	 * outside the loop because their use_count was incremented.
1606	 */
1607	object = list_entry(gray_list.next, typeof(*object), gray_list);
1608	while (&object->gray_list != &gray_list) {
1609		cond_resched();
1610
1611		/* may add new objects to the list */
1612		if (!scan_should_stop())
1613			scan_object(object);
1614
1615		tmp = list_entry(object->gray_list.next, typeof(*object),
1616				 gray_list);
1617
1618		/* remove the object from the list and release it */
1619		list_del(&object->gray_list);
1620		put_object(object);
1621
1622		object = tmp;
1623	}
1624	WARN_ON(!list_empty(&gray_list));
1625}
1626
1627/*
1628 * Conditionally call cond_resched() in an object iteration loop while
1629 * making sure that the given object does not go away when the RCU read
1630 * lock is temporarily dropped, by taking a reference with get_object() first.
1631 */
1632static void kmemleak_cond_resched(struct kmemleak_object *object)
1633{
1634	if (!get_object(object))
1635		return;	/* Try next object */
1636
1637	raw_spin_lock_irq(&kmemleak_lock);
1638	if (object->del_state & DELSTATE_REMOVED)
1639		goto unlock_put;	/* Object removed */
1640	object->del_state |= DELSTATE_NO_DELETE;
1641	raw_spin_unlock_irq(&kmemleak_lock);
1642
1643	rcu_read_unlock();
1644	cond_resched();
1645	rcu_read_lock();
1646
1647	raw_spin_lock_irq(&kmemleak_lock);
1648	if (object->del_state & DELSTATE_REMOVED)
1649		list_del_rcu(&object->object_list);
1650	object->del_state &= ~DELSTATE_NO_DELETE;
1651unlock_put:
1652	raw_spin_unlock_irq(&kmemleak_lock);
1653	put_object(object);
1654}
1655
1656/*
1657 * Scan data sections and all the referenced memory blocks allocated via the
1658 * kernel's standard allocators. This function must be called with the
1659 * scan_mutex held.
1660 */
1661static void kmemleak_scan(void)
1662{
1663	struct kmemleak_object *object;
1664	struct zone *zone;
1665	int __maybe_unused i;
1666	int new_leaks = 0;
1667
1668	jiffies_last_scan = jiffies;
1669
1670	/* prepare the kmemleak_object's */
1671	rcu_read_lock();
1672	list_for_each_entry_rcu(object, &object_list, object_list) {
1673		raw_spin_lock_irq(&object->lock);
1674#ifdef DEBUG
1675		/*
1676		 * With a few exceptions there should be a maximum of
1677		 * 1 reference to any object at this point.
1678		 */
1679		if (atomic_read(&object->use_count) > 1) {
1680			pr_debug("object->use_count = %d\n",
1681				 atomic_read(&object->use_count));
1682			dump_object_info(object);
1683		}
1684#endif
1685
1686		/* ignore objects outside lowmem (paint them black) */
1687		if ((object->flags & OBJECT_PHYS) &&
1688		   !(object->flags & OBJECT_NO_SCAN)) {
1689			unsigned long phys = object->pointer;
1690
1691			if (PHYS_PFN(phys) < min_low_pfn ||
1692			    PHYS_PFN(phys + object->size) > max_low_pfn)
1693				__paint_it(object, KMEMLEAK_BLACK);
1694		}
1695
1696		/* reset the reference count (whiten the object) */
1697		object->count = 0;
1698		if (color_gray(object) && get_object(object))
1699			list_add_tail(&object->gray_list, &gray_list);
1700
1701		raw_spin_unlock_irq(&object->lock);
1702
1703		if (need_resched())
1704			kmemleak_cond_resched(object);
1705	}
1706	rcu_read_unlock();
1707
1708#ifdef CONFIG_SMP
1709	/* per-cpu sections scanning */
1710	for_each_possible_cpu(i)
1711		scan_large_block(__per_cpu_start + per_cpu_offset(i),
1712				 __per_cpu_end + per_cpu_offset(i));
1713#endif
1714
1715	/*
1716	 * Struct page scanning for each populated zone.
1717	 */
1718	get_online_mems();
1719	for_each_populated_zone(zone) {
1720		unsigned long start_pfn = zone->zone_start_pfn;
1721		unsigned long end_pfn = zone_end_pfn(zone);
1722		unsigned long pfn;
1723
1724		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1725			struct page *page = pfn_to_online_page(pfn);
1726
1727			if (!(pfn & 63))
1728				cond_resched();
1729
1730			if (!page)
1731				continue;
1732
1733			/* only scan pages belonging to this zone */
1734			if (page_zone(page) != zone)
1735				continue;
1736			/* only scan if page is in use */
1737			if (page_count(page) == 0)
1738				continue;
1739			scan_block(page, page + 1, NULL);
1740		}
1741	}
1742	put_online_mems();
1743
1744	/*
1745	 * Scanning the task stacks (may introduce false negatives).
1746	 */
1747	if (kmemleak_stack_scan) {
1748		struct task_struct *p, *g;
1749
1750		rcu_read_lock();
1751		for_each_process_thread(g, p) {
1752			void *stack = try_get_task_stack(p);
1753			if (stack) {
1754				scan_block(stack, stack + THREAD_SIZE, NULL);
1755				put_task_stack(p);
1756			}
1757		}
1758		rcu_read_unlock();
1759	}
1760
1761	/*
1762	 * Scan the objects already referenced from the sections scanned
1763	 * above.
1764	 */
1765	scan_gray_list();
1766
1767	/*
1768	 * Check for new or unreferenced objects modified since the previous
1769	 * scan and color them gray until the next scan.
1770	 */
1771	rcu_read_lock();
1772	list_for_each_entry_rcu(object, &object_list, object_list) {
1773		if (need_resched())
1774			kmemleak_cond_resched(object);
1775
1776		/*
1777		 * This is racy but we can save the overhead of lock/unlock
1778		 * calls. The missed objects, if any, should be caught in
1779		 * the next scan.
1780		 */
1781		if (!color_white(object))
1782			continue;
1783		raw_spin_lock_irq(&object->lock);
1784		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1785		    && update_checksum(object) && get_object(object)) {
1786			/* color it gray temporarily */
1787			object->count = object->min_count;
1788			list_add_tail(&object->gray_list, &gray_list);
1789		}
1790		raw_spin_unlock_irq(&object->lock);
1791	}
1792	rcu_read_unlock();
1793
1794	/*
1795	 * Re-scan the gray list for modified unreferenced objects.
1796	 */
1797	scan_gray_list();
1798
1799	/*
1800	 * If scanning was stopped do not report any new unreferenced objects.
1801	 */
1802	if (scan_should_stop())
1803		return;
1804
1805	/*
1806	 * Scanning result reporting.
1807	 */
1808	rcu_read_lock();
1809	list_for_each_entry_rcu(object, &object_list, object_list) {
1810		if (need_resched())
1811			kmemleak_cond_resched(object);
1812
1813		/*
1814		 * This is racy but we can save the overhead of lock/unlock
1815		 * calls. The missed objects, if any, should be caught in
1816		 * the next scan.
1817		 */
1818		if (!color_white(object))
1819			continue;
1820		raw_spin_lock_irq(&object->lock);
1821		if (unreferenced_object(object) &&
1822		    !(object->flags & OBJECT_REPORTED)) {
1823			object->flags |= OBJECT_REPORTED;
1824
1825			if (kmemleak_verbose)
1826				print_unreferenced(NULL, object);
1827
1828			new_leaks++;
1829		}
1830		raw_spin_unlock_irq(&object->lock);
1831	}
1832	rcu_read_unlock();
1833
1834	if (new_leaks) {
1835		kmemleak_found_leaks = true;
1836
1837		pr_info("%d new suspected memory leaks (see /sys/kernel/debug/kmemleak)\n",
1838			new_leaks);
1839	}
1840
1841}
1842
1843/*
1844 * Thread function performing automatic memory scanning. Unreferenced objects
1845 * at the end of a memory scan are reported but only the first time.
1846 */
1847static int kmemleak_scan_thread(void *arg)
1848{
1849	static int first_run = IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN);
1850
1851	pr_info("Automatic memory scanning thread started\n");
1852	set_user_nice(current, 10);
1853
1854	/*
1855	 * Wait before the first scan to allow the system to fully initialize.
1856	 */
1857	if (first_run) {
1858		signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
1859		first_run = 0;
1860		while (timeout && !kthread_should_stop())
1861			timeout = schedule_timeout_interruptible(timeout);
1862	}
1863
1864	while (!kthread_should_stop()) {
1865		signed long timeout = READ_ONCE(jiffies_scan_wait);
1866
1867		mutex_lock(&scan_mutex);
1868		kmemleak_scan();
1869		mutex_unlock(&scan_mutex);
1870
1871		/* wait before the next scan */
1872		while (timeout && !kthread_should_stop())
1873			timeout = schedule_timeout_interruptible(timeout);
1874	}
1875
1876	pr_info("Automatic memory scanning thread ended\n");
1877
1878	return 0;
1879}
1880
1881/*
1882 * Start the automatic memory scanning thread. This function must be called
1883 * with the scan_mutex held.
1884 */
1885static void start_scan_thread(void)
1886{
1887	if (scan_thread)
1888		return;
1889	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1890	if (IS_ERR(scan_thread)) {
1891		pr_warn("Failed to create the scan thread\n");
1892		scan_thread = NULL;
1893	}
1894}
1895
1896/*
1897 * Stop the automatic memory scanning thread.
1898 */
1899static void stop_scan_thread(void)
1900{
1901	if (scan_thread) {
1902		kthread_stop(scan_thread);
1903		scan_thread = NULL;
1904	}
1905}
1906
1907/*
1908 * Iterate over the object_list and return the first valid object at or after
1909 * the required position with its use_count incremented. scan_mutex is
1910 * acquired here and released in kmemleak_seq_stop().
1911 */
1912static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1913{
1914	struct kmemleak_object *object;
1915	loff_t n = *pos;
1916	int err;
1917
1918	err = mutex_lock_interruptible(&scan_mutex);
1919	if (err < 0)
1920		return ERR_PTR(err);
1921
1922	rcu_read_lock();
1923	list_for_each_entry_rcu(object, &object_list, object_list) {
1924		if (n-- > 0)
1925			continue;
1926		if (get_object(object))
1927			goto out;
1928	}
1929	object = NULL;
1930out:
1931	return object;
1932}
1933
1934/*
1935 * Return the next object in the object_list. The function decrements the
1936 * use_count of the previous object and increases that of the next one.
1937 */
1938static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1939{
1940	struct kmemleak_object *prev_obj = v;
1941	struct kmemleak_object *next_obj = NULL;
1942	struct kmemleak_object *obj = prev_obj;
1943
1944	++(*pos);
1945
1946	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1947		if (get_object(obj)) {
1948			next_obj = obj;
1949			break;
1950		}
1951	}
1952
1953	put_object(prev_obj);
1954	return next_obj;
1955}
1956
1957/*
1958 * Decrement the use_count of the last object required, if any.
1959 */
1960static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1961{
1962	if (!IS_ERR(v)) {
1963		/*
1964		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1965		 * waiting was interrupted, so only release it if !IS_ERR.
1966		 */
1967		rcu_read_unlock();
1968		mutex_unlock(&scan_mutex);
1969		if (v)
1970			put_object(v);
1971	}
1972}
1973
1974/*
1975 * Print the information for an unreferenced object to the seq file.
1976 */
1977static int kmemleak_seq_show(struct seq_file *seq, void *v)
1978{
1979	struct kmemleak_object *object = v;
1980	unsigned long flags;
1981
1982	raw_spin_lock_irqsave(&object->lock, flags);
1983	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1984		print_unreferenced(seq, object);
1985	raw_spin_unlock_irqrestore(&object->lock, flags);
1986	return 0;
1987}
1988
1989static const struct seq_operations kmemleak_seq_ops = {
1990	.start = kmemleak_seq_start,
1991	.next  = kmemleak_seq_next,
1992	.stop  = kmemleak_seq_stop,
1993	.show  = kmemleak_seq_show,
1994};
1995
1996static int kmemleak_open(struct inode *inode, struct file *file)
1997{
1998	return seq_open(file, &kmemleak_seq_ops);
1999}
2000
2001static int dump_str_object_info(const char *str)
2002{
2003	unsigned long flags;
2004	struct kmemleak_object *object;
2005	unsigned long addr;
2006
2007	if (kstrtoul(str, 0, &addr))
2008		return -EINVAL;
2009	object = find_and_get_object(addr, 0);
2010	if (!object) {
2011		pr_info("Unknown object at 0x%08lx\n", addr);
2012		return -EINVAL;
2013	}
2014
2015	raw_spin_lock_irqsave(&object->lock, flags);
2016	dump_object_info(object);
2017	raw_spin_unlock_irqrestore(&object->lock, flags);
2018
2019	put_object(object);
2020	return 0;
2021}
2022
2023/*
2024 * We use grey instead of black to ensure we can do future scans on the same
2025 * objects. If we did not do future scans, these black objects could come to
2026 * contain references to newly allocated objects and we would end up with
2027 * false positives.
2028 */
2029static void kmemleak_clear(void)
2030{
2031	struct kmemleak_object *object;
2032
2033	rcu_read_lock();
2034	list_for_each_entry_rcu(object, &object_list, object_list) {
2035		raw_spin_lock_irq(&object->lock);
2036		if ((object->flags & OBJECT_REPORTED) &&
2037		    unreferenced_object(object))
2038			__paint_it(object, KMEMLEAK_GREY);
2039		raw_spin_unlock_irq(&object->lock);
2040	}
2041	rcu_read_unlock();
2042
2043	kmemleak_found_leaks = false;
2044}
2045
2046static void __kmemleak_do_cleanup(void);
2047
2048/*
2049 * File write operation to configure kmemleak at run-time. The following
2050 * commands can be written to the /sys/kernel/debug/kmemleak file:
2051 *   off	- disable kmemleak (irreversible)
2052 *   stack=on	- enable the task stacks scanning
2053 *   stack=off	- disable the task stacks scanning
2054 *   scan=on	- start the automatic memory scanning thread
2055 *   scan=off	- stop the automatic memory scanning thread
2056 *   scan=...	- set the automatic memory scanning period in seconds (0 to
2057 *		  disable it)
2058 *   scan	- trigger a memory scan
2059 *   clear	- mark all current reported unreferenced kmemleak objects as
2060 *		  grey to ignore printing them, or free all kmemleak objects
2061 *		  if kmemleak has been disabled.
2062 *   dump=...	- dump information about the object found at the given address
2063 */
2064static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
2065			      size_t size, loff_t *ppos)
2066{
2067	char buf[64];
2068	int buf_size;
2069	int ret;
2070
2071	buf_size = min(size, (sizeof(buf) - 1));
2072	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
2073		return -EFAULT;
2074	buf[buf_size] = 0;
2075
2076	ret = mutex_lock_interruptible(&scan_mutex);
2077	if (ret < 0)
2078		return ret;
2079
2080	if (strncmp(buf, "clear", 5) == 0) {
2081		if (kmemleak_enabled)
2082			kmemleak_clear();
2083		else
2084			__kmemleak_do_cleanup();
2085		goto out;
2086	}
2087
2088	if (!kmemleak_enabled) {
2089		ret = -EPERM;
2090		goto out;
2091	}
2092
2093	if (strncmp(buf, "off", 3) == 0)
2094		kmemleak_disable();
2095	else if (strncmp(buf, "stack=on", 8) == 0)
2096		kmemleak_stack_scan = 1;
2097	else if (strncmp(buf, "stack=off", 9) == 0)
2098		kmemleak_stack_scan = 0;
2099	else if (strncmp(buf, "scan=on", 7) == 0)
2100		start_scan_thread();
2101	else if (strncmp(buf, "scan=off", 8) == 0)
2102		stop_scan_thread();
2103	else if (strncmp(buf, "scan=", 5) == 0) {
2104		unsigned secs;
2105		unsigned long msecs;
2106
2107		ret = kstrtouint(buf + 5, 0, &secs);
2108		if (ret < 0)
2109			goto out;
2110
2111		msecs = secs * MSEC_PER_SEC;
2112		if (msecs > UINT_MAX)
2113			msecs = UINT_MAX;
2114
2115		stop_scan_thread();
2116		if (msecs) {
2117			WRITE_ONCE(jiffies_scan_wait, msecs_to_jiffies(msecs));
2118			start_scan_thread();
2119		}
2120	} else if (strncmp(buf, "scan", 4) == 0)
2121		kmemleak_scan();
2122	else if (strncmp(buf, "dump=", 5) == 0)
2123		ret = dump_str_object_info(buf + 5);
2124	else
2125		ret = -EINVAL;
2126
2127out:
2128	mutex_unlock(&scan_mutex);
2129	if (ret < 0)
2130		return ret;
2131
2132	/* ignore the rest of the buffer, only one command at a time */
2133	*ppos += size;
2134	return size;
2135}
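
/*
 * Usage sketch for the commands documented above kmemleak_write()
 * (illustrative only, not part of the original file):
 *
 *	# trigger an immediate scan and read the current reports
 *	echo scan > /sys/kernel/debug/kmemleak
 *	cat /sys/kernel/debug/kmemleak
 *
 *	# rescan every 600 seconds, then mark the current reports as grey
 *	echo scan=600 > /sys/kernel/debug/kmemleak
 *	echo clear > /sys/kernel/debug/kmemleak
 */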
2136
2137static const struct file_operations kmemleak_fops = {
2138	.owner		= THIS_MODULE,
2139	.open		= kmemleak_open,
2140	.read		= seq_read,
2141	.write		= kmemleak_write,
2142	.llseek		= seq_lseek,
2143	.release	= seq_release,
2144};
2145
2146static void __kmemleak_do_cleanup(void)
2147{
2148	struct kmemleak_object *object, *tmp;
2149
2150	/*
2151	 * Kmemleak has already been disabled, so there is no need for RCU list
2152	 * traversal or for holding kmemleak_lock.
2153	 */
2154	list_for_each_entry_safe(object, tmp, &object_list, object_list) {
2155		__remove_object(object);
2156		__delete_object(object);
2157	}
2158}
2159
2160/*
2161 * Stop the memory scanning thread and free the kmemleak internal objects if
2162 * no leaks were found (otherwise kmemleak may still hold useful information
2163 * on the memory leaks already reported).
2164 */
2165static void kmemleak_do_cleanup(struct work_struct *work)
2166{
2167	stop_scan_thread();
2168
2169	mutex_lock(&scan_mutex);
2170	/*
2171	 * Once it is made sure that kmemleak_scan has stopped, it is safe to no
2172	 * longer track object freeing. Ordering of the scan thread stopping and
2173	 * the memory accesses below is guaranteed by the kthread_stop()
2174	 * function.
2175	 */
2176	kmemleak_free_enabled = 0;
2177	mutex_unlock(&scan_mutex);
2178
2179	if (!kmemleak_found_leaks)
2180		__kmemleak_do_cleanup();
2181	else
2182		pr_info("Kmemleak disabled without freeing internal data. Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\".\n");
2183}
2184
2185static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
2186
2187/*
2188 * Disable kmemleak. No memory allocation/freeing will be traced once this
2189 * function is called. Disabling kmemleak is an irreversible operation.
2190 */
2191static void kmemleak_disable(void)
2192{
2193	/* atomically check whether it was already invoked */
2194	if (cmpxchg(&kmemleak_error, 0, 1))
2195		return;
2196
2197	/* stop any memory operation tracing */
2198	kmemleak_enabled = 0;
2199
2200	/* check whether it is too early for a kernel thread */
2201	if (kmemleak_late_initialized)
2202		schedule_work(&cleanup_work);
2203	else
2204		kmemleak_free_enabled = 0;
2205
2206	pr_info("Kernel memory leak detector disabled\n");
2207}
2208
2209/*
2210 * Allow boot-time kmemleak disabling (enabled by default).
2211 */
2212static int __init kmemleak_boot_config(char *str)
2213{
2214	if (!str)
2215		return -EINVAL;
2216	if (strcmp(str, "off") == 0)
2217		kmemleak_disable();
2218	else if (strcmp(str, "on") == 0) {
2219		kmemleak_skip_disable = 1;
2220		stack_depot_request_early_init();
2221	}
2222	else
2223		return -EINVAL;
2224	return 0;
2225}
2226early_param("kmemleak", kmemleak_boot_config);
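
/*
 * Usage sketch (illustrative only, not part of the original file): pass
 * "kmemleak=off" on the kernel command line to disable the detector at
 * boot, or "kmemleak=on" to override CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
 * (see kmemleak_init() below).
 */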
2227
2228/*
2229 * Kmemleak initialization.
2230 */
2231void __init kmemleak_init(void)
2232{
2233#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
2234	if (!kmemleak_skip_disable) {
2235		kmemleak_disable();
2236		return;
2237	}
2238#endif
2239
2240	if (kmemleak_error)
2241		return;
2242
2243	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
2244	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
2245
2246	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
2247	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
2248
2249	/* register the data/bss sections */
2250	create_object((unsigned long)_sdata, _edata - _sdata,
2251		      KMEMLEAK_GREY, GFP_ATOMIC);
2252	create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
2253		      KMEMLEAK_GREY, GFP_ATOMIC);
2254	/* only register .data..ro_after_init if not within .data */
2255	if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
2256		create_object((unsigned long)__start_ro_after_init,
2257			      __end_ro_after_init - __start_ro_after_init,
2258			      KMEMLEAK_GREY, GFP_ATOMIC);
2259}
2260
2261/*
2262 * Late initialization function.
2263 */
2264static int __init kmemleak_late_init(void)
2265{
2266	kmemleak_late_initialized = 1;
2267
2268	debugfs_create_file("kmemleak", 0644, NULL, NULL, &kmemleak_fops);
2269
2270	if (kmemleak_error) {
2271		/*
2272		 * Some error occurred and kmemleak was disabled. There is a
2273		 * small chance that kmemleak_disable() was called immediately
2274		 * after setting kmemleak_late_initialized, in which case we may end up
2275		 * with two clean-up threads, but they are serialized by scan_mutex.
2276		 */
2277		schedule_work(&cleanup_work);
2278		return -ENOMEM;
2279	}
2280
2281	if (IS_ENABLED(CONFIG_DEBUG_KMEMLEAK_AUTO_SCAN)) {
2282		mutex_lock(&scan_mutex);
2283		start_scan_thread();
2284		mutex_unlock(&scan_mutex);
2285	}
2286
2287	pr_info("Kernel memory leak detector initialized (mem pool available: %d)\n",
2288		mem_pool_free_count);
2289
2290	return 0;
2291}
2292late_initcall(kmemleak_late_init);