   1/*
   2 * mm/kmemleak.c
   3 *
   4 * Copyright (C) 2008 ARM Limited
   5 * Written by Catalin Marinas <catalin.marinas@arm.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19 *
  20 *
  21 * For more information on the algorithm and kmemleak usage, please see
  22 * Documentation/kmemleak.txt.
  23 *
  24 * Notes on locking
  25 * ----------------
  26 *
  27 * The following locks and mutexes are used by kmemleak:
  28 *
  29 * - kmemleak_lock (rwlock): protects the object_list modifications and
  30 *   accesses to the object_tree_root. The object_list is the main list
  31 *   holding the metadata (struct kmemleak_object) for the allocated memory
  32 *   blocks. The object_tree_root is a red black tree used to look-up
  33 *   metadata based on a pointer to the corresponding memory block.  The
  34 *   kmemleak_object structures are added to the object_list and
  35 *   object_tree_root in the create_object() function called from the
  36 *   kmemleak_alloc() callback and removed in delete_object() called from the
  37 *   kmemleak_free() callback
  38 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
  39 *   the metadata (e.g. count) are protected by this lock. Note that some
  40 *   members of this structure may be protected by other means (atomic or
  41 *   kmemleak_lock). This lock is also held when scanning the corresponding
  42 *   memory block to avoid the kernel freeing it via the kmemleak_free()
  43 *   callback. This is less heavyweight than holding a global lock like
  44 *   kmemleak_lock during scanning
  45 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
  46 *   unreferenced objects at a time. The gray_list contains the objects which
  47 *   are already referenced or marked as false positives and need to be
  48 *   scanned. This list is only modified during a scanning episode when the
  49 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
  50 *   Note that the kmemleak_object.use_count is incremented when an object is
  51 *   added to the gray_list and therefore cannot be freed. This mutex also
  52 *   prevents multiple users of the "kmemleak" debugfs file together with
  53 *   modifications to the memory scanning parameters including the scan_thread
  54 *   pointer
  55 *
  56 * The kmemleak_object structures have a use_count incremented or decremented
  57 * using the get_object()/put_object() functions. When the use_count becomes
  58 * 0, this count can no longer be incremented and put_object() schedules the
  59 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
  60 * function must be protected by rcu_read_lock() to avoid accessing a freed
  61 * structure.
  62 */
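
/*
 * Lock nesting sketch (illustrative, derived from the notes above rather
 * than quoted from elsewhere in this file): scan_mutex is taken outermost
 * by the scanner thread and the debugfs file operations, kmemleak_lock
 * protects the look-up structures, and object->lock protects a single
 * object:
 *
 *	mutex_lock(&scan_mutex);
 *		read_lock_irqsave(&kmemleak_lock, flags);
 *		...				(object look-up)
 *		read_unlock_irqrestore(&kmemleak_lock, flags);
 *		spin_lock_irqsave(&object->lock, flags);
 *		...				(per-object access/scanning)
 *		spin_unlock_irqrestore(&object->lock, flags);
 *	mutex_unlock(&scan_mutex);
 */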
  63
  64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  65
  66#include <linux/init.h>
  67#include <linux/kernel.h>
  68#include <linux/list.h>
  69#include <linux/sched.h>
  70#include <linux/jiffies.h>
  71#include <linux/delay.h>
  72#include <linux/export.h>
  73#include <linux/kthread.h>
  74#include <linux/rbtree.h>
  75#include <linux/fs.h>
  76#include <linux/debugfs.h>
  77#include <linux/seq_file.h>
  78#include <linux/cpumask.h>
  79#include <linux/spinlock.h>
  80#include <linux/mutex.h>
  81#include <linux/rcupdate.h>
  82#include <linux/stacktrace.h>
  83#include <linux/cache.h>
  84#include <linux/percpu.h>
  85#include <linux/hardirq.h>
  86#include <linux/mmzone.h>
  87#include <linux/slab.h>
  88#include <linux/thread_info.h>
  89#include <linux/err.h>
  90#include <linux/uaccess.h>
  91#include <linux/string.h>
  92#include <linux/nodemask.h>
  93#include <linux/mm.h>
  94#include <linux/workqueue.h>
  95#include <linux/crc32.h>
  96
  97#include <asm/sections.h>
  98#include <asm/processor.h>
  99#include <linux/atomic.h>
 100
 101#include <linux/kmemcheck.h>
 102#include <linux/kmemleak.h>
 103#include <linux/memory_hotplug.h>
 104
 105/*
 106 * Kmemleak configuration and common defines.
 107 */
 108#define MAX_TRACE		16	/* stack trace length */
 109#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 110#define SECS_FIRST_SCAN		60	/* delay before the first scan */
 111#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
 112#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
 113
 114#define BYTES_PER_POINTER	sizeof(void *)
 115
 116/* GFP bitmask for kmemleak internal allocations */
 117#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 118				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
 119				 __GFP_NOWARN)
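
/*
 * Worked example (a sketch based on the definition above): for a caller
 * passing GFP_ATOMIC,
 *
 *	gfp_kmemleak_mask(GFP_ATOMIC)
 *	    == GFP_ATOMIC | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN
 *
 * i.e. only the GFP_KERNEL/GFP_ATOMIC bits of the caller's flags are
 * preserved, and the added flags make kmemleak's internal allocations fail
 * early and quietly instead of stressing the page allocator.
 */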
 120
 121/* scanning area inside a memory block */
 122struct kmemleak_scan_area {
 123	struct hlist_node node;
 124	unsigned long start;
 125	size_t size;
 126};
 127
 128#define KMEMLEAK_GREY	0
 129#define KMEMLEAK_BLACK	-1
 130
 131/*
 132 * Structure holding the metadata for each allocated memory block.
 133 * Modifications to such objects should be made while holding the
 134 * object->lock. Insertions or deletions from object_list, gray_list or
 135 * rb_node are already protected by the corresponding locks or mutex (see
 136 * the notes on locking above). These objects are reference-counted
 137 * (use_count) and freed using the RCU mechanism.
 138 */
 139struct kmemleak_object {
 140	spinlock_t lock;
 141	unsigned long flags;		/* object status flags */
 142	struct list_head object_list;
 143	struct list_head gray_list;
 144	struct rb_node rb_node;
 145	struct rcu_head rcu;		/* object_list lockless traversal */
 146	/* object usage count; object freed when use_count == 0 */
 147	atomic_t use_count;
 148	unsigned long pointer;
 149	size_t size;
  150	/* minimum number of pointers found before it is considered a leak */
 151	int min_count;
 152	/* the total number of pointers found pointing to this object */
 153	int count;
 154	/* checksum for detecting modified objects */
 155	u32 checksum;
 156	/* memory ranges to be scanned inside an object (empty for all) */
 157	struct hlist_head area_list;
 158	unsigned long trace[MAX_TRACE];
 159	unsigned int trace_len;
 160	unsigned long jiffies;		/* creation timestamp */
 161	pid_t pid;			/* pid of the current task */
 162	char comm[TASK_COMM_LEN];	/* executable name */
 163};
 164
 165/* flag representing the memory block allocation status */
 166#define OBJECT_ALLOCATED	(1 << 0)
  167/* flag set after the first reporting of an unreferenced object */
 168#define OBJECT_REPORTED		(1 << 1)
 169/* flag set to not scan the object */
 170#define OBJECT_NO_SCAN		(1 << 2)
 171
 172/* number of bytes to print per line; must be 16 or 32 */
 173#define HEX_ROW_SIZE		16
 174/* number of bytes to print at a time (1, 2, 4, 8) */
 175#define HEX_GROUP_SIZE		1
 176/* include ASCII after the hex output */
 177#define HEX_ASCII		1
 178/* max number of lines to be printed */
 179#define HEX_MAX_LINES		2
 180
 181/* the list of all allocated objects */
 182static LIST_HEAD(object_list);
 183/* the list of gray-colored objects (see color_gray comment below) */
 184static LIST_HEAD(gray_list);
 185/* search tree for object boundaries */
 186static struct rb_root object_tree_root = RB_ROOT;
 187/* rw_lock protecting the access to object_list and object_tree_root */
 188static DEFINE_RWLOCK(kmemleak_lock);
 189
 190/* allocation caches for kmemleak internal data */
 191static struct kmem_cache *object_cache;
 192static struct kmem_cache *scan_area_cache;
 193
 194/* set if tracing memory operations is enabled */
 195static int kmemleak_enabled;
 196/* set in the late_initcall if there were no errors */
 197static int kmemleak_initialized;
 198/* enables or disables early logging of the memory operations */
 199static int kmemleak_early_log = 1;
 200/* set if a kmemleak warning was issued */
 201static int kmemleak_warning;
 202/* set if a fatal kmemleak error has occurred */
 203static int kmemleak_error;
 204
 205/* minimum and maximum address that may be valid pointers */
 206static unsigned long min_addr = ULONG_MAX;
 207static unsigned long max_addr;
 208
 209static struct task_struct *scan_thread;
 210/* used to avoid reporting of recently allocated objects */
 211static unsigned long jiffies_min_age;
 212static unsigned long jiffies_last_scan;
 213/* delay between automatic memory scannings */
 214static signed long jiffies_scan_wait;
 215/* enables or disables the task stacks scanning */
 216static int kmemleak_stack_scan = 1;
 217/* protects the memory scanning, parameters and debug/kmemleak file access */
 218static DEFINE_MUTEX(scan_mutex);
  219/* setting kmemleak=on will set this var, skipping the disable */
 220static int kmemleak_skip_disable;
 221/* If there are leaks that can be reported */
 222static bool kmemleak_found_leaks;
 223
 224/*
 225 * Early object allocation/freeing logging. Kmemleak is initialized after the
 226 * kernel allocator. However, both the kernel allocator and kmemleak may
 227 * allocate memory blocks which need to be tracked. Kmemleak defines an
 228 * arbitrary buffer to hold the allocation/freeing information before it is
 229 * fully initialized.
 230 */
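
/*
 * Illustrative timeline (not part of the original file): a kmalloc()
 * issued before kmemleak_init() runs is recorded as a KMEMLEAK_ALLOC
 * entry in early_log[]; when kmemleak_init() is reached, the entry is
 * replayed via early_alloc() and becomes a regular kmemleak_object.
 */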
 231
 232/* kmemleak operation type for early logging */
 233enum {
 234	KMEMLEAK_ALLOC,
 235	KMEMLEAK_ALLOC_PERCPU,
 236	KMEMLEAK_FREE,
 237	KMEMLEAK_FREE_PART,
 238	KMEMLEAK_FREE_PERCPU,
 239	KMEMLEAK_NOT_LEAK,
 240	KMEMLEAK_IGNORE,
 241	KMEMLEAK_SCAN_AREA,
 242	KMEMLEAK_NO_SCAN
 243};
 244
 245/*
 246 * Structure holding the information passed to kmemleak callbacks during the
 247 * early logging.
 248 */
 249struct early_log {
 250	int op_type;			/* kmemleak operation type */
 251	const void *ptr;		/* allocated/freed memory block */
 252	size_t size;			/* memory block size */
 253	int min_count;			/* minimum reference count */
 254	unsigned long trace[MAX_TRACE];	/* stack trace */
 255	unsigned int trace_len;		/* stack trace length */
 256};
 257
 258/* early logging buffer and current position */
 259static struct early_log
 260	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
 261static int crt_early_log __initdata;
 262
 263static void kmemleak_disable(void);
 264
 265/*
 266 * Print a warning and dump the stack trace.
 267 */
 268#define kmemleak_warn(x...)	do {		\
 269	pr_warning(x);				\
 270	dump_stack();				\
 271	kmemleak_warning = 1;			\
 272} while (0)
 273
 274/*
  275 * Macro invoked when a serious kmemleak condition has occurred and cannot be
  276 * recovered from. Kmemleak will be disabled and further allocation/freeing
  277 * tracing will no longer be available.
 278 */
 279#define kmemleak_stop(x...)	do {	\
 280	kmemleak_warn(x);		\
 281	kmemleak_disable();		\
 282} while (0)
 283
 284/*
  285 * Printing of the object's hex dump to the seq file. The number of lines to be
 286 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 287 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 288 * with the object->lock held.
 289 */
 290static void hex_dump_object(struct seq_file *seq,
 291			    struct kmemleak_object *object)
 292{
 293	const u8 *ptr = (const u8 *)object->pointer;
 294	int i, len, remaining;
 295	unsigned char linebuf[HEX_ROW_SIZE * 5];
 296
 297	/* limit the number of lines to HEX_MAX_LINES */
 298	remaining = len =
 299		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
 300
 301	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
 302	for (i = 0; i < len; i += HEX_ROW_SIZE) {
 303		int linelen = min(remaining, HEX_ROW_SIZE);
 304
 305		remaining -= HEX_ROW_SIZE;
 306		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
 307				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
 308				   HEX_ASCII);
 309		seq_printf(seq, "    %s\n", linebuf);
 310	}
 311}
 312
 313/*
 314 * Object colors, encoded with count and min_count:
 315 * - white - orphan object, not enough references to it (count < min_count)
 316 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 317 *		sufficient references to it (count >= min_count)
 318 * - black - ignore, it doesn't contain references (e.g. text section)
 319 *		(min_count == -1). No function defined for this color.
 320 * Newly created objects don't have any color assigned (object->count == -1)
 321 * before the next memory scan when they become white.
 322 */
 323static bool color_white(const struct kmemleak_object *object)
 324{
 325	return object->count != KMEMLEAK_BLACK &&
 326		object->count < object->min_count;
 327}
 328
 329static bool color_gray(const struct kmemleak_object *object)
 330{
 331	return object->min_count != KMEMLEAK_BLACK &&
 332		object->count >= object->min_count;
 333}
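
/*
 * Quick reference (derived from color_white()/color_gray() above):
 *
 *	min_count == -1 (KMEMLEAK_BLACK)	black: not scanned, not reported
 *	min_count ==  0 (KMEMLEAK_GREY)		gray: always treated as referenced
 *	min_count  >  0, count <  min_count	white: potential leak
 *	min_count  >  0, count >= min_count	gray: sufficiently referenced
 */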
 334
 335/*
 336 * Objects are considered unreferenced only if their color is white, they have
  337 * not been deleted and have a minimum age to avoid false positives caused by
 338 * pointers temporarily stored in CPU registers.
 339 */
 340static bool unreferenced_object(struct kmemleak_object *object)
 341{
 342	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
 343		time_before_eq(object->jiffies + jiffies_min_age,
 344			       jiffies_last_scan);
 345}
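
/*
 * Example (using the constants above): with MSECS_MIN_AGE == 5000, a white
 * object allocated two seconds before jiffies_last_scan is not yet reported;
 * it only becomes eligible once it is at least five seconds old at the time
 * the scan started.
 */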
 346
 347/*
  348 * Printing of the unreferenced object's information to the seq file. The
 349 * print_unreferenced function must be called with the object->lock held.
 350 */
 351static void print_unreferenced(struct seq_file *seq,
 352			       struct kmemleak_object *object)
 353{
 354	int i;
 355	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 356
 357	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 358		   object->pointer, object->size);
 359	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
 360		   object->comm, object->pid, object->jiffies,
 361		   msecs_age / 1000, msecs_age % 1000);
 362	hex_dump_object(seq, object);
 363	seq_printf(seq, "  backtrace:\n");
 364
 365	for (i = 0; i < object->trace_len; i++) {
 366		void *ptr = (void *)object->trace[i];
 367		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
 368	}
 369}
 370
 371/*
 372 * Print the kmemleak_object information. This function is used mainly for
  373 * debugging special cases of kmemleak operations. It must be called with
 374 * the object->lock held.
 375 */
 376static void dump_object_info(struct kmemleak_object *object)
 377{
 378	struct stack_trace trace;
 379
 380	trace.nr_entries = object->trace_len;
 381	trace.entries = object->trace;
 382
 383	pr_notice("Object 0x%08lx (size %zu):\n",
 384		  object->pointer, object->size);
 385	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
 386		  object->comm, object->pid, object->jiffies);
 387	pr_notice("  min_count = %d\n", object->min_count);
 388	pr_notice("  count = %d\n", object->count);
 389	pr_notice("  flags = 0x%lx\n", object->flags);
  390	pr_notice("  checksum = %u\n", object->checksum);
 391	pr_notice("  backtrace:\n");
 392	print_stack_trace(&trace, 4);
 393}
 394
 395/*
  396 * Look up the metadata (kmemleak_object) of a memory block in the object search
 397 * tree based on a pointer value. If alias is 0, only values pointing to the
 398 * beginning of the memory block are allowed. The kmemleak_lock must be held
 399 * when calling this function.
 400 */
 401static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 402{
 403	struct rb_node *rb = object_tree_root.rb_node;
 404
 405	while (rb) {
 406		struct kmemleak_object *object =
 407			rb_entry(rb, struct kmemleak_object, rb_node);
 408		if (ptr < object->pointer)
 409			rb = object->rb_node.rb_left;
 410		else if (object->pointer + object->size <= ptr)
 411			rb = object->rb_node.rb_right;
 412		else if (object->pointer == ptr || alias)
 413			return object;
 414		else {
 415			kmemleak_warn("Found object by alias at 0x%08lx\n",
 416				      ptr);
 417			dump_object_info(object);
 418			break;
 419		}
 420	}
 421	return NULL;
 422}
 423
 424/*
 425 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
  426 * that once an object's use_count has reached 0, the RCU freeing has already been
 427 * registered and the object should no longer be used. This function must be
 428 * called under the protection of rcu_read_lock().
 429 */
 430static int get_object(struct kmemleak_object *object)
 431{
 432	return atomic_inc_not_zero(&object->use_count);
 433}
 434
 435/*
 436 * RCU callback to free a kmemleak_object.
 437 */
 438static void free_object_rcu(struct rcu_head *rcu)
 439{
 440	struct hlist_node *tmp;
 441	struct kmemleak_scan_area *area;
 442	struct kmemleak_object *object =
 443		container_of(rcu, struct kmemleak_object, rcu);
 444
 445	/*
 446	 * Once use_count is 0 (guaranteed by put_object), there is no other
 447	 * code accessing this object, hence no need for locking.
 448	 */
 449	hlist_for_each_entry_safe(area, tmp, &object->area_list, node) {
 450		hlist_del(&area->node);
 451		kmem_cache_free(scan_area_cache, area);
 452	}
 453	kmem_cache_free(object_cache, object);
 454}
 455
 456/*
 457 * Decrement the object use_count. Once the count is 0, free the object using
 458 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 459 * delete_object() path, the delayed RCU freeing ensures that there is no
 460 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 461 * is also possible.
 462 */
 463static void put_object(struct kmemleak_object *object)
 464{
 465	if (!atomic_dec_and_test(&object->use_count))
 466		return;
 467
 468	/* should only get here after delete_object was called */
 469	WARN_ON(object->flags & OBJECT_ALLOCATED);
 470
 471	call_rcu(&object->rcu, free_object_rcu);
 472}
 473
 474/*
 475 * Look up an object in the object search tree and increase its use_count.
 476 */
 477static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 478{
 479	unsigned long flags;
 480	struct kmemleak_object *object = NULL;
 481
 482	rcu_read_lock();
 483	read_lock_irqsave(&kmemleak_lock, flags);
 484	if (ptr >= min_addr && ptr < max_addr)
 485		object = lookup_object(ptr, alias);
 486	read_unlock_irqrestore(&kmemleak_lock, flags);
 487
 488	/* check whether the object is still available */
 489	if (object && !get_object(object))
 490		object = NULL;
 491	rcu_read_unlock();
 492
 493	return object;
 494}
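
/*
 * Typical caller pattern (an illustrative sketch, mirroring the uses later
 * in this file): every successful look-up must be balanced by put_object()
 * once the metadata is no longer needed:
 *
 *	object = find_and_get_object(ptr, 0);
 *	if (object) {
 *		...			(use object, e.g. under object->lock)
 *		put_object(object);
 *	}
 */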
 495
 496/*
 497 * Save stack trace to the given array of MAX_TRACE size.
 498 */
 499static int __save_stack_trace(unsigned long *trace)
 500{
 501	struct stack_trace stack_trace;
 502
 503	stack_trace.max_entries = MAX_TRACE;
 504	stack_trace.nr_entries = 0;
 505	stack_trace.entries = trace;
 506	stack_trace.skip = 2;
 507	save_stack_trace(&stack_trace);
 508
 509	return stack_trace.nr_entries;
 510}
 511
 512/*
 513 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 514 * memory block and add it to the object_list and object_tree_root.
 515 */
 516static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 517					     int min_count, gfp_t gfp)
 518{
 519	unsigned long flags;
 520	struct kmemleak_object *object, *parent;
 521	struct rb_node **link, *rb_parent;
 522
 523	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 524	if (!object) {
 525		pr_warning("Cannot allocate a kmemleak_object structure\n");
 526		kmemleak_disable();
 527		return NULL;
 528	}
 529
 530	INIT_LIST_HEAD(&object->object_list);
 531	INIT_LIST_HEAD(&object->gray_list);
 532	INIT_HLIST_HEAD(&object->area_list);
 533	spin_lock_init(&object->lock);
 534	atomic_set(&object->use_count, 1);
 535	object->flags = OBJECT_ALLOCATED;
 536	object->pointer = ptr;
 537	object->size = size;
 538	object->min_count = min_count;
 539	object->count = 0;			/* white color initially */
 540	object->jiffies = jiffies;
 541	object->checksum = 0;
 542
 543	/* task information */
 544	if (in_irq()) {
 545		object->pid = 0;
 546		strncpy(object->comm, "hardirq", sizeof(object->comm));
 547	} else if (in_softirq()) {
 548		object->pid = 0;
 549		strncpy(object->comm, "softirq", sizeof(object->comm));
 550	} else {
 551		object->pid = current->pid;
 552		/*
 553		 * There is a small chance of a race with set_task_comm(),
 554		 * however using get_task_comm() here may cause locking
 555		 * dependency issues with current->alloc_lock. In the worst
 556		 * case, the command line is not correct.
 557		 */
 558		strncpy(object->comm, current->comm, sizeof(object->comm));
 559	}
 560
 561	/* kernel backtrace */
 562	object->trace_len = __save_stack_trace(object->trace);
 563
 564	write_lock_irqsave(&kmemleak_lock, flags);
 565
 566	min_addr = min(min_addr, ptr);
 567	max_addr = max(max_addr, ptr + size);
 568	link = &object_tree_root.rb_node;
 569	rb_parent = NULL;
 570	while (*link) {
 571		rb_parent = *link;
 572		parent = rb_entry(rb_parent, struct kmemleak_object, rb_node);
 573		if (ptr + size <= parent->pointer)
 574			link = &parent->rb_node.rb_left;
 575		else if (parent->pointer + parent->size <= ptr)
 576			link = &parent->rb_node.rb_right;
 577		else {
 578			kmemleak_stop("Cannot insert 0x%lx into the object "
 579				      "search tree (overlaps existing)\n",
 580				      ptr);
 581			kmem_cache_free(object_cache, object);
 582			object = parent;
 583			spin_lock(&object->lock);
 584			dump_object_info(object);
 585			spin_unlock(&object->lock);
 586			goto out;
 587		}
 588	}
 589	rb_link_node(&object->rb_node, rb_parent, link);
 590	rb_insert_color(&object->rb_node, &object_tree_root);
 591
 592	list_add_tail_rcu(&object->object_list, &object_list);
 593out:
 594	write_unlock_irqrestore(&kmemleak_lock, flags);
 595	return object;
 596}
 597
 598/*
 599 * Remove the metadata (struct kmemleak_object) for a memory block from the
 600 * object_list and object_tree_root and decrement its use_count.
 601 */
 602static void __delete_object(struct kmemleak_object *object)
 603{
 604	unsigned long flags;
 605
 606	write_lock_irqsave(&kmemleak_lock, flags);
 607	rb_erase(&object->rb_node, &object_tree_root);
 608	list_del_rcu(&object->object_list);
 609	write_unlock_irqrestore(&kmemleak_lock, flags);
 610
 611	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
 612	WARN_ON(atomic_read(&object->use_count) < 2);
 613
 614	/*
 615	 * Locking here also ensures that the corresponding memory block
 616	 * cannot be freed when it is being scanned.
 617	 */
 618	spin_lock_irqsave(&object->lock, flags);
 619	object->flags &= ~OBJECT_ALLOCATED;
 620	spin_unlock_irqrestore(&object->lock, flags);
 621	put_object(object);
 622}
 623
 624/*
 625 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 626 * delete it.
 627 */
 628static void delete_object_full(unsigned long ptr)
 629{
 630	struct kmemleak_object *object;
 631
 632	object = find_and_get_object(ptr, 0);
 633	if (!object) {
 634#ifdef DEBUG
 635		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
 636			      ptr);
 637#endif
 638		return;
 639	}
 640	__delete_object(object);
 641	put_object(object);
 642}
 643
 644/*
 645 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 646 * delete it. If the memory block is partially freed, the function may create
 647 * additional metadata for the remaining parts of the block.
 648 */
 649static void delete_object_part(unsigned long ptr, size_t size)
 650{
 651	struct kmemleak_object *object;
 652	unsigned long start, end;
 653
 654	object = find_and_get_object(ptr, 1);
 655	if (!object) {
 656#ifdef DEBUG
 657		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
 658			      "(size %zu)\n", ptr, size);
 659#endif
 660		return;
 661	}
 662	__delete_object(object);
 663
 664	/*
 665	 * Create one or two objects that may result from the memory block
 666	 * split. Note that partial freeing is only done by free_bootmem() and
 667	 * this happens before kmemleak_init() is called. The path below is
 668	 * only executed during early log recording in kmemleak_init(), so
 669	 * GFP_KERNEL is enough.
 670	 */
 671	start = object->pointer;
 672	end = object->pointer + object->size;
 673	if (ptr > start)
 674		create_object(start, ptr - start, object->min_count,
 675			      GFP_KERNEL);
 676	if (ptr + size < end)
 677		create_object(ptr + size, end - ptr - size, object->min_count,
 678			      GFP_KERNEL);
 679
 680	put_object(object);
 681}
 682
 683static void __paint_it(struct kmemleak_object *object, int color)
 684{
 685	object->min_count = color;
 686	if (color == KMEMLEAK_BLACK)
 687		object->flags |= OBJECT_NO_SCAN;
 688}
 689
 690static void paint_it(struct kmemleak_object *object, int color)
 691{
 692	unsigned long flags;
 693
 694	spin_lock_irqsave(&object->lock, flags);
 695	__paint_it(object, color);
 696	spin_unlock_irqrestore(&object->lock, flags);
 697}
 698
 699static void paint_ptr(unsigned long ptr, int color)
 700{
 701	struct kmemleak_object *object;
 702
 703	object = find_and_get_object(ptr, 0);
 704	if (!object) {
 705		kmemleak_warn("Trying to color unknown object "
 706			      "at 0x%08lx as %s\n", ptr,
 707			      (color == KMEMLEAK_GREY) ? "Grey" :
 708			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
 709		return;
 710	}
 711	paint_it(object, color);
 712	put_object(object);
 713}
 714
 715/*
 716 * Mark an object permanently as gray-colored so that it can no longer be
 717 * reported as a leak. This is used in general to mark a false positive.
 718 */
 719static void make_gray_object(unsigned long ptr)
 720{
 721	paint_ptr(ptr, KMEMLEAK_GREY);
 722}
 723
 724/*
 725 * Mark the object as black-colored so that it is ignored from scans and
 726 * reporting.
 727 */
 728static void make_black_object(unsigned long ptr)
 729{
 730	paint_ptr(ptr, KMEMLEAK_BLACK);
 731}
 732
 733/*
 734 * Add a scanning area to the object. If at least one such area is added,
 735 * kmemleak will only scan these ranges rather than the whole memory block.
 736 */
 737static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 738{
 739	unsigned long flags;
 740	struct kmemleak_object *object;
 741	struct kmemleak_scan_area *area;
 742
 743	object = find_and_get_object(ptr, 1);
 744	if (!object) {
 745		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 746			      ptr);
 747		return;
 748	}
 749
 750	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 751	if (!area) {
 752		pr_warning("Cannot allocate a scan area\n");
 753		goto out;
 754	}
 755
 756	spin_lock_irqsave(&object->lock, flags);
 757	if (size == SIZE_MAX) {
 758		size = object->pointer + object->size - ptr;
 759	} else if (ptr + size > object->pointer + object->size) {
 760		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 761		dump_object_info(object);
 762		kmem_cache_free(scan_area_cache, area);
 763		goto out_unlock;
 764	}
 765
 766	INIT_HLIST_NODE(&area->node);
 767	area->start = ptr;
 768	area->size = size;
 769
 770	hlist_add_head(&area->node, &object->area_list);
 771out_unlock:
 772	spin_unlock_irqrestore(&object->lock, flags);
 773out:
 774	put_object(object);
 775}
 776
 777/*
  778 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
  779 * pointer. Such an object will not be scanned by kmemleak but references to it
 780 * are searched.
 781 */
 782static void object_no_scan(unsigned long ptr)
 783{
 784	unsigned long flags;
 785	struct kmemleak_object *object;
 786
 787	object = find_and_get_object(ptr, 0);
 788	if (!object) {
 789		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
 790		return;
 791	}
 792
 793	spin_lock_irqsave(&object->lock, flags);
 794	object->flags |= OBJECT_NO_SCAN;
 795	spin_unlock_irqrestore(&object->lock, flags);
 796	put_object(object);
 797}
 798
 799/*
 800 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 801 * processed later once kmemleak is fully initialized.
 802 */
 803static void __init log_early(int op_type, const void *ptr, size_t size,
 804			     int min_count)
 805{
 806	unsigned long flags;
 807	struct early_log *log;
 808
 809	if (kmemleak_error) {
 810		/* kmemleak stopped recording, just count the requests */
 811		crt_early_log++;
 812		return;
 813	}
 814
 815	if (crt_early_log >= ARRAY_SIZE(early_log)) {
 816		kmemleak_disable();
 817		return;
 818	}
 819
 820	/*
 821	 * There is no need for locking since the kernel is still in UP mode
 822	 * at this stage. Disabling the IRQs is enough.
 823	 */
 824	local_irq_save(flags);
 825	log = &early_log[crt_early_log];
 826	log->op_type = op_type;
 827	log->ptr = ptr;
 828	log->size = size;
 829	log->min_count = min_count;
 830	log->trace_len = __save_stack_trace(log->trace);
 831	crt_early_log++;
 832	local_irq_restore(flags);
 833}
 834
 835/*
 836 * Log an early allocated block and populate the stack trace.
 837 */
 838static void early_alloc(struct early_log *log)
 839{
 840	struct kmemleak_object *object;
 841	unsigned long flags;
 842	int i;
 843
 844	if (!kmemleak_enabled || !log->ptr || IS_ERR(log->ptr))
 845		return;
 846
 847	/*
 848	 * RCU locking needed to ensure object is not freed via put_object().
 849	 */
 850	rcu_read_lock();
 851	object = create_object((unsigned long)log->ptr, log->size,
 852			       log->min_count, GFP_ATOMIC);
 853	if (!object)
 854		goto out;
 855	spin_lock_irqsave(&object->lock, flags);
 856	for (i = 0; i < log->trace_len; i++)
 857		object->trace[i] = log->trace[i];
 858	object->trace_len = log->trace_len;
 859	spin_unlock_irqrestore(&object->lock, flags);
 860out:
 861	rcu_read_unlock();
 862}
 863
 864/*
  865 * Log an early allocated percpu block and populate the stack trace.
 866 */
 867static void early_alloc_percpu(struct early_log *log)
 868{
 869	unsigned int cpu;
 870	const void __percpu *ptr = log->ptr;
 871
 872	for_each_possible_cpu(cpu) {
 873		log->ptr = per_cpu_ptr(ptr, cpu);
 874		early_alloc(log);
 875	}
 876}
 877
 878/**
 879 * kmemleak_alloc - register a newly allocated object
 880 * @ptr:	pointer to beginning of the object
 881 * @size:	size of the object
 882 * @min_count:	minimum number of references to this object. If during memory
 883 *		scanning a number of references less than @min_count is found,
 884 *		the object is reported as a memory leak. If @min_count is 0,
 885 *		the object is never reported as a leak. If @min_count is -1,
 886 *		the object is ignored (not scanned and not reported as a leak)
 887 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 888 *
 889 * This function is called from the kernel allocators when a new object
 890 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 891 */
 892void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 893			  gfp_t gfp)
 894{
 895	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 896
 897	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 898		create_object((unsigned long)ptr, size, min_count, gfp);
 899	else if (kmemleak_early_log)
 900		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 901}
 902EXPORT_SYMBOL_GPL(kmemleak_alloc);
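
/*
 * Hypothetical call site (mirroring how the slab hooks use this API; the
 * "cachep" names are placeholders, not code from this file):
 *
 *	void *ptr = kmem_cache_alloc(cachep, flags);
 *	kmemleak_alloc(ptr, cachep->object_size, 1, flags);
 *
 * min_count == 1 means at least one live pointer to the block is expected,
 * so the object is reported if a scan finds none.
 */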
 903
 904/**
 905 * kmemleak_alloc_percpu - register a newly allocated __percpu object
 906 * @ptr:	__percpu pointer to beginning of the object
 907 * @size:	size of the object
 908 *
 909 * This function is called from the kernel percpu allocator when a new object
 910 * (memory block) is allocated (alloc_percpu). It assumes GFP_KERNEL
 911 * allocation.
 912 */
 913void __ref kmemleak_alloc_percpu(const void __percpu *ptr, size_t size)
 914{
 915	unsigned int cpu;
 916
 917	pr_debug("%s(0x%p, %zu)\n", __func__, ptr, size);
 918
 919	/*
 920	 * Percpu allocations are only scanned and not reported as leaks
 921	 * (min_count is set to 0).
 922	 */
 923	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 924		for_each_possible_cpu(cpu)
 925			create_object((unsigned long)per_cpu_ptr(ptr, cpu),
 926				      size, 0, GFP_KERNEL);
 927	else if (kmemleak_early_log)
 928		log_early(KMEMLEAK_ALLOC_PERCPU, ptr, size, 0);
 929}
 930EXPORT_SYMBOL_GPL(kmemleak_alloc_percpu);
 931
 932/**
 933 * kmemleak_free - unregister a previously registered object
 934 * @ptr:	pointer to beginning of the object
 935 *
 936 * This function is called from the kernel allocators when an object (memory
 937 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 938 */
 939void __ref kmemleak_free(const void *ptr)
 940{
 941	pr_debug("%s(0x%p)\n", __func__, ptr);
 942
 943	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 944		delete_object_full((unsigned long)ptr);
 945	else if (kmemleak_early_log)
 946		log_early(KMEMLEAK_FREE, ptr, 0, 0);
 947}
 948EXPORT_SYMBOL_GPL(kmemleak_free);
 949
 950/**
 951 * kmemleak_free_part - partially unregister a previously registered object
 952 * @ptr:	pointer to the beginning or inside the object. This also
 953 *		represents the start of the range to be freed
 954 * @size:	size to be unregistered
 955 *
 956 * This function is called when only a part of a memory block is freed
 957 * (usually from the bootmem allocator).
 958 */
 959void __ref kmemleak_free_part(const void *ptr, size_t size)
 960{
 961	pr_debug("%s(0x%p)\n", __func__, ptr);
 962
 963	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 964		delete_object_part((unsigned long)ptr, size);
 965	else if (kmemleak_early_log)
 966		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 967}
 968EXPORT_SYMBOL_GPL(kmemleak_free_part);
 969
 970/**
 971 * kmemleak_free_percpu - unregister a previously registered __percpu object
 972 * @ptr:	__percpu pointer to beginning of the object
 973 *
 974 * This function is called from the kernel percpu allocator when an object
 975 * (memory block) is freed (free_percpu).
 976 */
 977void __ref kmemleak_free_percpu(const void __percpu *ptr)
 978{
 979	unsigned int cpu;
 980
 981	pr_debug("%s(0x%p)\n", __func__, ptr);
 982
 983	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
 984		for_each_possible_cpu(cpu)
 985			delete_object_full((unsigned long)per_cpu_ptr(ptr,
 986								      cpu));
 987	else if (kmemleak_early_log)
 988		log_early(KMEMLEAK_FREE_PERCPU, ptr, 0, 0);
 989}
 990EXPORT_SYMBOL_GPL(kmemleak_free_percpu);
 991
 992/**
 993 * kmemleak_not_leak - mark an allocated object as false positive
 994 * @ptr:	pointer to beginning of the object
 995 *
 996 * Calling this function on an object will cause the memory block to no longer
  997 * be reported as a leak and to always be scanned.
 998 */
 999void __ref kmemleak_not_leak(const void *ptr)
1000{
1001	pr_debug("%s(0x%p)\n", __func__, ptr);
1002
1003	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1004		make_gray_object((unsigned long)ptr);
1005	else if (kmemleak_early_log)
1006		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
1007}
1008EXPORT_SYMBOL(kmemleak_not_leak);
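
/*
 * Annotation example (hypothetical driver code): a buffer whose only
 * reference is invisible to kmemleak, e.g. a physical address programmed
 * into a device register, can be grayed to suppress the report:
 *
 *	buf = kmalloc(size, GFP_KERNEL);
 *	dev_program_dma_addr(dev, virt_to_phys(buf));	(hypothetical helper)
 *	kmemleak_not_leak(buf);
 */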
1009
1010/**
1011 * kmemleak_ignore - ignore an allocated object
1012 * @ptr:	pointer to beginning of the object
1013 *
1014 * Calling this function on an object will cause the memory block to be
1015 * ignored (not scanned and not reported as a leak). This is usually done when
1016 * it is known that the corresponding block is not a leak and does not contain
1017 * any references to other allocated memory blocks.
1018 */
1019void __ref kmemleak_ignore(const void *ptr)
1020{
1021	pr_debug("%s(0x%p)\n", __func__, ptr);
1022
1023	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1024		make_black_object((unsigned long)ptr);
1025	else if (kmemleak_early_log)
1026		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
1027}
1028EXPORT_SYMBOL(kmemleak_ignore);
1029
1030/**
1031 * kmemleak_scan_area - limit the range to be scanned in an allocated object
1032 * @ptr:	pointer to beginning or inside the object. This also
1033 *		represents the start of the scan area
1034 * @size:	size of the scan area
1035 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
1036 *
1037 * This function is used when it is known that only certain parts of an object
 1038 * contain references to other objects. Kmemleak will only scan these areas,
 1039 * reducing the number of false negatives.
1040 */
1041void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
1042{
1043	pr_debug("%s(0x%p)\n", __func__, ptr);
1044
1045	if (kmemleak_enabled && ptr && size && !IS_ERR(ptr))
1046		add_scan_area((unsigned long)ptr, size, gfp);
1047	else if (kmemleak_early_log)
1048		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
1049}
1050EXPORT_SYMBOL(kmemleak_scan_area);
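
/*
 * Example (hypothetical structure): if only one member of a large object
 * can hold pointers to other allocations, restricting the scan keeps stale
 * data in the remainder of the block from hiding real leaks:
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *	kmemleak_scan_area(&f->ptr_table, sizeof(f->ptr_table), GFP_KERNEL);
 */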
1051
1052/**
1053 * kmemleak_no_scan - do not scan an allocated object
1054 * @ptr:	pointer to beginning of the object
1055 *
1056 * This function notifies kmemleak not to scan the given memory block. Useful
1057 * in situations where it is known that the given object does not contain any
 1058 * references to other objects. Kmemleak will not scan such objects, reducing
1059 * the number of false negatives.
1060 */
1061void __ref kmemleak_no_scan(const void *ptr)
1062{
1063	pr_debug("%s(0x%p)\n", __func__, ptr);
1064
1065	if (kmemleak_enabled && ptr && !IS_ERR(ptr))
1066		object_no_scan((unsigned long)ptr);
1067	else if (kmemleak_early_log)
1068		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
1069}
1070EXPORT_SYMBOL(kmemleak_no_scan);
1071
1072/*
1073 * Update an object's checksum and return true if it was modified.
1074 */
1075static bool update_checksum(struct kmemleak_object *object)
1076{
1077	u32 old_csum = object->checksum;
1078
1079	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
1080		return false;
1081
1082	object->checksum = crc32(0, (void *)object->pointer, object->size);
1083	return object->checksum != old_csum;
1084}
1085
1086/*
 1087 * Memory scanning is a long process and it needs to be interruptible. This
 1088 * function checks whether such an interrupt condition has occurred.
1089 */
1090static int scan_should_stop(void)
1091{
1092	if (!kmemleak_enabled)
1093		return 1;
1094
1095	/*
1096	 * This function may be called from either process or kthread context,
1097	 * hence the need to check for both stop conditions.
1098	 */
1099	if (current->mm)
1100		return signal_pending(current);
1101	else
1102		return kthread_should_stop();
1103
1104	return 0;
1105}
1106
1107/*
1108 * Scan a memory block (exclusive range) for valid pointers and add those
1109 * found to the gray list.
1110 */
1111static void scan_block(void *_start, void *_end,
1112		       struct kmemleak_object *scanned, int allow_resched)
1113{
1114	unsigned long *ptr;
1115	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1116	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1117
1118	for (ptr = start; ptr < end; ptr++) {
1119		struct kmemleak_object *object;
1120		unsigned long flags;
1121		unsigned long pointer;
1122
1123		if (allow_resched)
1124			cond_resched();
1125		if (scan_should_stop())
1126			break;
1127
1128		/* don't scan uninitialized memory */
1129		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
1130						  BYTES_PER_POINTER))
1131			continue;
1132
1133		pointer = *ptr;
1134
1135		object = find_and_get_object(pointer, 1);
1136		if (!object)
1137			continue;
1138		if (object == scanned) {
1139			/* self referenced, ignore */
1140			put_object(object);
1141			continue;
1142		}
1143
1144		/*
1145		 * Avoid the lockdep recursive warning on object->lock being
1146		 * previously acquired in scan_object(). These locks are
1147		 * enclosed by scan_mutex.
1148		 */
1149		spin_lock_irqsave_nested(&object->lock, flags,
1150					 SINGLE_DEPTH_NESTING);
1151		if (!color_white(object)) {
1152			/* non-orphan, ignored or new */
1153			spin_unlock_irqrestore(&object->lock, flags);
1154			put_object(object);
1155			continue;
1156		}
1157
1158		/*
1159		 * Increase the object's reference count (number of pointers
1160		 * to the memory block). If this count reaches the required
1161		 * minimum, the object's color will become gray and it will be
1162		 * added to the gray_list.
1163		 */
1164		object->count++;
1165		if (color_gray(object)) {
1166			list_add_tail(&object->gray_list, &gray_list);
1167			spin_unlock_irqrestore(&object->lock, flags);
1168			continue;
1169		}
1170
1171		spin_unlock_irqrestore(&object->lock, flags);
1172		put_object(object);
1173	}
1174}
1175
1176/*
1177 * Scan a memory block corresponding to a kmemleak_object. A condition is
1178 * that object->use_count >= 1.
1179 */
1180static void scan_object(struct kmemleak_object *object)
1181{
1182	struct kmemleak_scan_area *area;
1183	unsigned long flags;
1184
1185	/*
1186	 * Once the object->lock is acquired, the corresponding memory block
1187	 * cannot be freed (the same lock is acquired in delete_object).
1188	 */
1189	spin_lock_irqsave(&object->lock, flags);
1190	if (object->flags & OBJECT_NO_SCAN)
1191		goto out;
1192	if (!(object->flags & OBJECT_ALLOCATED))
1193		/* already freed object */
1194		goto out;
1195	if (hlist_empty(&object->area_list)) {
1196		void *start = (void *)object->pointer;
1197		void *end = (void *)(object->pointer + object->size);
1198
1199		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
1200		       !(object->flags & OBJECT_NO_SCAN)) {
1201			scan_block(start, min(start + MAX_SCAN_SIZE, end),
1202				   object, 0);
1203			start += MAX_SCAN_SIZE;
1204
1205			spin_unlock_irqrestore(&object->lock, flags);
1206			cond_resched();
1207			spin_lock_irqsave(&object->lock, flags);
1208		}
1209	} else
1210		hlist_for_each_entry(area, &object->area_list, node)
1211			scan_block((void *)area->start,
1212				   (void *)(area->start + area->size),
1213				   object, 0);
1214out:
1215	spin_unlock_irqrestore(&object->lock, flags);
1216}
1217
1218/*
1219 * Scan the objects already referenced (gray objects). More objects will be
1220 * referenced and, if there are no memory leaks, all the objects are scanned.
1221 */
1222static void scan_gray_list(void)
1223{
1224	struct kmemleak_object *object, *tmp;
1225
1226	/*
1227	 * The list traversal is safe for both tail additions and removals
1228	 * from inside the loop. The kmemleak objects cannot be freed from
1229	 * outside the loop because their use_count was incremented.
1230	 */
1231	object = list_entry(gray_list.next, typeof(*object), gray_list);
1232	while (&object->gray_list != &gray_list) {
1233		cond_resched();
1234
1235		/* may add new objects to the list */
1236		if (!scan_should_stop())
1237			scan_object(object);
1238
1239		tmp = list_entry(object->gray_list.next, typeof(*object),
1240				 gray_list);
1241
1242		/* remove the object from the list and release it */
1243		list_del(&object->gray_list);
1244		put_object(object);
1245
1246		object = tmp;
1247	}
1248	WARN_ON(!list_empty(&gray_list));
1249}
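
/*
 * Summary of the marking pass (added for clarity; see kmemleak_scan()
 * below): all objects are whitened, the root areas (data/bss, per-cpu
 * sections, struct pages and optionally the task stacks) are scanned to
 * seed the gray_list, gray objects are scanned until the list drains, and
 * whatever remains white is a leak candidate.
 */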
1250
1251/*
1252 * Scan data sections and all the referenced memory blocks allocated via the
1253 * kernel's standard allocators. This function must be called with the
1254 * scan_mutex held.
1255 */
1256static void kmemleak_scan(void)
1257{
1258	unsigned long flags;
1259	struct kmemleak_object *object;
1260	int i;
1261	int new_leaks = 0;
1262
1263	jiffies_last_scan = jiffies;
1264
 1265	/* prepare the kmemleak_object structures */
1266	rcu_read_lock();
1267	list_for_each_entry_rcu(object, &object_list, object_list) {
1268		spin_lock_irqsave(&object->lock, flags);
1269#ifdef DEBUG
1270		/*
1271		 * With a few exceptions there should be a maximum of
1272		 * 1 reference to any object at this point.
1273		 */
1274		if (atomic_read(&object->use_count) > 1) {
1275			pr_debug("object->use_count = %d\n",
1276				 atomic_read(&object->use_count));
1277			dump_object_info(object);
1278		}
1279#endif
1280		/* reset the reference count (whiten the object) */
1281		object->count = 0;
1282		if (color_gray(object) && get_object(object))
1283			list_add_tail(&object->gray_list, &gray_list);
1284
1285		spin_unlock_irqrestore(&object->lock, flags);
1286	}
1287	rcu_read_unlock();
1288
1289	/* data/bss scanning */
1290	scan_block(_sdata, _edata, NULL, 1);
1291	scan_block(__bss_start, __bss_stop, NULL, 1);
1292
1293#ifdef CONFIG_SMP
1294	/* per-cpu sections scanning */
1295	for_each_possible_cpu(i)
1296		scan_block(__per_cpu_start + per_cpu_offset(i),
1297			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
1298#endif
1299
1300	/*
1301	 * Struct page scanning for each node.
1302	 */
1303	lock_memory_hotplug();
1304	for_each_online_node(i) {
1305		unsigned long start_pfn = node_start_pfn(i);
1306		unsigned long end_pfn = node_end_pfn(i);
1307		unsigned long pfn;
1308
1309		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1310			struct page *page;
1311
1312			if (!pfn_valid(pfn))
1313				continue;
1314			page = pfn_to_page(pfn);
1315			/* only scan if page is in use */
1316			if (page_count(page) == 0)
1317				continue;
1318			scan_block(page, page + 1, NULL, 1);
1319		}
1320	}
1321	unlock_memory_hotplug();
1322
1323	/*
1324	 * Scanning the task stacks (may introduce false negatives).
1325	 */
1326	if (kmemleak_stack_scan) {
1327		struct task_struct *p, *g;
1328
1329		read_lock(&tasklist_lock);
1330		do_each_thread(g, p) {
1331			scan_block(task_stack_page(p), task_stack_page(p) +
1332				   THREAD_SIZE, NULL, 0);
1333		} while_each_thread(g, p);
1334		read_unlock(&tasklist_lock);
1335	}
1336
1337	/*
1338	 * Scan the objects already referenced from the sections scanned
1339	 * above.
1340	 */
1341	scan_gray_list();
1342
1343	/*
1344	 * Check for new or unreferenced objects modified since the previous
1345	 * scan and color them gray until the next scan.
1346	 */
1347	rcu_read_lock();
1348	list_for_each_entry_rcu(object, &object_list, object_list) {
1349		spin_lock_irqsave(&object->lock, flags);
1350		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1351		    && update_checksum(object) && get_object(object)) {
1352			/* color it gray temporarily */
1353			object->count = object->min_count;
1354			list_add_tail(&object->gray_list, &gray_list);
1355		}
1356		spin_unlock_irqrestore(&object->lock, flags);
1357	}
1358	rcu_read_unlock();
1359
1360	/*
1361	 * Re-scan the gray list for modified unreferenced objects.
1362	 */
1363	scan_gray_list();
1364
1365	/*
1366	 * If scanning was stopped do not report any new unreferenced objects.
1367	 */
1368	if (scan_should_stop())
1369		return;
1370
1371	/*
1372	 * Scanning result reporting.
1373	 */
1374	rcu_read_lock();
1375	list_for_each_entry_rcu(object, &object_list, object_list) {
1376		spin_lock_irqsave(&object->lock, flags);
1377		if (unreferenced_object(object) &&
1378		    !(object->flags & OBJECT_REPORTED)) {
1379			object->flags |= OBJECT_REPORTED;
1380			new_leaks++;
1381		}
1382		spin_unlock_irqrestore(&object->lock, flags);
1383	}
1384	rcu_read_unlock();
1385
1386	if (new_leaks) {
1387		kmemleak_found_leaks = true;
1388
1389		pr_info("%d new suspected memory leaks (see "
1390			"/sys/kernel/debug/kmemleak)\n", new_leaks);
1391	}
1392
1393}
1394
1395/*
1396 * Thread function performing automatic memory scanning. Unreferenced objects
1397 * at the end of a memory scan are reported but only the first time.
1398 */
1399static int kmemleak_scan_thread(void *arg)
1400{
1401	static int first_run = 1;
1402
1403	pr_info("Automatic memory scanning thread started\n");
1404	set_user_nice(current, 10);
1405
1406	/*
1407	 * Wait before the first scan to allow the system to fully initialize.
1408	 */
1409	if (first_run) {
1410		first_run = 0;
1411		ssleep(SECS_FIRST_SCAN);
1412	}
1413
1414	while (!kthread_should_stop()) {
1415		signed long timeout = jiffies_scan_wait;
1416
1417		mutex_lock(&scan_mutex);
1418		kmemleak_scan();
1419		mutex_unlock(&scan_mutex);
1420
1421		/* wait before the next scan */
1422		while (timeout && !kthread_should_stop())
1423			timeout = schedule_timeout_interruptible(timeout);
1424	}
1425
1426	pr_info("Automatic memory scanning thread ended\n");
1427
1428	return 0;
1429}
1430
1431/*
1432 * Start the automatic memory scanning thread. This function must be called
1433 * with the scan_mutex held.
1434 */
1435static void start_scan_thread(void)
1436{
1437	if (scan_thread)
1438		return;
1439	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1440	if (IS_ERR(scan_thread)) {
1441		pr_warning("Failed to create the scan thread\n");
1442		scan_thread = NULL;
1443	}
1444}
1445
1446/*
1447 * Stop the automatic memory scanning thread. This function must be called
1448 * with the scan_mutex held.
1449 */
1450static void stop_scan_thread(void)
1451{
1452	if (scan_thread) {
1453		kthread_stop(scan_thread);
1454		scan_thread = NULL;
1455	}
1456}
1457
1458/*
1459 * Iterate over the object_list and return the first valid object at or after
1460 * the required position with its use_count incremented. The function triggers
 1461 * a memory scan when the pos argument points to the first position.
1462 */
1463static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1464{
1465	struct kmemleak_object *object;
1466	loff_t n = *pos;
1467	int err;
1468
1469	err = mutex_lock_interruptible(&scan_mutex);
1470	if (err < 0)
1471		return ERR_PTR(err);
1472
1473	rcu_read_lock();
1474	list_for_each_entry_rcu(object, &object_list, object_list) {
1475		if (n-- > 0)
1476			continue;
1477		if (get_object(object))
1478			goto out;
1479	}
1480	object = NULL;
1481out:
1482	return object;
1483}
1484
1485/*
1486 * Return the next object in the object_list. The function decrements the
1487 * use_count of the previous object and increases that of the next one.
1488 */
1489static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1490{
1491	struct kmemleak_object *prev_obj = v;
1492	struct kmemleak_object *next_obj = NULL;
1493	struct kmemleak_object *obj = prev_obj;
1494
1495	++(*pos);
1496
1497	list_for_each_entry_continue_rcu(obj, &object_list, object_list) {
1498		if (get_object(obj)) {
1499			next_obj = obj;
1500			break;
1501		}
1502	}
1503
1504	put_object(prev_obj);
1505	return next_obj;
1506}
1507
1508/*
1509 * Decrement the use_count of the last object required, if any.
1510 */
1511static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1512{
1513	if (!IS_ERR(v)) {
1514		/*
1515		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1516		 * waiting was interrupted, so only release it if !IS_ERR.
1517		 */
1518		rcu_read_unlock();
1519		mutex_unlock(&scan_mutex);
1520		if (v)
1521			put_object(v);
1522	}
1523}
1524
1525/*
1526 * Print the information for an unreferenced object to the seq file.
1527 */
1528static int kmemleak_seq_show(struct seq_file *seq, void *v)
1529{
1530	struct kmemleak_object *object = v;
1531	unsigned long flags;
1532
1533	spin_lock_irqsave(&object->lock, flags);
1534	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1535		print_unreferenced(seq, object);
1536	spin_unlock_irqrestore(&object->lock, flags);
1537	return 0;
1538}
1539
1540static const struct seq_operations kmemleak_seq_ops = {
1541	.start = kmemleak_seq_start,
1542	.next  = kmemleak_seq_next,
1543	.stop  = kmemleak_seq_stop,
1544	.show  = kmemleak_seq_show,
1545};
1546
1547static int kmemleak_open(struct inode *inode, struct file *file)
1548{
1549	return seq_open(file, &kmemleak_seq_ops);
1550}
1551
1552static int dump_str_object_info(const char *str)
1553{
1554	unsigned long flags;
1555	struct kmemleak_object *object;
1556	unsigned long addr;
1557
1558	if (kstrtoul(str, 0, &addr))
1559		return -EINVAL;
1560	object = find_and_get_object(addr, 0);
1561	if (!object) {
1562		pr_info("Unknown object at 0x%08lx\n", addr);
1563		return -EINVAL;
1564	}
1565
1566	spin_lock_irqsave(&object->lock, flags);
1567	dump_object_info(object);
1568	spin_unlock_irqrestore(&object->lock, flags);
1569
1570	put_object(object);
1571	return 0;
1572}
1573
1574/*
1575 * We use grey instead of black to ensure we can do future scans on the same
1576 * objects. If we did not do future scans these black objects could
1577 * potentially contain references to newly allocated objects in the future and
1578 * we'd end up with false positives.
1579 */
1580static void kmemleak_clear(void)
1581{
1582	struct kmemleak_object *object;
1583	unsigned long flags;
1584
1585	rcu_read_lock();
1586	list_for_each_entry_rcu(object, &object_list, object_list) {
1587		spin_lock_irqsave(&object->lock, flags);
1588		if ((object->flags & OBJECT_REPORTED) &&
1589		    unreferenced_object(object))
1590			__paint_it(object, KMEMLEAK_GREY);
1591		spin_unlock_irqrestore(&object->lock, flags);
1592	}
1593	rcu_read_unlock();
1594
1595	kmemleak_found_leaks = false;
1596}
1597
1598static void __kmemleak_do_cleanup(void);
1599
1600/*
1601 * File write operation to configure kmemleak at run-time. The following
1602 * commands can be written to the /sys/kernel/debug/kmemleak file:
1603 *   off	- disable kmemleak (irreversible)
1604 *   stack=on	- enable the task stacks scanning
1605 *   stack=off	- disable the tasks stacks scanning
1606 *   scan=on	- start the automatic memory scanning thread
1607 *   scan=off	- stop the automatic memory scanning thread
1608 *   scan=...	- set the automatic memory scanning period in seconds (0 to
1609 *		  disable it)
1610 *   scan	- trigger a memory scan
1611 *   clear	- mark all current reported unreferenced kmemleak objects as
1612 *		  grey to ignore printing them, or free all kmemleak objects
1613 *		  if kmemleak has been disabled.
1614 *   dump=...	- dump information about the object found at the given address
1615 */
1616static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1617			      size_t size, loff_t *ppos)
1618{
1619	char buf[64];
1620	int buf_size;
1621	int ret;
1622
1623	buf_size = min(size, (sizeof(buf) - 1));
1624	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1625		return -EFAULT;
1626	buf[buf_size] = 0;
1627
1628	ret = mutex_lock_interruptible(&scan_mutex);
1629	if (ret < 0)
1630		return ret;
1631
1632	if (strncmp(buf, "clear", 5) == 0) {
1633		if (kmemleak_enabled)
1634			kmemleak_clear();
1635		else
1636			__kmemleak_do_cleanup();
1637		goto out;
1638	}
1639
1640	if (!kmemleak_enabled) {
1641		ret = -EBUSY;
1642		goto out;
1643	}
1644
1645	if (strncmp(buf, "off", 3) == 0)
1646		kmemleak_disable();
1647	else if (strncmp(buf, "stack=on", 8) == 0)
1648		kmemleak_stack_scan = 1;
1649	else if (strncmp(buf, "stack=off", 9) == 0)
1650		kmemleak_stack_scan = 0;
1651	else if (strncmp(buf, "scan=on", 7) == 0)
1652		start_scan_thread();
1653	else if (strncmp(buf, "scan=off", 8) == 0)
1654		stop_scan_thread();
1655	else if (strncmp(buf, "scan=", 5) == 0) {
1656		unsigned long secs;
1657
1658		ret = kstrtoul(buf + 5, 0, &secs);
1659		if (ret < 0)
1660			goto out;
1661		stop_scan_thread();
1662		if (secs) {
1663			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1664			start_scan_thread();
1665		}
1666	} else if (strncmp(buf, "scan", 4) == 0)
1667		kmemleak_scan();
1668	else if (strncmp(buf, "dump=", 5) == 0)
1669		ret = dump_str_object_info(buf + 5);
1670	else
1671		ret = -EINVAL;
1672
1673out:
1674	mutex_unlock(&scan_mutex);
1675	if (ret < 0)
1676		return ret;
1677
1678	/* ignore the rest of the buffer, only one command at a time */
1679	*ppos += size;
1680	return size;
1681}
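
/*
 * Shell usage examples for the commands above (illustrative; the dump
 * address is a placeholder):
 *
 *	echo scan > /sys/kernel/debug/kmemleak		trigger a scan now
 *	echo scan=600 > /sys/kernel/debug/kmemleak	rescan every 10 minutes
 *	echo clear > /sys/kernel/debug/kmemleak		grey out current reports
 *	echo dump=0xffff880012345678 > /sys/kernel/debug/kmemleak
 */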
1682
1683static const struct file_operations kmemleak_fops = {
1684	.owner		= THIS_MODULE,
1685	.open		= kmemleak_open,
1686	.read		= seq_read,
1687	.write		= kmemleak_write,
1688	.llseek		= seq_lseek,
1689	.release	= seq_release,
1690};
1691
1692static void __kmemleak_do_cleanup(void)
1693{
1694	struct kmemleak_object *object;
1695
1696	rcu_read_lock();
1697	list_for_each_entry_rcu(object, &object_list, object_list)
1698		delete_object_full(object->pointer);
1699	rcu_read_unlock();
1700}
1701
1702/*
 1703 * Stop the memory scanning thread and free the kmemleak internal objects if
 1704 * no potential leaks have been found (otherwise, kmemleak may still have some
 1705 * useful information on memory leaks).
1706 */
1707static void kmemleak_do_cleanup(struct work_struct *work)
1708{
1709	mutex_lock(&scan_mutex);
1710	stop_scan_thread();
1711
1712	if (!kmemleak_found_leaks)
1713		__kmemleak_do_cleanup();
1714	else
1715		pr_info("Kmemleak disabled without freeing internal data. "
1716			"Reclaim the memory with \"echo clear > /sys/kernel/debug/kmemleak\"\n");
1717	mutex_unlock(&scan_mutex);
1718}
1719
1720static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
1721
1722/*
1723 * Disable kmemleak. No memory allocation/freeing will be traced once this
1724 * function is called. Disabling kmemleak is an irreversible operation.
1725 */
1726static void kmemleak_disable(void)
1727{
1728	/* atomically check whether it was already invoked */
1729	if (cmpxchg(&kmemleak_error, 0, 1))
1730		return;
1731
1732	/* stop any memory operation tracing */
1733	kmemleak_enabled = 0;
1734
1735	/* check whether it is too early for a kernel thread */
1736	if (kmemleak_initialized)
1737		schedule_work(&cleanup_work);
1738
1739	pr_info("Kernel memory leak detector disabled\n");
1740}
1741
1742/*
1743 * Allow boot-time kmemleak disabling (enabled by default).
1744 */
1745static int kmemleak_boot_config(char *str)
1746{
1747	if (!str)
1748		return -EINVAL;
1749	if (strcmp(str, "off") == 0)
1750		kmemleak_disable();
1751	else if (strcmp(str, "on") == 0)
1752		kmemleak_skip_disable = 1;
1753	else
1754		return -EINVAL;
1755	return 0;
1756}
1757early_param("kmemleak", kmemleak_boot_config);
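/*
 * Illustrative boot command-line usage of the parameter above:
 *   kmemleak=off	- disable the detector before any allocation is traced
 *   kmemleak=on	- keep it enabled even when the kernel was built with
 *			  CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
 */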
1758
1759static void __init print_log_trace(struct early_log *log)
1760{
1761	struct stack_trace trace;
1762
1763	trace.nr_entries = log->trace_len;
1764	trace.entries = log->trace;
1765
1766	pr_notice("Early log backtrace:\n");
1767	print_stack_trace(&trace, 2);
1768}
1769
1770/*
1771 * Kmemleak initialization.
1772 */
1773void __init kmemleak_init(void)
1774{
1775	int i;
1776	unsigned long flags;
1777
1778#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1779	if (!kmemleak_skip_disable) {
1780		kmemleak_early_log = 0;
1781		kmemleak_disable();
1782		return;
1783	}
1784#endif
1785
1786	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1787	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1788
1789	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1790	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1791
1792	if (crt_early_log >= ARRAY_SIZE(early_log))
1793		pr_warning("Early log buffer exceeded (%d), please increase "
1794			   "DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n", crt_early_log);
1795
1796	/* the kernel is still in UP mode, so disabling the IRQs is enough */
1797	local_irq_save(flags);
1798	kmemleak_early_log = 0;
1799	if (kmemleak_error) {
1800		local_irq_restore(flags);
1801		return;
1802	} else
1803		kmemleak_enabled = 1;
1804	local_irq_restore(flags);
1805
1806	/*
1807	 * This is the point where tracking allocations is safe. Automatic
1808	 * scanning is started during the late initcall. Add the early logged
1809	 * callbacks to the kmemleak infrastructure.
1810	 */
1811	for (i = 0; i < crt_early_log; i++) {
1812		struct early_log *log = &early_log[i];
1813
1814		switch (log->op_type) {
1815		case KMEMLEAK_ALLOC:
1816			early_alloc(log);
1817			break;
1818		case KMEMLEAK_ALLOC_PERCPU:
1819			early_alloc_percpu(log);
1820			break;
1821		case KMEMLEAK_FREE:
1822			kmemleak_free(log->ptr);
1823			break;
1824		case KMEMLEAK_FREE_PART:
1825			kmemleak_free_part(log->ptr, log->size);
1826			break;
1827		case KMEMLEAK_FREE_PERCPU:
1828			kmemleak_free_percpu(log->ptr);
1829			break;
1830		case KMEMLEAK_NOT_LEAK:
1831			kmemleak_not_leak(log->ptr);
1832			break;
1833		case KMEMLEAK_IGNORE:
1834			kmemleak_ignore(log->ptr);
1835			break;
1836		case KMEMLEAK_SCAN_AREA:
1837			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
1838			break;
1839		case KMEMLEAK_NO_SCAN:
1840			kmemleak_no_scan(log->ptr);
1841			break;
1842		default:
1843			kmemleak_warn("Unknown early log operation: %d\n",
1844				      log->op_type);
1845		}
1846
1847		if (kmemleak_warning) {
1848			print_log_trace(log);
1849			kmemleak_warning = 0;
1850		}
1851	}
1852}
1853
1854/*
1855 * Late initialization function.
1856 */
1857static int __init kmemleak_late_init(void)
1858{
1859	struct dentry *dentry;
1860
1861	kmemleak_initialized = 1;
1862
1863	if (kmemleak_error) {
1864		/*
1865		 * Some error occurred and kmemleak was disabled. There is a
1866		 * small chance that kmemleak_disable() was called immediately
1867		 * after setting kmemleak_initialized and we may end up with
1868		 * two clean-up threads but serialized by scan_mutex.
1869		 */
1870		schedule_work(&cleanup_work);
1871		return -ENOMEM;
1872	}
1873
1874	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
1875				     &kmemleak_fops);
1876	if (!dentry)
1877		pr_warning("Failed to create the debugfs kmemleak file\n");
1878	mutex_lock(&scan_mutex);
1879	start_scan_thread();
1880	mutex_unlock(&scan_mutex);
1881
1882	pr_info("Kernel memory leak detector initialized\n");
1883
1884	return 0;
1885}
1886late_initcall(kmemleak_late_init);
v3.1
   1/*
   2 * mm/kmemleak.c
   3 *
   4 * Copyright (C) 2008 ARM Limited
   5 * Written by Catalin Marinas <catalin.marinas@arm.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program; if not, write to the Free Software
  18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  19 *
  20 *
  21 * For more information on the algorithm and kmemleak usage, please see
  22 * Documentation/kmemleak.txt.
  23 *
  24 * Notes on locking
  25 * ----------------
  26 *
  27 * The following locks and mutexes are used by kmemleak:
  28 *
  29 * - kmemleak_lock (rwlock): protects the object_list modifications and
  30 *   accesses to the object_tree_root. The object_list is the main list
  31 *   holding the metadata (struct kmemleak_object) for the allocated memory
  32 *   blocks. The object_tree_root is a priority search tree used to look-up
  33 *   metadata based on a pointer to the corresponding memory block.  The
  34 *   kmemleak_object structures are added to the object_list and
  35 *   object_tree_root in the create_object() function called from the
  36 *   kmemleak_alloc() callback and removed in delete_object() called from the
  37 *   kmemleak_free() callback
  38 * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to
  39 *   the metadata (e.g. count) are protected by this lock. Note that some
  40 *   members of this structure may be protected by other means (atomic or
  41 *   kmemleak_lock). This lock is also held when scanning the corresponding
  42 *   memory block to avoid the kernel freeing it via the kmemleak_free()
  43 *   callback. This is less heavyweight than holding a global lock like
  44 *   kmemleak_lock during scanning
  45 * - scan_mutex (mutex): ensures that only one thread may scan the memory for
  46 *   unreferenced objects at a time. The gray_list contains the objects which
  47 *   are already referenced or marked as false positives and need to be
  48 *   scanned. This list is only modified during a scanning episode when the
  49 *   scan_mutex is held. At the end of a scan, the gray_list is always empty.
  50 *   Note that the kmemleak_object.use_count is incremented when an object is
  51 *   added to the gray_list and therefore cannot be freed. This mutex also
  52 *   prevents multiple users of the "kmemleak" debugfs file together with
  53 *   modifications to the memory scanning parameters including the scan_thread
  54 *   pointer
  55 *
  56 * The kmemleak_object structures have a use_count incremented or decremented
  57 * using the get_object()/put_object() functions. When the use_count becomes
  58 * 0, this count can no longer be incremented and put_object() schedules the
  59 * kmemleak_object freeing via an RCU callback. All calls to the get_object()
  60 * function must be protected by rcu_read_lock() to avoid accessing a freed
  61 * structure.
  62 */
  63
  64#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  65
  66#include <linux/init.h>
  67#include <linux/kernel.h>
  68#include <linux/list.h>
  69#include <linux/sched.h>
  70#include <linux/jiffies.h>
  71#include <linux/delay.h>
  72#include <linux/module.h>
  73#include <linux/kthread.h>
  74#include <linux/prio_tree.h>
  75#include <linux/fs.h>
  76#include <linux/debugfs.h>
  77#include <linux/seq_file.h>
  78#include <linux/cpumask.h>
  79#include <linux/spinlock.h>
  80#include <linux/mutex.h>
  81#include <linux/rcupdate.h>
  82#include <linux/stacktrace.h>
  83#include <linux/cache.h>
  84#include <linux/percpu.h>
  85#include <linux/hardirq.h>
  86#include <linux/mmzone.h>
  87#include <linux/slab.h>
  88#include <linux/thread_info.h>
  89#include <linux/err.h>
  90#include <linux/uaccess.h>
  91#include <linux/string.h>
  92#include <linux/nodemask.h>
  93#include <linux/mm.h>
  94#include <linux/workqueue.h>
  95#include <linux/crc32.h>
  96
  97#include <asm/sections.h>
  98#include <asm/processor.h>
  99#include <linux/atomic.h>
 100
 101#include <linux/kmemcheck.h>
 102#include <linux/kmemleak.h>
 103
 104/*
 105 * Kmemleak configuration and common defines.
 106 */
 107#define MAX_TRACE		16	/* stack trace length */
 108#define MSECS_MIN_AGE		5000	/* minimum object age for reporting */
 109#define SECS_FIRST_SCAN		60	/* delay before the first scan */
 110#define SECS_SCAN_WAIT		600	/* subsequent auto scanning delay */
 111#define MAX_SCAN_SIZE		4096	/* maximum size of a scanned block */
 112
 113#define BYTES_PER_POINTER	sizeof(void *)
 114
 115/* GFP bitmask for kmemleak internal allocations */
 116#define gfp_kmemleak_mask(gfp)	(((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
 117				 __GFP_NORETRY | __GFP_NOMEMALLOC | \
 118				 __GFP_NOWARN)
 119
 120/* scanning area inside a memory block */
 121struct kmemleak_scan_area {
 122	struct hlist_node node;
 123	unsigned long start;
 124	size_t size;
 125};
 126
 127#define KMEMLEAK_GREY	0
 128#define KMEMLEAK_BLACK	-1
 129
 130/*
 131 * Structure holding the metadata for each allocated memory block.
 132 * Modifications to such objects should be made while holding the
 133 * object->lock. Insertions or deletions from object_list, gray_list or
 134 * tree_node are already protected by the corresponding locks or mutex (see
 135 * the notes on locking above). These objects are reference-counted
 136 * (use_count) and freed using the RCU mechanism.
 137 */
 138struct kmemleak_object {
 139	spinlock_t lock;
 140	unsigned long flags;		/* object status flags */
 141	struct list_head object_list;
 142	struct list_head gray_list;
 143	struct prio_tree_node tree_node;
 144	struct rcu_head rcu;		/* object_list lockless traversal */
 145	/* object usage count; object freed when use_count == 0 */
 146	atomic_t use_count;
 147	unsigned long pointer;
 148	size_t size;
149	/* minimum number of pointers found before it is considered a leak */
 150	int min_count;
 151	/* the total number of pointers found pointing to this object */
 152	int count;
 153	/* checksum for detecting modified objects */
 154	u32 checksum;
 155	/* memory ranges to be scanned inside an object (empty for all) */
 156	struct hlist_head area_list;
 157	unsigned long trace[MAX_TRACE];
 158	unsigned int trace_len;
 159	unsigned long jiffies;		/* creation timestamp */
 160	pid_t pid;			/* pid of the current task */
 161	char comm[TASK_COMM_LEN];	/* executable name */
 162};
 163
 164/* flag representing the memory block allocation status */
 165#define OBJECT_ALLOCATED	(1 << 0)
166/* flag set after the first reporting of an unreferenced object */
 167#define OBJECT_REPORTED		(1 << 1)
 168/* flag set to not scan the object */
 169#define OBJECT_NO_SCAN		(1 << 2)
 170
 171/* number of bytes to print per line; must be 16 or 32 */
 172#define HEX_ROW_SIZE		16
 173/* number of bytes to print at a time (1, 2, 4, 8) */
 174#define HEX_GROUP_SIZE		1
 175/* include ASCII after the hex output */
 176#define HEX_ASCII		1
 177/* max number of lines to be printed */
 178#define HEX_MAX_LINES		2
 179
 180/* the list of all allocated objects */
 181static LIST_HEAD(object_list);
 182/* the list of gray-colored objects (see color_gray comment below) */
 183static LIST_HEAD(gray_list);
 184/* prio search tree for object boundaries */
 185static struct prio_tree_root object_tree_root;
 186/* rw_lock protecting the access to object_list and prio_tree_root */
 187static DEFINE_RWLOCK(kmemleak_lock);
 188
 189/* allocation caches for kmemleak internal data */
 190static struct kmem_cache *object_cache;
 191static struct kmem_cache *scan_area_cache;
 192
 193/* set if tracing memory operations is enabled */
 194static atomic_t kmemleak_enabled = ATOMIC_INIT(0);
 195/* set in the late_initcall if there were no errors */
 196static atomic_t kmemleak_initialized = ATOMIC_INIT(0);
 197/* enables or disables early logging of the memory operations */
 198static atomic_t kmemleak_early_log = ATOMIC_INIT(1);
199/* set if a fatal kmemleak error has occurred */
 200static atomic_t kmemleak_error = ATOMIC_INIT(0);
 201
 202/* minimum and maximum address that may be valid pointers */
 203static unsigned long min_addr = ULONG_MAX;
 204static unsigned long max_addr;
 205
 206static struct task_struct *scan_thread;
 207/* used to avoid reporting of recently allocated objects */
 208static unsigned long jiffies_min_age;
 209static unsigned long jiffies_last_scan;
 210/* delay between automatic memory scannings */
 211static signed long jiffies_scan_wait;
 212/* enables or disables the task stacks scanning */
 213static int kmemleak_stack_scan = 1;
 214/* protects the memory scanning, parameters and debug/kmemleak file access */
 215static DEFINE_MUTEX(scan_mutex);
216/* setting kmemleak=on will set this var, skipping the disable */
 217static int kmemleak_skip_disable;
 218
 219
 220/*
 221 * Early object allocation/freeing logging. Kmemleak is initialized after the
 222 * kernel allocator. However, both the kernel allocator and kmemleak may
 223 * allocate memory blocks which need to be tracked. Kmemleak defines an
 224 * arbitrary buffer to hold the allocation/freeing information before it is
 225 * fully initialized.
 226 */
 227
 228/* kmemleak operation type for early logging */
 229enum {
 230	KMEMLEAK_ALLOC,
 231	KMEMLEAK_FREE,
 232	KMEMLEAK_FREE_PART,
 233	KMEMLEAK_NOT_LEAK,
 234	KMEMLEAK_IGNORE,
 235	KMEMLEAK_SCAN_AREA,
 236	KMEMLEAK_NO_SCAN
 237};
 238
 239/*
 240 * Structure holding the information passed to kmemleak callbacks during the
 241 * early logging.
 242 */
 243struct early_log {
 244	int op_type;			/* kmemleak operation type */
 245	const void *ptr;		/* allocated/freed memory block */
 246	size_t size;			/* memory block size */
 247	int min_count;			/* minimum reference count */
 248	unsigned long trace[MAX_TRACE];	/* stack trace */
 249	unsigned int trace_len;		/* stack trace length */
 250};
 251
 252/* early logging buffer and current position */
 253static struct early_log
 254	early_log[CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE] __initdata;
 255static int crt_early_log __initdata;
 256
 257static void kmemleak_disable(void);
 258
 259/*
 260 * Print a warning and dump the stack trace.
 261 */
 262#define kmemleak_warn(x...)	do {	\
 263	pr_warning(x);			\
 264	dump_stack();			\
 265} while (0)
 266
 267/*
 268 * Macro invoked when a serious kmemleak condition occurred and cannot be
 269 * recovered from. Kmemleak will be disabled and further allocation/freeing
270 * tracing will no longer be available.
 271 */
 272#define kmemleak_stop(x...)	do {	\
 273	kmemleak_warn(x);		\
 274	kmemleak_disable();		\
 275} while (0)
 276
 277/*
 278 * Printing of the objects hex dump to the seq file. The number of lines to be
 279 * printed is limited to HEX_MAX_LINES to prevent seq file spamming. The
 280 * actual number of printed bytes depends on HEX_ROW_SIZE. It must be called
 281 * with the object->lock held.
 282 */
 283static void hex_dump_object(struct seq_file *seq,
 284			    struct kmemleak_object *object)
 285{
 286	const u8 *ptr = (const u8 *)object->pointer;
 287	int i, len, remaining;
 288	unsigned char linebuf[HEX_ROW_SIZE * 5];
 289
 290	/* limit the number of lines to HEX_MAX_LINES */
 291	remaining = len =
 292		min(object->size, (size_t)(HEX_MAX_LINES * HEX_ROW_SIZE));
 293
 294	seq_printf(seq, "  hex dump (first %d bytes):\n", len);
 295	for (i = 0; i < len; i += HEX_ROW_SIZE) {
 296		int linelen = min(remaining, HEX_ROW_SIZE);
 297
 298		remaining -= HEX_ROW_SIZE;
 299		hex_dump_to_buffer(ptr + i, linelen, HEX_ROW_SIZE,
 300				   HEX_GROUP_SIZE, linebuf, sizeof(linebuf),
 301				   HEX_ASCII);
 302		seq_printf(seq, "    %s\n", linebuf);
 303	}
 304}
 305
 306/*
 307 * Object colors, encoded with count and min_count:
 308 * - white - orphan object, not enough references to it (count < min_count)
 309 * - gray  - not orphan, not marked as false positive (min_count == 0) or
 310 *		sufficient references to it (count >= min_count)
 311 * - black - ignore, it doesn't contain references (e.g. text section)
 312 *		(min_count == -1). No function defined for this color.
 313 * Newly created objects don't have any color assigned (object->count == -1)
 314 * before the next memory scan when they become white.
 315 */
 316static bool color_white(const struct kmemleak_object *object)
 317{
 318	return object->count != KMEMLEAK_BLACK &&
 319		object->count < object->min_count;
 320}
 321
 322static bool color_gray(const struct kmemleak_object *object)
 323{
 324	return object->min_count != KMEMLEAK_BLACK &&
 325		object->count >= object->min_count;
 326}
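/*
 * Worked examples of the encoding above (values are illustrative):
 *   count = 0, min_count = 1        -> white (orphan candidate)
 *   count = 2, min_count = 1        -> gray  (sufficiently referenced)
 *   min_count = 0                   -> gray  (false positive, any count)
 *   min_count = KMEMLEAK_BLACK (-1) -> black (neither scanned nor reported)
 */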
 327
 328/*
 329 * Objects are considered unreferenced only if their color is white, they have
330 * not been deleted and have a minimum age to avoid false positives caused by
 331 * pointers temporarily stored in CPU registers.
 332 */
 333static bool unreferenced_object(struct kmemleak_object *object)
 334{
 335	return (color_white(object) && object->flags & OBJECT_ALLOCATED) &&
 336		time_before_eq(object->jiffies + jiffies_min_age,
 337			       jiffies_last_scan);
 338}
 339
 340/*
 341 * Printing of the unreferenced objects information to the seq file. The
 342 * print_unreferenced function must be called with the object->lock held.
 343 */
 344static void print_unreferenced(struct seq_file *seq,
 345			       struct kmemleak_object *object)
 346{
 347	int i;
 348	unsigned int msecs_age = jiffies_to_msecs(jiffies - object->jiffies);
 349
 350	seq_printf(seq, "unreferenced object 0x%08lx (size %zu):\n",
 351		   object->pointer, object->size);
 352	seq_printf(seq, "  comm \"%s\", pid %d, jiffies %lu (age %d.%03ds)\n",
 353		   object->comm, object->pid, object->jiffies,
 354		   msecs_age / 1000, msecs_age % 1000);
 355	hex_dump_object(seq, object);
 356	seq_printf(seq, "  backtrace:\n");
 357
 358	for (i = 0; i < object->trace_len; i++) {
 359		void *ptr = (void *)object->trace[i];
 360		seq_printf(seq, "    [<%p>] %pS\n", ptr, ptr);
 361	}
 362}
 363
 364/*
 365 * Print the kmemleak_object information. This function is used mainly for
366 * debugging special cases of kmemleak operations. It must be called with
 367 * the object->lock held.
 368 */
 369static void dump_object_info(struct kmemleak_object *object)
 370{
 371	struct stack_trace trace;
 372
 373	trace.nr_entries = object->trace_len;
 374	trace.entries = object->trace;
 375
 376	pr_notice("Object 0x%08lx (size %zu):\n",
 377		  object->tree_node.start, object->size);
 378	pr_notice("  comm \"%s\", pid %d, jiffies %lu\n",
 379		  object->comm, object->pid, object->jiffies);
 380	pr_notice("  min_count = %d\n", object->min_count);
 381	pr_notice("  count = %d\n", object->count);
 382	pr_notice("  flags = 0x%lx\n", object->flags);
 383	pr_notice("  checksum = %d\n", object->checksum);
 384	pr_notice("  backtrace:\n");
 385	print_stack_trace(&trace, 4);
 386}
 387
 388/*
 389 * Look-up a memory block metadata (kmemleak_object) in the priority search
 390 * tree based on a pointer value. If alias is 0, only values pointing to the
 391 * beginning of the memory block are allowed. The kmemleak_lock must be held
 392 * when calling this function.
 393 */
 394static struct kmemleak_object *lookup_object(unsigned long ptr, int alias)
 395{
 396	struct prio_tree_node *node;
 397	struct prio_tree_iter iter;
 398	struct kmemleak_object *object;
 399
 400	prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr);
 401	node = prio_tree_next(&iter);
 402	if (node) {
 403		object = prio_tree_entry(node, struct kmemleak_object,
 404					 tree_node);
 405		if (!alias && object->pointer != ptr) {
 406			pr_warning("Found object by alias at 0x%08lx\n", ptr);
 407			dump_stack();
 408			dump_object_info(object);
 409			object = NULL;
 410		}
 411	} else
 412		object = NULL;
 413
 414	return object;
 415}
 416
 417/*
 418 * Increment the object use_count. Return 1 if successful or 0 otherwise. Note
 419 * that once an object's use_count reached 0, the RCU freeing was already
 420 * registered and the object should no longer be used. This function must be
 421 * called under the protection of rcu_read_lock().
 422 */
 423static int get_object(struct kmemleak_object *object)
 424{
 425	return atomic_inc_not_zero(&object->use_count);
 426}
 427
 428/*
 429 * RCU callback to free a kmemleak_object.
 430 */
 431static void free_object_rcu(struct rcu_head *rcu)
 432{
 433	struct hlist_node *elem, *tmp;
 434	struct kmemleak_scan_area *area;
 435	struct kmemleak_object *object =
 436		container_of(rcu, struct kmemleak_object, rcu);
 437
 438	/*
 439	 * Once use_count is 0 (guaranteed by put_object), there is no other
 440	 * code accessing this object, hence no need for locking.
 441	 */
 442	hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) {
 443		hlist_del(elem);
 444		kmem_cache_free(scan_area_cache, area);
 445	}
 446	kmem_cache_free(object_cache, object);
 447}
 448
 449/*
 450 * Decrement the object use_count. Once the count is 0, free the object using
 451 * an RCU callback. Since put_object() may be called via the kmemleak_free() ->
 452 * delete_object() path, the delayed RCU freeing ensures that there is no
 453 * recursive call to the kernel allocator. Lock-less RCU object_list traversal
 454 * is also possible.
 455 */
 456static void put_object(struct kmemleak_object *object)
 457{
 458	if (!atomic_dec_and_test(&object->use_count))
 459		return;
 460
 461	/* should only get here after delete_object was called */
 462	WARN_ON(object->flags & OBJECT_ALLOCATED);
 463
 464	call_rcu(&object->rcu, free_object_rcu);
 465}
 466
 467/*
 468 * Look up an object in the prio search tree and increase its use_count.
 469 */
 470static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias)
 471{
 472	unsigned long flags;
 473	struct kmemleak_object *object = NULL;
 474
 475	rcu_read_lock();
 476	read_lock_irqsave(&kmemleak_lock, flags);
 477	if (ptr >= min_addr && ptr < max_addr)
 478		object = lookup_object(ptr, alias);
 479	read_unlock_irqrestore(&kmemleak_lock, flags);
 480
 481	/* check whether the object is still available */
 482	if (object && !get_object(object))
 483		object = NULL;
 484	rcu_read_unlock();
 485
 486	return object;
 487}
 488
 489/*
 490 * Save stack trace to the given array of MAX_TRACE size.
 491 */
 492static int __save_stack_trace(unsigned long *trace)
 493{
 494	struct stack_trace stack_trace;
 495
 496	stack_trace.max_entries = MAX_TRACE;
 497	stack_trace.nr_entries = 0;
 498	stack_trace.entries = trace;
 499	stack_trace.skip = 2;
 500	save_stack_trace(&stack_trace);
 501
 502	return stack_trace.nr_entries;
 503}
 504
 505/*
 506 * Create the metadata (struct kmemleak_object) corresponding to an allocated
 507 * memory block and add it to the object_list and object_tree_root.
 508 */
 509static struct kmemleak_object *create_object(unsigned long ptr, size_t size,
 510					     int min_count, gfp_t gfp)
 511{
 512	unsigned long flags;
 513	struct kmemleak_object *object;
 514	struct prio_tree_node *node;
 515
 516	object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
 517	if (!object) {
 518		pr_warning("Cannot allocate a kmemleak_object structure\n");
 519		kmemleak_disable();
 520		return NULL;
 521	}
 522
 523	INIT_LIST_HEAD(&object->object_list);
 524	INIT_LIST_HEAD(&object->gray_list);
 525	INIT_HLIST_HEAD(&object->area_list);
 526	spin_lock_init(&object->lock);
 527	atomic_set(&object->use_count, 1);
 528	object->flags = OBJECT_ALLOCATED;
 529	object->pointer = ptr;
 530	object->size = size;
 531	object->min_count = min_count;
 532	object->count = 0;			/* white color initially */
 533	object->jiffies = jiffies;
 534	object->checksum = 0;
 535
 536	/* task information */
 537	if (in_irq()) {
 538		object->pid = 0;
 539		strncpy(object->comm, "hardirq", sizeof(object->comm));
 540	} else if (in_softirq()) {
 541		object->pid = 0;
 542		strncpy(object->comm, "softirq", sizeof(object->comm));
 543	} else {
 544		object->pid = current->pid;
 545		/*
 546		 * There is a small chance of a race with set_task_comm(),
 547		 * however using get_task_comm() here may cause locking
 548		 * dependency issues with current->alloc_lock. In the worst
 549		 * case, the command line is not correct.
 550		 */
 551		strncpy(object->comm, current->comm, sizeof(object->comm));
 552	}
 553
 554	/* kernel backtrace */
 555	object->trace_len = __save_stack_trace(object->trace);
 556
 557	INIT_PRIO_TREE_NODE(&object->tree_node);
 558	object->tree_node.start = ptr;
 559	object->tree_node.last = ptr + size - 1;
 560
 561	write_lock_irqsave(&kmemleak_lock, flags);
 562
 563	min_addr = min(min_addr, ptr);
 564	max_addr = max(max_addr, ptr + size);
 565	node = prio_tree_insert(&object_tree_root, &object->tree_node);
 566	/*
 567	 * The code calling the kernel does not yet have the pointer to the
 568	 * memory block to be able to free it.  However, we still hold the
 569	 * kmemleak_lock here in case parts of the kernel started freeing
 570	 * random memory blocks.
 571	 */
 572	if (node != &object->tree_node) {
 573		kmemleak_stop("Cannot insert 0x%lx into the object search tree "
 574			      "(already existing)\n", ptr);
 575		object = lookup_object(ptr, 1);
 576		spin_lock(&object->lock);
 577		dump_object_info(object);
 578		spin_unlock(&object->lock);
 579
 580		goto out;
 581	}
 582	list_add_tail_rcu(&object->object_list, &object_list);
 583out:
 584	write_unlock_irqrestore(&kmemleak_lock, flags);
 585	return object;
 586}
 587
 588/*
 589 * Remove the metadata (struct kmemleak_object) for a memory block from the
 590 * object_list and object_tree_root and decrement its use_count.
 591 */
 592static void __delete_object(struct kmemleak_object *object)
 593{
 594	unsigned long flags;
 595
 596	write_lock_irqsave(&kmemleak_lock, flags);
 597	prio_tree_remove(&object_tree_root, &object->tree_node);
 598	list_del_rcu(&object->object_list);
 599	write_unlock_irqrestore(&kmemleak_lock, flags);
 600
 601	WARN_ON(!(object->flags & OBJECT_ALLOCATED));
 602	WARN_ON(atomic_read(&object->use_count) < 2);
 603
 604	/*
 605	 * Locking here also ensures that the corresponding memory block
 606	 * cannot be freed when it is being scanned.
 607	 */
 608	spin_lock_irqsave(&object->lock, flags);
 609	object->flags &= ~OBJECT_ALLOCATED;
 610	spin_unlock_irqrestore(&object->lock, flags);
 611	put_object(object);
 612}
 613
 614/*
 615 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 616 * delete it.
 617 */
 618static void delete_object_full(unsigned long ptr)
 619{
 620	struct kmemleak_object *object;
 621
 622	object = find_and_get_object(ptr, 0);
 623	if (!object) {
 624#ifdef DEBUG
 625		kmemleak_warn("Freeing unknown object at 0x%08lx\n",
 626			      ptr);
 627#endif
 628		return;
 629	}
 630	__delete_object(object);
 631	put_object(object);
 632}
 633
 634/*
 635 * Look up the metadata (struct kmemleak_object) corresponding to ptr and
 636 * delete it. If the memory block is partially freed, the function may create
 637 * additional metadata for the remaining parts of the block.
 638 */
 639static void delete_object_part(unsigned long ptr, size_t size)
 640{
 641	struct kmemleak_object *object;
 642	unsigned long start, end;
 643
 644	object = find_and_get_object(ptr, 1);
 645	if (!object) {
 646#ifdef DEBUG
 647		kmemleak_warn("Partially freeing unknown object at 0x%08lx "
 648			      "(size %zu)\n", ptr, size);
 649#endif
 650		return;
 651	}
 652	__delete_object(object);
 653
 654	/*
 655	 * Create one or two objects that may result from the memory block
 656	 * split. Note that partial freeing is only done by free_bootmem() and
 657	 * this happens before kmemleak_init() is called. The path below is
 658	 * only executed during early log recording in kmemleak_init(), so
 659	 * GFP_KERNEL is enough.
 660	 */
 661	start = object->pointer;
 662	end = object->pointer + object->size;
 663	if (ptr > start)
 664		create_object(start, ptr - start, object->min_count,
 665			      GFP_KERNEL);
 666	if (ptr + size < end)
 667		create_object(ptr + size, end - ptr - size, object->min_count,
 668			      GFP_KERNEL);
 669
 670	put_object(object);
 671}
 672
 673static void __paint_it(struct kmemleak_object *object, int color)
 674{
 675	object->min_count = color;
 676	if (color == KMEMLEAK_BLACK)
 677		object->flags |= OBJECT_NO_SCAN;
 678}
 679
 680static void paint_it(struct kmemleak_object *object, int color)
 681{
 682	unsigned long flags;
 683
 684	spin_lock_irqsave(&object->lock, flags);
 685	__paint_it(object, color);
 686	spin_unlock_irqrestore(&object->lock, flags);
 687}
 688
 689static void paint_ptr(unsigned long ptr, int color)
 690{
 691	struct kmemleak_object *object;
 692
 693	object = find_and_get_object(ptr, 0);
 694	if (!object) {
 695		kmemleak_warn("Trying to color unknown object "
 696			      "at 0x%08lx as %s\n", ptr,
 697			      (color == KMEMLEAK_GREY) ? "Grey" :
 698			      (color == KMEMLEAK_BLACK) ? "Black" : "Unknown");
 699		return;
 700	}
 701	paint_it(object, color);
 702	put_object(object);
 703}
 704
 705/*
 706 * Mark an object permanently as gray-colored so that it can no longer be
 707 * reported as a leak. This is used in general to mark a false positive.
 708 */
 709static void make_gray_object(unsigned long ptr)
 710{
 711	paint_ptr(ptr, KMEMLEAK_GREY);
 712}
 713
 714/*
 715 * Mark the object as black-colored so that it is ignored from scans and
 716 * reporting.
 717 */
 718static void make_black_object(unsigned long ptr)
 719{
 720	paint_ptr(ptr, KMEMLEAK_BLACK);
 721}
 722
 723/*
 724 * Add a scanning area to the object. If at least one such area is added,
 725 * kmemleak will only scan these ranges rather than the whole memory block.
 726 */
 727static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
 728{
 729	unsigned long flags;
 730	struct kmemleak_object *object;
 731	struct kmemleak_scan_area *area;
 732
 733	object = find_and_get_object(ptr, 1);
 734	if (!object) {
 735		kmemleak_warn("Adding scan area to unknown object at 0x%08lx\n",
 736			      ptr);
 737		return;
 738	}
 739
 740	area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
 741	if (!area) {
 742		pr_warning("Cannot allocate a scan area\n");
 743		goto out;
 744	}
 745
 746	spin_lock_irqsave(&object->lock, flags);
 747	if (ptr + size > object->pointer + object->size) {
 748		kmemleak_warn("Scan area larger than object 0x%08lx\n", ptr);
 749		dump_object_info(object);
 750		kmem_cache_free(scan_area_cache, area);
 751		goto out_unlock;
 752	}
 753
 754	INIT_HLIST_NODE(&area->node);
 755	area->start = ptr;
 756	area->size = size;
 757
 758	hlist_add_head(&area->node, &object->area_list);
 759out_unlock:
 760	spin_unlock_irqrestore(&object->lock, flags);
 761out:
 762	put_object(object);
 763}
 764
 765/*
766 * Set the OBJECT_NO_SCAN flag for the object corresponding to the given
767 * pointer. Such an object will not be scanned by kmemleak but references to it
 768 * are searched.
 769 */
 770static void object_no_scan(unsigned long ptr)
 771{
 772	unsigned long flags;
 773	struct kmemleak_object *object;
 774
 775	object = find_and_get_object(ptr, 0);
 776	if (!object) {
 777		kmemleak_warn("Not scanning unknown object at 0x%08lx\n", ptr);
 778		return;
 779	}
 780
 781	spin_lock_irqsave(&object->lock, flags);
 782	object->flags |= OBJECT_NO_SCAN;
 783	spin_unlock_irqrestore(&object->lock, flags);
 784	put_object(object);
 785}
 786
 787/*
 788 * Log an early kmemleak_* call to the early_log buffer. These calls will be
 789 * processed later once kmemleak is fully initialized.
 790 */
 791static void __init log_early(int op_type, const void *ptr, size_t size,
 792			     int min_count)
 793{
 794	unsigned long flags;
 795	struct early_log *log;
 796
 797	if (crt_early_log >= ARRAY_SIZE(early_log)) {
 798		pr_warning("Early log buffer exceeded, "
 799			   "please increase DEBUG_KMEMLEAK_EARLY_LOG_SIZE\n");
 800		kmemleak_disable();
 801		return;
 802	}
 803
 804	/*
 805	 * There is no need for locking since the kernel is still in UP mode
 806	 * at this stage. Disabling the IRQs is enough.
 807	 */
 808	local_irq_save(flags);
 809	log = &early_log[crt_early_log];
 810	log->op_type = op_type;
 811	log->ptr = ptr;
 812	log->size = size;
 813	log->min_count = min_count;
 814	if (op_type == KMEMLEAK_ALLOC)
 815		log->trace_len = __save_stack_trace(log->trace);
 816	crt_early_log++;
 817	local_irq_restore(flags);
 818}
 819
 820/*
 821 * Log an early allocated block and populate the stack trace.
 822 */
 823static void early_alloc(struct early_log *log)
 824{
 825	struct kmemleak_object *object;
 826	unsigned long flags;
 827	int i;
 828
 829	if (!atomic_read(&kmemleak_enabled) || !log->ptr || IS_ERR(log->ptr))
 830		return;
 831
 832	/*
 833	 * RCU locking needed to ensure object is not freed via put_object().
 834	 */
 835	rcu_read_lock();
 836	object = create_object((unsigned long)log->ptr, log->size,
 837			       log->min_count, GFP_ATOMIC);
 838	if (!object)
 839		goto out;
 840	spin_lock_irqsave(&object->lock, flags);
 841	for (i = 0; i < log->trace_len; i++)
 842		object->trace[i] = log->trace[i];
 843	object->trace_len = log->trace_len;
 844	spin_unlock_irqrestore(&object->lock, flags);
 845out:
 846	rcu_read_unlock();
 847}
 848
 849/**
 850 * kmemleak_alloc - register a newly allocated object
 851 * @ptr:	pointer to beginning of the object
 852 * @size:	size of the object
 853 * @min_count:	minimum number of references to this object. If during memory
 854 *		scanning a number of references less than @min_count is found,
 855 *		the object is reported as a memory leak. If @min_count is 0,
 856 *		the object is never reported as a leak. If @min_count is -1,
 857 *		the object is ignored (not scanned and not reported as a leak)
 858 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 859 *
 860 * This function is called from the kernel allocators when a new object
 861 * (memory block) is allocated (kmem_cache_alloc, kmalloc, vmalloc etc.).
 862 */
 863void __ref kmemleak_alloc(const void *ptr, size_t size, int min_count,
 864			  gfp_t gfp)
 865{
 866	pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count);
 867
 868	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 869		create_object((unsigned long)ptr, size, min_count, gfp);
 870	else if (atomic_read(&kmemleak_early_log))
 871		log_early(KMEMLEAK_ALLOC, ptr, size, min_count);
 872}
 873EXPORT_SYMBOL_GPL(kmemleak_alloc);
 874
 875/**
 876 * kmemleak_free - unregister a previously registered object
 877 * @ptr:	pointer to beginning of the object
 878 *
 879 * This function is called from the kernel allocators when an object (memory
 880 * block) is freed (kmem_cache_free, kfree, vfree etc.).
 881 */
 882void __ref kmemleak_free(const void *ptr)
 883{
 884	pr_debug("%s(0x%p)\n", __func__, ptr);
 885
 886	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 887		delete_object_full((unsigned long)ptr);
 888	else if (atomic_read(&kmemleak_early_log))
 889		log_early(KMEMLEAK_FREE, ptr, 0, 0);
 890}
 891EXPORT_SYMBOL_GPL(kmemleak_free);
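/*
 * A minimal sketch (not from this file) of manual registration for memory
 * that does not come from the slab/vmalloc allocators, e.g. carved out of a
 * driver-private pool. my_pool_carve() and my_pool_release() are assumed,
 * hypothetical helpers; min_count = 1 means the block is reported as a leak
 * when no reference to it is found during scanning.
 */
static void *my_pool_alloc(size_t size)
{
	void *obj = my_pool_carve(size);	/* hypothetical allocation */

	if (obj)
		kmemleak_alloc(obj, size, 1, GFP_KERNEL);
	return obj;
}

static void my_pool_free(void *obj)
{
	kmemleak_free(obj);			/* drop the metadata first */
	my_pool_release(obj);			/* hypothetical release */
}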
 892
 893/**
 894 * kmemleak_free_part - partially unregister a previously registered object
 895 * @ptr:	pointer to the beginning or inside the object. This also
 896 *		represents the start of the range to be freed
 897 * @size:	size to be unregistered
 898 *
 899 * This function is called when only a part of a memory block is freed
 900 * (usually from the bootmem allocator).
 901 */
 902void __ref kmemleak_free_part(const void *ptr, size_t size)
 903{
 904	pr_debug("%s(0x%p)\n", __func__, ptr);
 905
 906	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 907		delete_object_part((unsigned long)ptr, size);
 908	else if (atomic_read(&kmemleak_early_log))
 909		log_early(KMEMLEAK_FREE_PART, ptr, size, 0);
 910}
 911EXPORT_SYMBOL_GPL(kmemleak_free_part);
 912
 913/**
 914 * kmemleak_not_leak - mark an allocated object as false positive
 915 * @ptr:	pointer to beginning of the object
 916 *
 917 * Calling this function on an object will cause the memory block to no longer
918 * be reported as a leak and to always be scanned.
 919 */
 920void __ref kmemleak_not_leak(const void *ptr)
 921{
 922	pr_debug("%s(0x%p)\n", __func__, ptr);
 923
 924	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 925		make_gray_object((unsigned long)ptr);
 926	else if (atomic_read(&kmemleak_early_log))
 927		log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0);
 928}
 929EXPORT_SYMBOL(kmemleak_not_leak);
 930
 931/**
 932 * kmemleak_ignore - ignore an allocated object
 933 * @ptr:	pointer to beginning of the object
 934 *
 935 * Calling this function on an object will cause the memory block to be
 936 * ignored (not scanned and not reported as a leak). This is usually done when
 937 * it is known that the corresponding block is not a leak and does not contain
 938 * any references to other allocated memory blocks.
 939 */
 940void __ref kmemleak_ignore(const void *ptr)
 941{
 942	pr_debug("%s(0x%p)\n", __func__, ptr);
 943
 944	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 945		make_black_object((unsigned long)ptr);
 946	else if (atomic_read(&kmemleak_early_log))
 947		log_early(KMEMLEAK_IGNORE, ptr, 0, 0);
 948}
 949EXPORT_SYMBOL(kmemleak_ignore);
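/*
 * A sketch contrasting the two annotations above; my_singleton and my_blob
 * are assumed example allocations (error unwinding omitted for brevity).
 * kmemleak_not_leak() paints an object grey: never reported, still scanned
 * for references to other objects. kmemleak_ignore() paints it black:
 * neither reported nor scanned.
 */
static int __init my_annotations_init(void)
{
	void *my_singleton = kmalloc(64, GFP_KERNEL);	/* lives forever */
	void *my_blob = kmalloc(64, GFP_KERNEL);	/* holds no pointers */

	if (!my_singleton || !my_blob)
		return -ENOMEM;
	kmemleak_not_leak(my_singleton);
	kmemleak_ignore(my_blob);
	return 0;
}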
 950
 951/**
 952 * kmemleak_scan_area - limit the range to be scanned in an allocated object
 953 * @ptr:	pointer to beginning or inside the object. This also
 954 *		represents the start of the scan area
 955 * @size:	size of the scan area
 956 * @gfp:	kmalloc() flags used for kmemleak internal memory allocations
 957 *
 958 * This function is used when it is known that only certain parts of an object
 959 * contain references to other objects. Kmemleak will only scan these areas
960 * reducing the number of false negatives.
 961 */
 962void __ref kmemleak_scan_area(const void *ptr, size_t size, gfp_t gfp)
 963{
 964	pr_debug("%s(0x%p)\n", __func__, ptr);
 965
 966	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 967		add_scan_area((unsigned long)ptr, size, gfp);
 968	else if (atomic_read(&kmemleak_early_log))
 969		log_early(KMEMLEAK_SCAN_AREA, ptr, size, 0);
 970}
 971EXPORT_SYMBOL(kmemleak_scan_area);
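/*
 * A sketch with an assumed structure layout: when only part of an object
 * can hold pointers, restricting the scan to that part stops random payload
 * bytes from being misread as references, which would otherwise hide real
 * leaks (false negatives).
 */
struct my_big_object {
	char payload[4096];		/* opaque data, never holds pointers */
	struct list_head list;		/* the only pointer-bearing member */
};

static void my_register_big_object(struct my_big_object *obj)
{
	kmemleak_scan_area(&obj->list, sizeof(obj->list), GFP_KERNEL);
}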
 972
 973/**
 974 * kmemleak_no_scan - do not scan an allocated object
 975 * @ptr:	pointer to beginning of the object
 976 *
 977 * This function notifies kmemleak not to scan the given memory block. Useful
 978 * in situations where it is known that the given object does not contain any
 979 * references to other objects. Kmemleak will not scan such objects reducing
 980 * the number of false negatives.
 981 */
 982void __ref kmemleak_no_scan(const void *ptr)
 983{
 984	pr_debug("%s(0x%p)\n", __func__, ptr);
 985
 986	if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr))
 987		object_no_scan((unsigned long)ptr);
 988	else if (atomic_read(&kmemleak_early_log))
 989		log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0);
 990}
 991EXPORT_SYMBOL(kmemleak_no_scan);
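/*
 * A sketch with an assumed data-only buffer: the block remains tracked and
 * can still be reported as a leak, but its contents are never scanned for
 * pointers to other objects.
 */
static void *my_data_buf;

static void my_alloc_data_buf(void)
{
	my_data_buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (my_data_buf)
		kmemleak_no_scan(my_data_buf);
}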
 992
 993/*
 994 * Update an object's checksum and return true if it was modified.
 995 */
 996static bool update_checksum(struct kmemleak_object *object)
 997{
 998	u32 old_csum = object->checksum;
 999
1000	if (!kmemcheck_is_obj_initialized(object->pointer, object->size))
1001		return false;
1002
1003	object->checksum = crc32(0, (void *)object->pointer, object->size);
1004	return object->checksum != old_csum;
1005}
1006
1007/*
1008 * Memory scanning is a long process and it needs to be interruptible. This
1009 * function checks whether such interrupt condition occurred.
1010 */
1011static int scan_should_stop(void)
1012{
1013	if (!atomic_read(&kmemleak_enabled))
1014		return 1;
1015
1016	/*
1017	 * This function may be called from either process or kthread context,
1018	 * hence the need to check for both stop conditions.
1019	 */
1020	if (current->mm)
1021		return signal_pending(current);
1022	else
1023		return kthread_should_stop();
1024
1025	return 0;
1026}
1027
1028/*
1029 * Scan a memory block (exclusive range) for valid pointers and add those
1030 * found to the gray list.
1031 */
1032static void scan_block(void *_start, void *_end,
1033		       struct kmemleak_object *scanned, int allow_resched)
1034{
1035	unsigned long *ptr;
1036	unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER);
1037	unsigned long *end = _end - (BYTES_PER_POINTER - 1);
1038
1039	for (ptr = start; ptr < end; ptr++) {
1040		struct kmemleak_object *object;
1041		unsigned long flags;
1042		unsigned long pointer;
1043
1044		if (allow_resched)
1045			cond_resched();
1046		if (scan_should_stop())
1047			break;
1048
1049		/* don't scan uninitialized memory */
1050		if (!kmemcheck_is_obj_initialized((unsigned long)ptr,
1051						  BYTES_PER_POINTER))
1052			continue;
1053
1054		pointer = *ptr;
1055
1056		object = find_and_get_object(pointer, 1);
1057		if (!object)
1058			continue;
1059		if (object == scanned) {
1060			/* self referenced, ignore */
1061			put_object(object);
1062			continue;
1063		}
1064
1065		/*
1066		 * Avoid the lockdep recursive warning on object->lock being
1067		 * previously acquired in scan_object(). These locks are
1068		 * enclosed by scan_mutex.
1069		 */
1070		spin_lock_irqsave_nested(&object->lock, flags,
1071					 SINGLE_DEPTH_NESTING);
1072		if (!color_white(object)) {
1073			/* non-orphan, ignored or new */
1074			spin_unlock_irqrestore(&object->lock, flags);
1075			put_object(object);
1076			continue;
1077		}
1078
1079		/*
1080		 * Increase the object's reference count (number of pointers
1081		 * to the memory block). If this count reaches the required
1082		 * minimum, the object's color will become gray and it will be
1083		 * added to the gray_list.
1084		 */
1085		object->count++;
1086		if (color_gray(object)) {
1087			list_add_tail(&object->gray_list, &gray_list);
1088			spin_unlock_irqrestore(&object->lock, flags);
1089			continue;
1090		}
1091
1092		spin_unlock_irqrestore(&object->lock, flags);
1093		put_object(object);
1094	}
1095}
1096
1097/*
1098 * Scan a memory block corresponding to a kmemleak_object. A condition is
1099 * that object->use_count >= 1.
1100 */
1101static void scan_object(struct kmemleak_object *object)
1102{
1103	struct kmemleak_scan_area *area;
1104	struct hlist_node *elem;
1105	unsigned long flags;
1106
1107	/*
1108	 * Once the object->lock is acquired, the corresponding memory block
1109	 * cannot be freed (the same lock is acquired in delete_object).
1110	 */
1111	spin_lock_irqsave(&object->lock, flags);
1112	if (object->flags & OBJECT_NO_SCAN)
1113		goto out;
1114	if (!(object->flags & OBJECT_ALLOCATED))
1115		/* already freed object */
1116		goto out;
1117	if (hlist_empty(&object->area_list)) {
1118		void *start = (void *)object->pointer;
1119		void *end = (void *)(object->pointer + object->size);
1120
1121		while (start < end && (object->flags & OBJECT_ALLOCATED) &&
1122		       !(object->flags & OBJECT_NO_SCAN)) {
1123			scan_block(start, min(start + MAX_SCAN_SIZE, end),
1124				   object, 0);
1125			start += MAX_SCAN_SIZE;
1126
1127			spin_unlock_irqrestore(&object->lock, flags);
1128			cond_resched();
1129			spin_lock_irqsave(&object->lock, flags);
1130		}
1131	} else
1132		hlist_for_each_entry(area, elem, &object->area_list, node)
1133			scan_block((void *)area->start,
1134				   (void *)(area->start + area->size),
1135				   object, 0);
1136out:
1137	spin_unlock_irqrestore(&object->lock, flags);
1138}
1139
1140/*
1141 * Scan the objects already referenced (gray objects). More objects will be
1142 * referenced and, if there are no memory leaks, all the objects are scanned.
1143 */
1144static void scan_gray_list(void)
1145{
1146	struct kmemleak_object *object, *tmp;
1147
1148	/*
1149	 * The list traversal is safe for both tail additions and removals
1150	 * from inside the loop. The kmemleak objects cannot be freed from
1151	 * outside the loop because their use_count was incremented.
1152	 */
1153	object = list_entry(gray_list.next, typeof(*object), gray_list);
1154	while (&object->gray_list != &gray_list) {
1155		cond_resched();
1156
1157		/* may add new objects to the list */
1158		if (!scan_should_stop())
1159			scan_object(object);
1160
1161		tmp = list_entry(object->gray_list.next, typeof(*object),
1162				 gray_list);
1163
1164		/* remove the object from the list and release it */
1165		list_del(&object->gray_list);
1166		put_object(object);
1167
1168		object = tmp;
1169	}
1170	WARN_ON(!list_empty(&gray_list));
1171}
1172
1173/*
1174 * Scan data sections and all the referenced memory blocks allocated via the
1175 * kernel's standard allocators. This function must be called with the
1176 * scan_mutex held.
1177 */
1178static void kmemleak_scan(void)
1179{
1180	unsigned long flags;
1181	struct kmemleak_object *object;
1182	int i;
1183	int new_leaks = 0;
1184
1185	jiffies_last_scan = jiffies;
1186
1187	/* prepare the kmemleak_object's */
1188	rcu_read_lock();
1189	list_for_each_entry_rcu(object, &object_list, object_list) {
1190		spin_lock_irqsave(&object->lock, flags);
1191#ifdef DEBUG
1192		/*
1193		 * With a few exceptions there should be a maximum of
1194		 * 1 reference to any object at this point.
1195		 */
1196		if (atomic_read(&object->use_count) > 1) {
1197			pr_debug("object->use_count = %d\n",
1198				 atomic_read(&object->use_count));
1199			dump_object_info(object);
1200		}
1201#endif
1202		/* reset the reference count (whiten the object) */
1203		object->count = 0;
1204		if (color_gray(object) && get_object(object))
1205			list_add_tail(&object->gray_list, &gray_list);
1206
1207		spin_unlock_irqrestore(&object->lock, flags);
1208	}
1209	rcu_read_unlock();
1210
1211	/* data/bss scanning */
1212	scan_block(_sdata, _edata, NULL, 1);
1213	scan_block(__bss_start, __bss_stop, NULL, 1);
1214
1215#ifdef CONFIG_SMP
1216	/* per-cpu sections scanning */
1217	for_each_possible_cpu(i)
1218		scan_block(__per_cpu_start + per_cpu_offset(i),
1219			   __per_cpu_end + per_cpu_offset(i), NULL, 1);
1220#endif
1221
1222	/*
1223	 * Struct page scanning for each node. The code below is not yet safe
1224	 * with MEMORY_HOTPLUG.
1225	 */
1226	for_each_online_node(i) {
1227		pg_data_t *pgdat = NODE_DATA(i);
1228		unsigned long start_pfn = pgdat->node_start_pfn;
1229		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1230		unsigned long pfn;
1231
1232		for (pfn = start_pfn; pfn < end_pfn; pfn++) {
1233			struct page *page;
1234
1235			if (!pfn_valid(pfn))
1236				continue;
1237			page = pfn_to_page(pfn);
1238			/* only scan if page is in use */
1239			if (page_count(page) == 0)
1240				continue;
1241			scan_block(page, page + 1, NULL, 1);
1242		}
1243	}
1244
1245	/*
1246	 * Scanning the task stacks (may introduce false negatives).
1247	 */
1248	if (kmemleak_stack_scan) {
1249		struct task_struct *p, *g;
1250
1251		read_lock(&tasklist_lock);
1252		do_each_thread(g, p) {
1253			scan_block(task_stack_page(p), task_stack_page(p) +
1254				   THREAD_SIZE, NULL, 0);
1255		} while_each_thread(g, p);
1256		read_unlock(&tasklist_lock);
1257	}
1258
1259	/*
1260	 * Scan the objects already referenced from the sections scanned
1261	 * above.
1262	 */
1263	scan_gray_list();
1264
1265	/*
1266	 * Check for new or unreferenced objects modified since the previous
1267	 * scan and color them gray until the next scan.
1268	 */
1269	rcu_read_lock();
1270	list_for_each_entry_rcu(object, &object_list, object_list) {
1271		spin_lock_irqsave(&object->lock, flags);
1272		if (color_white(object) && (object->flags & OBJECT_ALLOCATED)
1273		    && update_checksum(object) && get_object(object)) {
1274			/* color it gray temporarily */
1275			object->count = object->min_count;
1276			list_add_tail(&object->gray_list, &gray_list);
1277		}
1278		spin_unlock_irqrestore(&object->lock, flags);
1279	}
1280	rcu_read_unlock();
1281
1282	/*
1283	 * Re-scan the gray list for modified unreferenced objects.
1284	 */
1285	scan_gray_list();
1286
1287	/*
1288	 * If scanning was stopped do not report any new unreferenced objects.
1289	 */
1290	if (scan_should_stop())
1291		return;
1292
1293	/*
1294	 * Scanning result reporting.
1295	 */
1296	rcu_read_lock();
1297	list_for_each_entry_rcu(object, &object_list, object_list) {
1298		spin_lock_irqsave(&object->lock, flags);
1299		if (unreferenced_object(object) &&
1300		    !(object->flags & OBJECT_REPORTED)) {
1301			object->flags |= OBJECT_REPORTED;
1302			new_leaks++;
1303		}
1304		spin_unlock_irqrestore(&object->lock, flags);
1305	}
1306	rcu_read_unlock();
1307
1308	if (new_leaks)
1309		pr_info("%d new suspected memory leaks (see "
1310			"/sys/kernel/debug/kmemleak)\n", new_leaks);
1311
1312}
1313
1314/*
1315 * Thread function performing automatic memory scanning. Unreferenced objects
1316 * at the end of a memory scan are reported but only the first time.
1317 */
1318static int kmemleak_scan_thread(void *arg)
1319{
1320	static int first_run = 1;
1321
1322	pr_info("Automatic memory scanning thread started\n");
1323	set_user_nice(current, 10);
1324
1325	/*
1326	 * Wait before the first scan to allow the system to fully initialize.
1327	 */
1328	if (first_run) {
1329		first_run = 0;
1330		ssleep(SECS_FIRST_SCAN);
1331	}
1332
1333	while (!kthread_should_stop()) {
1334		signed long timeout = jiffies_scan_wait;
1335
1336		mutex_lock(&scan_mutex);
1337		kmemleak_scan();
1338		mutex_unlock(&scan_mutex);
1339
1340		/* wait before the next scan */
1341		while (timeout && !kthread_should_stop())
1342			timeout = schedule_timeout_interruptible(timeout);
1343	}
1344
1345	pr_info("Automatic memory scanning thread ended\n");
1346
1347	return 0;
1348}
1349
1350/*
1351 * Start the automatic memory scanning thread. This function must be called
1352 * with the scan_mutex held.
1353 */
1354static void start_scan_thread(void)
1355{
1356	if (scan_thread)
1357		return;
1358	scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak");
1359	if (IS_ERR(scan_thread)) {
1360		pr_warning("Failed to create the scan thread\n");
1361		scan_thread = NULL;
1362	}
1363}
1364
1365/*
1366 * Stop the automatic memory scanning thread. This function must be called
1367 * with the scan_mutex held.
1368 */
1369static void stop_scan_thread(void)
1370{
1371	if (scan_thread) {
1372		kthread_stop(scan_thread);
1373		scan_thread = NULL;
1374	}
1375}
1376
1377/*
1378 * Iterate over the object_list and return the first valid object at or after
1379 * the required position with its use_count incremented. The function triggers
1380 * a memory scan when the pos argument points to the first position.
1381 */
1382static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos)
1383{
1384	struct kmemleak_object *object;
1385	loff_t n = *pos;
1386	int err;
1387
1388	err = mutex_lock_interruptible(&scan_mutex);
1389	if (err < 0)
1390		return ERR_PTR(err);
1391
1392	rcu_read_lock();
1393	list_for_each_entry_rcu(object, &object_list, object_list) {
1394		if (n-- > 0)
1395			continue;
1396		if (get_object(object))
1397			goto out;
1398	}
1399	object = NULL;
1400out:
1401	return object;
1402}
1403
1404/*
1405 * Return the next object in the object_list. The function decrements the
1406 * use_count of the previous object and increases that of the next one.
1407 */
1408static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1409{
1410	struct kmemleak_object *prev_obj = v;
1411	struct kmemleak_object *next_obj = NULL;
1412	struct list_head *n = &prev_obj->object_list;
1413
1414	++(*pos);
1415
1416	list_for_each_continue_rcu(n, &object_list) {
1417		struct kmemleak_object *obj =
1418			list_entry(n, struct kmemleak_object, object_list);
1419		if (get_object(obj)) {
1420			next_obj = obj;
1421			break;
1422		}
1423	}
1424
1425	put_object(prev_obj);
1426	return next_obj;
1427}
1428
1429/*
1430 * Decrement the use_count of the last object required, if any.
1431 */
1432static void kmemleak_seq_stop(struct seq_file *seq, void *v)
1433{
1434	if (!IS_ERR(v)) {
1435		/*
1436		 * kmemleak_seq_start may return ERR_PTR if the scan_mutex
1437		 * waiting was interrupted, so only release it if !IS_ERR.
1438		 */
1439		rcu_read_unlock();
1440		mutex_unlock(&scan_mutex);
1441		if (v)
1442			put_object(v);
1443	}
1444}
1445
1446/*
1447 * Print the information for an unreferenced object to the seq file.
1448 */
1449static int kmemleak_seq_show(struct seq_file *seq, void *v)
1450{
1451	struct kmemleak_object *object = v;
1452	unsigned long flags;
1453
1454	spin_lock_irqsave(&object->lock, flags);
1455	if ((object->flags & OBJECT_REPORTED) && unreferenced_object(object))
1456		print_unreferenced(seq, object);
1457	spin_unlock_irqrestore(&object->lock, flags);
1458	return 0;
1459}
1460
1461static const struct seq_operations kmemleak_seq_ops = {
1462	.start = kmemleak_seq_start,
1463	.next  = kmemleak_seq_next,
1464	.stop  = kmemleak_seq_stop,
1465	.show  = kmemleak_seq_show,
1466};
1467
1468static int kmemleak_open(struct inode *inode, struct file *file)
1469{
1470	if (!atomic_read(&kmemleak_enabled))
1471		return -EBUSY;
1472
1473	return seq_open(file, &kmemleak_seq_ops);
1474}
1475
1476static int kmemleak_release(struct inode *inode, struct file *file)
1477{
1478	return seq_release(inode, file);
1479}
1480
1481static int dump_str_object_info(const char *str)
1482{
1483	unsigned long flags;
1484	struct kmemleak_object *object;
1485	unsigned long addr;
1486
1487	addr = simple_strtoul(str, NULL, 0);
1488	object = find_and_get_object(addr, 0);
1489	if (!object) {
1490		pr_info("Unknown object at 0x%08lx\n", addr);
1491		return -EINVAL;
1492	}
1493
1494	spin_lock_irqsave(&object->lock, flags);
1495	dump_object_info(object);
1496	spin_unlock_irqrestore(&object->lock, flags);
1497
1498	put_object(object);
1499	return 0;
1500}
1501
1502/*
1503 * We use grey instead of black so that we can do future scans on the same
1504 * objects. If they were painted black and never scanned again, such objects
1505 * could later come to hold references to newly allocated objects and we
1506 * would end up with false positives.
1507 */
1508static void kmemleak_clear(void)
1509{
1510	struct kmemleak_object *object;
1511	unsigned long flags;
1512
1513	rcu_read_lock();
1514	list_for_each_entry_rcu(object, &object_list, object_list) {
1515		spin_lock_irqsave(&object->lock, flags);
1516		if ((object->flags & OBJECT_REPORTED) &&
1517		    unreferenced_object(object))
1518			__paint_it(object, KMEMLEAK_GREY);
1519		spin_unlock_irqrestore(&object->lock, flags);
1520	}
1521	rcu_read_unlock();
1522}
1523
1524/*
1525 * File write operation to configure kmemleak at run-time. The following
1526 * commands can be written to the /sys/kernel/debug/kmemleak file:
1527 *   off	- disable kmemleak (irreversible)
1528 *   stack=on	- enable the task stacks scanning
1529 *   stack=off	- disable the tasks stacks scanning
1530 *   scan=on	- start the automatic memory scanning thread
1531 *   scan=off	- stop the automatic memory scanning thread
1532 *   scan=...	- set the automatic memory scanning period in seconds (0 to
1533 *		  disable it)
1534 *   scan	- trigger a memory scan
1535 *   clear	- mark all currently reported unreferenced kmemleak objects
1536 *		  as grey so that they are no longer printed
1537 *   dump=...	- dump information about the object found at the given address
1538 */
1539static ssize_t kmemleak_write(struct file *file, const char __user *user_buf,
1540			      size_t size, loff_t *ppos)
1541{
1542	char buf[64];
1543	int buf_size;
1544	int ret;
1545
1546	buf_size = min(size, (sizeof(buf) - 1));
1547	if (strncpy_from_user(buf, user_buf, buf_size) < 0)
1548		return -EFAULT;
1549	buf[buf_size] = 0;
1550
1551	ret = mutex_lock_interruptible(&scan_mutex);
1552	if (ret < 0)
1553		return ret;
1554
1555	if (strncmp(buf, "off", 3) == 0)
1556		kmemleak_disable();
1557	else if (strncmp(buf, "stack=on", 8) == 0)
1558		kmemleak_stack_scan = 1;
1559	else if (strncmp(buf, "stack=off", 9) == 0)
1560		kmemleak_stack_scan = 0;
1561	else if (strncmp(buf, "scan=on", 7) == 0)
1562		start_scan_thread();
1563	else if (strncmp(buf, "scan=off", 8) == 0)
1564		stop_scan_thread();
1565	else if (strncmp(buf, "scan=", 5) == 0) {
1566		unsigned long secs;
1567
1568		ret = strict_strtoul(buf + 5, 0, &secs);
1569		if (ret < 0)
1570			goto out;
1571		stop_scan_thread();
1572		if (secs) {
1573			jiffies_scan_wait = msecs_to_jiffies(secs * 1000);
1574			start_scan_thread();
1575		}
1576	} else if (strncmp(buf, "scan", 4) == 0)
1577		kmemleak_scan();
1578	else if (strncmp(buf, "clear", 5) == 0)
1579		kmemleak_clear();
1580	else if (strncmp(buf, "dump=", 5) == 0)
1581		ret = dump_str_object_info(buf + 5);
1582	else
1583		ret = -EINVAL;
1584
1585out:
1586	mutex_unlock(&scan_mutex);
1587	if (ret < 0)
1588		return ret;
1589
1590	/* ignore the rest of the buffer, only one command at a time */
1591	*ppos += size;
1592	return size;
1593}
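/*
 * Example, illustrative only: combining the commands parsed above, the
 * automatic scan period can be changed at run-time; "scan=600" restarts
 * the scan thread with a ten-minute period and "scan=0" stops it:
 *
 *	echo scan=600 > /sys/kernel/debug/kmemleak
 *	echo scan=0 > /sys/kernel/debug/kmemleak
 */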
1594
1595static const struct file_operations kmemleak_fops = {
1596	.owner		= THIS_MODULE,
1597	.open		= kmemleak_open,
1598	.read		= seq_read,
1599	.write		= kmemleak_write,
1600	.llseek		= seq_lseek,
1601	.release	= kmemleak_release,
1602};
1603
1604/*
1605 * Perform the freeing of the kmemleak internal objects after waiting for any
1606 * current memory scan to complete.
1607 */
1608static void kmemleak_do_cleanup(struct work_struct *work)
1609{
1610	struct kmemleak_object *object;
1611
1612	mutex_lock(&scan_mutex);
1613	stop_scan_thread();
1614
1615	rcu_read_lock();
1616	list_for_each_entry_rcu(object, &object_list, object_list)
1617		delete_object_full(object->pointer);
1618	rcu_read_unlock();
1619	mutex_unlock(&scan_mutex);
1620}
1621
1622static DECLARE_WORK(cleanup_work, kmemleak_do_cleanup);
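/*
 * Note, not in the original file: DECLARE_WORK() statically binds
 * cleanup_work to kmemleak_do_cleanup(), so kmemleak_disable() below only
 * has to call schedule_work(&cleanup_work) to defer the cleanup to process
 * context, where taking scan_mutex and sleeping are allowed.
 */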
1623
1624/*
1625 * Disable kmemleak. No memory allocation/freeing will be traced once this
1626 * function is called. Disabling kmemleak is an irreversible operation.
1627 */
1628static void kmemleak_disable(void)
1629{
1630	/* atomically check whether it was already invoked */
1631	if (atomic_cmpxchg(&kmemleak_error, 0, 1))
1632		return;
1633
1634	/* stop any memory operation tracing */
1635	atomic_set(&kmemleak_early_log, 0);
1636	atomic_set(&kmemleak_enabled, 0);
1637
1638	/* check whether it is too early for a kernel thread */
1639	if (atomic_read(&kmemleak_initialized))
1640		schedule_work(&cleanup_work);
1641
1642	pr_info("Kernel memory leak detector disabled\n");
1643}
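/*
 * Note, not in the original file: atomic_cmpxchg() returns the previous
 * value of kmemleak_error, so only the first caller observes 0 and carries
 * on; later or concurrent callers return early. This is the usual
 * run-once idiom:
 *
 *	if (atomic_cmpxchg(&flag, 0, 1))
 *		return;		- someone else already ran the code
 */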
1644
1645/*
1646 * Allow boot-time kmemleak disabling (enabled by default).
1647 */
1648static int kmemleak_boot_config(char *str)
1649{
1650	if (!str)
1651		return -EINVAL;
1652	if (strcmp(str, "off") == 0)
1653		kmemleak_disable();
1654	else if (strcmp(str, "on") == 0)
1655		kmemleak_skip_disable = 1;
1656	else
1657		return -EINVAL;
1658	return 0;
1659}
1660early_param("kmemleak", kmemleak_boot_config);
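/*
 * Example, illustrative only: when the kernel is built with
 * CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y, the detector stays off unless the
 * boot command line requests it:
 *
 *	kmemleak=on
 *
 * Conversely, booting with "kmemleak=off" disables it even when it is
 * enabled by default in the configuration.
 */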
1661
1662/*
1663 * Kmemleak initialization.
1664 */
1665void __init kmemleak_init(void)
1666{
1667	int i;
1668	unsigned long flags;
1669
1670#ifdef CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF
1671	if (!kmemleak_skip_disable) {
1672		kmemleak_disable();
1673		return;
1674	}
1675#endif
1676
1677	jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
1678	jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
1679
1680	object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
1681	scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);
1682	INIT_PRIO_TREE_ROOT(&object_tree_root);
1683
1684	/* the kernel is still in UP mode, so disabling the IRQs is enough */
1685	local_irq_save(flags);
1686	if (!atomic_read(&kmemleak_error)) {
1687		atomic_set(&kmemleak_enabled, 1);
1688		atomic_set(&kmemleak_early_log, 0);
1689	}
1690	local_irq_restore(flags);
1691
1692	/*
1693	 * This is the point where tracking allocations is safe. Automatic
1694	 * scanning is started during the late initcall. Add the early logged
1695	 * callbacks to the kmemleak infrastructure.
1696	 */
1697	for (i = 0; i < crt_early_log; i++) {
1698		struct early_log *log = &early_log[i];
1699
1700		switch (log->op_type) {
1701		case KMEMLEAK_ALLOC:
1702			early_alloc(log);
1703			break;
1704		case KMEMLEAK_FREE:
1705			kmemleak_free(log->ptr);
1706			break;
1707		case KMEMLEAK_FREE_PART:
1708			kmemleak_free_part(log->ptr, log->size);
1709			break;
1710		case KMEMLEAK_NOT_LEAK:
1711			kmemleak_not_leak(log->ptr);
1712			break;
1713		case KMEMLEAK_IGNORE:
1714			kmemleak_ignore(log->ptr);
1715			break;
1716		case KMEMLEAK_SCAN_AREA:
1717			kmemleak_scan_area(log->ptr, log->size, GFP_KERNEL);
1718			break;
1719		case KMEMLEAK_NO_SCAN:
1720			kmemleak_no_scan(log->ptr);
1721			break;
1722		default:
1723			WARN_ON(1);
1724		}
1725	}
1726}
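/*
 * Note, not in the original file: before kmemleak_init() runs, the tracing
 * callbacks cannot allocate metadata, so they record their arguments in
 * the early_log[] array instead. The switch above replays that log once
 * the object caches exist, e.g. a kmemleak_alloc() issued from an early
 * initcall becomes a KMEMLEAK_ALLOC entry and is converted into a real
 * kmemleak_object here via early_alloc().
 */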
1727
1728/*
1729 * Late initialization function.
1730 */
1731static int __init kmemleak_late_init(void)
1732{
1733	struct dentry *dentry;
1734
1735	atomic_set(&kmemleak_initialized, 1);
1736
1737	if (atomic_read(&kmemleak_error)) {
1738		/*
1739		 * Some error occurred and kmemleak was disabled. There is a
1740		 * small chance that kmemleak_disable() was called immediately
1741		 * after setting kmemleak_initialized and we may end up with
1742		 * two clean-up threads, but they are serialized by scan_mutex.
1743		 */
1744		schedule_work(&cleanup_work);
1745		return -ENOMEM;
1746	}
1747
1748	dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
1749				     &kmemleak_fops);
1750	if (!dentry)
1751		pr_warning("Failed to create the debugfs kmemleak file\n");
1752	mutex_lock(&scan_mutex);
1753	start_scan_thread();
1754	mutex_unlock(&scan_mutex);
1755
1756	pr_info("Kernel memory leak detector initialized\n");
1757
1758	return 0;
1759}
1760late_initcall(kmemleak_late_init);
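/*
 * Example, illustrative only: once kmemleak_late_init() has created the
 * debugfs file and started the scan thread, the typical user-space
 * workflow is:
 *
 *	mount -t debugfs nodev /sys/kernel/debug/
 *	echo scan > /sys/kernel/debug/kmemleak
 *	cat /sys/kernel/debug/kmemleak
 */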