v5.14.15 (lib/debugobjects.c)
   1/*
   2 * Generic infrastructure for lifetime debugging of objects.
   3 *
   4 * Started by Thomas Gleixner
   5 *
   6 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
   7 *
   8 * For licensing details see kernel-base/COPYING
   9 */
  10
  11#define pr_fmt(fmt) "ODEBUG: " fmt
  12
  13#include <linux/debugobjects.h>
  14#include <linux/interrupt.h>
  15#include <linux/sched.h>
  16#include <linux/sched/task_stack.h>
  17#include <linux/seq_file.h>
  18#include <linux/debugfs.h>
  19#include <linux/slab.h>
  20#include <linux/hash.h>
  21#include <linux/kmemleak.h>
  22#include <linux/cpu.h>
  23
  24#define ODEBUG_HASH_BITS	14
  25#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
  26
  27#define ODEBUG_POOL_SIZE	1024
  28#define ODEBUG_POOL_MIN_LEVEL	256
  29#define ODEBUG_POOL_PERCPU_SIZE	64
  30#define ODEBUG_BATCH_SIZE	16
  31
  32#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
  33#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
  34#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
  35
  36/*
  37 * We limit the freeing of debug objects via workqueue at a maximum
  38 * frequency of 10Hz and about 1024 objects for each freeing operation.
   39 * So it frees at most 10k debug objects per second.
  40 */
  41#define ODEBUG_FREE_WORK_MAX	1024
  42#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
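/*
 * Editor's worked example (not part of the kernel source): assuming a
 * hypothetical HZ=250 configuration,
 *
 *   ODEBUG_FREE_WORK_DELAY = DIV_ROUND_UP(250, 10) = 25 jiffies = 100ms,
 *
 * so the free worker runs at most ~10 times per second and frees at most
 * ODEBUG_FREE_WORK_MAX = 1024 objects per run, i.e. roughly 10k objects
 * per second, matching the comment above.
 */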
  43
  44struct debug_bucket {
  45	struct hlist_head	list;
  46	raw_spinlock_t		lock;
  47};
  48
  49/*
  50 * Debug object percpu free list
  51 * Access is protected by disabling irq
  52 */
  53struct debug_percpu_free {
  54	struct hlist_head	free_objs;
  55	int			obj_free;
  56};
  57
  58static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
  59
  60static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
  61
  62static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
  63
  64static DEFINE_RAW_SPINLOCK(pool_lock);
  65
  66static HLIST_HEAD(obj_pool);
  67static HLIST_HEAD(obj_to_free);
  68
  69/*
  70 * Because of the presence of percpu free pools, obj_pool_free will
  71 * under-count those in the percpu free pools. Similarly, obj_pool_used
  72 * will over-count those in the percpu free pools. Adjustments will be
  73 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
  74 * can be off.
  75 */
  76static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
  77static int			obj_pool_free = ODEBUG_POOL_SIZE;
  78static int			obj_pool_used;
  79static int			obj_pool_max_used;
  80static bool			obj_freeing;
  81/* The number of objs on the global free list */
  82static int			obj_nr_tofree;
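/*
 * Editor's illustration of the skew described above (hypothetical
 * numbers): if 40 objects currently sit on percpu free lists, then
 * obj_pool_free reads 40 lower and obj_pool_used reads 40 higher than
 * the true global values; debug_stats_show() compensates by adding or
 * subtracting the percpu total.
 */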
  83
  84static int			debug_objects_maxchain __read_mostly;
  85static int __maybe_unused	debug_objects_maxchecked __read_mostly;
  86static int			debug_objects_fixups __read_mostly;
  87static int			debug_objects_warnings __read_mostly;
  88static int			debug_objects_enabled __read_mostly
  89				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
  90static int			debug_objects_pool_size __read_mostly
  91				= ODEBUG_POOL_SIZE;
  92static int			debug_objects_pool_min_level __read_mostly
  93				= ODEBUG_POOL_MIN_LEVEL;
  94static const struct debug_obj_descr *descr_test  __read_mostly;
  95static struct kmem_cache	*obj_cache __read_mostly;
  96
  97/*
  98 * Track numbers of kmem_cache_alloc()/free() calls done.
  99 */
 100static int			debug_objects_allocated;
 101static int			debug_objects_freed;
 102
 103static void free_obj_work(struct work_struct *work);
 104static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
 105
 106static int __init enable_object_debug(char *str)
 107{
 108	debug_objects_enabled = 1;
 109	return 0;
 110}
 111
 112static int __init disable_object_debug(char *str)
 113{
 114	debug_objects_enabled = 0;
 115	return 0;
 116}
 117
 118early_param("debug_objects", enable_object_debug);
 119early_param("no_debug_objects", disable_object_debug);
 120
 121static const char *obj_states[ODEBUG_STATE_MAX] = {
 122	[ODEBUG_STATE_NONE]		= "none",
 123	[ODEBUG_STATE_INIT]		= "initialized",
 124	[ODEBUG_STATE_INACTIVE]		= "inactive",
 125	[ODEBUG_STATE_ACTIVE]		= "active",
 126	[ODEBUG_STATE_DESTROYED]	= "destroyed",
 127	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
 128};
 129
 130static void fill_pool(void)
 131{
 132	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 133	struct debug_obj *obj;
 134	unsigned long flags;
 135
 136	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
 137		return;
 138
 139	/*
 140	 * Reuse objs from the global free list; they will be reinitialized
 141	 * when allocating.
 142	 *
 143	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
 144	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
 145	 * sections.
 146	 */
 147	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
 148		raw_spin_lock_irqsave(&pool_lock, flags);
 149		/*
 150		 * Recheck with the lock held as the worker thread might have
 151		 * won the race and freed the global free list already.
 152		 */
 153		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
 154			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 155			hlist_del(&obj->node);
 156			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 157			hlist_add_head(&obj->node, &obj_pool);
 158			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 159		}
 160		raw_spin_unlock_irqrestore(&pool_lock, flags);
 161	}
 162
 163	if (unlikely(!obj_cache))
 164		return;
 165
 166	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
 167		struct debug_obj *new[ODEBUG_BATCH_SIZE];
 168		int cnt;
 169
 170		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
 171			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
 172			if (!new[cnt])
 173				break;
 174		}
 175		if (!cnt)
 176			return;
 177
 178		raw_spin_lock_irqsave(&pool_lock, flags);
 179		while (cnt) {
 180			hlist_add_head(&new[--cnt]->node, &obj_pool);
 181			debug_objects_allocated++;
 182			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 183		}
 184		raw_spin_unlock_irqrestore(&pool_lock, flags);
 185	}
 186}
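/*
 * Editor's sketch (not part of this file) of the lockless-check pattern
 * fill_pool() uses above, reduced to its essentials; 'counter' and
 * 'example_lock' are hypothetical names.
 */
static int counter;
static DEFINE_RAW_SPINLOCK(example_lock);

static void lockless_check_pattern(void)
{
	/* Cheap lockless peek first; it may race, so it is only a hint. */
	while (READ_ONCE(counter) > 0) {
		raw_spin_lock(&example_lock);
		/* Recheck under the lock before acting on the value. */
		if (counter > 0)
			WRITE_ONCE(counter, counter - 1);
		raw_spin_unlock(&example_lock);
	}
}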
 187
 188/*
  189 * Look up an object in the hash bucket.
 190 */
 191static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 192{
 193	struct debug_obj *obj;
 194	int cnt = 0;
 195
 196	hlist_for_each_entry(obj, &b->list, node) {
 197		cnt++;
 198		if (obj->object == addr)
 199			return obj;
 200	}
 201	if (cnt > debug_objects_maxchain)
 202		debug_objects_maxchain = cnt;
 203
 204	return NULL;
 205}
 206
 207/*
 208 * Allocate a new object from the hlist
 209 */
 210static struct debug_obj *__alloc_object(struct hlist_head *list)
 211{
 212	struct debug_obj *obj = NULL;
 213
 214	if (list->first) {
 215		obj = hlist_entry(list->first, typeof(*obj), node);
 216		hlist_del(&obj->node);
 217	}
 218
 219	return obj;
 220}
 221
 222/*
 223 * Allocate a new object. If the pool is empty, switch off the debugger.
 224 * Must be called with interrupts disabled.
 225 */
 226static struct debug_obj *
 227alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
 228{
 229	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
 230	struct debug_obj *obj;
 231
 232	if (likely(obj_cache)) {
 233		obj = __alloc_object(&percpu_pool->free_objs);
 234		if (obj) {
 235			percpu_pool->obj_free--;
 236			goto init_obj;
 237		}
 238	}
 239
 240	raw_spin_lock(&pool_lock);
 241	obj = __alloc_object(&obj_pool);
 242	if (obj) {
 243		obj_pool_used++;
 244		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 245
 246		/*
 247		 * Looking ahead, allocate one batch of debug objects and
 248		 * put them into the percpu free pool.
 249		 */
 250		if (likely(obj_cache)) {
 251			int i;
 252
 253			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
 254				struct debug_obj *obj2;
 255
 256				obj2 = __alloc_object(&obj_pool);
 257				if (!obj2)
 258					break;
 259				hlist_add_head(&obj2->node,
 260					       &percpu_pool->free_objs);
 261				percpu_pool->obj_free++;
 262				obj_pool_used++;
 263				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 264			}
 265		}
 266
 267		if (obj_pool_used > obj_pool_max_used)
 268			obj_pool_max_used = obj_pool_used;
 269
 270		if (obj_pool_free < obj_pool_min_free)
 271			obj_pool_min_free = obj_pool_free;
 272	}
 273	raw_spin_unlock(&pool_lock);
 274
 275init_obj:
 276	if (obj) {
 277		obj->object = addr;
 278		obj->descr  = descr;
 279		obj->state  = ODEBUG_STATE_NONE;
 280		obj->astate = 0;
 281		hlist_add_head(&obj->node, &b->list);
 282	}
 283	return obj;
 284}
 285
 286/*
 287 * workqueue function to free objects.
 288 *
 289 * To reduce contention on the global pool_lock, the actual freeing of
 290 * debug objects will be delayed if the pool_lock is busy.
 291 */
 292static void free_obj_work(struct work_struct *work)
 293{
 294	struct hlist_node *tmp;
 295	struct debug_obj *obj;
 296	unsigned long flags;
 297	HLIST_HEAD(tofree);
 298
 299	WRITE_ONCE(obj_freeing, false);
 300	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
 301		return;
 302
 303	if (obj_pool_free >= debug_objects_pool_size)
 304		goto free_objs;
 305
  306	/*
  307	 * The objs on the pool list might be allocated before the work is
  308	 * run, so recheck whether the pool list is full or not; if not,
  309	 * refill the pool list from the global free list. As a workload is
  310	 * likely gearing up to use more and more objects, don't free any
  311	 * of them until the next round.
  312	 */
 313	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
 314		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 315		hlist_del(&obj->node);
 316		hlist_add_head(&obj->node, &obj_pool);
 317		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 318		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 319	}
 320	raw_spin_unlock_irqrestore(&pool_lock, flags);
 321	return;
 322
 323free_objs:
 324	/*
 325	 * Pool list is already full and there are still objs on the free
 326	 * list. Move remaining free objs to a temporary list to free the
 327	 * memory outside the pool_lock held region.
 328	 */
 329	if (obj_nr_tofree) {
 330		hlist_move_list(&obj_to_free, &tofree);
 331		debug_objects_freed += obj_nr_tofree;
 332		WRITE_ONCE(obj_nr_tofree, 0);
 333	}
 334	raw_spin_unlock_irqrestore(&pool_lock, flags);
 335
 336	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
 337		hlist_del(&obj->node);
 338		kmem_cache_free(obj_cache, obj);
 339	}
 340}
 341
 342static void __free_object(struct debug_obj *obj)
 343{
 344	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
 345	struct debug_percpu_free *percpu_pool;
 346	int lookahead_count = 0;
 347	unsigned long flags;
 348	bool work;
 349
 350	local_irq_save(flags);
 351	if (!obj_cache)
 352		goto free_to_obj_pool;
 353
 354	/*
 355	 * Try to free it into the percpu pool first.
 356	 */
 357	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
 358	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
 359		hlist_add_head(&obj->node, &percpu_pool->free_objs);
 360		percpu_pool->obj_free++;
 361		local_irq_restore(flags);
 362		return;
 363	}
 364
 365	/*
 366	 * As the percpu pool is full, look ahead and pull out a batch
 367	 * of objects from the percpu pool and free them as well.
 368	 */
 369	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
 370		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
 371		if (!objs[lookahead_count])
 372			break;
 373		percpu_pool->obj_free--;
 374	}
 375
 376free_to_obj_pool:
 377	raw_spin_lock(&pool_lock);
 378	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
 379	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
 380	obj_pool_used--;
 381
 382	if (work) {
 383		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
 384		hlist_add_head(&obj->node, &obj_to_free);
 385		if (lookahead_count) {
 386			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
 387			obj_pool_used -= lookahead_count;
 388			while (lookahead_count) {
 389				hlist_add_head(&objs[--lookahead_count]->node,
 390					       &obj_to_free);
 391			}
 392		}
 393
 394		if ((obj_pool_free > debug_objects_pool_size) &&
 395		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
 396			int i;
 397
 398			/*
 399			 * Free one more batch of objects from obj_pool.
 400			 */
 401			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
 402				obj = __alloc_object(&obj_pool);
 403				hlist_add_head(&obj->node, &obj_to_free);
 404				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 405				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
 406			}
 407		}
 408	} else {
 409		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 410		hlist_add_head(&obj->node, &obj_pool);
 411		if (lookahead_count) {
 412			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
 413			obj_pool_used -= lookahead_count;
 414			while (lookahead_count) {
 415				hlist_add_head(&objs[--lookahead_count]->node,
 416					       &obj_pool);
 417			}
 418		}
 419	}
 420	raw_spin_unlock(&pool_lock);
 421	local_irq_restore(flags);
 422}
 423
 424/*
 425 * Put the object back into the pool and schedule work to free objects
 426 * if necessary.
 427 */
 428static void free_object(struct debug_obj *obj)
 429{
 430	__free_object(obj);
 431	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
 432		WRITE_ONCE(obj_freeing, true);
 433		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
 434	}
 435}
 436
 437#ifdef CONFIG_HOTPLUG_CPU
 438static int object_cpu_offline(unsigned int cpu)
 439{
 440	struct debug_percpu_free *percpu_pool;
 441	struct hlist_node *tmp;
  442	struct debug_obj *obj;
  443
 444	/* Remote access is safe as the CPU is dead already */
 445	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
 446	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
 447		hlist_del(&obj->node);
 448		kmem_cache_free(obj_cache, obj);
  449	}
  450	percpu_pool->obj_free = 0;
 451
 452	return 0;
 453}
 454#endif
 455
 456/*
  457 * We ran out of memory. That probably means we have tons of objects
  458 * allocated.
 459 */
 460static void debug_objects_oom(void)
 461{
 462	struct debug_bucket *db = obj_hash;
 463	struct hlist_node *tmp;
 464	HLIST_HEAD(freelist);
 465	struct debug_obj *obj;
 466	unsigned long flags;
 467	int i;
 468
 469	pr_warn("Out of memory. ODEBUG disabled\n");
 470
 471	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
 472		raw_spin_lock_irqsave(&db->lock, flags);
 473		hlist_move_list(&db->list, &freelist);
 474		raw_spin_unlock_irqrestore(&db->lock, flags);
 475
 476		/* Now free them */
 477		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
 478			hlist_del(&obj->node);
 479			free_object(obj);
 480		}
 481	}
 482}
 483
 484/*
 485 * We use the pfn of the address for the hash. That way we can check
 486 * for freed objects simply by checking the affected bucket.
 487 */
 488static struct debug_bucket *get_bucket(unsigned long addr)
 489{
 490	unsigned long hash;
 491
 492	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
 493	return &obj_hash[hash];
 494}
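/*
 * Editor's note: since ODEBUG_CHUNK_SHIFT == PAGE_SHIFT, all addresses
 * within one page-sized chunk select the same bucket. A hypothetical
 * illustration with 4K pages:
 *
 *   get_bucket(0x1000) == get_bucket(0x1fff)   (same chunk)
 *   get_bucket(0x2000)                         (different chunk, so
 *                                               usually a different bucket)
 */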
 495
 496static void debug_print_object(struct debug_obj *obj, char *msg)
 497{
 498	const struct debug_obj_descr *descr = obj->descr;
 499	static int limit;
  500
  501	if (limit < 5 && descr != descr_test) {
 502		void *hint = descr->debug_hint ?
 503			descr->debug_hint(obj->object) : NULL;
 504		limit++;
 505		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
 506				 "object type: %s hint: %pS\n",
 507			msg, obj_states[obj->state], obj->astate,
 508			descr->name, hint);
 509	}
 510	debug_objects_warnings++;
 511}
 512
 513/*
 514 * Try to repair the damage, so we have a better chance to get useful
 515 * debug output.
 516 */
 517static bool
 518debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
 519		   void * addr, enum debug_obj_state state)
 520{
 521	if (fixup && fixup(addr, state)) {
 522		debug_objects_fixups++;
 523		return true;
 524	}
 525	return false;
 526}
 527
 528static void debug_object_is_on_stack(void *addr, int onstack)
 529{
 530	int is_on_stack;
 531	static int limit;
 532
 533	if (limit > 4)
 534		return;
 535
 536	is_on_stack = object_is_on_stack(addr);
 537	if (is_on_stack == onstack)
 538		return;
 539
 540	limit++;
 541	if (is_on_stack)
 542		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
 543			 task_stack_page(current));
 544	else
 545		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
 546			 task_stack_page(current));
 547
 548	WARN_ON(1);
 549}
  550
  551static void
 552__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
 553{
 554	enum debug_obj_state state;
 555	bool check_stack = false;
 556	struct debug_bucket *db;
 557	struct debug_obj *obj;
 558	unsigned long flags;
 559
 560	fill_pool();
 561
 562	db = get_bucket((unsigned long) addr);
 563
 564	raw_spin_lock_irqsave(&db->lock, flags);
 565
 566	obj = lookup_object(addr, db);
 567	if (!obj) {
 568		obj = alloc_object(addr, db, descr);
 569		if (!obj) {
 570			debug_objects_enabled = 0;
 571			raw_spin_unlock_irqrestore(&db->lock, flags);
 572			debug_objects_oom();
 573			return;
 574		}
 575		check_stack = true;
 576	}
 577
 578	switch (obj->state) {
 579	case ODEBUG_STATE_NONE:
 580	case ODEBUG_STATE_INIT:
 581	case ODEBUG_STATE_INACTIVE:
 582		obj->state = ODEBUG_STATE_INIT;
 583		break;
 584
 585	case ODEBUG_STATE_ACTIVE:
 586		state = obj->state;
 587		raw_spin_unlock_irqrestore(&db->lock, flags);
 588		debug_print_object(obj, "init");
 589		debug_object_fixup(descr->fixup_init, addr, state);
 590		return;
 591
 592	case ODEBUG_STATE_DESTROYED:
 593		raw_spin_unlock_irqrestore(&db->lock, flags);
 594		debug_print_object(obj, "init");
 595		return;
 596	default:
 597		break;
 598	}
  599
  600	raw_spin_unlock_irqrestore(&db->lock, flags);
 601	if (check_stack)
  602		debug_object_is_on_stack(addr, onstack);
  603}
 604
 605/**
 606 * debug_object_init - debug checks when an object is initialized
 607 * @addr:	address of the object
 608 * @descr:	pointer to an object specific debug description structure
 609 */
 610void debug_object_init(void *addr, const struct debug_obj_descr *descr)
 611{
 612	if (!debug_objects_enabled)
 613		return;
 614
 615	__debug_object_init(addr, descr, 0);
 616}
 617EXPORT_SYMBOL_GPL(debug_object_init);
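/*
 * Editor's sketch of a typical caller (not part of this file; all names
 * such as my_obj and my_obj_debug_descr are hypothetical). A subsystem
 * declares a descriptor once and hooks the tracker into its object's
 * own init path:
 */
struct my_obj {
	int whatever;
};

static const struct debug_obj_descr my_obj_debug_descr = {
	.name = "my_obj",
};

static void my_obj_init(struct my_obj *o)
{
	debug_object_init(o, &my_obj_debug_descr);
	o->whatever = 0;
}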
 618
 619/**
 620 * debug_object_init_on_stack - debug checks when an object on stack is
 621 *				initialized
 622 * @addr:	address of the object
 623 * @descr:	pointer to an object specific debug description structure
 624 */
 625void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
 626{
 627	if (!debug_objects_enabled)
 628		return;
 629
 630	__debug_object_init(addr, descr, 1);
 631}
 632EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
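/*
 * Editor's sketch (hypothetical, reusing my_obj_debug_descr from the
 * sketch above): objects living on the kernel stack must use the
 * _on_stack variant and must be removed from the tracker before the
 * function returns, e.g. via debug_object_free().
 */
static void my_func_with_stack_obj(void)
{
	struct my_obj o;

	debug_object_init_on_stack(&o, &my_obj_debug_descr);
	/* ... use the object ... */
	debug_object_free(&o, &my_obj_debug_descr);
}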
 633
 634/**
 635 * debug_object_activate - debug checks when an object is activated
 636 * @addr:	address of the object
 637 * @descr:	pointer to an object specific debug description structure
 638 * Returns 0 for success, -EINVAL for check failed.
 639 */
 640int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
 641{
 642	enum debug_obj_state state;
 643	struct debug_bucket *db;
 644	struct debug_obj *obj;
 645	unsigned long flags;
 646	int ret;
 647	struct debug_obj o = { .object = addr,
 648			       .state = ODEBUG_STATE_NOTAVAILABLE,
 649			       .descr = descr };
 650
 651	if (!debug_objects_enabled)
 652		return 0;
  653
  654	db = get_bucket((unsigned long) addr);
 655
 656	raw_spin_lock_irqsave(&db->lock, flags);
 657
 658	obj = lookup_object(addr, db);
 659	if (obj) {
 660		bool print_object = false;
  661
  662		switch (obj->state) {
 663		case ODEBUG_STATE_INIT:
 664		case ODEBUG_STATE_INACTIVE:
 665			obj->state = ODEBUG_STATE_ACTIVE;
 666			ret = 0;
 667			break;
 668
 669		case ODEBUG_STATE_ACTIVE:
 670			state = obj->state;
 671			raw_spin_unlock_irqrestore(&db->lock, flags);
 672			debug_print_object(obj, "activate");
 673			ret = debug_object_fixup(descr->fixup_activate, addr, state);
 674			return ret ? 0 : -EINVAL;
 675
 676		case ODEBUG_STATE_DESTROYED:
 677			print_object = true;
 678			ret = -EINVAL;
  679			break;
  680		default:
 681			ret = 0;
 682			break;
 683		}
 684		raw_spin_unlock_irqrestore(&db->lock, flags);
 685		if (print_object)
 686			debug_print_object(obj, "activate");
 687		return ret;
 688	}
 689
  690	raw_spin_unlock_irqrestore(&db->lock, flags);
  691
  692	/*
  693	 * This happens when a static object is activated. We let
  694	 * the type specific code confirm whether this really is a
  695	 * static object. If so, we just make sure the static
  696	 * object is tracked in the object tracker. If not, this
  697	 * must be a bug, so we try to fix it up.
  698	 */
 699	if (descr->is_static_object && descr->is_static_object(addr)) {
 700		/* track this static object */
 701		debug_object_init(addr, descr);
 702		debug_object_activate(addr, descr);
 703	} else {
 704		debug_print_object(&o, "activate");
 705		ret = debug_object_fixup(descr->fixup_activate, addr,
 706					ODEBUG_STATE_NOTAVAILABLE);
 707		return ret ? 0 : -EINVAL;
 708	}
 709	return 0;
 710}
 711EXPORT_SYMBOL_GPL(debug_object_activate);
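/*
 * Editor's sketch (hypothetical names): activate when the object goes
 * live (queued, armed, ...), deactivate when it stops. The return value
 * lets a caller back out if the state check failed.
 */
static int my_obj_start(struct my_obj *o)
{
	if (debug_object_activate(o, &my_obj_debug_descr))
		return -EINVAL;
	/* ... actually start the object ... */
	return 0;
}

static void my_obj_stop(struct my_obj *o)
{
	/* ... actually stop the object ... */
	debug_object_deactivate(o, &my_obj_debug_descr);
}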
 712
 713/**
 714 * debug_object_deactivate - debug checks when an object is deactivated
 715 * @addr:	address of the object
 716 * @descr:	pointer to an object specific debug description structure
 717 */
 718void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
  719{
  720	struct debug_bucket *db;
 721	struct debug_obj *obj;
 722	unsigned long flags;
 723	bool print_object = false;
 724
 725	if (!debug_objects_enabled)
 726		return;
 727
 728	db = get_bucket((unsigned long) addr);
 729
 730	raw_spin_lock_irqsave(&db->lock, flags);
 731
 732	obj = lookup_object(addr, db);
 733	if (obj) {
  734		switch (obj->state) {
  735		case ODEBUG_STATE_INIT:
 736		case ODEBUG_STATE_INACTIVE:
 737		case ODEBUG_STATE_ACTIVE:
 738			if (!obj->astate)
 739				obj->state = ODEBUG_STATE_INACTIVE;
 740			else
 741				print_object = true;
 742			break;
 743
 744		case ODEBUG_STATE_DESTROYED:
 745			print_object = true;
 746			break;
 747		default:
  748			break;
  749		}
  750	}
 751
 752	raw_spin_unlock_irqrestore(&db->lock, flags);
 753	if (!obj) {
 754		struct debug_obj o = { .object = addr,
 755				       .state = ODEBUG_STATE_NOTAVAILABLE,
 756				       .descr = descr };
 757
 758		debug_print_object(&o, "deactivate");
 759	} else if (print_object) {
 760		debug_print_object(obj, "deactivate");
 761	}
 762}
 763EXPORT_SYMBOL_GPL(debug_object_deactivate);
 764
 765/**
 766 * debug_object_destroy - debug checks when an object is destroyed
 767 * @addr:	address of the object
 768 * @descr:	pointer to an object specific debug description structure
 769 */
 770void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
 771{
 772	enum debug_obj_state state;
 773	struct debug_bucket *db;
 774	struct debug_obj *obj;
 775	unsigned long flags;
 776	bool print_object = false;
 777
 778	if (!debug_objects_enabled)
 779		return;
 780
 781	db = get_bucket((unsigned long) addr);
 782
 783	raw_spin_lock_irqsave(&db->lock, flags);
 784
 785	obj = lookup_object(addr, db);
 786	if (!obj)
  787		goto out_unlock;
  788
  789	switch (obj->state) {
  790	case ODEBUG_STATE_NONE:
 791	case ODEBUG_STATE_INIT:
 792	case ODEBUG_STATE_INACTIVE:
 793		obj->state = ODEBUG_STATE_DESTROYED;
 794		break;
 795	case ODEBUG_STATE_ACTIVE:
 796		state = obj->state;
 797		raw_spin_unlock_irqrestore(&db->lock, flags);
 798		debug_print_object(obj, "destroy");
 799		debug_object_fixup(descr->fixup_destroy, addr, state);
 800		return;
 801
 802	case ODEBUG_STATE_DESTROYED:
 803		print_object = true;
 804		break;
 805	default:
 806		break;
 807	}
  808out_unlock:
  809	raw_spin_unlock_irqrestore(&db->lock, flags);
 810	if (print_object)
  811		debug_print_object(obj, "destroy");
  812}
 813EXPORT_SYMBOL_GPL(debug_object_destroy);
 814
 815/**
 816 * debug_object_free - debug checks when an object is freed
 817 * @addr:	address of the object
 818 * @descr:	pointer to an object specific debug description structure
 819 */
 820void debug_object_free(void *addr, const struct debug_obj_descr *descr)
 821{
 822	enum debug_obj_state state;
 823	struct debug_bucket *db;
 824	struct debug_obj *obj;
 825	unsigned long flags;
 826
 827	if (!debug_objects_enabled)
 828		return;
 829
 830	db = get_bucket((unsigned long) addr);
 831
 832	raw_spin_lock_irqsave(&db->lock, flags);
 833
 834	obj = lookup_object(addr, db);
 835	if (!obj)
  836		goto out_unlock;
  837
 838	switch (obj->state) {
 839	case ODEBUG_STATE_ACTIVE:
 840		state = obj->state;
 841		raw_spin_unlock_irqrestore(&db->lock, flags);
 842		debug_print_object(obj, "free");
 843		debug_object_fixup(descr->fixup_free, addr, state);
 844		return;
 845	default:
 846		hlist_del(&obj->node);
 847		raw_spin_unlock_irqrestore(&db->lock, flags);
 848		free_object(obj);
 849		return;
 850	}
  851out_unlock:
  852	raw_spin_unlock_irqrestore(&db->lock, flags);
  853}
 854EXPORT_SYMBOL_GPL(debug_object_free);
 855
 856/**
 857 * debug_object_assert_init - debug checks when object should be init-ed
 858 * @addr:	address of the object
 859 * @descr:	pointer to an object specific debug description structure
 860 */
 861void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
  862{
  863	struct debug_bucket *db;
 864	struct debug_obj *obj;
 865	unsigned long flags;
 866
 867	if (!debug_objects_enabled)
 868		return;
  869
  870	db = get_bucket((unsigned long) addr);
 871
  872	raw_spin_lock_irqsave(&db->lock, flags);
  873
 874	obj = lookup_object(addr, db);
 875	if (!obj) {
 876		struct debug_obj o = { .object = addr,
 877				       .state = ODEBUG_STATE_NOTAVAILABLE,
 878				       .descr = descr };
 879
 880		raw_spin_unlock_irqrestore(&db->lock, flags);
 881		/*
 882		 * Maybe the object is static, and we let the type specific
 883		 * code confirm. Track this static object if true, else invoke
 884		 * fixup.
 885		 */
 886		if (descr->is_static_object && descr->is_static_object(addr)) {
 887			/* Track this static object */
 888			debug_object_init(addr, descr);
 889		} else {
 890			debug_print_object(&o, "assert_init");
 891			debug_object_fixup(descr->fixup_assert_init, addr,
 892					   ODEBUG_STATE_NOTAVAILABLE);
 893		}
 894		return;
 895	}
 896
  897	raw_spin_unlock_irqrestore(&db->lock, flags);
  898}
 899EXPORT_SYMBOL_GPL(debug_object_assert_init);
 900
 901/**
 902 * debug_object_active_state - debug checks object usage state machine
 903 * @addr:	address of the object
 904 * @descr:	pointer to an object specific debug description structure
 905 * @expect:	expected state
 906 * @next:	state to move to if expected state is found
 907 */
 908void
 909debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
 910			  unsigned int expect, unsigned int next)
  911{
  912	struct debug_bucket *db;
 913	struct debug_obj *obj;
 914	unsigned long flags;
 915	bool print_object = false;
 916
 917	if (!debug_objects_enabled)
 918		return;
 919
 920	db = get_bucket((unsigned long) addr);
 921
 922	raw_spin_lock_irqsave(&db->lock, flags);
 923
 924	obj = lookup_object(addr, db);
 925	if (obj) {
 926		switch (obj->state) {
 927		case ODEBUG_STATE_ACTIVE:
 928			if (obj->astate == expect)
 929				obj->astate = next;
 930			else
 931				print_object = true;
 932			break;
 933
 934		default:
 935			print_object = true;
 936			break;
  937		}
  938	}
 939
 940	raw_spin_unlock_irqrestore(&db->lock, flags);
 941	if (!obj) {
 942		struct debug_obj o = { .object = addr,
 943				       .state = ODEBUG_STATE_NOTAVAILABLE,
 944				       .descr = descr };
 945
 946		debug_print_object(&o, "active_state");
 947	} else if (print_object) {
 948		debug_print_object(obj, "active_state");
 949	}
 950}
 951EXPORT_SYMBOL_GPL(debug_object_active_state);
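/*
 * Editor's sketch (hypothetical): astate tracks a caller-defined
 * sub-state of an ACTIVE object; it starts at 0. A two-phase user could
 * check and advance it like this:
 */
#define MY_OBJ_IDLE	0
#define MY_OBJ_BUSY	1

static void my_obj_begin(struct my_obj *o)
{
	/* Warns unless the object is active with astate == MY_OBJ_IDLE */
	debug_object_active_state(o, &my_obj_debug_descr,
				  MY_OBJ_IDLE, MY_OBJ_BUSY);
}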
 952
 953#ifdef CONFIG_DEBUG_OBJECTS_FREE
 954static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 955{
 956	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
 957	const struct debug_obj_descr *descr;
 958	enum debug_obj_state state;
 959	struct debug_bucket *db;
 960	struct hlist_node *tmp;
 961	struct debug_obj *obj;
 962	int cnt, objs_checked = 0;
 963
 964	saddr = (unsigned long) address;
 965	eaddr = saddr + size;
 966	paddr = saddr & ODEBUG_CHUNK_MASK;
 967	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
 968	chunks >>= ODEBUG_CHUNK_SHIFT;
 969
 970	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
 971		db = get_bucket(paddr);
 972
 973repeat:
 974		cnt = 0;
 975		raw_spin_lock_irqsave(&db->lock, flags);
 976		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
 977			cnt++;
 978			oaddr = (unsigned long) obj->object;
 979			if (oaddr < saddr || oaddr >= eaddr)
 980				continue;
 981
 982			switch (obj->state) {
 983			case ODEBUG_STATE_ACTIVE:
 984				descr = obj->descr;
 985				state = obj->state;
 986				raw_spin_unlock_irqrestore(&db->lock, flags);
 987				debug_print_object(obj, "free");
 988				debug_object_fixup(descr->fixup_free,
 989						   (void *) oaddr, state);
 990				goto repeat;
 991			default:
 992				hlist_del(&obj->node);
 993				__free_object(obj);
 994				break;
 995			}
 996		}
 997		raw_spin_unlock_irqrestore(&db->lock, flags);
 998
 999		if (cnt > debug_objects_maxchain)
1000			debug_objects_maxchain = cnt;
1001
1002		objs_checked += cnt;
1003	}
1004
1005	if (objs_checked > debug_objects_maxchecked)
1006		debug_objects_maxchecked = objs_checked;
1007
1008	/* Schedule work to actually kmem_cache_free() objects */
1009	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
1010		WRITE_ONCE(obj_freeing, true);
1011		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1012	}
1013}
1014
1015void debug_check_no_obj_freed(const void *address, unsigned long size)
1016{
1017	if (debug_objects_enabled)
1018		__debug_check_no_obj_freed(address, size);
1019}
1020#endif
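/*
 * Editor's sketch of the bug class this catches (hypothetical): with
 * CONFIG_DEBUG_OBJECTS_FREE, the allocator's free paths call
 * debug_check_no_obj_freed(), so releasing memory that still holds an
 * active tracked object is reported and fixed up.
 */
static void my_obj_buggy_free(struct my_obj *o)
{
	/* Object never deactivated: the kfree() path will trip the check */
	kfree(o);
}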
1021
1022#ifdef CONFIG_DEBUG_FS
1023
1024static int debug_stats_show(struct seq_file *m, void *v)
1025{
1026	int cpu, obj_percpu_free = 0;
1027
1028	for_each_possible_cpu(cpu)
1029		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
1030
1031	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
1032	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
1033	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
1034	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
1035	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
1036	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
1037	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1038	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
1039	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
1040	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
1041	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
1042	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
1043	return 0;
1044}
1045DEFINE_SHOW_ATTRIBUTE(debug_stats);
1046
1047static int __init debug_objects_init_debugfs(void)
1048{
1049	struct dentry *dbgdir;
1050
1051	if (!debug_objects_enabled)
1052		return 0;
1053
1054	dbgdir = debugfs_create_dir("debug_objects", NULL);
1055
1056	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1057
1058	return 0;
1059}
1060__initcall(debug_objects_init_debugfs);
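/*
 * Editor's note: with debugfs mounted at the usual location, the
 * counters above are readable at /sys/kernel/debug/debug_objects/stats.
 */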
1061
1062#else
1063static inline void debug_objects_init_debugfs(void) { }
1064#endif
1065
1066#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1067
1068/* Random data structure for the self test */
1069struct self_test {
1070	unsigned long	dummy1[6];
1071	int		static_init;
1072	unsigned long	dummy2[3];
1073};
1074
1075static __initconst const struct debug_obj_descr descr_type_test;
1076
1077static bool __init is_static_object(void *addr)
1078{
1079	struct self_test *obj = addr;
1080
1081	return obj->static_init;
1082}
1083
1084/*
1085 * fixup_init is called when:
1086 * - an active object is initialized
1087 */
1088static bool __init fixup_init(void *addr, enum debug_obj_state state)
1089{
1090	struct self_test *obj = addr;
1091
1092	switch (state) {
1093	case ODEBUG_STATE_ACTIVE:
1094		debug_object_deactivate(obj, &descr_type_test);
1095		debug_object_init(obj, &descr_type_test);
1096		return true;
1097	default:
1098		return false;
1099	}
1100}
1101
1102/*
1103 * fixup_activate is called when:
1104 * - an active object is activated
1105 * - an unknown non-static object is activated
1106 */
1107static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1108{
1109	struct self_test *obj = addr;
1110
1111	switch (state) {
1112	case ODEBUG_STATE_NOTAVAILABLE:
1113		return true;
1114	case ODEBUG_STATE_ACTIVE:
1115		debug_object_deactivate(obj, &descr_type_test);
1116		debug_object_activate(obj, &descr_type_test);
1117		return true;
1118
1119	default:
1120		return false;
1121	}
1122}
1123
1124/*
1125 * fixup_destroy is called when:
1126 * - an active object is destroyed
1127 */
1128static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1129{
1130	struct self_test *obj = addr;
1131
1132	switch (state) {
1133	case ODEBUG_STATE_ACTIVE:
1134		debug_object_deactivate(obj, &descr_type_test);
1135		debug_object_destroy(obj, &descr_type_test);
1136		return true;
1137	default:
1138		return false;
1139	}
1140}
1141
1142/*
1143 * fixup_free is called when:
1144 * - an active object is freed
1145 */
1146static bool __init fixup_free(void *addr, enum debug_obj_state state)
1147{
1148	struct self_test *obj = addr;
1149
1150	switch (state) {
1151	case ODEBUG_STATE_ACTIVE:
1152		debug_object_deactivate(obj, &descr_type_test);
1153		debug_object_free(obj, &descr_type_test);
1154		return true;
1155	default:
1156		return false;
1157	}
1158}
1159
1160static int __init
1161check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1162{
1163	struct debug_bucket *db;
1164	struct debug_obj *obj;
1165	unsigned long flags;
1166	int res = -EINVAL;
1167
1168	db = get_bucket((unsigned long) addr);
1169
1170	raw_spin_lock_irqsave(&db->lock, flags);
1171
1172	obj = lookup_object(addr, db);
1173	if (!obj && state != ODEBUG_STATE_NONE) {
1174		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1175		goto out;
1176	}
1177	if (obj && obj->state != state) {
1178		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1179		       obj->state, state);
1180		goto out;
1181	}
1182	if (fixups != debug_objects_fixups) {
1183		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1184		       fixups, debug_objects_fixups);
1185		goto out;
1186	}
1187	if (warnings != debug_objects_warnings) {
1188		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1189		       warnings, debug_objects_warnings);
1190		goto out;
1191	}
1192	res = 0;
1193out:
1194	raw_spin_unlock_irqrestore(&db->lock, flags);
1195	if (res)
1196		debug_objects_enabled = 0;
1197	return res;
1198}
1199
1200static __initconst const struct debug_obj_descr descr_type_test = {
1201	.name			= "selftest",
1202	.is_static_object	= is_static_object,
1203	.fixup_init		= fixup_init,
1204	.fixup_activate		= fixup_activate,
1205	.fixup_destroy		= fixup_destroy,
1206	.fixup_free		= fixup_free,
1207};
1208
1209static __initdata struct self_test obj = { .static_init = 0 };
1210
1211static void __init debug_objects_selftest(void)
1212{
1213	int fixups, oldfixups, warnings, oldwarnings;
1214	unsigned long flags;
1215
1216	local_irq_save(flags);
1217
1218	fixups = oldfixups = debug_objects_fixups;
1219	warnings = oldwarnings = debug_objects_warnings;
1220	descr_test = &descr_type_test;
1221
1222	debug_object_init(&obj, &descr_type_test);
1223	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1224		goto out;
1225	debug_object_activate(&obj, &descr_type_test);
1226	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1227		goto out;
1228	debug_object_activate(&obj, &descr_type_test);
1229	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1230		goto out;
1231	debug_object_deactivate(&obj, &descr_type_test);
1232	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1233		goto out;
1234	debug_object_destroy(&obj, &descr_type_test);
1235	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1236		goto out;
1237	debug_object_init(&obj, &descr_type_test);
1238	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1239		goto out;
1240	debug_object_activate(&obj, &descr_type_test);
1241	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1242		goto out;
1243	debug_object_deactivate(&obj, &descr_type_test);
1244	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1245		goto out;
1246	debug_object_free(&obj, &descr_type_test);
1247	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1248		goto out;
1249
1250	obj.static_init = 1;
1251	debug_object_activate(&obj, &descr_type_test);
1252	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1253		goto out;
1254	debug_object_init(&obj, &descr_type_test);
1255	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1256		goto out;
1257	debug_object_free(&obj, &descr_type_test);
1258	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1259		goto out;
1260
1261#ifdef CONFIG_DEBUG_OBJECTS_FREE
1262	debug_object_init(&obj, &descr_type_test);
1263	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1264		goto out;
1265	debug_object_activate(&obj, &descr_type_test);
1266	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1267		goto out;
1268	__debug_check_no_obj_freed(&obj, sizeof(obj));
1269	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1270		goto out;
1271#endif
1272	pr_info("selftest passed\n");
1273
1274out:
1275	debug_objects_fixups = oldfixups;
1276	debug_objects_warnings = oldwarnings;
1277	descr_test = NULL;
1278
1279	local_irq_restore(flags);
1280}
1281#else
1282static inline void debug_objects_selftest(void) { }
1283#endif
1284
1285/*
1286 * Called during early boot to initialize the hash buckets and link
 1287 * the static object pool objects into the pool list. After this call
1288 * the object tracker is fully operational.
1289 */
1290void __init debug_objects_early_init(void)
1291{
1292	int i;
1293
1294	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1295		raw_spin_lock_init(&obj_hash[i].lock);
1296
1297	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1298		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1299}
1300
1301/*
1302 * Convert the statically allocated objects to dynamic ones:
1303 */
1304static int __init debug_objects_replace_static_objects(void)
1305{
1306	struct debug_bucket *db = obj_hash;
1307	struct hlist_node *tmp;
1308	struct debug_obj *obj, *new;
1309	HLIST_HEAD(objects);
1310	int i, cnt = 0;
1311
1312	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1313		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1314		if (!obj)
1315			goto free;
1316		hlist_add_head(&obj->node, &objects);
1317	}
 1318
 1319	/*
 1320	 * debug_objects_mem_init() is now called early, while only one CPU is
 1321	 * up and interrupts are disabled, so it is safe to replace the
 1322	 * active object references.
 1323	 */
1324
1325	/* Remove the statically allocated objects from the pool */
1326	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1327		hlist_del(&obj->node);
1328	/* Move the allocated objects to the pool */
1329	hlist_move_list(&objects, &obj_pool);
1330
1331	/* Replace the active object references */
1332	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1333		hlist_move_list(&db->list, &objects);
1334
1335		hlist_for_each_entry(obj, &objects, node) {
1336			new = hlist_entry(obj_pool.first, typeof(*obj), node);
1337			hlist_del(&new->node);
1338			/* copy object data */
1339			*new = *obj;
1340			hlist_add_head(&new->node, &db->list);
1341			cnt++;
1342		}
1343	}
1344
1345	pr_debug("%d of %d active objects replaced\n",
1346		 cnt, obj_pool_used);
1347	return 0;
1348free:
1349	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1350		hlist_del(&obj->node);
1351		kmem_cache_free(obj_cache, obj);
1352	}
1353	return -ENOMEM;
1354}
1355
 1356/*
 1357 * Called after the kmem_caches are functional to set up a dedicated
 1358 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 1359 * prevents the debug code from being called on kmem_cache_free() for
 1360 * the debug tracker objects, avoiding recursive calls.
 1361 */
1362void __init debug_objects_mem_init(void)
1363{
1364	int cpu, extras;
1365
1366	if (!debug_objects_enabled)
1367		return;
1368
1369	/*
1370	 * Initialize the percpu object pools
1371	 *
1372	 * Initialization is not strictly necessary, but was done for
1373	 * completeness.
1374	 */
1375	for_each_possible_cpu(cpu)
1376		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
1377
1378	obj_cache = kmem_cache_create("debug_objects_cache",
1379				      sizeof (struct debug_obj), 0,
1380				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1381				      NULL);
1382
1383	if (!obj_cache || debug_objects_replace_static_objects()) {
1384		debug_objects_enabled = 0;
1385		kmem_cache_destroy(obj_cache);
 1386		pr_warn("out of memory.\n");
 1387	} else
1388		debug_objects_selftest();
1389
1390#ifdef CONFIG_HOTPLUG_CPU
1391	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1392					object_cpu_offline);
1393#endif
1394
1395	/*
1396	 * Increase the thresholds for allocating and freeing objects
1397	 * according to the number of possible CPUs available in the system.
1398	 */
1399	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1400	debug_objects_pool_size += extras;
1401	debug_objects_pool_min_level += extras;
1402}
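/*
 * Editor's worked example for the scaling above (hypothetical machine):
 * with num_possible_cpus() == 8, extras = 8 * ODEBUG_BATCH_SIZE = 128,
 * so debug_objects_pool_size grows from 1024 to 1152 and
 * debug_objects_pool_min_level from 256 to 384.
 */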
v6.8 (lib/debugobjects.c)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Generic infrastructure for lifetime debugging of objects.
   4 *
   5 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
   6 */
   7
   8#define pr_fmt(fmt) "ODEBUG: " fmt
   9
  10#include <linux/debugobjects.h>
  11#include <linux/interrupt.h>
  12#include <linux/sched.h>
  13#include <linux/sched/task_stack.h>
  14#include <linux/seq_file.h>
  15#include <linux/debugfs.h>
  16#include <linux/slab.h>
  17#include <linux/hash.h>
  18#include <linux/kmemleak.h>
  19#include <linux/cpu.h>
  20
  21#define ODEBUG_HASH_BITS	14
  22#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
  23
  24#define ODEBUG_POOL_SIZE	1024
  25#define ODEBUG_POOL_MIN_LEVEL	256
  26#define ODEBUG_POOL_PERCPU_SIZE	64
  27#define ODEBUG_BATCH_SIZE	16
  28
  29#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
  30#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
  31#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
  32
  33/*
  34 * We limit the freeing of debug objects via workqueue at a maximum
  35 * frequency of 10Hz and about 1024 objects for each freeing operation.
   36 * So it frees at most 10k debug objects per second.
  37 */
  38#define ODEBUG_FREE_WORK_MAX	1024
  39#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
  40
  41struct debug_bucket {
  42	struct hlist_head	list;
  43	raw_spinlock_t		lock;
  44};
  45
  46/*
  47 * Debug object percpu free list
  48 * Access is protected by disabling irq
  49 */
  50struct debug_percpu_free {
  51	struct hlist_head	free_objs;
  52	int			obj_free;
  53};
  54
  55static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
  56
  57static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
  58
  59static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
  60
  61static DEFINE_RAW_SPINLOCK(pool_lock);
  62
  63static HLIST_HEAD(obj_pool);
  64static HLIST_HEAD(obj_to_free);
  65
  66/*
  67 * Because of the presence of percpu free pools, obj_pool_free will
  68 * under-count those in the percpu free pools. Similarly, obj_pool_used
  69 * will over-count those in the percpu free pools. Adjustments will be
  70 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
  71 * can be off.
  72 */
  73static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
  74static int			obj_pool_free = ODEBUG_POOL_SIZE;
  75static int			obj_pool_used;
  76static int			obj_pool_max_used;
  77static bool			obj_freeing;
  78/* The number of objs on the global free list */
  79static int			obj_nr_tofree;
  80
  81static int			debug_objects_maxchain __read_mostly;
  82static int __maybe_unused	debug_objects_maxchecked __read_mostly;
  83static int			debug_objects_fixups __read_mostly;
  84static int			debug_objects_warnings __read_mostly;
  85static int			debug_objects_enabled __read_mostly
  86				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
  87static int			debug_objects_pool_size __read_mostly
  88				= ODEBUG_POOL_SIZE;
  89static int			debug_objects_pool_min_level __read_mostly
  90				= ODEBUG_POOL_MIN_LEVEL;
  91static const struct debug_obj_descr *descr_test  __read_mostly;
  92static struct kmem_cache	*obj_cache __ro_after_init;
  93
  94/*
  95 * Track numbers of kmem_cache_alloc()/free() calls done.
  96 */
  97static int			debug_objects_allocated;
  98static int			debug_objects_freed;
  99
 100static void free_obj_work(struct work_struct *work);
 101static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
 102
 103static int __init enable_object_debug(char *str)
 104{
 105	debug_objects_enabled = 1;
 106	return 0;
 107}
 108
 109static int __init disable_object_debug(char *str)
 110{
 111	debug_objects_enabled = 0;
 112	return 0;
 113}
 114
 115early_param("debug_objects", enable_object_debug);
 116early_param("no_debug_objects", disable_object_debug);
 117
 118static const char *obj_states[ODEBUG_STATE_MAX] = {
 119	[ODEBUG_STATE_NONE]		= "none",
 120	[ODEBUG_STATE_INIT]		= "initialized",
 121	[ODEBUG_STATE_INACTIVE]		= "inactive",
 122	[ODEBUG_STATE_ACTIVE]		= "active",
 123	[ODEBUG_STATE_DESTROYED]	= "destroyed",
 124	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
 125};
 126
 127static void fill_pool(void)
 128{
 129	gfp_t gfp = __GFP_HIGH | __GFP_NOWARN;
 130	struct debug_obj *obj;
 131	unsigned long flags;
 132
 133	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
 134		return;
 135
 136	/*
 137	 * Reuse objs from the global free list; they will be reinitialized
 138	 * when allocating.
 139	 *
 140	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
 141	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
 142	 * sections.
 143	 */
 144	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
 145		raw_spin_lock_irqsave(&pool_lock, flags);
 146		/*
 147		 * Recheck with the lock held as the worker thread might have
 148		 * won the race and freed the global free list already.
 149		 */
 150		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
 151			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 152			hlist_del(&obj->node);
 153			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 154			hlist_add_head(&obj->node, &obj_pool);
 155			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 156		}
 157		raw_spin_unlock_irqrestore(&pool_lock, flags);
 158	}
 159
 160	if (unlikely(!obj_cache))
 161		return;
 162
 163	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
 164		struct debug_obj *new[ODEBUG_BATCH_SIZE];
 165		int cnt;
 166
 167		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
 168			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
 169			if (!new[cnt])
 170				break;
 171		}
 172		if (!cnt)
 173			return;
 174
 175		raw_spin_lock_irqsave(&pool_lock, flags);
 176		while (cnt) {
 177			hlist_add_head(&new[--cnt]->node, &obj_pool);
 178			debug_objects_allocated++;
 179			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 180		}
 181		raw_spin_unlock_irqrestore(&pool_lock, flags);
 182	}
 183}
 184
 185/*
  186 * Look up an object in the hash bucket.
 187 */
 188static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 189{
 190	struct debug_obj *obj;
 191	int cnt = 0;
 192
 193	hlist_for_each_entry(obj, &b->list, node) {
 194		cnt++;
 195		if (obj->object == addr)
 196			return obj;
 197	}
 198	if (cnt > debug_objects_maxchain)
 199		debug_objects_maxchain = cnt;
 200
 201	return NULL;
 202}
 203
 204/*
 205 * Allocate a new object from the hlist
 206 */
 207static struct debug_obj *__alloc_object(struct hlist_head *list)
 208{
 209	struct debug_obj *obj = NULL;
 210
 211	if (list->first) {
 212		obj = hlist_entry(list->first, typeof(*obj), node);
 213		hlist_del(&obj->node);
 214	}
 215
 216	return obj;
 217}
  218
  219static struct debug_obj *
 220alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
 221{
 222	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
 223	struct debug_obj *obj;
 224
 225	if (likely(obj_cache)) {
 226		obj = __alloc_object(&percpu_pool->free_objs);
 227		if (obj) {
 228			percpu_pool->obj_free--;
 229			goto init_obj;
 230		}
 231	}
 232
 233	raw_spin_lock(&pool_lock);
 234	obj = __alloc_object(&obj_pool);
 235	if (obj) {
 236		obj_pool_used++;
 237		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 238
 239		/*
 240		 * Looking ahead, allocate one batch of debug objects and
 241		 * put them into the percpu free pool.
 242		 */
 243		if (likely(obj_cache)) {
 244			int i;
 245
 246			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
 247				struct debug_obj *obj2;
 248
 249				obj2 = __alloc_object(&obj_pool);
 250				if (!obj2)
 251					break;
 252				hlist_add_head(&obj2->node,
 253					       &percpu_pool->free_objs);
 254				percpu_pool->obj_free++;
 255				obj_pool_used++;
 256				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 257			}
 258		}
 259
 260		if (obj_pool_used > obj_pool_max_used)
 261			obj_pool_max_used = obj_pool_used;
 262
 263		if (obj_pool_free < obj_pool_min_free)
 264			obj_pool_min_free = obj_pool_free;
 265	}
 266	raw_spin_unlock(&pool_lock);
 267
 268init_obj:
 269	if (obj) {
 270		obj->object = addr;
 271		obj->descr  = descr;
 272		obj->state  = ODEBUG_STATE_NONE;
 273		obj->astate = 0;
 274		hlist_add_head(&obj->node, &b->list);
 275	}
 276	return obj;
 277}
 278
 279/*
 280 * workqueue function to free objects.
 281 *
 282 * To reduce contention on the global pool_lock, the actual freeing of
 283 * debug objects will be delayed if the pool_lock is busy.
 284 */
 285static void free_obj_work(struct work_struct *work)
 286{
 287	struct hlist_node *tmp;
 288	struct debug_obj *obj;
 289	unsigned long flags;
 290	HLIST_HEAD(tofree);
 291
 292	WRITE_ONCE(obj_freeing, false);
 293	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
 294		return;
 295
 296	if (obj_pool_free >= debug_objects_pool_size)
 297		goto free_objs;
 298
  299	/*
  300	 * The objs on the pool list might be allocated before the work is
  301	 * run, so recheck whether the pool list is full or not; if not,
  302	 * refill the pool list from the global free list. As a workload is
  303	 * likely gearing up to use more and more objects, don't free any
  304	 * of them until the next round.
  305	 */
 306	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
 307		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 308		hlist_del(&obj->node);
 309		hlist_add_head(&obj->node, &obj_pool);
 310		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 311		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 312	}
 313	raw_spin_unlock_irqrestore(&pool_lock, flags);
 314	return;
 315
 316free_objs:
 317	/*
 318	 * Pool list is already full and there are still objs on the free
 319	 * list. Move remaining free objs to a temporary list to free the
 320	 * memory outside the pool_lock held region.
 321	 */
 322	if (obj_nr_tofree) {
 323		hlist_move_list(&obj_to_free, &tofree);
 324		debug_objects_freed += obj_nr_tofree;
 325		WRITE_ONCE(obj_nr_tofree, 0);
 326	}
 327	raw_spin_unlock_irqrestore(&pool_lock, flags);
 328
 329	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
 330		hlist_del(&obj->node);
 331		kmem_cache_free(obj_cache, obj);
 332	}
 333}
 334
 335static void __free_object(struct debug_obj *obj)
 336{
 337	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
 338	struct debug_percpu_free *percpu_pool;
 339	int lookahead_count = 0;
 340	unsigned long flags;
 341	bool work;
 342
 343	local_irq_save(flags);
 344	if (!obj_cache)
 345		goto free_to_obj_pool;
 346
 347	/*
 348	 * Try to free it into the percpu pool first.
 349	 */
 350	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
 351	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
 352		hlist_add_head(&obj->node, &percpu_pool->free_objs);
 353		percpu_pool->obj_free++;
 354		local_irq_restore(flags);
 355		return;
 356	}
 357
 358	/*
 359	 * As the percpu pool is full, look ahead and pull out a batch
 360	 * of objects from the percpu pool and free them as well.
 361	 */
 362	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
 363		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
 364		if (!objs[lookahead_count])
 365			break;
 366		percpu_pool->obj_free--;
 367	}
 368
 369free_to_obj_pool:
 370	raw_spin_lock(&pool_lock);
 371	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
 372	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
 373	obj_pool_used--;
 374
 375	if (work) {
 376		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
 377		hlist_add_head(&obj->node, &obj_to_free);
 378		if (lookahead_count) {
 379			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
 380			obj_pool_used -= lookahead_count;
 381			while (lookahead_count) {
 382				hlist_add_head(&objs[--lookahead_count]->node,
 383					       &obj_to_free);
 384			}
 385		}
 386
 387		if ((obj_pool_free > debug_objects_pool_size) &&
 388		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
 389			int i;
 390
 391			/*
 392			 * Free one more batch of objects from obj_pool.
 393			 */
 394			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
 395				obj = __alloc_object(&obj_pool);
 396				hlist_add_head(&obj->node, &obj_to_free);
 397				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 398				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
 399			}
 400		}
 401	} else {
 402		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 403		hlist_add_head(&obj->node, &obj_pool);
 404		if (lookahead_count) {
 405			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
 406			obj_pool_used -= lookahead_count;
 407			while (lookahead_count) {
 408				hlist_add_head(&objs[--lookahead_count]->node,
 409					       &obj_pool);
 410			}
 411		}
 412	}
 413	raw_spin_unlock(&pool_lock);
 414	local_irq_restore(flags);
 415}
 416
 417/*
 418 * Put the object back into the pool and schedule work to free objects
 419 * if necessary.
 420 */
 421static void free_object(struct debug_obj *obj)
 422{
 423	__free_object(obj);
 424	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
 425		WRITE_ONCE(obj_freeing, true);
 426		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
 427	}
 428}
 429
 430#ifdef CONFIG_HOTPLUG_CPU
 431static int object_cpu_offline(unsigned int cpu)
 432{
 433	struct debug_percpu_free *percpu_pool;
 434	struct hlist_node *tmp;
 435	struct debug_obj *obj;
 436	unsigned long flags;
 437
 438	/* Remote access is safe as the CPU is dead already */
 439	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
 440	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
 441		hlist_del(&obj->node);
 442		kmem_cache_free(obj_cache, obj);
 443	}
 444
 445	raw_spin_lock_irqsave(&pool_lock, flags);
 446	obj_pool_used -= percpu_pool->obj_free;
 447	debug_objects_freed += percpu_pool->obj_free;
 448	raw_spin_unlock_irqrestore(&pool_lock, flags);
 449
 450	percpu_pool->obj_free = 0;
 451
 452	return 0;
 453}
 454#endif
 455
 456/*
  457 * We ran out of memory. That probably means we have tons of objects
  458 * allocated.
 459 */
 460static void debug_objects_oom(void)
 461{
 462	struct debug_bucket *db = obj_hash;
 463	struct hlist_node *tmp;
 464	HLIST_HEAD(freelist);
 465	struct debug_obj *obj;
 466	unsigned long flags;
 467	int i;
 468
 469	pr_warn("Out of memory. ODEBUG disabled\n");
 470
 471	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
 472		raw_spin_lock_irqsave(&db->lock, flags);
 473		hlist_move_list(&db->list, &freelist);
 474		raw_spin_unlock_irqrestore(&db->lock, flags);
 475
 476		/* Now free them */
 477		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
 478			hlist_del(&obj->node);
 479			free_object(obj);
 480		}
 481	}
 482}
 483
 484/*
 485 * We use the pfn of the address for the hash. That way we can check
 486 * for freed objects simply by checking the affected bucket.
 487 */
 488static struct debug_bucket *get_bucket(unsigned long addr)
 489{
 490	unsigned long hash;
 491
 492	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
 493	return &obj_hash[hash];
 494}
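
/*
 * Illustrative note (not part of the kernel source): because the chunk
 * number rather than the raw address is hashed, every object inside one
 * page-sized chunk maps to the same bucket. A hypothetical helper making
 * that property explicit:
 */
static bool objects_share_bucket(unsigned long a, unsigned long b)
{
	/* Always true when (a & ODEBUG_CHUNK_MASK) == (b & ODEBUG_CHUNK_MASK) */
	return get_bucket(a) == get_bucket(b);
}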
 495
 496static void debug_print_object(struct debug_obj *obj, char *msg)
 497{
 498	const struct debug_obj_descr *descr = obj->descr;
 499	static int limit;
 500
 501	/*
 502	 * Don't report if lookup_object_or_alloc() by the current thread
 503	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
 504	 * concurrent thread turned off debug_objects_enabled and cleared
 505	 * the hash buckets.
 506	 */
 507	if (!debug_objects_enabled)
 508		return;
 509
 510	if (limit < 5 && descr != descr_test) {
 511		void *hint = descr->debug_hint ?
 512			descr->debug_hint(obj->object) : NULL;
 513		limit++;
 514		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
 515				 "object: %p object type: %s hint: %pS\n",
 516			msg, obj_states[obj->state], obj->astate,
 517			obj->object, descr->name, hint);
 518	}
 519	debug_objects_warnings++;
 520}
 521
 522/*
 523 * Try to repair the damage, so we have a better chance to get useful
 524 * debug output.
 525 */
 526static bool
 527debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
 528		   void *addr, enum debug_obj_state state)
 529{
 530	if (fixup && fixup(addr, state)) {
 531		debug_objects_fixups++;
 532		return true;
 533	}
 534	return false;
 535}
 536
 537static void debug_object_is_on_stack(void *addr, int onstack)
 538{
 539	int is_on_stack;
 540	static int limit;
 541
 542	if (limit > 4)
 543		return;
 544
 545	is_on_stack = object_is_on_stack(addr);
 546	if (is_on_stack == onstack)
 547		return;
 548
 549	limit++;
 550	if (is_on_stack)
 551		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
 552			 task_stack_page(current));
 553	else
 554		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
 555			 task_stack_page(current));
 556
 557	WARN_ON(1);
 558}
 559
 560static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
 561						const struct debug_obj_descr *descr,
 562						bool onstack, bool alloc_ifstatic)
 563{
 564	struct debug_obj *obj = lookup_object(addr, b);
 565	enum debug_obj_state state = ODEBUG_STATE_NONE;
 566
 567	if (likely(obj))
 568		return obj;
 569
 570	/*
 571	 * debug_object_init() unconditionally allocates untracked
 572	 * objects. It does not matter whether it is a static object or
 573	 * not.
 574	 *
 575	 * debug_object_assert_init() and debug_object_activate() allow
 576	 * allocation only if the descriptor callback confirms that the
 577	 * object is static and considered initialized. For non-static
 578	 * objects the allocation needs to be done from the fixup callback.
 579	 */
 580	if (unlikely(alloc_ifstatic)) {
 581		if (!descr->is_static_object || !descr->is_static_object(addr))
 582			return ERR_PTR(-ENOENT);
 583		/* Statically allocated objects are considered initialized */
 584		state = ODEBUG_STATE_INIT;
 585	}
 586
 587	obj = alloc_object(addr, b, descr);
 588	if (likely(obj)) {
 589		obj->state = state;
 590		debug_object_is_on_stack(addr, onstack);
 591		return obj;
 592	}
 593
 594	/* Out of memory. Do the cleanup outside of the locked region */
 595	debug_objects_enabled = 0;
 596	return NULL;
 597}
 598
 599static void debug_objects_fill_pool(void)
 600{
 601	/*
 602	 * On RT enabled kernels the pool refill must happen in preemptible
 603	 * context -- for !RT kernels we rely on the fact that spinlock_t and
 604	 * raw_spinlock_t are basically the same type and this lock-type
 605	 * inversion works just fine.
 606	 */
 607	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
 608		/*
 609		 * Annotate away the spinlock_t inside raw_spinlock_t warning
 610		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
 611		 * the preemptible() condition above.
 612		 */
 613		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
 614		lock_map_acquire_try(&fill_pool_map);
 615		fill_pool();
 616		lock_map_release(&fill_pool_map);
 617	}
 618}
 619
 620static void
 621__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
 622{
 623	struct debug_obj *obj, o;
 624	struct debug_bucket *db;
 625	unsigned long flags;
 626
 627	debug_objects_fill_pool();
 628
 629	db = get_bucket((unsigned long) addr);
 630
 631	raw_spin_lock_irqsave(&db->lock, flags);
 632
 633	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
 634	if (unlikely(!obj)) {
 635		raw_spin_unlock_irqrestore(&db->lock, flags);
 636		debug_objects_oom();
 637		return;
 638	}
 639
 640	switch (obj->state) {
 641	case ODEBUG_STATE_NONE:
 642	case ODEBUG_STATE_INIT:
 643	case ODEBUG_STATE_INACTIVE:
 644		obj->state = ODEBUG_STATE_INIT;
 645		raw_spin_unlock_irqrestore(&db->lock, flags);
 646		return;
 647	default:
 648		break;
 649	}
 650
 651	o = *obj;
 652	raw_spin_unlock_irqrestore(&db->lock, flags);
 653	debug_print_object(&o, "init");
 654
 655	if (o.state == ODEBUG_STATE_ACTIVE)
 656		debug_object_fixup(descr->fixup_init, addr, o.state);
 657}
 658
 659/**
 660 * debug_object_init - debug checks when an object is initialized
 661 * @addr:	address of the object
 662 * @descr:	pointer to an object specific debug description structure
 663 */
 664void debug_object_init(void *addr, const struct debug_obj_descr *descr)
 665{
 666	if (!debug_objects_enabled)
 667		return;
 668
 669	__debug_object_init(addr, descr, 0);
 670}
 671EXPORT_SYMBOL_GPL(debug_object_init);
 672
 673/**
 674 * debug_object_init_on_stack - debug checks when an object on stack is
 675 *				initialized
 676 * @addr:	address of the object
 677 * @descr:	pointer to an object specific debug description structure
 678 */
 679void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
 680{
 681	if (!debug_objects_enabled)
 682		return;
 683
 684	__debug_object_init(addr, descr, 1);
 685}
 686EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
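
/*
 * Illustrative sketch (not part of the kernel source): how a subsystem
 * typically wires its objects into the tracker. "struct my_work",
 * "my_work_debug_descr" and the my_work_*() helpers below are hypothetical
 * names used only for this example.
 */
#include <linux/debugobjects.h>

struct my_work {
	unsigned long	flags;
	void		(*func)(struct my_work *w);
};

static const struct debug_obj_descr my_work_debug_descr = {
	.name = "my_work",
};

static void my_work_init(struct my_work *w, void (*func)(struct my_work *))
{
	/* Creates (or resets) a tracker entry in ODEBUG_STATE_INIT */
	debug_object_init(w, &my_work_debug_descr);
	w->flags = 0;
	w->func = func;
}

static void my_work_init_onstack(struct my_work *w,
				 void (*func)(struct my_work *))
{
	/*
	 * On-stack objects must use the _on_stack variant, otherwise
	 * debug_object_is_on_stack() warns about a missing annotation.
	 */
	debug_object_init_on_stack(w, &my_work_debug_descr);
	w->flags = 0;
	w->func = func;
}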
 687
 688/**
 689 * debug_object_activate - debug checks when an object is activated
 690 * @addr:	address of the object
 691 * @descr:	pointer to an object specific debug description structure
 692 * Returns 0 for success, -EINVAL for check failed.
 693 */
 694int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
 695{
 696	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 697	struct debug_bucket *db;
 698	struct debug_obj *obj;
 699	unsigned long flags;
 700
 701	if (!debug_objects_enabled)
 702		return 0;
 703
 704	debug_objects_fill_pool();
 705
 706	db = get_bucket((unsigned long) addr);
 707
 708	raw_spin_lock_irqsave(&db->lock, flags);
 709
 710	obj = lookup_object_or_alloc(addr, db, descr, false, true);
 711	if (unlikely(!obj)) {
 712		raw_spin_unlock_irqrestore(&db->lock, flags);
 713		debug_objects_oom();
 714		return 0;
 715	} else if (likely(!IS_ERR(obj))) {
 716		switch (obj->state) {
 717		case ODEBUG_STATE_ACTIVE:
 718		case ODEBUG_STATE_DESTROYED:
 719			o = *obj;
 720			break;
 721		case ODEBUG_STATE_INIT:
 722		case ODEBUG_STATE_INACTIVE:
 723			obj->state = ODEBUG_STATE_ACTIVE;
 724			fallthrough;
 725		default:
 726			raw_spin_unlock_irqrestore(&db->lock, flags);
 727			return 0;
 728		}
 729	}
 730
 731	raw_spin_unlock_irqrestore(&db->lock, flags);
 732	debug_print_object(&o, "activate");
 733
 734	switch (o.state) {
 735	case ODEBUG_STATE_ACTIVE:
 736	case ODEBUG_STATE_NOTAVAILABLE:
 737		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
 738			return 0;
 739		fallthrough;
 740	default:
 741		return -EINVAL;
 742	}
 743	}
 744EXPORT_SYMBOL_GPL(debug_object_activate);
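
/*
 * Continuing the hypothetical my_work sketch above: activation validates the
 * INIT/INACTIVE -> ACTIVE transition and returns -EINVAL when the tracker
 * refuses it, so a caller can decline to queue a bogus object.
 */
static int my_work_queue(struct my_work *w)
{
	if (debug_object_activate(w, &my_work_debug_descr))
		return -EINVAL;

	/* ... hand the object to the real queueing machinery here ... */
	return 0;
}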
 745
 746/**
 747 * debug_object_deactivate - debug checks when an object is deactivated
 748 * @addr:	address of the object
 749 * @descr:	pointer to an object specific debug description structure
 750 */
 751void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
 752{
 753	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 754	struct debug_bucket *db;
 755	struct debug_obj *obj;
 756	unsigned long flags;
 757
 758	if (!debug_objects_enabled)
 759		return;
 760
 761	db = get_bucket((unsigned long) addr);
 762
 763	raw_spin_lock_irqsave(&db->lock, flags);
 764
 765	obj = lookup_object(addr, db);
 766	if (obj) {
 767		switch (obj->state) {
 768		case ODEBUG_STATE_DESTROYED:
 769			break;
 770		case ODEBUG_STATE_INIT:
 771		case ODEBUG_STATE_INACTIVE:
 772		case ODEBUG_STATE_ACTIVE:
 773			if (obj->astate)
 774				break;
 775			obj->state = ODEBUG_STATE_INACTIVE;
 776			fallthrough;
 777		default:
 778			raw_spin_unlock_irqrestore(&db->lock, flags);
 779			return;
 780		}
 781		o = *obj;
 782	}
 783
 784	raw_spin_unlock_irqrestore(&db->lock, flags);
 785	debug_print_object(&o, "deactivate");
 786}
 787EXPORT_SYMBOL_GPL(debug_object_deactivate);
 788
 789/**
 790 * debug_object_destroy - debug checks when an object is destroyed
 791 * @addr:	address of the object
 792 * @descr:	pointer to an object specific debug description structure
 793 */
 794void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
 795{
 796	struct debug_obj *obj, o;
 797	struct debug_bucket *db;
 798	unsigned long flags;
 799
 800	if (!debug_objects_enabled)
 801		return;
 802
 803	db = get_bucket((unsigned long) addr);
 804
 805	raw_spin_lock_irqsave(&db->lock, flags);
 806
 807	obj = lookup_object(addr, db);
 808	if (!obj) {
 809		raw_spin_unlock_irqrestore(&db->lock, flags);
 810		return;
 811	}
 812
 813	switch (obj->state) {
 814	case ODEBUG_STATE_ACTIVE:
 815	case ODEBUG_STATE_DESTROYED:
 816		break;
 817	case ODEBUG_STATE_NONE:
 818	case ODEBUG_STATE_INIT:
 819	case ODEBUG_STATE_INACTIVE:
 820		obj->state = ODEBUG_STATE_DESTROYED;
 821		fallthrough;
 822	default:
 823		raw_spin_unlock_irqrestore(&db->lock, flags);
 824		return;
 825	}
 826
 827	o = *obj;
 828	raw_spin_unlock_irqrestore(&db->lock, flags);
 829	debug_print_object(&o, "destroy");
 830
 831	if (o.state == ODEBUG_STATE_ACTIVE)
 832		debug_object_fixup(descr->fixup_destroy, addr, o.state);
 833}
 834EXPORT_SYMBOL_GPL(debug_object_destroy);
 835
 836/**
 837 * debug_object_free - debug checks when an object is freed
 838 * @addr:	address of the object
 839 * @descr:	pointer to an object specific debug description structure
 840 */
 841void debug_object_free(void *addr, const struct debug_obj_descr *descr)
 842{
 843	struct debug_obj *obj, o;
 844	struct debug_bucket *db;
 845	unsigned long flags;
 846
 847	if (!debug_objects_enabled)
 848		return;
 849
 850	db = get_bucket((unsigned long) addr);
 851
 852	raw_spin_lock_irqsave(&db->lock, flags);
 853
 854	obj = lookup_object(addr, db);
 855	if (!obj) {
 856		raw_spin_unlock_irqrestore(&db->lock, flags);
 857		return;
 858	}
 859
 860	switch (obj->state) {
 861	case ODEBUG_STATE_ACTIVE:
 862		break;
 863	default:
 864		hlist_del(&obj->node);
 865		raw_spin_unlock_irqrestore(&db->lock, flags);
 866		free_object(obj);
 867		return;
 868	}
 869
 870	o = *obj;
 871	raw_spin_unlock_irqrestore(&db->lock, flags);
 872	debug_print_object(&o, "free");
 873
 874	debug_object_fixup(descr->fixup_free, addr, o.state);
 875}
 876EXPORT_SYMBOL_GPL(debug_object_free);
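
/*
 * Continuing the hypothetical my_work sketch: the tracker entry has to go
 * away before the memory does, otherwise a later allocation at the same
 * address would inherit stale tracking state. Assumes the object came from
 * kmalloc().
 */
static void my_work_destroy(struct my_work *w)
{
	debug_object_free(w, &my_work_debug_descr);
	kfree(w);
}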
 877
 878/**
 879 * debug_object_assert_init - debug checks when an object should be initialized
 880 * @addr:	address of the object
 881 * @descr:	pointer to an object specific debug description structure
 882 */
 883void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
 884{
 885	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 886	struct debug_bucket *db;
 887	struct debug_obj *obj;
 888	unsigned long flags;
 889
 890	if (!debug_objects_enabled)
 891		return;
 892
 893	debug_objects_fill_pool();
 894
 895	db = get_bucket((unsigned long) addr);
 896
 897	raw_spin_lock_irqsave(&db->lock, flags);
 898	obj = lookup_object_or_alloc(addr, db, descr, false, true);
 899	raw_spin_unlock_irqrestore(&db->lock, flags);
 900	if (likely(!IS_ERR_OR_NULL(obj)))
 901		return;
 902
 903	/* If NULL the allocation has hit OOM */
 904	if (!obj) {
 905		debug_objects_oom();
 906		return;
 907	}
 908
 909	/* Object is neither tracked nor static. It's not initialized. */
 910	debug_print_object(&o, "assert_init");
 911	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
 912}
 913EXPORT_SYMBOL_GPL(debug_object_assert_init);
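
/*
 * Continuing the hypothetical sketch: a path that requires a previously
 * initialized object can assert that. For objects the descriptor's
 * is_static_object() callback identifies as static, the tracker creates the
 * entry lazily instead of warning (see lookup_object_or_alloc() above).
 */
static void my_work_requeue(struct my_work *w)
{
	debug_object_assert_init(w, &my_work_debug_descr);
	/* ... requeue ... */
}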
 914
 915/**
 916 * debug_object_active_state - debug checks object usage state machine
 917 * @addr:	address of the object
 918 * @descr:	pointer to an object specific debug description structure
 919 * @expect:	expected state
 920 * @next:	state to move to if expected state is found
 921 */
 922void
 923debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
 924			  unsigned int expect, unsigned int next)
 925{
 926	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 927	struct debug_bucket *db;
 928	struct debug_obj *obj;
 929	unsigned long flags;
 930
 931	if (!debug_objects_enabled)
 932		return;
 933
 934	db = get_bucket((unsigned long) addr);
 935
 936	raw_spin_lock_irqsave(&db->lock, flags);
 937
 938	obj = lookup_object(addr, db);
 939	if (obj) {
 940		switch (obj->state) {
 941		case ODEBUG_STATE_ACTIVE:
 942			if (obj->astate != expect)
 943				break;
 944			obj->astate = next;
 945			raw_spin_unlock_irqrestore(&db->lock, flags);
 946			return;
 947		default:
 948			break;
 949		}
 950		o = *obj;
 951	}
 952
 953	raw_spin_unlock_irqrestore(&db->lock, flags);
 954	debug_print_object(&o, "active_state");
 955}
 956EXPORT_SYMBOL_GPL(debug_object_active_state);
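
/*
 * Illustrative sketch: astate lets a subsystem layer its own sub-states on
 * top of ODEBUG_STATE_ACTIVE; RCU uses this pattern to track rcu_head
 * queueing. The MY_WORK_* values and helpers are hypothetical.
 */
#define MY_WORK_READY	0	/* matches the initial astate of 0 */
#define MY_WORK_QUEUED	1

static void my_work_mark_queued(struct my_work *w)
{
	/* Warns unless the object is ACTIVE and astate == MY_WORK_READY */
	debug_object_active_state(w, &my_work_debug_descr,
				  MY_WORK_READY, MY_WORK_QUEUED);
}

static void my_work_mark_ready(struct my_work *w)
{
	/* Note: debug_object_deactivate() refuses while astate != 0 */
	debug_object_active_state(w, &my_work_debug_descr,
				  MY_WORK_QUEUED, MY_WORK_READY);
}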
 957
 958#ifdef CONFIG_DEBUG_OBJECTS_FREE
 959static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 960{
 961	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
 962	int cnt, objs_checked = 0;
 963	struct debug_obj *obj, o;
 964	struct debug_bucket *db;
 965	struct hlist_node *tmp;
 966
 967	saddr = (unsigned long) address;
 968	eaddr = saddr + size;
 969	paddr = saddr & ODEBUG_CHUNK_MASK;
 970	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
 971	chunks >>= ODEBUG_CHUNK_SHIFT;
 972
 973	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
 974		db = get_bucket(paddr);
 975
 976repeat:
 977		cnt = 0;
 978		raw_spin_lock_irqsave(&db->lock, flags);
 979		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
 980			cnt++;
 981			oaddr = (unsigned long) obj->object;
 982			if (oaddr < saddr || oaddr >= eaddr)
 983				continue;
 984
 985			switch (obj->state) {
 986			case ODEBUG_STATE_ACTIVE:
 987				o = *obj;
 988				raw_spin_unlock_irqrestore(&db->lock, flags);
 989				debug_print_object(&o, "free");
 990				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
 991				goto repeat;
 992			default:
 993				hlist_del(&obj->node);
 994				__free_object(obj);
 995				break;
 996			}
 997		}
 998		raw_spin_unlock_irqrestore(&db->lock, flags);
 999
1000		if (cnt > debug_objects_maxchain)
1001			debug_objects_maxchain = cnt;
1002
1003		objs_checked += cnt;
1004	}
1005
1006	if (objs_checked > debug_objects_maxchecked)
1007		debug_objects_maxchecked = objs_checked;
1008
1009	/* Schedule work to actually kmem_cache_free() objects */
1010	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
1011		WRITE_ONCE(obj_freeing, true);
1012		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1013	}
1014}
1015
1016void debug_check_no_obj_freed(const void *address, unsigned long size)
1017{
1018	if (debug_objects_enabled)
1019		__debug_check_no_obj_freed(address, size);
1020}
1021#endif
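
/*
 * Illustrative sketch: with CONFIG_DEBUG_OBJECTS_FREE the slab and page
 * allocators call debug_check_no_obj_freed() on their free paths. A private
 * allocator that recycles memory itself could do the same; my_pool_recycle()
 * is a hypothetical example.
 */
static void my_pool_recycle(void *block, unsigned long size)
{
	/* Complain about (and fix up) tracked objects still live in block */
	debug_check_no_obj_freed(block, size);
	/* ... return block to the private pool ... */
}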
1022
1023#ifdef CONFIG_DEBUG_FS
1024
1025static int debug_stats_show(struct seq_file *m, void *v)
1026{
1027	int cpu, obj_percpu_free = 0;
1028
1029	for_each_possible_cpu(cpu)
1030		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
1031
1032	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
1033	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
1034	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
1035	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
1036	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
1037	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
1038	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1039	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
1040	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
1041	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
1042	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
1043	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
1044	return 0;
1045}
1046DEFINE_SHOW_ATTRIBUTE(debug_stats);
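
/*
 * Example of the resulting debugfs output (all values illustrative):
 *
 *   # cat /sys/kernel/debug/debug_objects/stats
 *   max_chain     :2
 *   max_checked   :16
 *   warnings      :0
 *   fixups        :0
 *   pool_free     :1024
 *   pool_pcp_free :32
 *   pool_min_free :512
 *   pool_used     :256
 *   pool_max_used :1024
 *   on_free_list  :0
 *   objs_allocated:2048
 *   objs_freed    :1024
 */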
1047
1048static int __init debug_objects_init_debugfs(void)
1049{
1050	struct dentry *dbgdir;
1051
1052	if (!debug_objects_enabled)
1053		return 0;
1054
1055	dbgdir = debugfs_create_dir("debug_objects", NULL);
1056
1057	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1058
1059	return 0;
1060}
1061__initcall(debug_objects_init_debugfs);
1062
1063#else
1064static inline void debug_objects_init_debugfs(void) { }
1065#endif
1066
1067#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1068
1069/* Random data structure for the self test */
1070struct self_test {
1071	unsigned long	dummy1[6];
1072	int		static_init;
1073	unsigned long	dummy2[3];
1074};
1075
1076static __initconst const struct debug_obj_descr descr_type_test;
1077
1078static bool __init is_static_object(void *addr)
1079{
1080	struct self_test *obj = addr;
1081
1082	return obj->static_init;
1083}
1084
1085/*
1086 * fixup_init is called when:
1087 * - an active object is initialized
1088 */
1089static bool __init fixup_init(void *addr, enum debug_obj_state state)
1090{
1091	struct self_test *obj = addr;
1092
1093	switch (state) {
1094	case ODEBUG_STATE_ACTIVE:
1095		debug_object_deactivate(obj, &descr_type_test);
1096		debug_object_init(obj, &descr_type_test);
1097		return true;
1098	default:
1099		return false;
1100	}
1101}
1102
1103/*
1104 * fixup_activate is called when:
1105 * - an active object is activated
1106 * - an unknown non-static object is activated
1107 */
1108static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1109{
1110	struct self_test *obj = addr;
1111
1112	switch (state) {
1113	case ODEBUG_STATE_NOTAVAILABLE:
1114		return true;
1115	case ODEBUG_STATE_ACTIVE:
1116		debug_object_deactivate(obj, &descr_type_test);
1117		debug_object_activate(obj, &descr_type_test);
1118		return true;
1119
1120	default:
1121		return false;
1122	}
1123}
1124
1125/*
1126 * fixup_destroy is called when:
1127 * - an active object is destroyed
1128 */
1129static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1130{
1131	struct self_test *obj = addr;
1132
1133	switch (state) {
1134	case ODEBUG_STATE_ACTIVE:
1135		debug_object_deactivate(obj, &descr_type_test);
1136		debug_object_destroy(obj, &descr_type_test);
1137		return true;
1138	default:
1139		return false;
1140	}
1141}
1142
1143/*
1144 * fixup_free is called when:
1145 * - an active object is freed
1146 */
1147static bool __init fixup_free(void *addr, enum debug_obj_state state)
1148{
1149	struct self_test *obj = addr;
1150
1151	switch (state) {
1152	case ODEBUG_STATE_ACTIVE:
1153		debug_object_deactivate(obj, &descr_type_test);
1154		debug_object_free(obj, &descr_type_test);
1155		return true;
1156	default:
1157		return false;
1158	}
1159}
1160
1161static int __init
1162check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1163{
1164	struct debug_bucket *db;
1165	struct debug_obj *obj;
1166	unsigned long flags;
1167	int res = -EINVAL;
1168
1169	db = get_bucket((unsigned long) addr);
1170
1171	raw_spin_lock_irqsave(&db->lock, flags);
1172
1173	obj = lookup_object(addr, db);
1174	if (!obj && state != ODEBUG_STATE_NONE) {
1175		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1176		goto out;
1177	}
1178	if (obj && obj->state != state) {
1179		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1180		       obj->state, state);
1181		goto out;
1182	}
1183	if (fixups != debug_objects_fixups) {
1184		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1185		       fixups, debug_objects_fixups);
1186		goto out;
1187	}
1188	if (warnings != debug_objects_warnings) {
1189		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1190		       warnings, debug_objects_warnings);
1191		goto out;
1192	}
1193	res = 0;
1194out:
1195	raw_spin_unlock_irqrestore(&db->lock, flags);
1196	if (res)
1197		debug_objects_enabled = 0;
1198	return res;
1199}
1200
1201static __initconst const struct debug_obj_descr descr_type_test = {
1202	.name			= "selftest",
1203	.is_static_object	= is_static_object,
1204	.fixup_init		= fixup_init,
1205	.fixup_activate		= fixup_activate,
1206	.fixup_destroy		= fixup_destroy,
1207	.fixup_free		= fixup_free,
1208};
1209
1210static __initdata struct self_test obj = { .static_init = 0 };
1211
1212static void __init debug_objects_selftest(void)
1213{
1214	int fixups, oldfixups, warnings, oldwarnings;
1215	unsigned long flags;
1216
1217	local_irq_save(flags);
1218
1219	fixups = oldfixups = debug_objects_fixups;
1220	warnings = oldwarnings = debug_objects_warnings;
1221	descr_test = &descr_type_test;
1222
1223	debug_object_init(&obj, &descr_type_test);
1224	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1225		goto out;
1226	debug_object_activate(&obj, &descr_type_test);
1227	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1228		goto out;
1229	debug_object_activate(&obj, &descr_type_test);
1230	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1231		goto out;
1232	debug_object_deactivate(&obj, &descr_type_test);
1233	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1234		goto out;
1235	debug_object_destroy(&obj, &descr_type_test);
1236	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1237		goto out;
1238	debug_object_init(&obj, &descr_type_test);
1239	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1240		goto out;
1241	debug_object_activate(&obj, &descr_type_test);
1242	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1243		goto out;
1244	debug_object_deactivate(&obj, &descr_type_test);
1245	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1246		goto out;
1247	debug_object_free(&obj, &descr_type_test);
1248	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1249		goto out;
1250
1251	obj.static_init = 1;
1252	debug_object_activate(&obj, &descr_type_test);
1253	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1254		goto out;
1255	debug_object_init(&obj, &descr_type_test);
1256	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1257		goto out;
1258	debug_object_free(&obj, &descr_type_test);
1259	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1260		goto out;
1261
1262#ifdef CONFIG_DEBUG_OBJECTS_FREE
1263	debug_object_init(&obj, &descr_type_test);
1264	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1265		goto out;
1266	debug_object_activate(&obj, &descr_type_test);
1267	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1268		goto out;
1269	__debug_check_no_obj_freed(&obj, sizeof(obj));
1270	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1271		goto out;
1272#endif
1273	pr_info("selftest passed\n");
1274
1275out:
1276	debug_objects_fixups = oldfixups;
1277	debug_objects_warnings = oldwarnings;
1278	descr_test = NULL;
1279
1280	local_irq_restore(flags);
1281}
1282#else
1283static inline void debug_objects_selftest(void) { }
1284#endif
1285
1286/*
1287 * Called during early boot to initialize the hash buckets and link
 1288 * the static object pool objects into the pool list. After this call
1289 * the object tracker is fully operational.
1290 */
1291void __init debug_objects_early_init(void)
1292{
1293	int i;
1294
1295	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1296		raw_spin_lock_init(&obj_hash[i].lock);
1297
1298	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1299		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1300}
1301
1302/*
1303 * Convert the statically allocated objects to dynamic ones:
1304 */
1305static int __init debug_objects_replace_static_objects(void)
1306{
1307	struct debug_bucket *db = obj_hash;
1308	struct hlist_node *tmp;
1309	struct debug_obj *obj, *new;
1310	HLIST_HEAD(objects);
1311	int i, cnt = 0;
1312
1313	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1314		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1315		if (!obj)
1316			goto free;
1317		hlist_add_head(&obj->node, &objects);
1318	}
1319
1320	debug_objects_allocated += i;
1321
1322	/*
 1323	 * debug_objects_mem_init() is called early, while only one CPU is up
 1324	 * and interrupts are disabled, so it is safe to replace the
1325	 * active object references.
1326	 */
1327
1328	/* Remove the statically allocated objects from the pool */
1329	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1330		hlist_del(&obj->node);
1331	/* Move the allocated objects to the pool */
1332	hlist_move_list(&objects, &obj_pool);
1333
1334	/* Replace the active object references */
1335	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1336		hlist_move_list(&db->list, &objects);
1337
1338		hlist_for_each_entry(obj, &objects, node) {
1339			new = hlist_entry(obj_pool.first, typeof(*obj), node);
1340			hlist_del(&new->node);
1341			/* copy object data */
1342			*new = *obj;
1343			hlist_add_head(&new->node, &db->list);
1344			cnt++;
1345		}
1346	}
1347
1348	pr_debug("%d of %d active objects replaced\n",
1349		 cnt, obj_pool_used);
1350	return 0;
1351free:
1352	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1353		hlist_del(&obj->node);
1354		kmem_cache_free(obj_cache, obj);
1355	}
1356	return -ENOMEM;
1357}
1358
1359/*
 1360 * Called after the kmem_caches are functional to set up a dedicated
 1361 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 1362 * prevents the debug code from being called on kmem_cache_free() for
 1363 * the debug tracker objects themselves, avoiding recursive calls.
1364 */
1365void __init debug_objects_mem_init(void)
1366{
1367	int cpu, extras;
1368
1369	if (!debug_objects_enabled)
1370		return;
1371
1372	/*
1373	 * Initialize the percpu object pools
1374	 *
 1375	 * Initialization is not strictly necessary, but is done for
1376	 * completeness.
1377	 */
1378	for_each_possible_cpu(cpu)
1379		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
1380
1381	obj_cache = kmem_cache_create("debug_objects_cache",
1382				      sizeof (struct debug_obj), 0,
1383				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1384				      NULL);
1385
1386	if (!obj_cache || debug_objects_replace_static_objects()) {
1387		debug_objects_enabled = 0;
1388		kmem_cache_destroy(obj_cache);
1389		pr_warn("out of memory.\n");
1390		return;
1391	} else
1392		debug_objects_selftest();
1393
1394#ifdef CONFIG_HOTPLUG_CPU
1395	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1396					object_cpu_offline);
1397#endif
1398
1399	/*
1400	 * Increase the thresholds for allocating and freeing objects
1401	 * according to the number of possible CPUs available in the system.
1402	 */
1403	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1404	debug_objects_pool_size += extras;
1405	debug_objects_pool_min_level += extras;
1406}