v5.14.15
   1/*
   2 * Generic infrastructure for lifetime debugging of objects.
   3 *
   4 * Started by Thomas Gleixner
   5 *
   6 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
   7 *
   8 * For licencing details see kernel-base/COPYING
   9 */
  10
  11#define pr_fmt(fmt) "ODEBUG: " fmt
  12
  13#include <linux/debugobjects.h>
  14#include <linux/interrupt.h>
  15#include <linux/sched.h>
  16#include <linux/sched/task_stack.h>
  17#include <linux/seq_file.h>
  18#include <linux/debugfs.h>
  19#include <linux/slab.h>
  20#include <linux/hash.h>
  21#include <linux/kmemleak.h>
  22#include <linux/cpu.h>
  23
  24#define ODEBUG_HASH_BITS	14
  25#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
  26
  27#define ODEBUG_POOL_SIZE	1024
  28#define ODEBUG_POOL_MIN_LEVEL	256
  29#define ODEBUG_POOL_PERCPU_SIZE	64
  30#define ODEBUG_BATCH_SIZE	16
  31
  32#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
  33#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
  34#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
  35
  36/*
  37 * We limit the freeing of debug objects via workqueue at a maximum
  38 * frequency of 10Hz and about 1024 objects for each freeing operation.
  39 * So it is freeing at most 10k debug objects per second.
  40 */
  41#define ODEBUG_FREE_WORK_MAX	1024
  42#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
  43
  44struct debug_bucket {
  45	struct hlist_head	list;
  46	raw_spinlock_t		lock;
  47};
  48
  49/*
  50 * Debug object percpu free list
  51 * Access is protected by disabling irq
  52 */
  53struct debug_percpu_free {
  54	struct hlist_head	free_objs;
  55	int			obj_free;
  56};
  57
  58static DEFINE_PER_CPU(struct debug_percpu_free, percpu_obj_pool);
  59
  60static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
  61
  62static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
  63
  64static DEFINE_RAW_SPINLOCK(pool_lock);
  65
  66static HLIST_HEAD(obj_pool);
  67static HLIST_HEAD(obj_to_free);
  68
  69/*
  70 * Because of the presence of percpu free pools, obj_pool_free will
  71 * under-count those in the percpu free pools. Similarly, obj_pool_used
  72 * will over-count those in the percpu free pools. Adjustments will be
  73 * made at debug_stats_show(). Both obj_pool_min_free and obj_pool_max_used
  74 * can be off.
  75 */
  76static int			obj_pool_min_free = ODEBUG_POOL_SIZE;
  77static int			obj_pool_free = ODEBUG_POOL_SIZE;
  78static int			obj_pool_used;
  79static int			obj_pool_max_used;
  80static bool			obj_freeing;
  81/* The number of objs on the global free list */
  82static int			obj_nr_tofree;
  83
  84static int			debug_objects_maxchain __read_mostly;
  85static int __maybe_unused	debug_objects_maxchecked __read_mostly;
  86static int			debug_objects_fixups __read_mostly;
  87static int			debug_objects_warnings __read_mostly;
  88static int			debug_objects_enabled __read_mostly
  89				= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
  90static int			debug_objects_pool_size __read_mostly
  91				= ODEBUG_POOL_SIZE;
  92static int			debug_objects_pool_min_level __read_mostly
  93				= ODEBUG_POOL_MIN_LEVEL;
  94static const struct debug_obj_descr *descr_test  __read_mostly;
  95static struct kmem_cache	*obj_cache __read_mostly;
  96
  97/*
  98 * Track numbers of kmem_cache_alloc()/free() calls done.
  99 */
 100static int			debug_objects_allocated;
 101static int			debug_objects_freed;
 102
 103static void free_obj_work(struct work_struct *work);
 104static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
 105
 106static int __init enable_object_debug(char *str)
 107{
 108	debug_objects_enabled = 1;
 109	return 0;
 110}
 111
 112static int __init disable_object_debug(char *str)
 113{
 114	debug_objects_enabled = 0;
 115	return 0;
 116}
 117
 118early_param("debug_objects", enable_object_debug);
 119early_param("no_debug_objects", disable_object_debug);
 120
 121static const char *obj_states[ODEBUG_STATE_MAX] = {
 122	[ODEBUG_STATE_NONE]		= "none",
 123	[ODEBUG_STATE_INIT]		= "initialized",
 124	[ODEBUG_STATE_INACTIVE]		= "inactive",
 125	[ODEBUG_STATE_ACTIVE]		= "active",
 126	[ODEBUG_STATE_DESTROYED]	= "destroyed",
 127	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
 128};
 129
 130static void fill_pool(void)
 131{
 132	gfp_t gfp = GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN;
 133	struct debug_obj *obj;
 134	unsigned long flags;
 135
 136	if (likely(READ_ONCE(obj_pool_free) >= debug_objects_pool_min_level))
 137		return;
 138
 139	/*
 140	 * Reuse objs from the global free list; they will be reinitialized
 141	 * when allocating.
 142	 *
 143	 * Both obj_nr_tofree and obj_pool_free are checked locklessly; the
 144	 * READ_ONCE()s pair with the WRITE_ONCE()s in pool_lock critical
 145	 * sections.
 146	 */
 147	while (READ_ONCE(obj_nr_tofree) && (READ_ONCE(obj_pool_free) < obj_pool_min_free)) {
 148		raw_spin_lock_irqsave(&pool_lock, flags);
 149		/*
 150		 * Recheck with the lock held as the worker thread might have
 151		 * won the race and freed the global free list already.
 152		 */
 153		while (obj_nr_tofree && (obj_pool_free < obj_pool_min_free)) {
 154			obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 155			hlist_del(&obj->node);
 156			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 157			hlist_add_head(&obj->node, &obj_pool);
 158			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 159		}
 160		raw_spin_unlock_irqrestore(&pool_lock, flags);
 161	}
 162
 163	if (unlikely(!obj_cache))
 164		return;
 165
 166	while (READ_ONCE(obj_pool_free) < debug_objects_pool_min_level) {
 167		struct debug_obj *new[ODEBUG_BATCH_SIZE];
 168		int cnt;
 169
 170		for (cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
 171			new[cnt] = kmem_cache_zalloc(obj_cache, gfp);
 172			if (!new[cnt])
 173				break;
 174		}
 175		if (!cnt)
 176			return;
 177
 178		raw_spin_lock_irqsave(&pool_lock, flags);
 179		while (cnt) {
 180			hlist_add_head(&new[--cnt]->node, &obj_pool);
 181			debug_objects_allocated++;
 182			WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 183		}
 184		raw_spin_unlock_irqrestore(&pool_lock, flags);
 185	}
 186}
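A note on the allocation flags: fill_pool() can be called from contexts that must not sleep (debug_object_init() runs in whatever context the tracked object happens to be initialized in), which is presumably why it uses GFP_ATOMIC | __GFP_NORETRY | __GFP_NOWARN — an opportunistic, non-sleeping allocation that quietly gives up under memory pressure instead of retrying or spamming warnings.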
 187
 188/*
 189 * Lookup an object in the hash bucket.
 190 */
 191static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 192{
 193	struct debug_obj *obj;
 194	int cnt = 0;
 195
 196	hlist_for_each_entry(obj, &b->list, node) {
 197		cnt++;
 198		if (obj->object == addr)
 199			return obj;
 200	}
 201	if (cnt > debug_objects_maxchain)
 202		debug_objects_maxchain = cnt;
 203
 204	return NULL;
 205}
 206
 207/*
 208 * Allocate a new object from the hlist
 209 */
 210static struct debug_obj *__alloc_object(struct hlist_head *list)
 211{
 212	struct debug_obj *obj = NULL;
 213
 214	if (list->first) {
 215		obj = hlist_entry(list->first, typeof(*obj), node);
 216		hlist_del(&obj->node);
 217	}
 218
 219	return obj;
 220}
 221
 222/*
 223 * Allocate a new object. If the pool is empty, switch off the debugger.
 224 * Must be called with interrupts disabled.
 225 */
 226static struct debug_obj *
 227alloc_object(void *addr, struct debug_bucket *b, const struct debug_obj_descr *descr)
 228{
 229	struct debug_percpu_free *percpu_pool = this_cpu_ptr(&percpu_obj_pool);
 230	struct debug_obj *obj;
 231
 232	if (likely(obj_cache)) {
 233		obj = __alloc_object(&percpu_pool->free_objs);
 234		if (obj) {
 235			percpu_pool->obj_free--;
 236			goto init_obj;
 237		}
 238	}
 239
 240	raw_spin_lock(&pool_lock);
 241	obj = __alloc_object(&obj_pool);
 242	if (obj) {
 243		obj_pool_used++;
 244		WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 245
 246		/*
 247		 * Looking ahead, allocate one batch of debug objects and
 248		 * put them into the percpu free pool.
 249		 */
 250		if (likely(obj_cache)) {
 251			int i;
 252
 253			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
 254				struct debug_obj *obj2;
 255
 256				obj2 = __alloc_object(&obj_pool);
 257				if (!obj2)
 258					break;
 259				hlist_add_head(&obj2->node,
 260					       &percpu_pool->free_objs);
 261				percpu_pool->obj_free++;
 262				obj_pool_used++;
 263				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 264			}
 265		}
 266
 267		if (obj_pool_used > obj_pool_max_used)
 268			obj_pool_max_used = obj_pool_used;
 269
 270		if (obj_pool_free < obj_pool_min_free)
 271			obj_pool_min_free = obj_pool_free;
 272	}
 273	raw_spin_unlock(&pool_lock);
 274
 275init_obj:
 276	if (obj) {
 277		obj->object = addr;
 278		obj->descr  = descr;
 279		obj->state  = ODEBUG_STATE_NONE;
 280		obj->astate = 0;
 281		hlist_add_head(&obj->node, &b->list);
 282	}
 283	return obj;
 284}
 285
 286/*
 287 * workqueue function to free objects.
 288 *
 289 * To reduce contention on the global pool_lock, the actual freeing of
 290 * debug objects will be delayed if the pool_lock is busy.
 291 */
 292static void free_obj_work(struct work_struct *work)
 293{
 294	struct hlist_node *tmp;
 295	struct debug_obj *obj;
 296	unsigned long flags;
 297	HLIST_HEAD(tofree);
 298
 299	WRITE_ONCE(obj_freeing, false);
 300	if (!raw_spin_trylock_irqsave(&pool_lock, flags))
 301		return;
 302
 303	if (obj_pool_free >= debug_objects_pool_size)
 304		goto free_objs;
 305
 306	/*
 307	 * The objs on the pool list might be allocated before the work is
  308	 * run, so recheck whether the pool list is full. If not, refill the
  309	 * pool list from the global free list. As it is likely that a workload
 310	 * may be gearing up to use more and more objects, don't free any
 311	 * of them until the next round.
 312	 */
 313	while (obj_nr_tofree && obj_pool_free < debug_objects_pool_size) {
 314		obj = hlist_entry(obj_to_free.first, typeof(*obj), node);
 315		hlist_del(&obj->node);
 316		hlist_add_head(&obj->node, &obj_pool);
 317		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 318		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree - 1);
 319	}
 320	raw_spin_unlock_irqrestore(&pool_lock, flags);
 321	return;
 322
 323free_objs:
 324	/*
 325	 * Pool list is already full and there are still objs on the free
 326	 * list. Move remaining free objs to a temporary list to free the
 327	 * memory outside the pool_lock held region.
 328	 */
 329	if (obj_nr_tofree) {
 330		hlist_move_list(&obj_to_free, &tofree);
 331		debug_objects_freed += obj_nr_tofree;
 332		WRITE_ONCE(obj_nr_tofree, 0);
 333	}
 334	raw_spin_unlock_irqrestore(&pool_lock, flags);
 335
 336	hlist_for_each_entry_safe(obj, tmp, &tofree, node) {
 337		hlist_del(&obj->node);
 338		kmem_cache_free(obj_cache, obj);
 339	}
 340}
 341
 342static void __free_object(struct debug_obj *obj)
 343{
 344	struct debug_obj *objs[ODEBUG_BATCH_SIZE];
 345	struct debug_percpu_free *percpu_pool;
 346	int lookahead_count = 0;
 347	unsigned long flags;
 348	bool work;
 349
 350	local_irq_save(flags);
 351	if (!obj_cache)
 352		goto free_to_obj_pool;
 353
 354	/*
 355	 * Try to free it into the percpu pool first.
 356	 */
 357	percpu_pool = this_cpu_ptr(&percpu_obj_pool);
 358	if (percpu_pool->obj_free < ODEBUG_POOL_PERCPU_SIZE) {
 359		hlist_add_head(&obj->node, &percpu_pool->free_objs);
 360		percpu_pool->obj_free++;
 361		local_irq_restore(flags);
 362		return;
 363	}
 364
 365	/*
 366	 * As the percpu pool is full, look ahead and pull out a batch
 367	 * of objects from the percpu pool and free them as well.
 368	 */
 369	for (; lookahead_count < ODEBUG_BATCH_SIZE; lookahead_count++) {
 370		objs[lookahead_count] = __alloc_object(&percpu_pool->free_objs);
 371		if (!objs[lookahead_count])
 372			break;
 373		percpu_pool->obj_free--;
 374	}
 375
 376free_to_obj_pool:
 377	raw_spin_lock(&pool_lock);
 378	work = (obj_pool_free > debug_objects_pool_size) && obj_cache &&
 379	       (obj_nr_tofree < ODEBUG_FREE_WORK_MAX);
 380	obj_pool_used--;
 381
 382	if (work) {
 383		WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
 384		hlist_add_head(&obj->node, &obj_to_free);
 385		if (lookahead_count) {
 386			WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + lookahead_count);
 387			obj_pool_used -= lookahead_count;
 388			while (lookahead_count) {
 389				hlist_add_head(&objs[--lookahead_count]->node,
 390					       &obj_to_free);
 391			}
 392		}
 393
 394		if ((obj_pool_free > debug_objects_pool_size) &&
 395		    (obj_nr_tofree < ODEBUG_FREE_WORK_MAX)) {
 396			int i;
 397
 398			/*
 399			 * Free one more batch of objects from obj_pool.
 400			 */
 401			for (i = 0; i < ODEBUG_BATCH_SIZE; i++) {
 402				obj = __alloc_object(&obj_pool);
 403				hlist_add_head(&obj->node, &obj_to_free);
 404				WRITE_ONCE(obj_pool_free, obj_pool_free - 1);
 405				WRITE_ONCE(obj_nr_tofree, obj_nr_tofree + 1);
 406			}
 407		}
 408	} else {
 409		WRITE_ONCE(obj_pool_free, obj_pool_free + 1);
 410		hlist_add_head(&obj->node, &obj_pool);
 411		if (lookahead_count) {
 412			WRITE_ONCE(obj_pool_free, obj_pool_free + lookahead_count);
 413			obj_pool_used -= lookahead_count;
 414			while (lookahead_count) {
 415				hlist_add_head(&objs[--lookahead_count]->node,
 416					       &obj_pool);
 417			}
 418		}
 419	}
 420	raw_spin_unlock(&pool_lock);
 421	local_irq_restore(flags);
 422}
 423
 424/*
 425 * Put the object back into the pool and schedule work to free objects
 426 * if necessary.
 427 */
 428static void free_object(struct debug_obj *obj)
 429{
 430	__free_object(obj);
 431	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
 432		WRITE_ONCE(obj_freeing, true);
 433		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
 434	}
 435}
 436
 437#ifdef CONFIG_HOTPLUG_CPU
 438static int object_cpu_offline(unsigned int cpu)
 439{
 440	struct debug_percpu_free *percpu_pool;
 441	struct hlist_node *tmp;
 442	struct debug_obj *obj;
 443
 444	/* Remote access is safe as the CPU is dead already */
 445	percpu_pool = per_cpu_ptr(&percpu_obj_pool, cpu);
 446	hlist_for_each_entry_safe(obj, tmp, &percpu_pool->free_objs, node) {
 447		hlist_del(&obj->node);
 448		kmem_cache_free(obj_cache, obj);
 449	}
 450	percpu_pool->obj_free = 0;
 451
 452	return 0;
 453}
 454#endif
 455
 456/*
  457 * We ran out of memory. That means we probably have tons of objects
 458 * allocated.
 459 */
 460static void debug_objects_oom(void)
 461{
 462	struct debug_bucket *db = obj_hash;
 463	struct hlist_node *tmp;
 464	HLIST_HEAD(freelist);
 465	struct debug_obj *obj;
 466	unsigned long flags;
 467	int i;
 468
 469	pr_warn("Out of memory. ODEBUG disabled\n");
 470
 471	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
 472		raw_spin_lock_irqsave(&db->lock, flags);
 473		hlist_move_list(&db->list, &freelist);
 474		raw_spin_unlock_irqrestore(&db->lock, flags);
 475
 476		/* Now free them */
 477		hlist_for_each_entry_safe(obj, tmp, &freelist, node) {
 478			hlist_del(&obj->node);
 479			free_object(obj);
 480		}
 481	}
 482}
 483
 484/*
 485 * We use the pfn of the address for the hash. That way we can check
 486 * for freed objects simply by checking the affected bucket.
 487 */
 488static struct debug_bucket *get_bucket(unsigned long addr)
 489{
 490	unsigned long hash;
 491
 492	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
 493	return &obj_hash[hash];
 494}
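Concretely: with ODEBUG_CHUNK_SHIFT == PAGE_SHIFT == 12, any two addresses inside the same 4 KiB chunk select the same bucket — get_bucket(0x1000) and get_bucket(0x1fff) are identical — which is what lets __debug_check_no_obj_freed() below scan a freed memory range one bucket per page-sized chunk.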
 495
 496static void debug_print_object(struct debug_obj *obj, char *msg)
 497{
 498	const struct debug_obj_descr *descr = obj->descr;
 499	static int limit;
 500
 501	if (limit < 5 && descr != descr_test) {
 502		void *hint = descr->debug_hint ?
 503			descr->debug_hint(obj->object) : NULL;
 504		limit++;
 505		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
 506				 "object type: %s hint: %pS\n",
 507			msg, obj_states[obj->state], obj->astate,
 508			descr->name, hint);
 509	}
 510	debug_objects_warnings++;
 511}
 512
 513/*
 514 * Try to repair the damage, so we have a better chance to get useful
 515 * debug output.
 516 */
 517static bool
 518debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
 519		   void * addr, enum debug_obj_state state)
 520{
 521	if (fixup && fixup(addr, state)) {
 522		debug_objects_fixups++;
 523		return true;
 524	}
 525	return false;
 526}
 527
 528static void debug_object_is_on_stack(void *addr, int onstack)
 529{
 530	int is_on_stack;
 531	static int limit;
 532
 533	if (limit > 4)
 534		return;
 535
 536	is_on_stack = object_is_on_stack(addr);
 537	if (is_on_stack == onstack)
 538		return;
 539
 540	limit++;
 541	if (is_on_stack)
 542		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
 543			 task_stack_page(current));
 544	else
 545		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
 546			 task_stack_page(current));
 547
 548	WARN_ON(1);
 549}
 550
 551static void
 552__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
 553{
 554	enum debug_obj_state state;
 555	bool check_stack = false;
 556	struct debug_bucket *db;
 557	struct debug_obj *obj;
 558	unsigned long flags;
 559
 560	fill_pool();
 561
 562	db = get_bucket((unsigned long) addr);
 563
 564	raw_spin_lock_irqsave(&db->lock, flags);
 565
 566	obj = lookup_object(addr, db);
 567	if (!obj) {
 568		obj = alloc_object(addr, db, descr);
 569		if (!obj) {
 570			debug_objects_enabled = 0;
 571			raw_spin_unlock_irqrestore(&db->lock, flags);
 572			debug_objects_oom();
 573			return;
 574		}
 575		check_stack = true;
 576	}
 577
 578	switch (obj->state) {
 579	case ODEBUG_STATE_NONE:
 580	case ODEBUG_STATE_INIT:
 581	case ODEBUG_STATE_INACTIVE:
 582		obj->state = ODEBUG_STATE_INIT;
 583		break;
 584
 585	case ODEBUG_STATE_ACTIVE:
 586		state = obj->state;
 587		raw_spin_unlock_irqrestore(&db->lock, flags);
 588		debug_print_object(obj, "init");
 589		debug_object_fixup(descr->fixup_init, addr, state);
 590		return;
 591
 592	case ODEBUG_STATE_DESTROYED:
 593		raw_spin_unlock_irqrestore(&db->lock, flags);
 594		debug_print_object(obj, "init");
 595		return;
 596	default:
 597		break;
 598	}
 599
 600	raw_spin_unlock_irqrestore(&db->lock, flags);
 601	if (check_stack)
 602		debug_object_is_on_stack(addr, onstack);
 603}
 604
 605/**
 606 * debug_object_init - debug checks when an object is initialized
 607 * @addr:	address of the object
 608 * @descr:	pointer to an object specific debug description structure
 609 */
 610void debug_object_init(void *addr, const struct debug_obj_descr *descr)
 611{
 612	if (!debug_objects_enabled)
 613		return;
 614
 615	__debug_object_init(addr, descr, 0);
 616}
 617EXPORT_SYMBOL_GPL(debug_object_init);
 618
 619/**
 620 * debug_object_init_on_stack - debug checks when an object on stack is
 621 *				initialized
 622 * @addr:	address of the object
 623 * @descr:	pointer to an object specific debug description structure
 624 */
 625void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
 626{
 627	if (!debug_objects_enabled)
 628		return;
 629
 630	__debug_object_init(addr, descr, 1);
 631}
 632EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
 633
 634/**
 635 * debug_object_activate - debug checks when an object is activated
 636 * @addr:	address of the object
 637 * @descr:	pointer to an object specific debug description structure
 638 * Returns 0 for success, -EINVAL for check failed.
 639 */
 640int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
 641{
 642	enum debug_obj_state state;
 643	struct debug_bucket *db;
 644	struct debug_obj *obj;
 645	unsigned long flags;
 646	int ret;
 647	struct debug_obj o = { .object = addr,
 648			       .state = ODEBUG_STATE_NOTAVAILABLE,
 649			       .descr = descr };
 650
 651	if (!debug_objects_enabled)
 652		return 0;
 653
 654	db = get_bucket((unsigned long) addr);
 655
 656	raw_spin_lock_irqsave(&db->lock, flags);
 657
 658	obj = lookup_object(addr, db);
 659	if (obj) {
 660		bool print_object = false;
 661
 662		switch (obj->state) {
 663		case ODEBUG_STATE_INIT:
 664		case ODEBUG_STATE_INACTIVE:
 665			obj->state = ODEBUG_STATE_ACTIVE;
 666			ret = 0;
 667			break;
 668
 669		case ODEBUG_STATE_ACTIVE:
 670			state = obj->state;
 671			raw_spin_unlock_irqrestore(&db->lock, flags);
 672			debug_print_object(obj, "activate");
 673			ret = debug_object_fixup(descr->fixup_activate, addr, state);
 674			return ret ? 0 : -EINVAL;
 675
 676		case ODEBUG_STATE_DESTROYED:
 677			print_object = true;
 678			ret = -EINVAL;
 679			break;
 680		default:
 681			ret = 0;
 682			break;
 683		}
 684		raw_spin_unlock_irqrestore(&db->lock, flags);
 685		if (print_object)
 686			debug_print_object(obj, "activate");
 687		return ret;
 688	}
 689
 690	raw_spin_unlock_irqrestore(&db->lock, flags);
 691
 692	/*
 693	 * We are here when a static object is activated. We
 694	 * let the type specific code confirm whether this is
  695 * true or not. If true, we just make sure that the
 696	 * static object is tracked in the object tracker. If
 697	 * not, this must be a bug, so we try to fix it up.
 698	 */
 699	if (descr->is_static_object && descr->is_static_object(addr)) {
 700		/* track this static object */
 701		debug_object_init(addr, descr);
 702		debug_object_activate(addr, descr);
 703	} else {
 704		debug_print_object(&o, "activate");
 705		ret = debug_object_fixup(descr->fixup_activate, addr,
 706					ODEBUG_STATE_NOTAVAILABLE);
 707		return ret ? 0 : -EINVAL;
 708	}
 709	return 0;
 710}
 711EXPORT_SYMBOL_GPL(debug_object_activate);
 712
 713/**
 714 * debug_object_deactivate - debug checks when an object is deactivated
 715 * @addr:	address of the object
 716 * @descr:	pointer to an object specific debug description structure
 717 */
 718void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
 719{
 720	struct debug_bucket *db;
 721	struct debug_obj *obj;
 722	unsigned long flags;
 723	bool print_object = false;
 724
 725	if (!debug_objects_enabled)
 726		return;
 727
 728	db = get_bucket((unsigned long) addr);
 729
 730	raw_spin_lock_irqsave(&db->lock, flags);
 731
 732	obj = lookup_object(addr, db);
 733	if (obj) {
 734		switch (obj->state) {
 735		case ODEBUG_STATE_INIT:
 736		case ODEBUG_STATE_INACTIVE:
 737		case ODEBUG_STATE_ACTIVE:
 738			if (!obj->astate)
 739				obj->state = ODEBUG_STATE_INACTIVE;
 740			else
 741				print_object = true;
 742			break;
 743
 744		case ODEBUG_STATE_DESTROYED:
 745			print_object = true;
 746			break;
 747		default:
 748			break;
 749		}
 750	}
 751
 752	raw_spin_unlock_irqrestore(&db->lock, flags);
 753	if (!obj) {
 754		struct debug_obj o = { .object = addr,
 755				       .state = ODEBUG_STATE_NOTAVAILABLE,
 756				       .descr = descr };
 757
 758		debug_print_object(&o, "deactivate");
 759	} else if (print_object) {
 760		debug_print_object(obj, "deactivate");
 761	}
 762}
 763EXPORT_SYMBOL_GPL(debug_object_deactivate);
 764
 765/**
 766 * debug_object_destroy - debug checks when an object is destroyed
 767 * @addr:	address of the object
 768 * @descr:	pointer to an object specific debug description structure
 769 */
 770void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
 771{
 772	enum debug_obj_state state;
 773	struct debug_bucket *db;
 774	struct debug_obj *obj;
 775	unsigned long flags;
 776	bool print_object = false;
 777
 778	if (!debug_objects_enabled)
 779		return;
 780
 781	db = get_bucket((unsigned long) addr);
 782
 783	raw_spin_lock_irqsave(&db->lock, flags);
 784
 785	obj = lookup_object(addr, db);
 786	if (!obj)
 787		goto out_unlock;
 788
 789	switch (obj->state) {
 790	case ODEBUG_STATE_NONE:
 791	case ODEBUG_STATE_INIT:
 792	case ODEBUG_STATE_INACTIVE:
 793		obj->state = ODEBUG_STATE_DESTROYED;
 794		break;
 795	case ODEBUG_STATE_ACTIVE:
 796		state = obj->state;
 797		raw_spin_unlock_irqrestore(&db->lock, flags);
 798		debug_print_object(obj, "destroy");
 799		debug_object_fixup(descr->fixup_destroy, addr, state);
 800		return;
 801
 802	case ODEBUG_STATE_DESTROYED:
 803		print_object = true;
 804		break;
 805	default:
 806		break;
 807	}
 808out_unlock:
 809	raw_spin_unlock_irqrestore(&db->lock, flags);
 810	if (print_object)
 811		debug_print_object(obj, "destroy");
 812}
 813EXPORT_SYMBOL_GPL(debug_object_destroy);
 814
 815/**
 816 * debug_object_free - debug checks when an object is freed
 817 * @addr:	address of the object
 818 * @descr:	pointer to an object specific debug description structure
 819 */
 820void debug_object_free(void *addr, const struct debug_obj_descr *descr)
 821{
 822	enum debug_obj_state state;
 823	struct debug_bucket *db;
 824	struct debug_obj *obj;
 825	unsigned long flags;
 826
 827	if (!debug_objects_enabled)
 828		return;
 829
 830	db = get_bucket((unsigned long) addr);
 831
 832	raw_spin_lock_irqsave(&db->lock, flags);
 833
 834	obj = lookup_object(addr, db);
 835	if (!obj)
 836		goto out_unlock;
 837
 838	switch (obj->state) {
 839	case ODEBUG_STATE_ACTIVE:
 840		state = obj->state;
 841		raw_spin_unlock_irqrestore(&db->lock, flags);
 842		debug_print_object(obj, "free");
 843		debug_object_fixup(descr->fixup_free, addr, state);
 844		return;
 845	default:
 846		hlist_del(&obj->node);
 847		raw_spin_unlock_irqrestore(&db->lock, flags);
 848		free_object(obj);
 849		return;
 850	}
 851out_unlock:
 852	raw_spin_unlock_irqrestore(&db->lock, flags);
 853}
 854EXPORT_SYMBOL_GPL(debug_object_free);
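To see how these hooks fit together, here is a minimal, hypothetical user of the API — a sketch modeled on the selftest further down, not part of this file; struct foo and all foo_* names are invented:

/* Hypothetical object whose lifetime should be tracked. */
struct foo {
	int whatever;
};

/* Called when e.g. an already-active foo is re-initialized. */
static bool foo_fixup_init(void *addr, enum debug_obj_state state)
{
	return false;		/* nothing was fixed up */
}

static const struct debug_obj_descr foo_debug_descr = {
	.name		= "foo",
	.fixup_init	= foo_fixup_init,
};

static void foo_setup(struct foo *f)
{
	debug_object_init(f, &foo_debug_descr);		/* -> INIT */
	debug_object_activate(f, &foo_debug_descr);	/* -> ACTIVE */
}

static void foo_teardown(struct foo *f)
{
	debug_object_deactivate(f, &foo_debug_descr);	/* -> INACTIVE */
	debug_object_free(f, &foo_debug_descr);		/* stop tracking */
}

Real users (timers, work items, RCU heads) follow the same pattern, typically compiled in only when the corresponding CONFIG_DEBUG_OBJECTS_* option is set.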
 855
 856/**
 857 * debug_object_assert_init - debug checks when object should be init-ed
 858 * @addr:	address of the object
 859 * @descr:	pointer to an object specific debug description structure
 860 */
 861void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
 862{
 863	struct debug_bucket *db;
 864	struct debug_obj *obj;
 865	unsigned long flags;
 866
 867	if (!debug_objects_enabled)
 868		return;
 869
 870	db = get_bucket((unsigned long) addr);
 871
 872	raw_spin_lock_irqsave(&db->lock, flags);
 873
 874	obj = lookup_object(addr, db);
 875	if (!obj) {
 876		struct debug_obj o = { .object = addr,
 877				       .state = ODEBUG_STATE_NOTAVAILABLE,
 878				       .descr = descr };
 879
 880		raw_spin_unlock_irqrestore(&db->lock, flags);
 881		/*
 882		 * Maybe the object is static, and we let the type specific
 883		 * code confirm. Track this static object if true, else invoke
 884		 * fixup.
 885		 */
 886		if (descr->is_static_object && descr->is_static_object(addr)) {
 887			/* Track this static object */
 888			debug_object_init(addr, descr);
 889		} else {
 890			debug_print_object(&o, "assert_init");
 891			debug_object_fixup(descr->fixup_assert_init, addr,
 892					   ODEBUG_STATE_NOTAVAILABLE);
 893		}
 894		return;
 895	}
 896
 897	raw_spin_unlock_irqrestore(&db->lock, flags);
 898}
 899EXPORT_SYMBOL_GPL(debug_object_assert_init);
 900
 901/**
 902 * debug_object_active_state - debug checks object usage state machine
 903 * @addr:	address of the object
 904 * @descr:	pointer to an object specific debug description structure
 905 * @expect:	expected state
 906 * @next:	state to move to if expected state is found
 907 */
 908void
 909debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
 910			  unsigned int expect, unsigned int next)
 911{
 912	struct debug_bucket *db;
 913	struct debug_obj *obj;
 914	unsigned long flags;
 915	bool print_object = false;
 916
 917	if (!debug_objects_enabled)
 918		return;
 919
 920	db = get_bucket((unsigned long) addr);
 921
 922	raw_spin_lock_irqsave(&db->lock, flags);
 923
 924	obj = lookup_object(addr, db);
 925	if (obj) {
 926		switch (obj->state) {
 927		case ODEBUG_STATE_ACTIVE:
 928			if (obj->astate == expect)
 929				obj->astate = next;
 930			else
 931				print_object = true;
 932			break;
 933
 934		default:
 935			print_object = true;
 936			break;
 937		}
 938	}
 939
 940	raw_spin_unlock_irqrestore(&db->lock, flags);
 941	if (!obj) {
 942		struct debug_obj o = { .object = addr,
 943				       .state = ODEBUG_STATE_NOTAVAILABLE,
 944				       .descr = descr };
 945
 946		debug_print_object(&o, "active_state");
 947	} else if (print_object) {
 948		debug_print_object(obj, "active_state");
 949	}
 950}
 951EXPORT_SYMBOL_GPL(debug_object_active_state);
 952
 953#ifdef CONFIG_DEBUG_OBJECTS_FREE
 954static void __debug_check_no_obj_freed(const void *address, unsigned long size)
 955{
 956	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
 957	const struct debug_obj_descr *descr;
 958	enum debug_obj_state state;
 959	struct debug_bucket *db;
 960	struct hlist_node *tmp;
 961	struct debug_obj *obj;
 962	int cnt, objs_checked = 0;
 963
 964	saddr = (unsigned long) address;
 965	eaddr = saddr + size;
 966	paddr = saddr & ODEBUG_CHUNK_MASK;
 967	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
 968	chunks >>= ODEBUG_CHUNK_SHIFT;
 969
 970	for (;chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
 971		db = get_bucket(paddr);
 972
 973repeat:
 974		cnt = 0;
 975		raw_spin_lock_irqsave(&db->lock, flags);
 976		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
 977			cnt++;
 978			oaddr = (unsigned long) obj->object;
 979			if (oaddr < saddr || oaddr >= eaddr)
 980				continue;
 981
 982			switch (obj->state) {
 983			case ODEBUG_STATE_ACTIVE:
 984				descr = obj->descr;
 985				state = obj->state;
 986				raw_spin_unlock_irqrestore(&db->lock, flags);
 987				debug_print_object(obj, "free");
 988				debug_object_fixup(descr->fixup_free,
 989						   (void *) oaddr, state);
 990				goto repeat;
 991			default:
 992				hlist_del(&obj->node);
 993				__free_object(obj);
 994				break;
 995			}
 996		}
 997		raw_spin_unlock_irqrestore(&db->lock, flags);
 998
 999		if (cnt > debug_objects_maxchain)
1000			debug_objects_maxchain = cnt;
1001
1002		objs_checked += cnt;
1003	}
1004
1005	if (objs_checked > debug_objects_maxchecked)
1006		debug_objects_maxchecked = objs_checked;
1007
1008	/* Schedule work to actually kmem_cache_free() objects */
1009	if (!READ_ONCE(obj_freeing) && READ_ONCE(obj_nr_tofree)) {
1010		WRITE_ONCE(obj_freeing, true);
1011		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1012	}
1013}
1014
1015void debug_check_no_obj_freed(const void *address, unsigned long size)
1016{
1017	if (debug_objects_enabled)
1018		__debug_check_no_obj_freed(address, size);
1019}
1020#endif
1021
1022#ifdef CONFIG_DEBUG_FS
1023
1024static int debug_stats_show(struct seq_file *m, void *v)
1025{
1026	int cpu, obj_percpu_free = 0;
1027
1028	for_each_possible_cpu(cpu)
1029		obj_percpu_free += per_cpu(percpu_obj_pool.obj_free, cpu);
1030
1031	seq_printf(m, "max_chain     :%d\n", debug_objects_maxchain);
1032	seq_printf(m, "max_checked   :%d\n", debug_objects_maxchecked);
1033	seq_printf(m, "warnings      :%d\n", debug_objects_warnings);
1034	seq_printf(m, "fixups        :%d\n", debug_objects_fixups);
1035	seq_printf(m, "pool_free     :%d\n", READ_ONCE(obj_pool_free) + obj_percpu_free);
1036	seq_printf(m, "pool_pcp_free :%d\n", obj_percpu_free);
1037	seq_printf(m, "pool_min_free :%d\n", obj_pool_min_free);
1038	seq_printf(m, "pool_used     :%d\n", obj_pool_used - obj_percpu_free);
1039	seq_printf(m, "pool_max_used :%d\n", obj_pool_max_used);
1040	seq_printf(m, "on_free_list  :%d\n", READ_ONCE(obj_nr_tofree));
1041	seq_printf(m, "objs_allocated:%d\n", debug_objects_allocated);
1042	seq_printf(m, "objs_freed    :%d\n", debug_objects_freed);
1043	return 0;
1044}
1045DEFINE_SHOW_ATTRIBUTE(debug_stats);
1046
1047static int __init debug_objects_init_debugfs(void)
1048{
1049	struct dentry *dbgdir;
1050
1051	if (!debug_objects_enabled)
1052		return 0;
1053
1054	dbgdir = debugfs_create_dir("debug_objects", NULL);
1055
1056	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1057
1058	return 0;
1059}
1060__initcall(debug_objects_init_debugfs);
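With debugfs mounted at the usual place, this file appears as /sys/kernel/debug/debug_objects/stats. An illustrative dump (the numbers are invented) looks like:

max_chain     :3
max_checked   :12
warnings      :0
fixups        :0
pool_free     :672
pool_pcp_free :98
pool_min_free :213
pool_used     :480
pool_max_used :736
on_free_list  :0
objs_allocated:4096
objs_freed    :3072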
1061
1062#else
1063static inline void debug_objects_init_debugfs(void) { }
1064#endif
1065
1066#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1067
1068/* Random data structure for the self test */
1069struct self_test {
1070	unsigned long	dummy1[6];
1071	int		static_init;
1072	unsigned long	dummy2[3];
1073};
1074
1075static __initconst const struct debug_obj_descr descr_type_test;
1076
1077static bool __init is_static_object(void *addr)
1078{
1079	struct self_test *obj = addr;
1080
1081	return obj->static_init;
1082}
1083
1084/*
1085 * fixup_init is called when:
1086 * - an active object is initialized
1087 */
1088static bool __init fixup_init(void *addr, enum debug_obj_state state)
1089{
1090	struct self_test *obj = addr;
1091
1092	switch (state) {
1093	case ODEBUG_STATE_ACTIVE:
1094		debug_object_deactivate(obj, &descr_type_test);
1095		debug_object_init(obj, &descr_type_test);
1096		return true;
1097	default:
1098		return false;
1099	}
1100}
1101
1102/*
1103 * fixup_activate is called when:
1104 * - an active object is activated
1105 * - an unknown non-static object is activated
1106 */
1107static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1108{
1109	struct self_test *obj = addr;
1110
1111	switch (state) {
1112	case ODEBUG_STATE_NOTAVAILABLE:
1113		return true;
1114	case ODEBUG_STATE_ACTIVE:
1115		debug_object_deactivate(obj, &descr_type_test);
1116		debug_object_activate(obj, &descr_type_test);
1117		return true;
1118
1119	default:
1120		return false;
1121	}
1122}
1123
1124/*
1125 * fixup_destroy is called when:
1126 * - an active object is destroyed
1127 */
1128static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1129{
1130	struct self_test *obj = addr;
1131
1132	switch (state) {
1133	case ODEBUG_STATE_ACTIVE:
1134		debug_object_deactivate(obj, &descr_type_test);
1135		debug_object_destroy(obj, &descr_type_test);
1136		return true;
1137	default:
1138		return false;
1139	}
1140}
1141
1142/*
1143 * fixup_free is called when:
1144 * - an active object is freed
1145 */
1146static bool __init fixup_free(void *addr, enum debug_obj_state state)
1147{
1148	struct self_test *obj = addr;
1149
1150	switch (state) {
1151	case ODEBUG_STATE_ACTIVE:
1152		debug_object_deactivate(obj, &descr_type_test);
1153		debug_object_free(obj, &descr_type_test);
1154		return true;
1155	default:
1156		return false;
1157	}
1158}
1159
1160static int __init
1161check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1162{
1163	struct debug_bucket *db;
1164	struct debug_obj *obj;
1165	unsigned long flags;
1166	int res = -EINVAL;
1167
1168	db = get_bucket((unsigned long) addr);
1169
1170	raw_spin_lock_irqsave(&db->lock, flags);
1171
1172	obj = lookup_object(addr, db);
1173	if (!obj && state != ODEBUG_STATE_NONE) {
1174		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1175		goto out;
1176	}
1177	if (obj && obj->state != state) {
1178		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1179		       obj->state, state);
1180		goto out;
1181	}
1182	if (fixups != debug_objects_fixups) {
1183		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1184		       fixups, debug_objects_fixups);
1185		goto out;
1186	}
1187	if (warnings != debug_objects_warnings) {
1188		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1189		       warnings, debug_objects_warnings);
1190		goto out;
1191	}
1192	res = 0;
1193out:
1194	raw_spin_unlock_irqrestore(&db->lock, flags);
1195	if (res)
1196		debug_objects_enabled = 0;
1197	return res;
1198}
1199
1200static __initconst const struct debug_obj_descr descr_type_test = {
1201	.name			= "selftest",
1202	.is_static_object	= is_static_object,
1203	.fixup_init		= fixup_init,
1204	.fixup_activate		= fixup_activate,
1205	.fixup_destroy		= fixup_destroy,
1206	.fixup_free		= fixup_free,
1207};
1208
1209static __initdata struct self_test obj = { .static_init = 0 };
1210
1211static void __init debug_objects_selftest(void)
1212{
1213	int fixups, oldfixups, warnings, oldwarnings;
1214	unsigned long flags;
1215
1216	local_irq_save(flags);
1217
1218	fixups = oldfixups = debug_objects_fixups;
1219	warnings = oldwarnings = debug_objects_warnings;
1220	descr_test = &descr_type_test;
1221
1222	debug_object_init(&obj, &descr_type_test);
1223	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1224		goto out;
1225	debug_object_activate(&obj, &descr_type_test);
1226	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1227		goto out;
1228	debug_object_activate(&obj, &descr_type_test);
1229	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1230		goto out;
1231	debug_object_deactivate(&obj, &descr_type_test);
1232	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1233		goto out;
1234	debug_object_destroy(&obj, &descr_type_test);
1235	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1236		goto out;
1237	debug_object_init(&obj, &descr_type_test);
1238	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1239		goto out;
1240	debug_object_activate(&obj, &descr_type_test);
1241	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1242		goto out;
1243	debug_object_deactivate(&obj, &descr_type_test);
1244	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1245		goto out;
1246	debug_object_free(&obj, &descr_type_test);
1247	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1248		goto out;
1249
1250	obj.static_init = 1;
1251	debug_object_activate(&obj, &descr_type_test);
1252	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1253		goto out;
1254	debug_object_init(&obj, &descr_type_test);
1255	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1256		goto out;
1257	debug_object_free(&obj, &descr_type_test);
1258	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1259		goto out;
1260
1261#ifdef CONFIG_DEBUG_OBJECTS_FREE
1262	debug_object_init(&obj, &descr_type_test);
1263	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1264		goto out;
1265	debug_object_activate(&obj, &descr_type_test);
1266	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1267		goto out;
1268	__debug_check_no_obj_freed(&obj, sizeof(obj));
1269	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1270		goto out;
1271#endif
1272	pr_info("selftest passed\n");
1273
1274out:
1275	debug_objects_fixups = oldfixups;
1276	debug_objects_warnings = oldwarnings;
1277	descr_test = NULL;
1278
1279	local_irq_restore(flags);
1280}
1281#else
1282static inline void debug_objects_selftest(void) { }
1283#endif
1284
1285/*
1286 * Called during early boot to initialize the hash buckets and link
 1287 * the static object pool objects into the pool list. After this call
1288 * the object tracker is fully operational.
1289 */
1290void __init debug_objects_early_init(void)
1291{
1292	int i;
1293
1294	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1295		raw_spin_lock_init(&obj_hash[i].lock);
1296
1297	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1298		hlist_add_head(&obj_static_pool[i].node, &obj_pool);
1299}
1300
1301/*
1302 * Convert the statically allocated objects to dynamic ones:
1303 */
1304static int __init debug_objects_replace_static_objects(void)
1305{
1306	struct debug_bucket *db = obj_hash;
1307	struct hlist_node *tmp;
1308	struct debug_obj *obj, *new;
1309	HLIST_HEAD(objects);
1310	int i, cnt = 0;
1311
1312	for (i = 0; i < ODEBUG_POOL_SIZE; i++) {
1313		obj = kmem_cache_zalloc(obj_cache, GFP_KERNEL);
1314		if (!obj)
1315			goto free;
1316		hlist_add_head(&obj->node, &objects);
1317	}
1318
1319	/*
 1320	 * debug_objects_mem_init() is now called early, while only one CPU is up
 1321	 * and interrupts are disabled, so it is safe to replace the
1322	 * active object references.
1323	 */
1324
1325	/* Remove the statically allocated objects from the pool */
1326	hlist_for_each_entry_safe(obj, tmp, &obj_pool, node)
1327		hlist_del(&obj->node);
1328	/* Move the allocated objects to the pool */
1329	hlist_move_list(&objects, &obj_pool);
1330
1331	/* Replace the active object references */
1332	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1333		hlist_move_list(&db->list, &objects);
1334
1335		hlist_for_each_entry(obj, &objects, node) {
1336			new = hlist_entry(obj_pool.first, typeof(*obj), node);
1337			hlist_del(&new->node);
1338			/* copy object data */
1339			*new = *obj;
1340			hlist_add_head(&new->node, &db->list);
1341			cnt++;
1342		}
1343	}
1344
1345	pr_debug("%d of %d active objects replaced\n",
1346		 cnt, obj_pool_used);
1347	return 0;
1348free:
1349	hlist_for_each_entry_safe(obj, tmp, &objects, node) {
1350		hlist_del(&obj->node);
1351		kmem_cache_free(obj_cache, obj);
1352	}
1353	return -ENOMEM;
1354}
1355
1356/*
 1357 * Called after the kmem_caches are functional to set up a dedicated
 1358 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
 1359 * prevents the debug code from being called on kmem_cache_free() for
 1360 * the debug tracker objects, avoiding recursive calls.
1361 */
1362void __init debug_objects_mem_init(void)
1363{
1364	int cpu, extras;
1365
1366	if (!debug_objects_enabled)
1367		return;
1368
1369	/*
1370	 * Initialize the percpu object pools
1371	 *
1372	 * Initialization is not strictly necessary, but was done for
1373	 * completeness.
1374	 */
1375	for_each_possible_cpu(cpu)
1376		INIT_HLIST_HEAD(&per_cpu(percpu_obj_pool.free_objs, cpu));
1377
1378	obj_cache = kmem_cache_create("debug_objects_cache",
1379				      sizeof (struct debug_obj), 0,
1380				      SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE,
1381				      NULL);
1382
1383	if (!obj_cache || debug_objects_replace_static_objects()) {
1384		debug_objects_enabled = 0;
1385		kmem_cache_destroy(obj_cache);
1386		pr_warn("out of memory.\n");
1387	} else
1388		debug_objects_selftest();
1389
1390#ifdef CONFIG_HOTPLUG_CPU
1391	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1392					object_cpu_offline);
1393#endif
1394
1395	/*
1396	 * Increase the thresholds for allocating and freeing objects
1397	 * according to the number of possible CPUs available in the system.
1398	 */
1399	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1400	debug_objects_pool_size += extras;
1401	debug_objects_pool_min_level += extras;
1402}
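Worked example: on a machine with 8 possible CPUs, extras = 8 * ODEBUG_BATCH_SIZE = 128, so the pool target grows from 1024 to 1152 objects and the refill threshold from 256 to 384.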
v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * Generic infrastructure for lifetime debugging of objects.
   4 *
   5 * Copyright (C) 2008, Thomas Gleixner <tglx@linutronix.de>
   6 */
   7
   8#define pr_fmt(fmt) "ODEBUG: " fmt
   9
  10#include <linux/cpu.h>
  11#include <linux/debugobjects.h>
  12#include <linux/debugfs.h>
  13#include <linux/hash.h>
  14#include <linux/kmemleak.h>
  15#include <linux/sched.h>
  16#include <linux/sched/loadavg.h>
  17#include <linux/sched/task_stack.h>
  18#include <linux/seq_file.h>
  19#include <linux/slab.h>
  20#include <linux/static_key.h>
  21
  22#define ODEBUG_HASH_BITS	14
  23#define ODEBUG_HASH_SIZE	(1 << ODEBUG_HASH_BITS)
  24
  25/* Must be power of two */
  26#define ODEBUG_BATCH_SIZE	16
  27
  28/* Initial values. Must all be a multiple of batch size */
  29#define ODEBUG_POOL_SIZE	(64 * ODEBUG_BATCH_SIZE)
  30#define ODEBUG_POOL_MIN_LEVEL	(ODEBUG_POOL_SIZE / 4)
  31
  32#define ODEBUG_POOL_PERCPU_SIZE	(8 * ODEBUG_BATCH_SIZE)
  33
  34#define ODEBUG_CHUNK_SHIFT	PAGE_SHIFT
  35#define ODEBUG_CHUNK_SIZE	(1 << ODEBUG_CHUNK_SHIFT)
  36#define ODEBUG_CHUNK_MASK	(~(ODEBUG_CHUNK_SIZE - 1))
  37
  38/*
  39 * We limit the freeing of debug objects via workqueue at a maximum
  40 * frequency of 10Hz and about 1024 objects for each freeing operation.
  41 * So it is freeing at most 10k debug objects per second.
  42 */
  43#define ODEBUG_FREE_WORK_MAX	(1024 / ODEBUG_BATCH_SIZE)
  44#define ODEBUG_FREE_WORK_DELAY	DIV_ROUND_UP(HZ, 10)
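Note that the unit has changed relative to v5.14: the v6.13 pools are managed in whole batches, so ODEBUG_FREE_WORK_MAX is now counted in batches — 1024 / 16 = 64 batches of 16 objects — and one worker run still frees at most about 1024 objects.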
  45
  46struct debug_bucket {
  47	struct hlist_head	list;
  48	raw_spinlock_t		lock;
  49};
  50
  51struct pool_stats {
  52	unsigned int		cur_used;
  53	unsigned int		max_used;
  54	unsigned int		min_fill;
  55};
  56
  57struct obj_pool {
  58	struct hlist_head	objects;
  59	unsigned int		cnt;
  60	unsigned int		min_cnt;
  61	unsigned int		max_cnt;
  62	struct pool_stats	stats;
  63} ____cacheline_aligned;
  64
  65
  66static DEFINE_PER_CPU_ALIGNED(struct obj_pool, pool_pcpu)  = {
  67	.max_cnt	= ODEBUG_POOL_PERCPU_SIZE,
  68};
  69
  70static struct debug_bucket	obj_hash[ODEBUG_HASH_SIZE];
  71
  72static struct debug_obj		obj_static_pool[ODEBUG_POOL_SIZE] __initdata;
  73
  74static DEFINE_RAW_SPINLOCK(pool_lock);
  75
  76static struct obj_pool pool_global = {
  77	.min_cnt		= ODEBUG_POOL_MIN_LEVEL,
  78	.max_cnt		= ODEBUG_POOL_SIZE,
  79	.stats			= {
  80		.min_fill	= ODEBUG_POOL_SIZE,
  81	},
  82};
  83
  84static struct obj_pool pool_to_free = {
  85	.max_cnt	= UINT_MAX,
  86};
  87
  88static HLIST_HEAD(pool_boot);
  89
  90static unsigned long		avg_usage;
  91static bool			obj_freeing;
  92
  93static int __data_racy			debug_objects_maxchain __read_mostly;
  94static int __data_racy __maybe_unused	debug_objects_maxchecked __read_mostly;
  95static int __data_racy			debug_objects_fixups __read_mostly;
  96static int __data_racy			debug_objects_warnings __read_mostly;
  97static bool __data_racy			debug_objects_enabled __read_mostly
  98					= CONFIG_DEBUG_OBJECTS_ENABLE_DEFAULT;
  99
 100static const struct debug_obj_descr	*descr_test  __read_mostly;
 101static struct kmem_cache		*obj_cache __ro_after_init;
 102
 103/*
 104 * Track numbers of kmem_cache_alloc()/free() calls done.
 105 */
 106static int __data_racy		debug_objects_allocated;
 107static int __data_racy		debug_objects_freed;
 108
 109static void free_obj_work(struct work_struct *work);
 110static DECLARE_DELAYED_WORK(debug_obj_work, free_obj_work);
 111
 112static DEFINE_STATIC_KEY_FALSE(obj_cache_enabled);
 113
 114static int __init enable_object_debug(char *str)
 115{
 116	debug_objects_enabled = true;
 117	return 0;
 118}
 119early_param("debug_objects", enable_object_debug);
 120
 121static int __init disable_object_debug(char *str)
 122{
 123	debug_objects_enabled = false;
 124	return 0;
 125}
 126early_param("no_debug_objects", disable_object_debug);
 127
 128static const char *obj_states[ODEBUG_STATE_MAX] = {
 129	[ODEBUG_STATE_NONE]		= "none",
 130	[ODEBUG_STATE_INIT]		= "initialized",
 131	[ODEBUG_STATE_INACTIVE]		= "inactive",
 132	[ODEBUG_STATE_ACTIVE]		= "active",
 133	[ODEBUG_STATE_DESTROYED]	= "destroyed",
 134	[ODEBUG_STATE_NOTAVAILABLE]	= "not available",
 135};
 136
 137static __always_inline unsigned int pool_count(struct obj_pool *pool)
 138{
 139	return READ_ONCE(pool->cnt);
 140}
 141
 142static __always_inline bool pool_should_refill(struct obj_pool *pool)
 143{
 144	return pool_count(pool) < pool->min_cnt;
 145}
 146
 147static __always_inline bool pool_must_refill(struct obj_pool *pool)
 148{
 149	return pool_count(pool) < pool->min_cnt / 2;
 150}
 151
 152static bool pool_move_batch(struct obj_pool *dst, struct obj_pool *src)
 153{
 154	struct hlist_node *last, *next_batch, *first_batch;
 155	struct debug_obj *obj;
 156
 157	if (dst->cnt >= dst->max_cnt || !src->cnt)
 158		return false;
 159
 160	first_batch = src->objects.first;
 161	obj = hlist_entry(first_batch, typeof(*obj), node);
 162	last = obj->batch_last;
 163	next_batch = last->next;
 164
 165	/* Move the next batch to the front of the source pool */
 166	src->objects.first = next_batch;
 167	if (next_batch)
 168		next_batch->pprev = &src->objects.first;
 169
 170	/* Add the extracted batch to the destination pool */
 171	last->next = dst->objects.first;
 172	if (last->next)
 173		last->next->pprev = &last->next;
 174	first_batch->pprev = &dst->objects.first;
 175	dst->objects.first = first_batch;
 176
 177	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
 178	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
 179	return true;
 180}
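The O(1) splice above relies on an invariant maintained by kmem_alloc_batch() and pcpu_free() below: every object in an ODEBUG_BATCH_SIZE-aligned batch caches a pointer to the batch's last node in obj->batch_last, so a whole batch can be cut out or spliced in without walking its 16 nodes. Roughly:

	objects: [o1 o2 ... o16][o17 o18 ... o32] ...
	o1->batch_last  == &o16->node
	o17->batch_last == &o32->node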
 181
 182static bool pool_push_batch(struct obj_pool *dst, struct hlist_head *head)
 183{
 184	struct hlist_node *last;
 185	struct debug_obj *obj;
 186
 187	if (dst->cnt >= dst->max_cnt)
 188		return false;
 189
 190	obj = hlist_entry(head->first, typeof(*obj), node);
 191	last = obj->batch_last;
 192
 193	hlist_splice_init(head, last, &dst->objects);
 194	WRITE_ONCE(dst->cnt, dst->cnt + ODEBUG_BATCH_SIZE);
 195	return true;
 196}
 197
 198static bool pool_pop_batch(struct hlist_head *head, struct obj_pool *src)
 199{
 200	struct hlist_node *last, *next;
 201	struct debug_obj *obj;
 202
 203	if (!src->cnt)
 204		return false;
 205
 206	/* Move the complete list to the head */
 207	hlist_move_list(&src->objects, head);
 208
 209	obj = hlist_entry(head->first, typeof(*obj), node);
 210	last = obj->batch_last;
 211	next = last->next;
 212	/* Disconnect the batch from the list */
 213	last->next = NULL;
 214
 215	/* Move the node after last back to the source pool. */
 216	src->objects.first = next;
 217	if (next)
 218		next->pprev = &src->objects.first;
 219
 220	WRITE_ONCE(src->cnt, src->cnt - ODEBUG_BATCH_SIZE);
 221	return true;
 222}
 223
 224static struct debug_obj *__alloc_object(struct hlist_head *list)
 225{
 226	struct debug_obj *obj;
 227
 228	if (unlikely(!list->first))
 229		return NULL;
 230
 231	obj = hlist_entry(list->first, typeof(*obj), node);
 232	hlist_del(&obj->node);
 233	return obj;
 234}
 235
 236static void pcpu_refill_stats(void)
 237{
 238	struct pool_stats *stats = &pool_global.stats;
 239
 240	WRITE_ONCE(stats->cur_used, stats->cur_used + ODEBUG_BATCH_SIZE);
 241
 242	if (stats->cur_used > stats->max_used)
 243		stats->max_used = stats->cur_used;
 244
 245	if (pool_global.cnt < stats->min_fill)
 246		stats->min_fill = pool_global.cnt;
 247}
 248
 249static struct debug_obj *pcpu_alloc(void)
 250{
 251	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
 252
 253	lockdep_assert_irqs_disabled();
 254
 255	for (;;) {
 256		struct debug_obj *obj = __alloc_object(&pcp->objects);
 257
 258		if (likely(obj)) {
 259			pcp->cnt--;
 260			/*
 261			 * If this emptied a batch try to refill from the
 262			 * free pool. Don't do that if this was the top-most
 263			 * batch as pcpu_free() expects the per CPU pool
 264			 * to be less than ODEBUG_POOL_PERCPU_SIZE.
 265			 */
 266			if (unlikely(pcp->cnt < (ODEBUG_POOL_PERCPU_SIZE - ODEBUG_BATCH_SIZE) &&
 267				     !(pcp->cnt % ODEBUG_BATCH_SIZE))) {
 268				/*
 269				 * Don't try to allocate from the regular pool here
 270				 * to not exhaust it prematurely.
 271				 */
 272				if (pool_count(&pool_to_free)) {
 273					guard(raw_spinlock)(&pool_lock);
 274					pool_move_batch(pcp, &pool_to_free);
 275					pcpu_refill_stats();
 276				}
 277			}
 278			return obj;
 279		}
 280
 281		guard(raw_spinlock)(&pool_lock);
 282		if (!pool_move_batch(pcp, &pool_to_free)) {
 283			if (!pool_move_batch(pcp, &pool_global))
 284				return NULL;
 285		}
 286		pcpu_refill_stats();
 287	}
 288}
 289
 290static void pcpu_free(struct debug_obj *obj)
 291{
 292	struct obj_pool *pcp = this_cpu_ptr(&pool_pcpu);
 293	struct debug_obj *first;
 294
 295	lockdep_assert_irqs_disabled();
 296
 297	if (!(pcp->cnt % ODEBUG_BATCH_SIZE)) {
 298		obj->batch_last = &obj->node;
 299	} else {
 300		first = hlist_entry(pcp->objects.first, typeof(*first), node);
 301		obj->batch_last = first->batch_last;
 302	}
 303	hlist_add_head(&obj->node, &pcp->objects);
 304	pcp->cnt++;
 305
 306	/* Pool full ? */
 307	if (pcp->cnt < ODEBUG_POOL_PERCPU_SIZE)
 308		return;
 309
 310	/* Remove a batch from the per CPU pool */
 311	guard(raw_spinlock)(&pool_lock);
 312	/* Try to fit the batch into the pool_global first */
 313	if (!pool_move_batch(&pool_global, pcp))
 314		pool_move_batch(&pool_to_free, pcp);
 315	WRITE_ONCE(pool_global.stats.cur_used, pool_global.stats.cur_used - ODEBUG_BATCH_SIZE);
 316}
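Note the hysteresis this creates: the per-CPU pool grows to ODEBUG_POOL_PERCPU_SIZE (8 batches, 128 objects), at which point exactly one batch of 16 is pushed back to the global pools, so the per-CPU count oscillates between 112 and 128 instead of bouncing single objects across pool_lock on every free.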
 317
 318static void free_object_list(struct hlist_head *head)
 319{
 320	struct hlist_node *tmp;
 321	struct debug_obj *obj;
 322	int cnt = 0;
 323
 324	hlist_for_each_entry_safe(obj, tmp, head, node) {
 325		hlist_del(&obj->node);
 326		kmem_cache_free(obj_cache, obj);
 327		cnt++;
 328	}
 329	debug_objects_freed += cnt;
 330}
 331
 332static void fill_pool_from_freelist(void)
 333{
 334	static unsigned long state;
 335
 336	/*
 337	 * Reuse objs from the global obj_to_free list; they will be
 338	 * reinitialized when allocating.
 339	 */
 340	if (!pool_count(&pool_to_free))
 341		return;
 342
 343	/*
 344	 * Prevent the context from being scheduled or interrupted after
  345	 * setting the state flag.
  346	 */
 347	guard(irqsave)();
 348
 349	/*
 350	 * Avoid lock contention on &pool_lock and avoid making the cache
 351	 * line exclusive by testing the bit before attempting to set it.
 352	 */
 353	if (test_bit(0, &state) || test_and_set_bit(0, &state))
 354		return;
 355
 356	/* Avoid taking the lock when there is no work to do */
 357	while (pool_should_refill(&pool_global) && pool_count(&pool_to_free)) {
 358		guard(raw_spinlock)(&pool_lock);
 359		/* Move a batch if possible */
 360		pool_move_batch(&pool_global, &pool_to_free);
 361	}
 362	clear_bit(0, &state);
 363}
 364
 365static bool kmem_alloc_batch(struct hlist_head *head, struct kmem_cache *cache, gfp_t gfp)
 366{
 367	struct hlist_node *last = NULL;
 368	struct debug_obj *obj;
 369
 370	for (int cnt = 0; cnt < ODEBUG_BATCH_SIZE; cnt++) {
 371		obj = kmem_cache_zalloc(cache, gfp);
 372		if (!obj) {
 373			free_object_list(head);
 374			return false;
 375		}
 376		debug_objects_allocated++;
 377
 378		if (!last)
 379			last = &obj->node;
 380		obj->batch_last = last;
 381
 382		hlist_add_head(&obj->node, head);
 383	}
 384	return true;
 385}
 386
 387static void fill_pool(void)
 388{
 389	static atomic_t cpus_allocating;
 390
 391	/*
 392	 * Avoid allocation and lock contention when:
 393	 *   - One other CPU is already allocating
 394	 *   - the global pool has not reached the critical level yet
 395	 */
 396	if (!pool_must_refill(&pool_global) && atomic_read(&cpus_allocating))
 397		return;
 398
 399	atomic_inc(&cpus_allocating);
 400	while (pool_should_refill(&pool_global)) {
 401		HLIST_HEAD(head);
 402
 403		if (!kmem_alloc_batch(&head, obj_cache, __GFP_HIGH | __GFP_NOWARN))
 404			break;
 405
 406		guard(raw_spinlock_irqsave)(&pool_lock);
 407		if (!pool_push_batch(&pool_global, &head))
 408			pool_push_batch(&pool_to_free, &head);
 409	}
 410	atomic_dec(&cpus_allocating);
 411}
 412
 413/*
 414 * Lookup an object in the hash bucket.
 415 */
 416static struct debug_obj *lookup_object(void *addr, struct debug_bucket *b)
 417{
 418	struct debug_obj *obj;
 419	int cnt = 0;
 420
 421	hlist_for_each_entry(obj, &b->list, node) {
 422		cnt++;
 423		if (obj->object == addr)
 424			return obj;
 425	}
 426	if (cnt > debug_objects_maxchain)
 427		debug_objects_maxchain = cnt;
 428
 429	return NULL;
 430}
 431
 432static void calc_usage(void)
 433{
 434	static DEFINE_RAW_SPINLOCK(avg_lock);
 435	static unsigned long avg_period;
 436	unsigned long cur, now = jiffies;
 437
 438	if (!time_after_eq(now, READ_ONCE(avg_period)))
 439		return;
 440
 441	if (!raw_spin_trylock(&avg_lock))
 442		return;
 443
 444	WRITE_ONCE(avg_period, now + msecs_to_jiffies(10));
 445	cur = READ_ONCE(pool_global.stats.cur_used) * ODEBUG_FREE_WORK_MAX;
 446	WRITE_ONCE(avg_usage, calc_load(avg_usage, EXP_5, cur));
 447	raw_spin_unlock(&avg_lock);
 448}
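calc_load() here is the scheduler's fixed-point exponential moving average from <linux/sched/loadavg.h>: roughly avg = (avg * EXP_5 + cur * (FIXED_1 - EXP_5)) / FIXED_1, with FIXED_1 = 2048 and EXP_5 = 2014, so each 10ms sample contributes about 34/2048 ≈ 1.7% — allocation bursts are smoothed into a slowly-decaying usage signal for free_obj_work() below. Note that cur_used is scaled by ODEBUG_FREE_WORK_MAX before averaging.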
 449
 450static struct debug_obj *alloc_object(void *addr, struct debug_bucket *b,
 451				      const struct debug_obj_descr *descr)
 452{
 453	struct debug_obj *obj;
 454
 455	calc_usage();
 456
 457	if (static_branch_likely(&obj_cache_enabled))
 458		obj = pcpu_alloc();
 459	else
 460		obj = __alloc_object(&pool_boot);
 461
 462	if (likely(obj)) {
 463		obj->object = addr;
 464		obj->descr  = descr;
 465		obj->state  = ODEBUG_STATE_NONE;
 466		obj->astate = 0;
 467		hlist_add_head(&obj->node, &b->list);
 468	}
 469	return obj;
 470}
 471
 472/* workqueue function to free objects. */
 473static void free_obj_work(struct work_struct *work)
 474{
 475	static unsigned long last_use_avg;
 476	unsigned long cur_used, last_used, delta;
 477	unsigned int max_free = 0;
 478
 479	WRITE_ONCE(obj_freeing, false);
 480
 481	/* Rate limit freeing based on current use average */
 482	cur_used = READ_ONCE(avg_usage);
 483	last_used = last_use_avg;
 484	last_use_avg = cur_used;
 485
 486	if (!pool_count(&pool_to_free))
 487		return;
 488
 489	if (cur_used <= last_used) {
 490		delta = (last_used - cur_used) / ODEBUG_FREE_WORK_MAX;
 491		max_free = min(delta, ODEBUG_FREE_WORK_MAX);
 492	}
 493
 494	for (int cnt = 0; cnt < ODEBUG_FREE_WORK_MAX; cnt++) {
 495		HLIST_HEAD(tofree);
 496
 497		/* Acquire and drop the lock for each batch */
 498		scoped_guard(raw_spinlock_irqsave, &pool_lock) {
 499			if (!pool_to_free.cnt)
 500				return;
 501
 502			/* Refill the global pool if possible */
 503			if (pool_move_batch(&pool_global, &pool_to_free)) {
 504				/* Don't free as there seems to be demand */
 505				max_free = 0;
 506			} else if (max_free) {
 507				pool_pop_batch(&tofree, &pool_to_free);
 508				max_free--;
 509			} else {
 510				return;
 511			}
 512		}
 513		free_object_list(&tofree);
 514	}
 515}
 516
 517static void __free_object(struct debug_obj *obj)
 518{
 519	guard(irqsave)();
 520	if (static_branch_likely(&obj_cache_enabled))
 521		pcpu_free(obj);
 522	else
 523		hlist_add_head(&obj->node, &pool_boot);
 524}
 525
 526/*
 527 * Put the object back into the pool and schedule work to free objects
 528 * if necessary.
 529 */
 530static void free_object(struct debug_obj *obj)
 531{
 532	__free_object(obj);
 533	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
 534		WRITE_ONCE(obj_freeing, true);
 535		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
 536	}
 537}
 538
 539static void put_objects(struct hlist_head *list)
 540{
 541	struct hlist_node *tmp;
 542	struct debug_obj *obj;
 543
 544	/*
 545	 * Using free_object() puts the objects into reuse or schedules
 546	 * them for freeing and it gets all the accounting correct.
 547	 */
 548	hlist_for_each_entry_safe(obj, tmp, list, node) {
 549		hlist_del(&obj->node);
 550		free_object(obj);
 551	}
 552}
 553
 554#ifdef CONFIG_HOTPLUG_CPU
 555static int object_cpu_offline(unsigned int cpu)
 556{
 557	/* Remote access is safe as the CPU is dead already */
 558	struct obj_pool *pcp = per_cpu_ptr(&pool_pcpu, cpu);
 559
 560	put_objects(&pcp->objects);
 561	pcp->cnt = 0;
 562	return 0;
 563}
 564#endif
 565
 566/* Out of memory. Free all objects from the hash */
 567static void debug_objects_oom(void)
 568{
 569	struct debug_bucket *db = obj_hash;
 570	HLIST_HEAD(freelist);
 571
 572	pr_warn("Out of memory. ODEBUG disabled\n");
 573
 574	for (int i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
 575		scoped_guard(raw_spinlock_irqsave, &db->lock)
 576			hlist_move_list(&db->list, &freelist);
 577
 578		put_objects(&freelist);
 579	}
 580}
 581
 582/*
 583 * We use the pfn of the address for the hash. That way we can check
 584 * for freed objects simply by checking the affected bucket.
 585 */
 586static struct debug_bucket *get_bucket(unsigned long addr)
 587{
 588	unsigned long hash;
 589
 590	hash = hash_long((addr >> ODEBUG_CHUNK_SHIFT), ODEBUG_HASH_BITS);
 591	return &obj_hash[hash];
 592}
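/*
 * Illustrative example (not part of the code): with 4K chunks the
 * addresses 0x1000 and 0x1fff share chunk number 0x1 and therefore
 * always hash to the same bucket, while 0x2000 starts a new chunk:
 *
 *	get_bucket(0x1000) == get_bucket(0x1fff)
 *	get_bucket(0x1fff) != get_bucket(0x2000)	(barring hash collisions)
 */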
 593
 594static void debug_print_object(struct debug_obj *obj, char *msg)
 595{
 596	const struct debug_obj_descr *descr = obj->descr;
 597	static int limit;
 598
 599	/*
 600	 * Don't report if lookup_object_or_alloc() by the current thread
 601	 * failed because lookup_object_or_alloc()/debug_objects_oom() by a
 602	 * concurrent thread turned off debug_objects_enabled and cleared
 603	 * the hash buckets.
 604	 */
 605	if (!debug_objects_enabled)
 606		return;
 607
 608	if (limit < 5 && descr != descr_test) {
 609		void *hint = descr->debug_hint ?
 610			descr->debug_hint(obj->object) : NULL;
 611		limit++;
 612		WARN(1, KERN_ERR "ODEBUG: %s %s (active state %u) "
 613				 "object: %p object type: %s hint: %pS\n",
 614			msg, obj_states[obj->state], obj->astate,
 615			obj->object, descr->name, hint);
 616	}
 617	debug_objects_warnings++;
 618}
 619
 620/*
 621 * Try to repair the damage, so we have a better chance to get useful
 622 * debug output.
 623 */
 624static bool
 625debug_object_fixup(bool (*fixup)(void *addr, enum debug_obj_state state),
 626		   void *addr, enum debug_obj_state state)
 627{
 628	if (fixup && fixup(addr, state)) {
 629		debug_objects_fixups++;
 630		return true;
 631	}
 632	return false;
 633}
 634
 635static void debug_object_is_on_stack(void *addr, int onstack)
 636{
 637	int is_on_stack;
 638	static int limit;
 639
 640	if (limit > 4)
 641		return;
 642
 643	is_on_stack = object_is_on_stack(addr);
 644	if (is_on_stack == onstack)
 645		return;
 646
 647	limit++;
 648	if (is_on_stack)
 649		pr_warn("object %p is on stack %p, but NOT annotated.\n", addr,
 650			 task_stack_page(current));
 651	else
 652		pr_warn("object %p is NOT on stack %p, but annotated.\n", addr,
 653			 task_stack_page(current));
 654
 655	WARN_ON(1);
 656}
 657
 658static struct debug_obj *lookup_object_or_alloc(void *addr, struct debug_bucket *b,
 659						const struct debug_obj_descr *descr,
 660						bool onstack, bool alloc_ifstatic)
 661{
 662	struct debug_obj *obj = lookup_object(addr, b);
 663	enum debug_obj_state state = ODEBUG_STATE_NONE;
 664
 665	if (likely(obj))
 666		return obj;
 667
 668	/*
 669	 * debug_object_init() unconditionally allocates untracked
 670	 * objects. It does not matter whether it is a static object or
 671	 * not.
 672	 *
 673	 * debug_object_assert_init() and debug_object_activate() allow
 674	 * allocation only if the descriptor callback confirms that the
 675	 * object is static and considered initialized. For non-static
 676	 * objects the allocation needs to be done from the fixup callback.
 677	 */
 678	if (unlikely(alloc_ifstatic)) {
 679		if (!descr->is_static_object || !descr->is_static_object(addr))
 680			return ERR_PTR(-ENOENT);
 681		/* Statically allocated objects are considered initialized */
 682		state = ODEBUG_STATE_INIT;
 683	}
 684
 685	obj = alloc_object(addr, b, descr);
 686	if (likely(obj)) {
 687		obj->state = state;
 688		debug_object_is_on_stack(addr, onstack);
 689		return obj;
 690	}
 691
 692	/* Out of memory. Do the cleanup outside of the locked region */
 693	debug_objects_enabled = false;
 694	return NULL;
 695}
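/*
 * Illustrative sketch (hypothetical descriptor, not from this file): a
 * user wanting the static-object path above must supply a callback
 * which recognizes compile-time initialized instances, e.g.:
 *
 *	static bool mydrv_is_static(void *addr)
 *	{
 *		struct mydrv_obj *o = addr;
 *
 *		return o->flags & MYDRV_STATICALLY_INITIALIZED;
 *	}
 *
 *	static const struct debug_obj_descr mydrv_descr = {
 *		.name			= "mydrv_obj",
 *		.is_static_object	= mydrv_is_static,
 *	};
 */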
 696
 697static void debug_objects_fill_pool(void)
 698{
 699	if (!static_branch_likely(&obj_cache_enabled))
 700		return;
 701
 702	if (likely(!pool_should_refill(&pool_global)))
 703		return;
 704
 705	/* Try reusing objects from the pool_to_free list */
 706	fill_pool_from_freelist();
 707
 708	if (likely(!pool_should_refill(&pool_global)))
 709		return;
 710
 711	/*
 712	 * On RT enabled kernels the pool refill must happen in preemptible
 713	 * context -- for !RT kernels we rely on the fact that spinlock_t and
 714	 * raw_spinlock_t are basically the same type and this lock-type
 715	 * inversion works just fine.
 716	 */
 717	if (!IS_ENABLED(CONFIG_PREEMPT_RT) || preemptible()) {
 718		/*
 719		 * Annotate away the spinlock_t inside raw_spinlock_t warning
 720		 * by temporarily raising the wait-type to WAIT_SLEEP, matching
 721		 * the preemptible() condition above.
 722		 */
 723		static DEFINE_WAIT_OVERRIDE_MAP(fill_pool_map, LD_WAIT_SLEEP);
 724		lock_map_acquire_try(&fill_pool_map);
 725		fill_pool();
 726		lock_map_release(&fill_pool_map);
 727	}
 728}
 729
 730static void
 731__debug_object_init(void *addr, const struct debug_obj_descr *descr, int onstack)
 732{
 733	struct debug_obj *obj, o;
 734	struct debug_bucket *db;
 735	unsigned long flags;
 736
 737	debug_objects_fill_pool();
 738
 739	db = get_bucket((unsigned long) addr);
 740
 741	raw_spin_lock_irqsave(&db->lock, flags);
 742
 743	obj = lookup_object_or_alloc(addr, db, descr, onstack, false);
 744	if (unlikely(!obj)) {
 745		raw_spin_unlock_irqrestore(&db->lock, flags);
 746		debug_objects_oom();
 747		return;
 748	}
 749
 750	switch (obj->state) {
 751	case ODEBUG_STATE_NONE:
 752	case ODEBUG_STATE_INIT:
 753	case ODEBUG_STATE_INACTIVE:
 754		obj->state = ODEBUG_STATE_INIT;
 755		raw_spin_unlock_irqrestore(&db->lock, flags);
 756		return;
 757	default:
 758		break;
 759	}
 760
 761	o = *obj;
 762	raw_spin_unlock_irqrestore(&db->lock, flags);
 763	debug_print_object(&o, "init");
 764
 765	if (o.state == ODEBUG_STATE_ACTIVE)
 766		debug_object_fixup(descr->fixup_init, addr, o.state);
 767}
 768
 769/**
 770 * debug_object_init - debug checks when an object is initialized
 771 * @addr:	address of the object
 772 * @descr:	pointer to an object specific debug description structure
 773 */
 774void debug_object_init(void *addr, const struct debug_obj_descr *descr)
 775{
 776	if (!debug_objects_enabled)
 777		return;
 778
 779	__debug_object_init(addr, descr, 0);
 780}
 781EXPORT_SYMBOL_GPL(debug_object_init);
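/*
 * Illustrative usage (hypothetical caller, reusing the mydrv_descr
 * sketch above): the tracked object is announced before first use:
 *
 *	void mydrv_obj_setup(struct mydrv_obj *o)
 *	{
 *		debug_object_init(o, &mydrv_descr);
 *	}
 */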
 782
 783/**
 784 * debug_object_init_on_stack - debug checks when an object on stack is
 785 *				initialized
 786 * @addr:	address of the object
 787 * @descr:	pointer to an object specific debug description structure
 788 */
 789void debug_object_init_on_stack(void *addr, const struct debug_obj_descr *descr)
 790{
 791	if (!debug_objects_enabled)
 792		return;
 793
 794	__debug_object_init(addr, descr, 1);
 795}
 796EXPORT_SYMBOL_GPL(debug_object_init_on_stack);
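/*
 * Illustrative sketch (hypothetical caller): objects living on the
 * stack must use the on-stack variant and have to be removed from
 * tracking before the stack frame goes away:
 *
 *	void mydrv_sync_op(void)
 *	{
 *		struct mydrv_obj o;
 *
 *		debug_object_init_on_stack(&o, &mydrv_descr);
 *		mydrv_submit_and_wait(&o);
 *		debug_object_free(&o, &mydrv_descr);
 *	}
 */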
 797
 798/**
 799 * debug_object_activate - debug checks when an object is activated
 800 * @addr:	address of the object
 801 * @descr:	pointer to an object specific debug description structure
 802 * Returns 0 on success, -EINVAL if the check failed.
 803 */
 804int debug_object_activate(void *addr, const struct debug_obj_descr *descr)
 805{
 806	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 807	struct debug_bucket *db;
 808	struct debug_obj *obj;
 809	unsigned long flags;
 810
 811	if (!debug_objects_enabled)
 812		return 0;
 813
 814	debug_objects_fill_pool();
 815
 816	db = get_bucket((unsigned long) addr);
 817
 818	raw_spin_lock_irqsave(&db->lock, flags);
 819
 820	obj = lookup_object_or_alloc(addr, db, descr, false, true);
 821	if (unlikely(!obj)) {
 822		raw_spin_unlock_irqrestore(&db->lock, flags);
 823		debug_objects_oom();
 824		return 0;
 825	} else if (likely(!IS_ERR(obj))) {
 826		switch (obj->state) {
 827		case ODEBUG_STATE_ACTIVE:
 828		case ODEBUG_STATE_DESTROYED:
 829			o = *obj;
 830			break;
 831		case ODEBUG_STATE_INIT:
 832		case ODEBUG_STATE_INACTIVE:
 833			obj->state = ODEBUG_STATE_ACTIVE;
 834			fallthrough;
 835		default:
 836			raw_spin_unlock_irqrestore(&db->lock, flags);
 837			return 0;
 838		}
 839	}
 840
 841	raw_spin_unlock_irqrestore(&db->lock, flags);
 842	debug_print_object(&o, "activate");
 843
 844	switch (o.state) {
 845	case ODEBUG_STATE_ACTIVE:
 846	case ODEBUG_STATE_NOTAVAILABLE:
 847		if (debug_object_fixup(descr->fixup_activate, addr, o.state))
 848			return 0;
 849		fallthrough;
 850	default:
 851		return -EINVAL;
 852	}
 853}
 854EXPORT_SYMBOL_GPL(debug_object_activate);
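/*
 * Illustrative sketch (hypothetical caller): activation brackets the
 * period in which the object is live in its subsystem, e.g. while a
 * request is queued:
 *
 *	int mydrv_queue(struct mydrv_obj *o)
 *	{
 *		if (debug_object_activate(o, &mydrv_descr))
 *			return -EINVAL;
 *		return mydrv_enqueue(o);
 *	}
 */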
 855
 856/**
 857 * debug_object_deactivate - debug checks when an object is deactivated
 858 * @addr:	address of the object
 859 * @descr:	pointer to an object specific debug description structure
 860 */
 861void debug_object_deactivate(void *addr, const struct debug_obj_descr *descr)
 862{
 863	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 864	struct debug_bucket *db;
 865	struct debug_obj *obj;
 866	unsigned long flags;
 867
 868	if (!debug_objects_enabled)
 869		return;
 870
 871	db = get_bucket((unsigned long) addr);
 872
 873	raw_spin_lock_irqsave(&db->lock, flags);
 874
 875	obj = lookup_object(addr, db);
 876	if (obj) {
 877		switch (obj->state) {
 878		case ODEBUG_STATE_DESTROYED:
 879			break;
 880		case ODEBUG_STATE_INIT:
 881		case ODEBUG_STATE_INACTIVE:
 882		case ODEBUG_STATE_ACTIVE:
 883			if (obj->astate)
 884				break;
 885			obj->state = ODEBUG_STATE_INACTIVE;
 886			fallthrough;
 887		default:
 888			raw_spin_unlock_irqrestore(&db->lock, flags);
 889			return;
 890		}
 891		o = *obj;
 892	}
 893
 894	raw_spin_unlock_irqrestore(&db->lock, flags);
 895	debug_print_object(&o, "deactivate");
 896}
 897EXPORT_SYMBOL_GPL(debug_object_deactivate);
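/*
 * Illustrative sketch (hypothetical caller), pairing with the activate
 * example above:
 *
 *	void mydrv_complete(struct mydrv_obj *o)
 *	{
 *		debug_object_deactivate(o, &mydrv_descr);
 *	}
 */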
 898
 899/**
 900 * debug_object_destroy - debug checks when an object is destroyed
 901 * @addr:	address of the object
 902 * @descr:	pointer to an object specific debug description structure
 903 */
 904void debug_object_destroy(void *addr, const struct debug_obj_descr *descr)
 905{
 906	struct debug_obj *obj, o;
 907	struct debug_bucket *db;
 908	unsigned long flags;
 909
 910	if (!debug_objects_enabled)
 911		return;
 912
 913	db = get_bucket((unsigned long) addr);
 914
 915	raw_spin_lock_irqsave(&db->lock, flags);
 916
 917	obj = lookup_object(addr, db);
 918	if (!obj) {
 919		raw_spin_unlock_irqrestore(&db->lock, flags);
 920		return;
 921	}
 922
 923	switch (obj->state) {
 924	case ODEBUG_STATE_ACTIVE:
 925	case ODEBUG_STATE_DESTROYED:
 926		break;
 927	case ODEBUG_STATE_NONE:
 928	case ODEBUG_STATE_INIT:
 929	case ODEBUG_STATE_INACTIVE:
 930		obj->state = ODEBUG_STATE_DESTROYED;
 931		fallthrough;
 932	default:
 933		raw_spin_unlock_irqrestore(&db->lock, flags);
 934		return;
 935	}
 936
 937	o = *obj;
 938	raw_spin_unlock_irqrestore(&db->lock, flags);
 939	debug_print_object(&o, "destroy");
 940
 941	if (o.state == ODEBUG_STATE_ACTIVE)
 942		debug_object_fixup(descr->fixup_destroy, addr, o.state);
 943}
 944EXPORT_SYMBOL_GPL(debug_object_destroy);
 945
 946/**
 947 * debug_object_free - debug checks when an object is freed
 948 * @addr:	address of the object
 949 * @descr:	pointer to an object specific debug description structure
 950 */
 951void debug_object_free(void *addr, const struct debug_obj_descr *descr)
 952{
 953	struct debug_obj *obj, o;
 954	struct debug_bucket *db;
 955	unsigned long flags;
 956
 957	if (!debug_objects_enabled)
 958		return;
 959
 960	db = get_bucket((unsigned long) addr);
 961
 962	raw_spin_lock_irqsave(&db->lock, flags);
 963
 964	obj = lookup_object(addr, db);
 965	if (!obj) {
 966		raw_spin_unlock_irqrestore(&db->lock, flags);
 967		return;
 968	}
 969
 970	switch (obj->state) {
 971	case ODEBUG_STATE_ACTIVE:
 972		break;
 973	default:
 974		hlist_del(&obj->node);
 975		raw_spin_unlock_irqrestore(&db->lock, flags);
 976		free_object(obj);
 977		return;
 978	}
 979
 980	o = *obj;
 981	raw_spin_unlock_irqrestore(&db->lock, flags);
 982	debug_print_object(&o, "free");
 983
 984	debug_object_fixup(descr->fixup_free, addr, o.state);
 985}
 986EXPORT_SYMBOL_GPL(debug_object_free);
 987
 988/**
 989 * debug_object_assert_init - debug checks when object should be init-ed
 990 * @addr:	address of the object
 991 * @descr:	pointer to an object specific debug description structure
 992 */
 993void debug_object_assert_init(void *addr, const struct debug_obj_descr *descr)
 994{
 995	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
 996	struct debug_bucket *db;
 997	struct debug_obj *obj;
 998	unsigned long flags;
 999
1000	if (!debug_objects_enabled)
1001		return;
1002
1003	debug_objects_fill_pool();
1004
1005	db = get_bucket((unsigned long) addr);
1006
1007	raw_spin_lock_irqsave(&db->lock, flags);
1008	obj = lookup_object_or_alloc(addr, db, descr, false, true);
1009	raw_spin_unlock_irqrestore(&db->lock, flags);
1010	if (likely(!IS_ERR_OR_NULL(obj)))
1011		return;
1012
1013	/* If NULL, the allocation has hit OOM */
1014	if (!obj) {
1015		debug_objects_oom();
1016		return;
1017	}
1018
1019	/* Object is neither tracked nor static. It's not initialized. */
1020	debug_print_object(&o, "assert_init");
1021	debug_object_fixup(descr->fixup_assert_init, addr, ODEBUG_STATE_NOTAVAILABLE);
1022}
1023EXPORT_SYMBOL_GPL(debug_object_assert_init);
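/*
 * Illustrative sketch (hypothetical caller): useful in operations which
 * are legal on initialized objects in any state, e.g. a cancel path
 * that may run before the object was ever activated:
 *
 *	void mydrv_cancel(struct mydrv_obj *o)
 *	{
 *		debug_object_assert_init(o, &mydrv_descr);
 *		mydrv_try_dequeue(o);
 *	}
 */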
1024
1025/**
1026 * debug_object_active_state - debug checks object usage state machine
1027 * @addr:	address of the object
1028 * @descr:	pointer to an object specific debug description structure
1029 * @expect:	expected state
1030 * @next:	state to move to if expected state is found
1031 */
1032void
1033debug_object_active_state(void *addr, const struct debug_obj_descr *descr,
1034			  unsigned int expect, unsigned int next)
1035{
1036	struct debug_obj o = { .object = addr, .state = ODEBUG_STATE_NOTAVAILABLE, .descr = descr };
1037	struct debug_bucket *db;
1038	struct debug_obj *obj;
1039	unsigned long flags;
1040
1041	if (!debug_objects_enabled)
1042		return;
1043
1044	db = get_bucket((unsigned long) addr);
1045
1046	raw_spin_lock_irqsave(&db->lock, flags);
1047
1048	obj = lookup_object(addr, db);
1049	if (obj) {
1050		switch (obj->state) {
1051		case ODEBUG_STATE_ACTIVE:
1052			if (obj->astate != expect)
1053				break;
1054			obj->astate = next;
1055			raw_spin_unlock_irqrestore(&db->lock, flags);
1056			return;
1057		default:
1058			break;
1059		}
1060		o = *obj;
1061	}
1062
1063	raw_spin_unlock_irqrestore(&db->lock, flags);
1064	debug_print_object(&o, "active_state");
1065}
1066EXPORT_SYMBOL_GPL(debug_object_active_state);
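/*
 * Illustrative sketch (hypothetical states): a subsystem can layer a
 * private sub-state machine on top of ODEBUG_STATE_ACTIVE:
 *
 *	#define MYDRV_OBJ_IDLE		0
 *	#define MYDRV_OBJ_QUEUED	1
 *
 *	debug_object_active_state(o, &mydrv_descr,
 *				  MYDRV_OBJ_IDLE, MYDRV_OBJ_QUEUED);
 *
 * The transition succeeds silently only when the object is active and
 * its current astate matches the expected value; everything else is
 * reported as an "active_state" violation.
 */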
1067
1068#ifdef CONFIG_DEBUG_OBJECTS_FREE
1069static void __debug_check_no_obj_freed(const void *address, unsigned long size)
1070{
1071	unsigned long flags, oaddr, saddr, eaddr, paddr, chunks;
1072	int cnt, objs_checked = 0;
1073	struct debug_obj *obj, o;
1074	struct debug_bucket *db;
1075	struct hlist_node *tmp;
1076
1077	saddr = (unsigned long) address;
1078	eaddr = saddr + size;
1079	paddr = saddr & ODEBUG_CHUNK_MASK;
1080	chunks = ((eaddr - paddr) + (ODEBUG_CHUNK_SIZE - 1));
1081	chunks >>= ODEBUG_CHUNK_SHIFT;
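	/*
	 * Worked example (illustrative): with 4K chunks, freeing 0x20
	 * bytes at address 0x1ff0 yields saddr = 0x1ff0, eaddr = 0x2010
	 * and paddr = 0x1000, so chunks = (0x1010 + 0xfff) >> 12 = 2 and
	 * both affected buckets get scanned.
	 */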
1082
1083	for (; chunks > 0; chunks--, paddr += ODEBUG_CHUNK_SIZE) {
1084		db = get_bucket(paddr);
1085
1086repeat:
1087		cnt = 0;
1088		raw_spin_lock_irqsave(&db->lock, flags);
1089		hlist_for_each_entry_safe(obj, tmp, &db->list, node) {
1090			cnt++;
1091			oaddr = (unsigned long) obj->object;
1092			if (oaddr < saddr || oaddr >= eaddr)
1093				continue;
1094
1095			switch (obj->state) {
1096			case ODEBUG_STATE_ACTIVE:
1097				o = *obj;
1098				raw_spin_unlock_irqrestore(&db->lock, flags);
1099				debug_print_object(&o, "free");
1100				debug_object_fixup(o.descr->fixup_free, (void *)oaddr, o.state);
1101				goto repeat;
1102			default:
1103				hlist_del(&obj->node);
1104				__free_object(obj);
1105				break;
1106			}
1107		}
1108		raw_spin_unlock_irqrestore(&db->lock, flags);
1109
1110		if (cnt > debug_objects_maxchain)
1111			debug_objects_maxchain = cnt;
1112
1113		objs_checked += cnt;
1114	}
1115
1116	if (objs_checked > debug_objects_maxchecked)
1117		debug_objects_maxchecked = objs_checked;
1118
1119	/* Schedule work to actually kmem_cache_free() objects */
1120	if (!READ_ONCE(obj_freeing) && pool_count(&pool_to_free)) {
1121		WRITE_ONCE(obj_freeing, true);
1122		schedule_delayed_work(&debug_obj_work, ODEBUG_FREE_WORK_DELAY);
1123	}
1124}
1125
1126void debug_check_no_obj_freed(const void *address, unsigned long size)
1127{
1128	if (debug_objects_enabled)
1129		__debug_check_no_obj_freed(address, size);
1130}
1131#endif
1132
1133#ifdef CONFIG_DEBUG_FS
1134
1135static int debug_stats_show(struct seq_file *m, void *v)
1136{
1137	unsigned int cpu, pool_used, pcp_free = 0;
1138
1139	/*
1140	 * pool_global.stats.cur_used is the number of batches currently
1141	 * handed out to per CPU pools. Convert it to the number of objects
1142	 * and subtract the number of free objects in the per CPU pools.
1143	 * As this is lockless the number is an estimate.
1144	 */
1145	for_each_possible_cpu(cpu)
1146		pcp_free += per_cpu(pool_pcpu.cnt, cpu);
1147
1148	pool_used = READ_ONCE(pool_global.stats.cur_used);
1149	pcp_free = min(pool_used, pcp_free);
1150	pool_used -= pcp_free;
1151
1152	seq_printf(m, "max_chain     : %d\n", debug_objects_maxchain);
1153	seq_printf(m, "max_checked   : %d\n", debug_objects_maxchecked);
1154	seq_printf(m, "warnings      : %d\n", debug_objects_warnings);
1155	seq_printf(m, "fixups        : %d\n", debug_objects_fixups);
1156	seq_printf(m, "pool_free     : %u\n", pool_count(&pool_global) + pcp_free);
1157	seq_printf(m, "pool_pcp_free : %u\n", pcp_free);
1158	seq_printf(m, "pool_min_free : %u\n", data_race(pool_global.stats.min_fill));
1159	seq_printf(m, "pool_used     : %u\n", pool_used);
1160	seq_printf(m, "pool_max_used : %u\n", data_race(pool_global.stats.max_used));
1161	seq_printf(m, "on_free_list  : %u\n", pool_count(&pool_to_free));
1162	seq_printf(m, "objs_allocated: %d\n", debug_objects_allocated);
1163	seq_printf(m, "objs_freed    : %d\n", debug_objects_freed);
1164	return 0;
1165}
1166DEFINE_SHOW_ATTRIBUTE(debug_stats);
1167
1168static int __init debug_objects_init_debugfs(void)
1169{
1170	struct dentry *dbgdir;
1171
1172	if (!debug_objects_enabled)
1173		return 0;
1174
1175	dbgdir = debugfs_create_dir("debug_objects", NULL);
1176
1177	debugfs_create_file("stats", 0444, dbgdir, NULL, &debug_stats_fops);
1178
1179	return 0;
1180}
1181__initcall(debug_objects_init_debugfs);
1182
1183#else
1184static inline void debug_objects_init_debugfs(void) { }
1185#endif
1186
1187#ifdef CONFIG_DEBUG_OBJECTS_SELFTEST
1188
1189/* Random data structure for the self test */
1190struct self_test {
1191	unsigned long	dummy1[6];
1192	int		static_init;
1193	unsigned long	dummy2[3];
1194};
1195
1196static __initconst const struct debug_obj_descr descr_type_test;
1197
1198static bool __init is_static_object(void *addr)
1199{
1200	struct self_test *obj = addr;
1201
1202	return obj->static_init;
1203}
1204
1205/*
1206 * fixup_init is called when:
1207 * - an active object is initialized
1208 */
1209static bool __init fixup_init(void *addr, enum debug_obj_state state)
1210{
1211	struct self_test *obj = addr;
1212
1213	switch (state) {
1214	case ODEBUG_STATE_ACTIVE:
1215		debug_object_deactivate(obj, &descr_type_test);
1216		debug_object_init(obj, &descr_type_test);
1217		return true;
1218	default:
1219		return false;
1220	}
1221}
1222
1223/*
1224 * fixup_activate is called when:
1225 * - an active object is activated
1226 * - an unknown non-static object is activated
1227 */
1228static bool __init fixup_activate(void *addr, enum debug_obj_state state)
1229{
1230	struct self_test *obj = addr;
1231
1232	switch (state) {
1233	case ODEBUG_STATE_NOTAVAILABLE:
1234		return true;
1235	case ODEBUG_STATE_ACTIVE:
1236		debug_object_deactivate(obj, &descr_type_test);
1237		debug_object_activate(obj, &descr_type_test);
1238		return true;
1239
1240	default:
1241		return false;
1242	}
1243}
1244
1245/*
1246 * fixup_destroy is called when:
1247 * - an active object is destroyed
1248 */
1249static bool __init fixup_destroy(void *addr, enum debug_obj_state state)
1250{
1251	struct self_test *obj = addr;
1252
1253	switch (state) {
1254	case ODEBUG_STATE_ACTIVE:
1255		debug_object_deactivate(obj, &descr_type_test);
1256		debug_object_destroy(obj, &descr_type_test);
1257		return true;
1258	default:
1259		return false;
1260	}
1261}
1262
1263/*
1264 * fixup_free is called when:
1265 * - an active object is freed
1266 */
1267static bool __init fixup_free(void *addr, enum debug_obj_state state)
1268{
1269	struct self_test *obj = addr;
1270
1271	switch (state) {
1272	case ODEBUG_STATE_ACTIVE:
1273		debug_object_deactivate(obj, &descr_type_test);
1274		debug_object_free(obj, &descr_type_test);
1275		return true;
1276	default:
1277		return false;
1278	}
1279}
1280
1281static int __init
1282check_results(void *addr, enum debug_obj_state state, int fixups, int warnings)
1283{
1284	struct debug_bucket *db;
1285	struct debug_obj *obj;
1286	unsigned long flags;
1287	int res = -EINVAL;
1288
1289	db = get_bucket((unsigned long) addr);
1290
1291	raw_spin_lock_irqsave(&db->lock, flags);
1292
1293	obj = lookup_object(addr, db);
1294	if (!obj && state != ODEBUG_STATE_NONE) {
1295		WARN(1, KERN_ERR "ODEBUG: selftest object not found\n");
1296		goto out;
1297	}
1298	if (obj && obj->state != state) {
1299		WARN(1, KERN_ERR "ODEBUG: selftest wrong state: %d != %d\n",
1300		       obj->state, state);
1301		goto out;
1302	}
1303	if (fixups != debug_objects_fixups) {
1304		WARN(1, KERN_ERR "ODEBUG: selftest fixups failed %d != %d\n",
1305		       fixups, debug_objects_fixups);
1306		goto out;
1307	}
1308	if (warnings != debug_objects_warnings) {
1309		WARN(1, KERN_ERR "ODEBUG: selftest warnings failed %d != %d\n",
1310		       warnings, debug_objects_warnings);
1311		goto out;
1312	}
1313	res = 0;
1314out:
1315	raw_spin_unlock_irqrestore(&db->lock, flags);
1316	if (res)
1317		debug_objects_enabled = false;
1318	return res;
1319}
1320
1321static __initconst const struct debug_obj_descr descr_type_test = {
1322	.name			= "selftest",
1323	.is_static_object	= is_static_object,
1324	.fixup_init		= fixup_init,
1325	.fixup_activate		= fixup_activate,
1326	.fixup_destroy		= fixup_destroy,
1327	.fixup_free		= fixup_free,
1328};
1329
1330static __initdata struct self_test obj = { .static_init = 0 };
1331
1332static bool __init debug_objects_selftest(void)
1333{
1334	int fixups, oldfixups, warnings, oldwarnings;
1335	unsigned long flags;
1336
1337	local_irq_save(flags);
1338
1339	fixups = oldfixups = debug_objects_fixups;
1340	warnings = oldwarnings = debug_objects_warnings;
1341	descr_test = &descr_type_test;
1342
1343	debug_object_init(&obj, &descr_type_test);
1344	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1345		goto out;
1346	debug_object_activate(&obj, &descr_type_test);
1347	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1348		goto out;
1349	debug_object_activate(&obj, &descr_type_test);
1350	if (check_results(&obj, ODEBUG_STATE_ACTIVE, ++fixups, ++warnings))
1351		goto out;
1352	debug_object_deactivate(&obj, &descr_type_test);
1353	if (check_results(&obj, ODEBUG_STATE_INACTIVE, fixups, warnings))
1354		goto out;
1355	debug_object_destroy(&obj, &descr_type_test);
1356	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, warnings))
1357		goto out;
1358	debug_object_init(&obj, &descr_type_test);
1359	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1360		goto out;
1361	debug_object_activate(&obj, &descr_type_test);
1362	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1363		goto out;
1364	debug_object_deactivate(&obj, &descr_type_test);
1365	if (check_results(&obj, ODEBUG_STATE_DESTROYED, fixups, ++warnings))
1366		goto out;
1367	debug_object_free(&obj, &descr_type_test);
1368	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1369		goto out;
1370
1371	obj.static_init = 1;
1372	debug_object_activate(&obj, &descr_type_test);
1373	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1374		goto out;
1375	debug_object_init(&obj, &descr_type_test);
1376	if (check_results(&obj, ODEBUG_STATE_INIT, ++fixups, ++warnings))
1377		goto out;
1378	debug_object_free(&obj, &descr_type_test);
1379	if (check_results(&obj, ODEBUG_STATE_NONE, fixups, warnings))
1380		goto out;
1381
1382#ifdef CONFIG_DEBUG_OBJECTS_FREE
1383	debug_object_init(&obj, &descr_type_test);
1384	if (check_results(&obj, ODEBUG_STATE_INIT, fixups, warnings))
1385		goto out;
1386	debug_object_activate(&obj, &descr_type_test);
1387	if (check_results(&obj, ODEBUG_STATE_ACTIVE, fixups, warnings))
1388		goto out;
1389	__debug_check_no_obj_freed(&obj, sizeof(obj));
1390	if (check_results(&obj, ODEBUG_STATE_NONE, ++fixups, ++warnings))
1391		goto out;
1392#endif
1393	pr_info("selftest passed\n");
1394
1395out:
1396	debug_objects_fixups = oldfixups;
1397	debug_objects_warnings = oldwarnings;
1398	descr_test = NULL;
1399
1400	local_irq_restore(flags);
1401	return debug_objects_enabled;
1402}
1403#else
1404static inline bool debug_objects_selftest(void) { return true; }
1405#endif
1406
1407/*
1408 * Called during early boot to initialize the hash buckets and link
1409 * the static object pool objects into the pool list. After this call
1410 * the object tracker is fully operational.
1411 */
1412void __init debug_objects_early_init(void)
1413{
1414	int i;
1415
1416	for (i = 0; i < ODEBUG_HASH_SIZE; i++)
1417		raw_spin_lock_init(&obj_hash[i].lock);
1418
1419	/* Keep early boot simple and add everything to the boot list */
1420	for (i = 0; i < ODEBUG_POOL_SIZE; i++)
1421		hlist_add_head(&obj_static_pool[i].node, &pool_boot);
1422}
1423
1424/*
1425 * Convert the statically allocated objects to dynamic ones.
1426 * debug_objects_mem_init() is called early so only one CPU is up and
1427 * interrupts are disabled, which means it is safe to replace the active
1428 * object references.
1429 */
1430static bool __init debug_objects_replace_static_objects(struct kmem_cache *cache)
1431{
1432	struct debug_bucket *db = obj_hash;
1433	struct hlist_node *tmp;
1434	struct debug_obj *obj;
1435	HLIST_HEAD(objects);
1436	int i;
1437
1438	for (i = 0; i < ODEBUG_POOL_SIZE; i += ODEBUG_BATCH_SIZE) {
1439		if (!kmem_alloc_batch(&objects, cache, GFP_KERNEL))
1440			goto free;
1441		pool_push_batch(&pool_global, &objects);
1442	}
1443
1444	/* Disconnect the boot pool. */
1445	pool_boot.first = NULL;
1446
1447	/* Replace the active object references */
1448	for (i = 0; i < ODEBUG_HASH_SIZE; i++, db++) {
1449		hlist_move_list(&db->list, &objects);
1450
1451		hlist_for_each_entry(obj, &objects, node) {
1452			struct debug_obj *new = pcpu_alloc();
1453
1454			/* copy object data */
1455			*new = *obj;
1456			hlist_add_head(&new->node, &db->list);
1457		}
1458	}
1459	return true;
1460free:
1461	/* Can't use free_object_list() as the cache is not populated yet */
1462	hlist_for_each_entry_safe(obj, tmp, &pool_global.objects, node) {
1463		hlist_del(&obj->node);
1464		kmem_cache_free(cache, obj);
1465	}
1466	return false;
1467}
1468
1469/*
1470 * Called after the kmem_caches are functional to setup a dedicated
1471 * cache pool, which has the SLAB_DEBUG_OBJECTS flag set. This flag
1472 * prevents the debug code from being called on kmem_cache_free() for the
1473 * debug tracker objects to avoid recursive calls.
1474 */
1475void __init debug_objects_mem_init(void)
1476{
1477	struct kmem_cache *cache;
1478	int extras;
1479
1480	if (!debug_objects_enabled)
1481		return;
1482
1483	if (!debug_objects_selftest())
1484		return;
1485
1486	cache = kmem_cache_create("debug_objects_cache", sizeof(struct debug_obj), 0,
1487				  SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE, NULL);
1488
1489	if (!cache || !debug_objects_replace_static_objects(cache)) {
1490		debug_objects_enabled = false;
1491		pr_warn("Out of memory.\n");
1492		return;
1493	}
1494
1495	/*
1496	 * Adjust the thresholds for allocating and freeing objects
1497	 * according to the number of possible CPUs available in the
1498	 * system.
1499	 */
1500	extras = num_possible_cpus() * ODEBUG_BATCH_SIZE;
1501	pool_global.max_cnt += extras;
1502	pool_global.min_cnt += extras;
1503
1504	/* Everything worked. Expose the cache */
1505	obj_cache = cache;
1506	static_branch_enable(&obj_cache_enabled);
1507
1508#ifdef CONFIG_HOTPLUG_CPU
1509	cpuhp_setup_state_nocalls(CPUHP_DEBUG_OBJ_DEAD, "object:offline", NULL,
1510				  object_cpu_offline);
1511#endif
1512	return;
1513}