kernel/jump_label.c (v5.14.15)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * jump label support
  4 *
  5 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
  6 * Copyright (C) 2011 Peter Zijlstra
  7 *
  8 */
  9#include <linux/memory.h>
 10#include <linux/uaccess.h>
 11#include <linux/module.h>
 12#include <linux/list.h>
 13#include <linux/slab.h>
 14#include <linux/sort.h>
 15#include <linux/err.h>
 16#include <linux/static_key.h>
 17#include <linux/jump_label_ratelimit.h>
 18#include <linux/bug.h>
 19#include <linux/cpu.h>
 20#include <asm/sections.h>
 21
 22/* mutex to protect coming/going of the jump_label table */
 23static DEFINE_MUTEX(jump_label_mutex);
 24
 25void jump_label_lock(void)
 26{
 27	mutex_lock(&jump_label_mutex);
 28}
 29
 30void jump_label_unlock(void)
 31{
 32	mutex_unlock(&jump_label_mutex);
 33}
 34
 35static int jump_label_cmp(const void *a, const void *b)
 36{
 37	const struct jump_entry *jea = a;
 38	const struct jump_entry *jeb = b;
 39
 40	/*
 41	 * Entries are sorted by key.
 42	 */
 43	if (jump_entry_key(jea) < jump_entry_key(jeb))
 44		return -1;
 45
 46	if (jump_entry_key(jea) > jump_entry_key(jeb))
 47		return 1;
 48
 49	/*
 50	 * In batching mode, entries should also be sorted by code address
 51	 * within the already key-sorted list of entries, enabling a bsearch
 52	 * in the vector.
 53	 */
 54	if (jump_entry_code(jea) < jump_entry_code(jeb))
 55		return -1;
 56
 57	if (jump_entry_code(jea) > jump_entry_code(jeb))
 58		return 1;
 59
 60	return 0;
 61}
 62
 63static void jump_label_swap(void *a, void *b, int size)
 64{
 65	long delta = (unsigned long)a - (unsigned long)b;
 66	struct jump_entry *jea = a;
 67	struct jump_entry *jeb = b;
 68	struct jump_entry tmp = *jea;
 69
 70	jea->code	= jeb->code - delta;
 71	jea->target	= jeb->target - delta;
 72	jea->key	= jeb->key - delta;
 73
 74	jeb->code	= tmp.code + delta;
 75	jeb->target	= tmp.target + delta;
 76	jeb->key	= tmp.key + delta;
 77}
 78
 79static void
 80jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 81{
 82	unsigned long size;
 83	void *swapfn = NULL;
 84
 85	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
 86		swapfn = jump_label_swap;
 87
 88	size = (((unsigned long)stop - (unsigned long)start)
 89					/ sizeof(struct jump_entry));
 90	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
 91}
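
Aside: the custom swap above exists because, with CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, code/target/key hold offsets relative to their own storage, so moving an entry must rebias each offset by the distance moved. A minimal, runnable userspace sketch of that invariant (struct rel_entry and everything around it is invented for illustration):

#include <stdio.h>

struct rel_entry {
	int target;	/* offset from &entry->target to the real address */
};

static unsigned long rel_addr(const struct rel_entry *e)
{
	return (unsigned long)&e->target + e->target;
}

/* same rebiasing as jump_label_swap(): the payload moved by -delta,
 * so its stored offset must be adjusted to keep resolving to the
 * same absolute address */
static void rel_swap(struct rel_entry *a, struct rel_entry *b)
{
	long delta = (unsigned long)a - (unsigned long)b;
	struct rel_entry tmp = *a;

	a->target = b->target - delta;
	b->target = tmp.target + delta;
}

int main(void)
{
	char buf[2];
	struct rel_entry e[2];

	e[0].target = (int)((unsigned long)&buf[0] - (unsigned long)&e[0].target);
	e[1].target = (int)((unsigned long)&buf[1] - (unsigned long)&e[1].target);

	rel_swap(&e[0], &e[1]);

	/* both lines print 1: the offsets still resolve correctly */
	printf("%d\n", rel_addr(&e[0]) == (unsigned long)&buf[1]);
	printf("%d\n", rel_addr(&e[1]) == (unsigned long)&buf[0]);
	return 0;
}
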
 92
 93static void jump_label_update(struct static_key *key);
 94
 95/*
 96 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 97 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 98 * kernel headers such as kernel.h and others. Since static_key_count() is not
 99 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's OK
100 * to have it be a function here. Similarly, for 'static_key_enable()' and
101 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
102 * to be included from most/all places for CONFIG_JUMP_LABEL.
103 */
104int static_key_count(struct static_key *key)
105{
106	/*
107	 * -1 means the first static_key_slow_inc() is in progress.
108	 *  static_key_enabled() must return true, so return 1 here.
109	 */
110	int n = atomic_read(&key->enabled);
111
112	return n >= 0 ? n : 1;
113}
114EXPORT_SYMBOL_GPL(static_key_count);
115
116void static_key_slow_inc_cpuslocked(struct static_key *key)
117{
118	int v, v1;
119
120	STATIC_KEY_CHECK_USE(key);
121	lockdep_assert_cpus_held();
122
123	/*
124	 * Careful if we get concurrent static_key_slow_inc() calls;
125	 * later calls must wait for the first one to _finish_ the
126	 * jump_label_update() process.  At the same time, however,
127	 * the jump_label_update() call below wants to see
128	 * static_key_enabled(&key) for jumps to be updated properly.
129	 *
130	 * So give a special meaning to negative key->enabled: it sends
131	 * static_key_slow_inc() down the slow path, and it is non-zero
132	 * so it counts as "enabled" in jump_label_update().  Note that
133	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
134	 */
135	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
136		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
137		if (likely(v1 == v))
138			return;
139	}
140
141	jump_label_lock();
142	if (atomic_read(&key->enabled) == 0) {
143		atomic_set(&key->enabled, -1);
144		jump_label_update(key);
145		/*
146		 * Ensure that if the above cmpxchg loop observes our positive
147		 * value, it must also observe all the text changes.
148		 */
149		atomic_set_release(&key->enabled, 1);
150	} else {
151		atomic_inc(&key->enabled);
152	}
153	jump_label_unlock();
154}
155
156void static_key_slow_inc(struct static_key *key)
157{
158	cpus_read_lock();
159	static_key_slow_inc_cpuslocked(key);
160	cpus_read_unlock();
161}
162EXPORT_SYMBOL_GPL(static_key_slow_inc);
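
Aside: a sketch of typical client code for the refcounted API above, using only the documented wrappers from <linux/jump_label.h>; the key and the functions holding it are hypothetical:

#include <linux/jump_label.h>
#include <linux/printk.h>

/* hypothetical feature toggle shared by several registrations */
static DEFINE_STATIC_KEY_FALSE(my_feature_key);

void my_fast_path(void)
{
	/* compiled as a NOP until the first user enables the key */
	if (static_branch_unlikely(&my_feature_key))
		pr_info("feature path taken\n");
}

void my_feature_register(void)
{
	/* first increment patches every branch site to a jump */
	static_branch_inc(&my_feature_key);
}

void my_feature_unregister(void)
{
	/* last decrement patches the sites back to NOPs */
	static_branch_dec(&my_feature_key);
}

static_key_enable()/static_key_disable() below are the boolean counterparts: they pin the count at exactly 1 or 0 rather than tracking users.
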
163
164void static_key_enable_cpuslocked(struct static_key *key)
165{
166	STATIC_KEY_CHECK_USE(key);
167	lockdep_assert_cpus_held();
168
169	if (atomic_read(&key->enabled) > 0) {
170		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
171		return;
172	}
173
174	jump_label_lock();
175	if (atomic_read(&key->enabled) == 0) {
176		atomic_set(&key->enabled, -1);
177		jump_label_update(key);
178		/*
179		 * See static_key_slow_inc().
180		 */
181		atomic_set_release(&key->enabled, 1);
182	}
183	jump_label_unlock();
184}
185EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);
186
187void static_key_enable(struct static_key *key)
188{
189	cpus_read_lock();
190	static_key_enable_cpuslocked(key);
191	cpus_read_unlock();
192}
193EXPORT_SYMBOL_GPL(static_key_enable);
194
195void static_key_disable_cpuslocked(struct static_key *key)
196{
197	STATIC_KEY_CHECK_USE(key);
198	lockdep_assert_cpus_held();
199
200	if (atomic_read(&key->enabled) != 1) {
201		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
202		return;
203	}
204
205	jump_label_lock();
206	if (atomic_cmpxchg(&key->enabled, 1, 0))
207		jump_label_update(key);
208	jump_label_unlock();
209}
210EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);
211
212void static_key_disable(struct static_key *key)
213{
214	cpus_read_lock();
215	static_key_disable_cpuslocked(key);
216	cpus_read_unlock();
217}
218EXPORT_SYMBOL_GPL(static_key_disable);
219
220static bool static_key_slow_try_dec(struct static_key *key)
221{
222	int val;
223
224	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
225	if (val == 1)
226		return false;
227
228	/*
229	 * The negative count check is valid even when a negative
230	 * key->enabled is in use by static_key_slow_inc(); a
231	 * __static_key_slow_dec() before the first static_key_slow_inc()
232	 * returns is unbalanced, because all other static_key_slow_inc()
233	 * instances block while the update is in progress.
234	 */
235	WARN(val < 0, "jump label: negative count!\n");
236	return true;
237}
238
239static void __static_key_slow_dec_cpuslocked(struct static_key *key)
240{
241	lockdep_assert_cpus_held();
242
243	if (static_key_slow_try_dec(key))
244		return;
245
246	jump_label_lock();
247	if (atomic_dec_and_test(&key->enabled))
248		jump_label_update(key);
249	jump_label_unlock();
250}
251
252static void __static_key_slow_dec(struct static_key *key)
253{
254	cpus_read_lock();
255	__static_key_slow_dec_cpuslocked(key);
256	cpus_read_unlock();
257}
258
259void jump_label_update_timeout(struct work_struct *work)
260{
261	struct static_key_deferred *key =
262		container_of(work, struct static_key_deferred, work.work);
263	__static_key_slow_dec(&key->key);
264}
265EXPORT_SYMBOL_GPL(jump_label_update_timeout);
266
267void static_key_slow_dec(struct static_key *key)
268{
269	STATIC_KEY_CHECK_USE(key);
270	__static_key_slow_dec(key);
271}
272EXPORT_SYMBOL_GPL(static_key_slow_dec);
273
274void static_key_slow_dec_cpuslocked(struct static_key *key)
275{
276	STATIC_KEY_CHECK_USE(key);
277	__static_key_slow_dec_cpuslocked(key);
278}
279
280void __static_key_slow_dec_deferred(struct static_key *key,
281				    struct delayed_work *work,
282				    unsigned long timeout)
283{
284	STATIC_KEY_CHECK_USE(key);
285
286	if (static_key_slow_try_dec(key))
287		return;
288
289	schedule_delayed_work(work, timeout);
290}
291EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
292
293void __static_key_deferred_flush(void *key, struct delayed_work *work)
294{
295	STATIC_KEY_CHECK_USE(key);
296	flush_delayed_work(work);
297}
298EXPORT_SYMBOL_GPL(__static_key_deferred_flush);
299
300void jump_label_rate_limit(struct static_key_deferred *key,
301		unsigned long rl)
302{
303	STATIC_KEY_CHECK_USE(key);
304	key->timeout = rl;
305	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
306}
307EXPORT_SYMBOL_GPL(jump_label_rate_limit);
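
Aside: a sketch of a rate-limited key built on the helpers above, useful when the disable side can be hit at high frequency; the names are hypothetical, and HZ yields roughly a one-second deferral:

#include <linux/jump_label_ratelimit.h>
#include <linux/jiffies.h>

static struct static_key_deferred my_def_key;

void my_subsys_init(void)
{
	/* decrements are coalesced and applied at most once per second */
	jump_label_rate_limit(&my_def_key, HZ);
}

void my_subsys_start(void)
{
	static_key_slow_inc(&my_def_key.key);	/* patches immediately */
}

void my_subsys_stop(void)
{
	/* goes through __static_key_slow_dec_deferred() above */
	static_key_slow_dec_deferred(&my_def_key);
}
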
308
309static int addr_conflict(struct jump_entry *entry, void *start, void *end)
310{
311	if (jump_entry_code(entry) <= (unsigned long)end &&
312	    jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
313		return 1;
314
315	return 0;
316}
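
Aside: note the asymmetry in addr_conflict() — @end is compared inclusively while the entry's own range [code, code + size) is half-open. A runnable restatement of the same test:

#include <stdio.h>

static int conflict(unsigned long code, unsigned long size,
		    unsigned long start, unsigned long end)
{
	return code <= end && code + size > start;
}

int main(void)
{
	/* entry occupies [0x100, 0x104) */
	printf("%d\n", conflict(0x100, 4, 0x103, 0x103));	/* 1: last byte hit */
	printf("%d\n", conflict(0x100, 4, 0x104, 0x108));	/* 0: adjacent only */
	return 0;
}
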
317
318static int __jump_label_text_reserved(struct jump_entry *iter_start,
319		struct jump_entry *iter_stop, void *start, void *end, bool init)
320{
321	struct jump_entry *iter;
322
323	iter = iter_start;
324	while (iter < iter_stop) {
325		if (init || !jump_entry_is_init(iter)) {
326			if (addr_conflict(iter, start, end))
327				return 1;
328		}
329		iter++;
330	}
331
332	return 0;
333}
334
335/*
336 * Update code which is definitely not currently executing.
337 * Architectures which need heavyweight synchronization to modify
338 * running code can override this to make the non-live update case
339 * cheaper.
340 */
341void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
342					    enum jump_label_type type)
343{
344	arch_jump_label_transform(entry, type);
345}
346
347static inline struct jump_entry *static_key_entries(struct static_key *key)
348{
349	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
350	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
351}
352
353static inline bool static_key_type(struct static_key *key)
354{
355	return key->type & JUMP_TYPE_TRUE;
356}
357
358static inline bool static_key_linked(struct static_key *key)
359{
360	return key->type & JUMP_TYPE_LINKED;
361}
362
363static inline void static_key_clear_linked(struct static_key *key)
364{
365	key->type &= ~JUMP_TYPE_LINKED;
366}
367
368static inline void static_key_set_linked(struct static_key *key)
369{
370	key->type |= JUMP_TYPE_LINKED;
371}
372
373/***
374 * A 'struct static_key' uses a union such that it either points directly
375 * to a table of 'struct jump_entry' or to a linked list of modules which in
376 * turn point to 'struct jump_entry' tables.
377 *
378 * The two lower bits of the pointer are used to keep track of which pointer
 379 * type is in use and to store the initial branch direction; we use access
 380 * functions which preserve these bits.
381 */
382static void static_key_set_entries(struct static_key *key,
383				   struct jump_entry *entries)
384{
385	unsigned long type;
386
387	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
388	type = key->type & JUMP_TYPE_MASK;
389	key->entries = entries;
390	key->type |= type;
391}
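
Aside: a self-contained userspace sketch of that pointer-tagging scheme; TYPE_MASK and the helpers are invented stand-ins for JUMP_TYPE_MASK and the accessors here:

#include <assert.h>
#include <stdint.h>

#define TYPE_MASK 3UL	/* two low bits of an aligned pointer are free */

static void set_tagged(uintptr_t *word, void *p)
{
	uintptr_t type = *word & TYPE_MASK;	/* preserve the tag */

	*word = (uintptr_t)p | type;
}

static void *get_tagged(uintptr_t word)
{
	return (void *)(word & ~TYPE_MASK);	/* strip the tag */
}

int main(void)
{
	static int entries[4];	/* at least 4-byte aligned */
	uintptr_t word = 1;	/* pretend the TRUE bit is already set */

	set_tagged(&word, entries);
	assert(get_tagged(word) == (void *)entries);
	assert((word & TYPE_MASK) == 1);	/* tag survived the store */
	return 0;
}
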
392
393static enum jump_label_type jump_label_type(struct jump_entry *entry)
394{
395	struct static_key *key = jump_entry_key(entry);
396	bool enabled = static_key_enabled(key);
397	bool branch = jump_entry_is_branch(entry);
398
399	/* See the comment in linux/jump_label.h */
400	return enabled ^ branch;
401}
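
Aside: the XOR collapses four cases into one expression. A tiny runnable check of the truth table, assuming the upstream enum values JUMP_LABEL_NOP = 0 and JUMP_LABEL_JMP = 1:

#include <assert.h>

enum jump_label_type { JUMP_LABEL_NOP = 0, JUMP_LABEL_JMP = 1 };

/* patch to a jump only when the run-time state disagrees with the
 * direction compiled into the site */
static enum jump_label_type type_of(int enabled, int branch)
{
	return enabled ^ branch;
}

int main(void)
{
	assert(type_of(0, 0) == JUMP_LABEL_NOP);
	assert(type_of(0, 1) == JUMP_LABEL_JMP);
	assert(type_of(1, 0) == JUMP_LABEL_JMP);
	assert(type_of(1, 1) == JUMP_LABEL_NOP);
	return 0;
}
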
402
403static bool jump_label_can_update(struct jump_entry *entry, bool init)
404{
405	/*
406	 * Cannot update code that was in an init text area.
407	 */
408	if (!init && jump_entry_is_init(entry))
409		return false;
410
411	if (!kernel_text_address(jump_entry_code(entry))) {
412		/*
413		 * This skips patching built-in __exit, which
414		 * is part of init_section_contains() but is
415		 * not part of kernel_text_address().
416		 *
417		 * Skipping built-in __exit is fine since it
418		 * will never be executed.
419		 */
420		WARN_ONCE(!jump_entry_is_init(entry),
421			  "can't patch jump_label at %pS",
422			  (void *)jump_entry_code(entry));
423		return false;
424	}
425
426	return true;
427}
428
429#ifndef HAVE_JUMP_LABEL_BATCH
430static void __jump_label_update(struct static_key *key,
431				struct jump_entry *entry,
432				struct jump_entry *stop,
433				bool init)
434{
435	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
436		if (jump_label_can_update(entry, init))
437			arch_jump_label_transform(entry, jump_label_type(entry));
438	}
439}
440#else
441static void __jump_label_update(struct static_key *key,
442				struct jump_entry *entry,
443				struct jump_entry *stop,
444				bool init)
445{
446	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
447
448		if (!jump_label_can_update(entry, init))
449			continue;
450
451		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
452			/*
453			 * Queue is full: Apply the current queue and try again.
454			 */
455			arch_jump_label_transform_apply();
456			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
457		}
458	}
459	arch_jump_label_transform_apply();
460}
461#endif
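
Aside: the batch variant amortizes the expensive global sync in arch_jump_label_transform_apply() over many sites. A userspace sketch of the same queue-until-full-then-flush control flow (all names invented):

#include <stdbool.h>
#include <stdio.h>

#define QLEN 4
static int queue[QLEN];
static int qcount;

static void apply(void)		/* stands in for the one costly sync */
{
	for (int i = 0; i < qcount; i++)
		printf("patch site %d\n", queue[i]);
	qcount = 0;
}

static bool queue_site(int site)	/* false when the queue is full */
{
	if (qcount == QLEN)
		return false;
	queue[qcount++] = site;
	return true;
}

int main(void)
{
	for (int site = 0; site < 10; site++) {
		if (!queue_site(site)) {
			apply();	/* flush, then the retry must fit */
			queue_site(site);
		}
	}
	apply();	/* flush the tail */
	return 0;
}
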
462
463void __init jump_label_init(void)
464{
465	struct jump_entry *iter_start = __start___jump_table;
466	struct jump_entry *iter_stop = __stop___jump_table;
467	struct static_key *key = NULL;
468	struct jump_entry *iter;
469
470	/*
471	 * Since we are initializing the static_key.enabled field with
472	 * the 'raw' int values (to avoid pulling in atomic.h) in
473	 * jump_label.h, let's make sure that is safe. There are only two
474	 * cases to check since we initialize to 0 or 1.
475	 */
476	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
477	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);
478
479	if (static_key_initialized)
480		return;
481
482	cpus_read_lock();
483	jump_label_lock();
484	jump_label_sort_entries(iter_start, iter_stop);
485
486	for (iter = iter_start; iter < iter_stop; iter++) {
487		struct static_key *iterk;
488		bool in_init;
489
490		/* rewrite NOPs */
491		if (jump_label_type(iter) == JUMP_LABEL_NOP)
492			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
493
494		in_init = init_section_contains((void *)jump_entry_code(iter), 1);
495		jump_entry_set_init(iter, in_init);
496
497		iterk = jump_entry_key(iter);
498		if (iterk == key)
499			continue;
500
501		key = iterk;
502		static_key_set_entries(key, iter);
503	}
504	static_key_initialized = true;
505	jump_label_unlock();
506	cpus_read_unlock();
507}
508
509#ifdef CONFIG_MODULES
510
511static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
512{
513	struct static_key *key = jump_entry_key(entry);
514	bool type = static_key_type(key);
515	bool branch = jump_entry_is_branch(entry);
516
517	/* See the comment in linux/jump_label.h */
518	return type ^ branch;
519}
520
521struct static_key_mod {
522	struct static_key_mod *next;
523	struct jump_entry *entries;
524	struct module *mod;
525};
526
527static inline struct static_key_mod *static_key_mod(struct static_key *key)
528{
529	WARN_ON_ONCE(!static_key_linked(key));
530	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
531}
532
533/***
534 * key->type and key->next are the same via union.
535 * This sets key->next and preserves the type bits.
536 *
537 * See additional comments above static_key_set_entries().
538 */
539static void static_key_set_mod(struct static_key *key,
540			       struct static_key_mod *mod)
541{
542	unsigned long type;
543
544	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
545	type = key->type & JUMP_TYPE_MASK;
546	key->next = mod;
547	key->type |= type;
548}
549
550static int __jump_label_mod_text_reserved(void *start, void *end)
551{
552	struct module *mod;
553	int ret;
554
555	preempt_disable();
556	mod = __module_text_address((unsigned long)start);
557	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
558	if (!try_module_get(mod))
559		mod = NULL;
560	preempt_enable();
561
562	if (!mod)
563		return 0;
564
565	ret = __jump_label_text_reserved(mod->jump_entries,
566				mod->jump_entries + mod->num_jump_entries,
567				start, end, mod->state == MODULE_STATE_COMING);
568
569	module_put(mod);
570
571	return ret;
572}
573
574static void __jump_label_mod_update(struct static_key *key)
575{
576	struct static_key_mod *mod;
577
578	for (mod = static_key_mod(key); mod; mod = mod->next) {
579		struct jump_entry *stop;
580		struct module *m;
581
582		/*
583		 * NULL if the static_key is defined in a module
584		 * that does not use it
585		 */
586		if (!mod->entries)
587			continue;
588
589		m = mod->mod;
590		if (!m)
591			stop = __stop___jump_table;
592		else
593			stop = m->jump_entries + m->num_jump_entries;
594		__jump_label_update(key, mod->entries, stop,
595				    m && m->state == MODULE_STATE_COMING);
596	}
597}
598
599/***
600 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
601 * @mod: module to patch
602 *
603 * Allow for run-time selection of the optimal nops. Before the module
604 * loads patch these with arch_get_jump_label_nop(), which is specified by
605 * the arch specific jump label code.
606 */
607void jump_label_apply_nops(struct module *mod)
608{
609	struct jump_entry *iter_start = mod->jump_entries;
610	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
611	struct jump_entry *iter;
612
613	/* if the module doesn't have jump label entries, just return */
614	if (iter_start == iter_stop)
615		return;
616
617	for (iter = iter_start; iter < iter_stop; iter++) {
618		/* Only write NOPs for arch_branch_static(). */
619		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
620			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
621	}
622}
623
624static int jump_label_add_module(struct module *mod)
625{
626	struct jump_entry *iter_start = mod->jump_entries;
627	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
628	struct jump_entry *iter;
629	struct static_key *key = NULL;
630	struct static_key_mod *jlm, *jlm2;
631
632	/* if the module doesn't have jump label entries, just return */
633	if (iter_start == iter_stop)
634		return 0;
635
636	jump_label_sort_entries(iter_start, iter_stop);
637
638	for (iter = iter_start; iter < iter_stop; iter++) {
639		struct static_key *iterk;
640		bool in_init;
641
642		in_init = within_module_init(jump_entry_code(iter), mod);
643		jump_entry_set_init(iter, in_init);
644
645		iterk = jump_entry_key(iter);
646		if (iterk == key)
647			continue;
648
649		key = iterk;
650		if (within_module((unsigned long)key, mod)) {
651			static_key_set_entries(key, iter);
652			continue;
653		}
654		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
655		if (!jlm)
656			return -ENOMEM;
657		if (!static_key_linked(key)) {
658			jlm2 = kzalloc(sizeof(struct static_key_mod),
659				       GFP_KERNEL);
660			if (!jlm2) {
661				kfree(jlm);
662				return -ENOMEM;
663			}
664			preempt_disable();
665			jlm2->mod = __module_address((unsigned long)key);
666			preempt_enable();
667			jlm2->entries = static_key_entries(key);
668			jlm2->next = NULL;
669			static_key_set_mod(key, jlm2);
670			static_key_set_linked(key);
671		}
672		jlm->mod = mod;
673		jlm->entries = iter;
674		jlm->next = static_key_mod(key);
675		static_key_set_mod(key, jlm);
676		static_key_set_linked(key);
677
678		/* Only update if we've changed from our initial state */
679		if (jump_label_type(iter) != jump_label_init_type(iter))
680			__jump_label_update(key, iter, iter_stop, true);
681	}
682
683	return 0;
684}
685
686static void jump_label_del_module(struct module *mod)
687{
688	struct jump_entry *iter_start = mod->jump_entries;
689	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
690	struct jump_entry *iter;
691	struct static_key *key = NULL;
692	struct static_key_mod *jlm, **prev;
693
694	for (iter = iter_start; iter < iter_stop; iter++) {
695		if (jump_entry_key(iter) == key)
696			continue;
697
698		key = jump_entry_key(iter);
699
700		if (within_module((unsigned long)key, mod))
701			continue;
702
703		/* No memory during module load */
704		if (WARN_ON(!static_key_linked(key)))
705			continue;
706
707		prev = &key->next;
708		jlm = static_key_mod(key);
709
710		while (jlm && jlm->mod != mod) {
711			prev = &jlm->next;
712			jlm = jlm->next;
713		}
714
715		/* No memory during module load */
716		if (WARN_ON(!jlm))
717			continue;
718
719		if (prev == &key->next)
720			static_key_set_mod(key, jlm->next);
721		else
722			*prev = jlm->next;
723
724		kfree(jlm);
725
726		jlm = static_key_mod(key);
727	/* if only one entry is left, fold it back into the static_key */
728		if (jlm->next == NULL) {
729			static_key_set_entries(key, jlm->entries);
730			static_key_clear_linked(key);
731			kfree(jlm);
732		}
733	}
734}
735
736static int
737jump_label_module_notify(struct notifier_block *self, unsigned long val,
738			 void *data)
739{
740	struct module *mod = data;
741	int ret = 0;
742
743	cpus_read_lock();
744	jump_label_lock();
745
746	switch (val) {
747	case MODULE_STATE_COMING:
748		ret = jump_label_add_module(mod);
749		if (ret) {
750			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
751			jump_label_del_module(mod);
752		}
753		break;
754	case MODULE_STATE_GOING:
755		jump_label_del_module(mod);
756		break;
757	}
758
759	jump_label_unlock();
760	cpus_read_unlock();
761
762	return notifier_from_errno(ret);
763}
764
765static struct notifier_block jump_label_module_nb = {
766	.notifier_call = jump_label_module_notify,
767	.priority = 1, /* higher than tracepoints */
768};
769
770static __init int jump_label_init_module(void)
771{
772	return register_module_notifier(&jump_label_module_nb);
773}
774early_initcall(jump_label_init_module);
775
776#endif /* CONFIG_MODULES */
777
778/***
779 * jump_label_text_reserved - check if addr range is reserved
780 * @start: start text addr
781 * @end: end text addr
782 *
783 * checks if the text address range between @start and @end
784 * overlaps with any of the jump label patch addresses. Code
785 * that wants to modify kernel text should first verify that
786 * it does not overlap with any of the jump label addresses.
787 * Caller must hold jump_label_mutex.
788 *
789 * returns 1 if there is an overlap, 0 otherwise
790 */
791int jump_label_text_reserved(void *start, void *end)
792{
793	bool init = system_state < SYSTEM_RUNNING;
794	int ret = __jump_label_text_reserved(__start___jump_table,
795			__stop___jump_table, start, end, init);
796
797	if (ret)
798		return ret;
799
800#ifdef CONFIG_MODULES
801	ret = __jump_label_mod_text_reserved(start, end);
802#endif
803	return ret;
804}
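
Aside: a sketch of how a hypothetical text patcher might consult this check before rewriting kernel text; my_patch_text() is invented, and per the comment above the lock discipline is the caller's responsibility:

#include <linux/jump_label.h>
#include <linux/errno.h>
#include <linux/types.h>

static int my_patch_text(void *addr, size_t len)
{
	int ret = 0;

	jump_label_lock();	/* satisfies the jump_label_mutex rule */
	if (jump_label_text_reserved(addr, (char *)addr + len - 1))
		ret = -EBUSY;	/* range overlaps a jump_entry site */
	/* else: safe to patch [addr, addr + len) while the lock is held */
	jump_label_unlock();

	return ret;
}
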
805
806static void jump_label_update(struct static_key *key)
807{
808	struct jump_entry *stop = __stop___jump_table;
809	bool init = system_state < SYSTEM_RUNNING;
810	struct jump_entry *entry;
811#ifdef CONFIG_MODULES
812	struct module *mod;
813
814	if (static_key_linked(key)) {
815		__jump_label_mod_update(key);
816		return;
817	}
818
819	preempt_disable();
820	mod = __module_address((unsigned long)key);
821	if (mod) {
822		stop = mod->jump_entries + mod->num_jump_entries;
823		init = mod->state == MODULE_STATE_COMING;
824	}
825	preempt_enable();
826#endif
827	entry = static_key_entries(key);
828	/* if there are no users, entry can be NULL */
829	if (entry)
830		__jump_label_update(key, entry, stop, init);
831}
832
833#ifdef CONFIG_STATIC_KEYS_SELFTEST
834static DEFINE_STATIC_KEY_TRUE(sk_true);
835static DEFINE_STATIC_KEY_FALSE(sk_false);
836
837static __init int jump_label_test(void)
838{
839	int i;
840
841	for (i = 0; i < 2; i++) {
842		WARN_ON(static_key_enabled(&sk_true.key) != true);
843		WARN_ON(static_key_enabled(&sk_false.key) != false);
844
845		WARN_ON(!static_branch_likely(&sk_true));
846		WARN_ON(!static_branch_unlikely(&sk_true));
847		WARN_ON(static_branch_likely(&sk_false));
848		WARN_ON(static_branch_unlikely(&sk_false));
849
850		static_branch_disable(&sk_true);
851		static_branch_enable(&sk_false);
852
853		WARN_ON(static_key_enabled(&sk_true.key) == true);
854		WARN_ON(static_key_enabled(&sk_false.key) == false);
855
856		WARN_ON(static_branch_likely(&sk_true));
857		WARN_ON(static_branch_unlikely(&sk_true));
858		WARN_ON(!static_branch_likely(&sk_false));
859		WARN_ON(!static_branch_unlikely(&sk_false));
860
861		static_branch_enable(&sk_true);
862		static_branch_disable(&sk_false);
863	}
864
865	return 0;
866}
867early_initcall(jump_label_test);
868#endif /* CONFIG_STATIC_KEYS_SELFTEST */
kernel/jump_label.c (v6.2)
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * jump label support
  4 *
  5 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
  6 * Copyright (C) 2011 Peter Zijlstra
  7 *
  8 */
  9#include <linux/memory.h>
 10#include <linux/uaccess.h>
 11#include <linux/module.h>
 12#include <linux/list.h>
 13#include <linux/slab.h>
 14#include <linux/sort.h>
 15#include <linux/err.h>
 16#include <linux/static_key.h>
 17#include <linux/jump_label_ratelimit.h>
 18#include <linux/bug.h>
 19#include <linux/cpu.h>
 20#include <asm/sections.h>
 21
 22/* mutex to protect coming/going of the jump_label table */
 23static DEFINE_MUTEX(jump_label_mutex);
 24
 25void jump_label_lock(void)
 26{
 27	mutex_lock(&jump_label_mutex);
 28}
 29
 30void jump_label_unlock(void)
 31{
 32	mutex_unlock(&jump_label_mutex);
 33}
 34
 35static int jump_label_cmp(const void *a, const void *b)
 36{
 37	const struct jump_entry *jea = a;
 38	const struct jump_entry *jeb = b;
 39
 40	/*
 41	 * Entries are sorted by key.
 42	 */
 43	if (jump_entry_key(jea) < jump_entry_key(jeb))
 44		return -1;
 45
 46	if (jump_entry_key(jea) > jump_entry_key(jeb))
 47		return 1;
 48
 49	/*
 50	 * In batching mode, entries should also be sorted by code address
 51	 * within the already key-sorted list of entries, enabling a bsearch
 52	 * in the vector.
 53	 */
 54	if (jump_entry_code(jea) < jump_entry_code(jeb))
 55		return -1;
 56
 57	if (jump_entry_code(jea) > jump_entry_code(jeb))
 58		return 1;
 59
 60	return 0;
 61}
 62
 63static void jump_label_swap(void *a, void *b, int size)
 64{
 65	long delta = (unsigned long)a - (unsigned long)b;
 66	struct jump_entry *jea = a;
 67	struct jump_entry *jeb = b;
 68	struct jump_entry tmp = *jea;
 69
 70	jea->code	= jeb->code - delta;
 71	jea->target	= jeb->target - delta;
 72	jea->key	= jeb->key - delta;
 73
 74	jeb->code	= tmp.code + delta;
 75	jeb->target	= tmp.target + delta;
 76	jeb->key	= tmp.key + delta;
 77}
 78
 79static void
 80jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 81{
 82	unsigned long size;
 83	void *swapfn = NULL;
 84
 85	if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
 86		swapfn = jump_label_swap;
 87
 88	size = (((unsigned long)stop - (unsigned long)start)
 89					/ sizeof(struct jump_entry));
 90	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
 91}
 92
 93static void jump_label_update(struct static_key *key);
 94
 95/*
 96 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 97 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 98 * kernel headers such as kernel.h and others. Since static_key_count() is not
 99 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's OK
100 * to have it be a function here. Similarly, for 'static_key_enable()' and
101 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
102 * to be included from most/all places for CONFIG_JUMP_LABEL.
103 */
104int static_key_count(struct static_key *key)
105{
106	/*
107	 * -1 means the first static_key_slow_inc() is in progress.
108	 *  static_key_enabled() must return true, so return 1 here.
109	 */
110	int n = atomic_read(&key->enabled);
111
112	return n >= 0 ? n : 1;
113}
114EXPORT_SYMBOL_GPL(static_key_count);
115
116/*
117 * static_key_fast_inc_not_disabled - adds a user for a static key
118 * @key: static key that must be already enabled
119 *
120 * The caller must make sure that the static key can't get disabled while
121 * in this function. It doesn't patch jump labels, only adds a user to
122 * an already enabled static key.
123 *
124 * Returns true if the increment was done. Unlike refcount_t the ref counter
125 * is not saturated, but will fail to increment on overflow.
126 */
127bool static_key_fast_inc_not_disabled(struct static_key *key)
128{
129	int v;
130
131	STATIC_KEY_CHECK_USE(key);
132	/*
133	 * Negative key->enabled has a special meaning: it sends
134	 * static_key_slow_inc() down the slow path, and it is non-zero
135	 * so it counts as "enabled" in jump_label_update().  Note that
136	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
137	 */
138	v = atomic_read(&key->enabled);
139	do {
140		if (v <= 0 || (v + 1) < 0)
141			return false;
142	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));
143
144	return true;
145}
146EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled);
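
Aside: a userspace restatement of the rule this function enforces, using C11 atomics; the kernel's (v + 1) < 0 overflow test is expressed here as v == INT_MAX to stay within defined behavior:

#include <stdatomic.h>
#include <stdbool.h>
#include <limits.h>
#include <stdio.h>

/* bump the count only if it is already positive and won't overflow */
static bool inc_not_disabled(atomic_int *enabled)
{
	int v = atomic_load(enabled);

	do {
		if (v <= 0 || v == INT_MAX)
			return false;
	} while (!atomic_compare_exchange_weak(enabled, &v, v + 1));

	return true;
}

int main(void)
{
	atomic_int key = 1;

	printf("%d\n", inc_not_disabled(&key));	/* 1: went 1 -> 2 */
	atomic_store(&key, 0);
	printf("%d\n", inc_not_disabled(&key));	/* 0: key is disabled */
	return 0;
}
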
147
148bool static_key_slow_inc_cpuslocked(struct static_key *key)
149{
150	lockdep_assert_cpus_held();
151
152	/*
153	 * Careful if we get concurrent static_key_slow_inc() calls;
154	 * later calls must wait for the first one to _finish_ the
155	 * jump_label_update() process.  At the same time, however,
156	 * the jump_label_update() call below wants to see
157	 * static_key_enabled(&key) for jumps to be updated properly.
158	 */
159	if (static_key_fast_inc_not_disabled(key))
160		return true;
161
162	jump_label_lock();
163	if (atomic_read(&key->enabled) == 0) {
164		atomic_set(&key->enabled, -1);
165		jump_label_update(key);
166		/*
167		 * Ensure that if the above cmpxchg loop observes our positive
168		 * value, it must also observe all the text changes.
169		 */
170		atomic_set_release(&key->enabled, 1);
171	} else {
172		if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key))) {
173			jump_label_unlock();
174			return false;
175		}
176	}
177	jump_label_unlock();
178	return true;
179}
180
181bool static_key_slow_inc(struct static_key *key)
182{
183	bool ret;
184
185	cpus_read_lock();
186	ret = static_key_slow_inc_cpuslocked(key);
187	cpus_read_unlock();
188	return ret;
189}
190EXPORT_SYMBOL_GPL(static_key_slow_inc);
191
192void static_key_enable_cpuslocked(struct static_key *key)
193{
194	STATIC_KEY_CHECK_USE(key);
195	lockdep_assert_cpus_held();
196
197	if (atomic_read(&key->enabled) > 0) {
198		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
199		return;
200	}
201
202	jump_label_lock();
203	if (atomic_read(&key->enabled) == 0) {
204		atomic_set(&key->enabled, -1);
205		jump_label_update(key);
206		/*
207		 * See static_key_slow_inc().
208		 */
209		atomic_set_release(&key->enabled, 1);
210	}
211	jump_label_unlock();
212}
213EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);
214
215void static_key_enable(struct static_key *key)
216{
217	cpus_read_lock();
218	static_key_enable_cpuslocked(key);
219	cpus_read_unlock();
220}
221EXPORT_SYMBOL_GPL(static_key_enable);
222
223void static_key_disable_cpuslocked(struct static_key *key)
224{
225	STATIC_KEY_CHECK_USE(key);
226	lockdep_assert_cpus_held();
227
228	if (atomic_read(&key->enabled) != 1) {
229		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
230		return;
231	}
232
233	jump_label_lock();
234	if (atomic_cmpxchg(&key->enabled, 1, 0))
235		jump_label_update(key);
236	jump_label_unlock();
237}
238EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);
239
240void static_key_disable(struct static_key *key)
241{
242	cpus_read_lock();
243	static_key_disable_cpuslocked(key);
244	cpus_read_unlock();
245}
246EXPORT_SYMBOL_GPL(static_key_disable);
247
248static bool static_key_slow_try_dec(struct static_key *key)
249{
250	int val;
251
252	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
253	if (val == 1)
254		return false;
255
256	/*
257	 * The negative count check is valid even when a negative
258	 * key->enabled is in use by static_key_slow_inc(); a
259	 * __static_key_slow_dec() before the first static_key_slow_inc()
260	 * returns is unbalanced, because all other static_key_slow_inc()
261	 * instances block while the update is in progress.
262	 */
263	WARN(val < 0, "jump label: negative count!\n");
264	return true;
265}
266
267static void __static_key_slow_dec_cpuslocked(struct static_key *key)
268{
269	lockdep_assert_cpus_held();
270
271	if (static_key_slow_try_dec(key))
272		return;
273
274	jump_label_lock();
275	if (atomic_dec_and_test(&key->enabled))
276		jump_label_update(key);
277	jump_label_unlock();
278}
279
280static void __static_key_slow_dec(struct static_key *key)
281{
282	cpus_read_lock();
283	__static_key_slow_dec_cpuslocked(key);
284	cpus_read_unlock();
285}
286
287void jump_label_update_timeout(struct work_struct *work)
288{
289	struct static_key_deferred *key =
290		container_of(work, struct static_key_deferred, work.work);
291	__static_key_slow_dec(&key->key);
292}
293EXPORT_SYMBOL_GPL(jump_label_update_timeout);
294
295void static_key_slow_dec(struct static_key *key)
296{
297	STATIC_KEY_CHECK_USE(key);
298	__static_key_slow_dec(key);
299}
300EXPORT_SYMBOL_GPL(static_key_slow_dec);
301
302void static_key_slow_dec_cpuslocked(struct static_key *key)
303{
304	STATIC_KEY_CHECK_USE(key);
305	__static_key_slow_dec_cpuslocked(key);
306}
307
308void __static_key_slow_dec_deferred(struct static_key *key,
309				    struct delayed_work *work,
310				    unsigned long timeout)
311{
312	STATIC_KEY_CHECK_USE(key);
313
314	if (static_key_slow_try_dec(key))
315		return;
316
317	schedule_delayed_work(work, timeout);
318}
319EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
320
321void __static_key_deferred_flush(void *key, struct delayed_work *work)
322{
323	STATIC_KEY_CHECK_USE(key);
324	flush_delayed_work(work);
325}
326EXPORT_SYMBOL_GPL(__static_key_deferred_flush);
327
328void jump_label_rate_limit(struct static_key_deferred *key,
329		unsigned long rl)
330{
331	STATIC_KEY_CHECK_USE(key);
332	key->timeout = rl;
333	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
334}
335EXPORT_SYMBOL_GPL(jump_label_rate_limit);
336
337static int addr_conflict(struct jump_entry *entry, void *start, void *end)
338{
339	if (jump_entry_code(entry) <= (unsigned long)end &&
340	    jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
341		return 1;
342
343	return 0;
344}
345
346static int __jump_label_text_reserved(struct jump_entry *iter_start,
347		struct jump_entry *iter_stop, void *start, void *end, bool init)
348{
349	struct jump_entry *iter;
350
351	iter = iter_start;
352	while (iter < iter_stop) {
353		if (init || !jump_entry_is_init(iter)) {
354			if (addr_conflict(iter, start, end))
355				return 1;
356		}
357		iter++;
358	}
359
360	return 0;
361}
362
363#ifndef arch_jump_label_transform_static
364static void arch_jump_label_transform_static(struct jump_entry *entry,
365					     enum jump_label_type type)
366{
367	/* nothing to do on most architectures */
368}
369#endif
370
371static inline struct jump_entry *static_key_entries(struct static_key *key)
372{
373	WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
374	return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
375}
376
377static inline bool static_key_type(struct static_key *key)
378{
379	return key->type & JUMP_TYPE_TRUE;
380}
381
382static inline bool static_key_linked(struct static_key *key)
383{
384	return key->type & JUMP_TYPE_LINKED;
385}
386
387static inline void static_key_clear_linked(struct static_key *key)
388{
389	key->type &= ~JUMP_TYPE_LINKED;
390}
391
392static inline void static_key_set_linked(struct static_key *key)
393{
394	key->type |= JUMP_TYPE_LINKED;
395}
396
397/***
398 * A 'struct static_key' uses a union such that it either points directly
399 * to a table of 'struct jump_entry' or to a linked list of modules which in
400 * turn point to 'struct jump_entry' tables.
401 *
402 * The two lower bits of the pointer are used to keep track of which pointer
 403 * type is in use and to store the initial branch direction; we use access
 404 * functions which preserve these bits.
405 */
406static void static_key_set_entries(struct static_key *key,
407				   struct jump_entry *entries)
408{
409	unsigned long type;
410
411	WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
412	type = key->type & JUMP_TYPE_MASK;
413	key->entries = entries;
414	key->type |= type;
415}
416
417static enum jump_label_type jump_label_type(struct jump_entry *entry)
418{
419	struct static_key *key = jump_entry_key(entry);
420	bool enabled = static_key_enabled(key);
421	bool branch = jump_entry_is_branch(entry);
422
423	/* See the comment in linux/jump_label.h */
424	return enabled ^ branch;
425}
426
427static bool jump_label_can_update(struct jump_entry *entry, bool init)
428{
429	/*
430	 * Cannot update code that was in an init text area.
431	 */
432	if (!init && jump_entry_is_init(entry))
433		return false;
434
435	if (!kernel_text_address(jump_entry_code(entry))) {
436		/*
437		 * This skips patching built-in __exit, which
438		 * is part of init_section_contains() but is
439		 * not part of kernel_text_address().
440		 *
441		 * Skipping built-in __exit is fine since it
442		 * will never be executed.
443		 */
444		WARN_ONCE(!jump_entry_is_init(entry),
445			  "can't patch jump_label at %pS",
446			  (void *)jump_entry_code(entry));
447		return false;
448	}
449
450	return true;
451}
452
453#ifndef HAVE_JUMP_LABEL_BATCH
454static void __jump_label_update(struct static_key *key,
455				struct jump_entry *entry,
456				struct jump_entry *stop,
457				bool init)
458{
459	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
460		if (jump_label_can_update(entry, init))
461			arch_jump_label_transform(entry, jump_label_type(entry));
462	}
463}
464#else
465static void __jump_label_update(struct static_key *key,
466				struct jump_entry *entry,
467				struct jump_entry *stop,
468				bool init)
469{
470	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
471
472		if (!jump_label_can_update(entry, init))
473			continue;
474
475		if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
476			/*
477			 * Queue is full: Apply the current queue and try again.
478			 */
479			arch_jump_label_transform_apply();
480			BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
481		}
482	}
483	arch_jump_label_transform_apply();
484}
485#endif
486
487void __init jump_label_init(void)
488{
489	struct jump_entry *iter_start = __start___jump_table;
490	struct jump_entry *iter_stop = __stop___jump_table;
491	struct static_key *key = NULL;
492	struct jump_entry *iter;
493
494	/*
495	 * Since we are initializing the static_key.enabled field with
496	 * the 'raw' int values (to avoid pulling in atomic.h) in
497	 * jump_label.h, let's make sure that is safe. There are only two
498	 * cases to check since we initialize to 0 or 1.
499	 */
500	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
501	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);
502
503	if (static_key_initialized)
504		return;
505
506	cpus_read_lock();
507	jump_label_lock();
508	jump_label_sort_entries(iter_start, iter_stop);
509
510	for (iter = iter_start; iter < iter_stop; iter++) {
511		struct static_key *iterk;
512		bool in_init;
513
514		/* rewrite NOPs */
515		if (jump_label_type(iter) == JUMP_LABEL_NOP)
516			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
517
518		in_init = init_section_contains((void *)jump_entry_code(iter), 1);
519		jump_entry_set_init(iter, in_init);
520
521		iterk = jump_entry_key(iter);
522		if (iterk == key)
523			continue;
524
525		key = iterk;
526		static_key_set_entries(key, iter);
527	}
528	static_key_initialized = true;
529	jump_label_unlock();
530	cpus_read_unlock();
531}
532
533#ifdef CONFIG_MODULES
534
535enum jump_label_type jump_label_init_type(struct jump_entry *entry)
536{
537	struct static_key *key = jump_entry_key(entry);
538	bool type = static_key_type(key);
539	bool branch = jump_entry_is_branch(entry);
540
541	/* See the comment in linux/jump_label.h */
542	return type ^ branch;
543}
544
545struct static_key_mod {
546	struct static_key_mod *next;
547	struct jump_entry *entries;
548	struct module *mod;
549};
550
551static inline struct static_key_mod *static_key_mod(struct static_key *key)
552{
553	WARN_ON_ONCE(!static_key_linked(key));
554	return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
555}
556
557/***
558 * key->type and key->next are the same via union.
559 * This sets key->next and preserves the type bits.
560 *
561 * See additional comments above static_key_set_entries().
562 */
563static void static_key_set_mod(struct static_key *key,
564			       struct static_key_mod *mod)
565{
566	unsigned long type;
567
568	WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
569	type = key->type & JUMP_TYPE_MASK;
570	key->next = mod;
571	key->type |= type;
572}
573
574static int __jump_label_mod_text_reserved(void *start, void *end)
575{
576	struct module *mod;
577	int ret;
578
579	preempt_disable();
580	mod = __module_text_address((unsigned long)start);
581	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
582	if (!try_module_get(mod))
583		mod = NULL;
584	preempt_enable();
585
586	if (!mod)
587		return 0;
588
589	ret = __jump_label_text_reserved(mod->jump_entries,
590				mod->jump_entries + mod->num_jump_entries,
591				start, end, mod->state == MODULE_STATE_COMING);
592
593	module_put(mod);
594
595	return ret;
596}
597
598static void __jump_label_mod_update(struct static_key *key)
599{
600	struct static_key_mod *mod;
601
602	for (mod = static_key_mod(key); mod; mod = mod->next) {
603		struct jump_entry *stop;
604		struct module *m;
605
606		/*
607		 * NULL if the static_key is defined in a module
608		 * that does not use it
609		 */
610		if (!mod->entries)
611			continue;
612
613		m = mod->mod;
614		if (!m)
615			stop = __stop___jump_table;
616		else
617			stop = m->jump_entries + m->num_jump_entries;
618		__jump_label_update(key, mod->entries, stop,
619				    m && m->state == MODULE_STATE_COMING);
620	}
621}
622
623static int jump_label_add_module(struct module *mod)
624{
625	struct jump_entry *iter_start = mod->jump_entries;
626	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
627	struct jump_entry *iter;
628	struct static_key *key = NULL;
629	struct static_key_mod *jlm, *jlm2;
630
631	/* if the module doesn't have jump label entries, just return */
632	if (iter_start == iter_stop)
633		return 0;
634
635	jump_label_sort_entries(iter_start, iter_stop);
636
637	for (iter = iter_start; iter < iter_stop; iter++) {
638		struct static_key *iterk;
639		bool in_init;
640
641		in_init = within_module_init(jump_entry_code(iter), mod);
642		jump_entry_set_init(iter, in_init);
643
644		iterk = jump_entry_key(iter);
645		if (iterk == key)
646			continue;
647
648		key = iterk;
649		if (within_module((unsigned long)key, mod)) {
650			static_key_set_entries(key, iter);
651			continue;
652		}
653		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
654		if (!jlm)
655			return -ENOMEM;
656		if (!static_key_linked(key)) {
657			jlm2 = kzalloc(sizeof(struct static_key_mod),
658				       GFP_KERNEL);
659			if (!jlm2) {
660				kfree(jlm);
661				return -ENOMEM;
662			}
663			preempt_disable();
664			jlm2->mod = __module_address((unsigned long)key);
665			preempt_enable();
666			jlm2->entries = static_key_entries(key);
667			jlm2->next = NULL;
668			static_key_set_mod(key, jlm2);
669			static_key_set_linked(key);
670		}
671		jlm->mod = mod;
672		jlm->entries = iter;
673		jlm->next = static_key_mod(key);
674		static_key_set_mod(key, jlm);
675		static_key_set_linked(key);
676
677		/* Only update if we've changed from our initial state */
678		if (jump_label_type(iter) != jump_label_init_type(iter))
679			__jump_label_update(key, iter, iter_stop, true);
680	}
681
682	return 0;
683}
684
685static void jump_label_del_module(struct module *mod)
686{
687	struct jump_entry *iter_start = mod->jump_entries;
688	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
689	struct jump_entry *iter;
690	struct static_key *key = NULL;
691	struct static_key_mod *jlm, **prev;
692
693	for (iter = iter_start; iter < iter_stop; iter++) {
694		if (jump_entry_key(iter) == key)
695			continue;
696
697		key = jump_entry_key(iter);
698
699		if (within_module((unsigned long)key, mod))
700			continue;
701
702		/* No memory during module load */
703		if (WARN_ON(!static_key_linked(key)))
704			continue;
705
706		prev = &key->next;
707		jlm = static_key_mod(key);
708
709		while (jlm && jlm->mod != mod) {
710			prev = &jlm->next;
711			jlm = jlm->next;
712		}
713
714		/* No memory during module load */
715		if (WARN_ON(!jlm))
716			continue;
717
718		if (prev == &key->next)
719			static_key_set_mod(key, jlm->next);
720		else
721			*prev = jlm->next;
722
723		kfree(jlm);
724
725		jlm = static_key_mod(key);
726	/* if only one entry is left, fold it back into the static_key */
727		if (jlm->next == NULL) {
728			static_key_set_entries(key, jlm->entries);
729			static_key_clear_linked(key);
730			kfree(jlm);
731		}
732	}
733}
734
735static int
736jump_label_module_notify(struct notifier_block *self, unsigned long val,
737			 void *data)
738{
739	struct module *mod = data;
740	int ret = 0;
741
742	cpus_read_lock();
743	jump_label_lock();
744
745	switch (val) {
746	case MODULE_STATE_COMING:
747		ret = jump_label_add_module(mod);
748		if (ret) {
749			WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
750			jump_label_del_module(mod);
751		}
752		break;
753	case MODULE_STATE_GOING:
754		jump_label_del_module(mod);
755		break;
756	}
757
758	jump_label_unlock();
759	cpus_read_unlock();
760
761	return notifier_from_errno(ret);
762}
763
764static struct notifier_block jump_label_module_nb = {
765	.notifier_call = jump_label_module_notify,
766	.priority = 1, /* higher than tracepoints */
767};
768
769static __init int jump_label_init_module(void)
770{
771	return register_module_notifier(&jump_label_module_nb);
772}
773early_initcall(jump_label_init_module);
774
775#endif /* CONFIG_MODULES */
776
777/***
778 * jump_label_text_reserved - check if addr range is reserved
779 * @start: start text addr
780 * @end: end text addr
781 *
782 * checks if the text address range between @start and @end
783 * overlaps with any of the jump label patch addresses. Code
784 * that wants to modify kernel text should first verify that
785 * it does not overlap with any of the jump label addresses.
786 * Caller must hold jump_label_mutex.
787 *
788 * returns 1 if there is an overlap, 0 otherwise
789 */
790int jump_label_text_reserved(void *start, void *end)
791{
792	bool init = system_state < SYSTEM_RUNNING;
793	int ret = __jump_label_text_reserved(__start___jump_table,
794			__stop___jump_table, start, end, init);
795
796	if (ret)
797		return ret;
798
799#ifdef CONFIG_MODULES
800	ret = __jump_label_mod_text_reserved(start, end);
801#endif
802	return ret;
803}
804
805static void jump_label_update(struct static_key *key)
806{
807	struct jump_entry *stop = __stop___jump_table;
808	bool init = system_state < SYSTEM_RUNNING;
809	struct jump_entry *entry;
810#ifdef CONFIG_MODULES
811	struct module *mod;
812
813	if (static_key_linked(key)) {
814		__jump_label_mod_update(key);
815		return;
816	}
817
818	preempt_disable();
819	mod = __module_address((unsigned long)key);
820	if (mod) {
821		stop = mod->jump_entries + mod->num_jump_entries;
822		init = mod->state == MODULE_STATE_COMING;
823	}
824	preempt_enable();
825#endif
826	entry = static_key_entries(key);
827	/* if there are no users, entry can be NULL */
828	if (entry)
829		__jump_label_update(key, entry, stop, init);
830}
831
832#ifdef CONFIG_STATIC_KEYS_SELFTEST
833static DEFINE_STATIC_KEY_TRUE(sk_true);
834static DEFINE_STATIC_KEY_FALSE(sk_false);
835
836static __init int jump_label_test(void)
837{
838	int i;
839
840	for (i = 0; i < 2; i++) {
841		WARN_ON(static_key_enabled(&sk_true.key) != true);
842		WARN_ON(static_key_enabled(&sk_false.key) != false);
843
844		WARN_ON(!static_branch_likely(&sk_true));
845		WARN_ON(!static_branch_unlikely(&sk_true));
846		WARN_ON(static_branch_likely(&sk_false));
847		WARN_ON(static_branch_unlikely(&sk_false));
848
849		static_branch_disable(&sk_true);
850		static_branch_enable(&sk_false);
851
852		WARN_ON(static_key_enabled(&sk_true.key) == true);
853		WARN_ON(static_key_enabled(&sk_false.key) == false);
854
855		WARN_ON(static_branch_likely(&sk_true));
856		WARN_ON(static_branch_unlikely(&sk_true));
857		WARN_ON(!static_branch_likely(&sk_false));
858		WARN_ON(!static_branch_unlikely(&sk_false));
859
860		static_branch_enable(&sk_true);
861		static_branch_disable(&sk_false);
862	}
863
864	return 0;
865}
866early_initcall(jump_label_test);
867#endif /* CONFIG_STATIC_KEYS_SELFTEST */