v3.5.6
  1/*
  2 * jump label support
  3 *
  4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
  5 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
  6 *
  7 */
  8#include <linux/memory.h>
  9#include <linux/uaccess.h>
 10#include <linux/module.h>
 11#include <linux/list.h>
 12#include <linux/slab.h>
 13#include <linux/sort.h>
 14#include <linux/err.h>
 15#include <linux/static_key.h>
 16
 17#ifdef HAVE_JUMP_LABEL
 18
 19/* mutex to protect coming/going of the jump_label table */
 20static DEFINE_MUTEX(jump_label_mutex);
 21
 22void jump_label_lock(void)
 23{
 24	mutex_lock(&jump_label_mutex);
 25}
 26
 27void jump_label_unlock(void)
 28{
 29	mutex_unlock(&jump_label_mutex);
 30}
 31
 32static int jump_label_cmp(const void *a, const void *b)
 33{
 34	const struct jump_entry *jea = a;
 35	const struct jump_entry *jeb = b;
 36
 37	if (jea->key < jeb->key)
 38		return -1;
 39
 40	if (jea->key > jeb->key)
 41		return 1;
 42
 43	return 0;
 44}
 45
 46static void
 47jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 48{
 49	unsigned long size;
 50
 51	size = (((unsigned long)stop - (unsigned long)start)
 52					/ sizeof(struct jump_entry));
 53	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 54}
 55
 56static void jump_label_update(struct static_key *key, int enable);
 57
 58void static_key_slow_inc(struct static_key *key)
 59{
 60	if (atomic_inc_not_zero(&key->enabled))
 61		return;
 62
 63	jump_label_lock();
 64	if (atomic_read(&key->enabled) == 0) {
 65		if (!jump_label_get_branch_default(key))
 66			jump_label_update(key, JUMP_LABEL_ENABLE);
 67		else
 68			jump_label_update(key, JUMP_LABEL_DISABLE);
 69	}
 70	atomic_inc(&key->enabled);
 71	jump_label_unlock();
 72}
 73EXPORT_SYMBOL_GPL(static_key_slow_inc);
 74
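
A minimal usage sketch of this v3.5-era API (the key and helper names here are hypothetical, not part of this file): a key starts out false, the hot path compiles to a single NOP, and static_key_slow_inc()/static_key_slow_dec() patch every call site when the feature is toggled.

	static struct static_key tracing_key = STATIC_KEY_INIT_FALSE;

	static void hot_path(void)
	{
		/* A NOP until the key is enabled, then a jump to the slow path. */
		if (static_key_false(&tracing_key))
			do_expensive_tracing();		/* hypothetical helper */
	}

	static void tracing_set(bool on)
	{
		if (on)
			static_key_slow_inc(&tracing_key);	/* patches all call sites */
		else
			static_key_slow_dec(&tracing_key);
	}
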
 75static void __static_key_slow_dec(struct static_key *key,
 76		unsigned long rate_limit, struct delayed_work *work)
 77{
 78	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 79		WARN(atomic_read(&key->enabled) < 0,
 80		     "jump label: negative count!\n");
 81		return;
 82	}
 83
 84	if (rate_limit) {
 85		atomic_inc(&key->enabled);
 86		schedule_delayed_work(work, rate_limit);
 87	} else {
 88		if (!jump_label_get_branch_default(key))
 89			jump_label_update(key, JUMP_LABEL_DISABLE);
 90		else
 91			jump_label_update(key, JUMP_LABEL_ENABLE);
 92	}
 93	jump_label_unlock();
 94}
 95
 96static void jump_label_update_timeout(struct work_struct *work)
 97{
 98	struct static_key_deferred *key =
 99		container_of(work, struct static_key_deferred, work.work);
100	__static_key_slow_dec(&key->key, 0, NULL);
101}
102
103void static_key_slow_dec(struct static_key *key)
104{
105	__static_key_slow_dec(key, 0, NULL);
106}
107EXPORT_SYMBOL_GPL(static_key_slow_dec);
108
109void static_key_slow_dec_deferred(struct static_key_deferred *key)
110{
111	__static_key_slow_dec(&key->key, key->timeout, &key->work);
112}
113EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
114
115void jump_label_rate_limit(struct static_key_deferred *key,
116		unsigned long rl)
117{
118	key->timeout = rl;
119	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
120}
121
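
jump_label_rate_limit() exists because every toggle rewrites kernel text, which is expensive; deferring the disable side batches rapid inc/dec cycles into one update. A sketch modeled on perf's usage (the key name is illustrative):

	static struct static_key_deferred sched_events_key;

	static void __init events_init(void)
	{
		/* Delay the disabling side by one second to batch toggles. */
		jump_label_rate_limit(&sched_events_key, HZ);
	}

	static void event_add(void)
	{
		static_key_slow_inc(&sched_events_key.key);
	}

	static void event_del(void)
	{
		/* May only (re)arm the delayed work rather than patch text now. */
		static_key_slow_dec_deferred(&sched_events_key);
	}
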
122static int addr_conflict(struct jump_entry *entry, void *start, void *end)
123{
124	if (entry->code <= (unsigned long)end &&
125		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
126		return 1;
127
128	return 0;
129}
130
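
addr_conflict() is an interval-overlap test: does the patch site [code, code + JUMP_LABEL_NOP_SIZE) intersect the caller's range, with @end treated inclusively? As a worked example, with a 5-byte NOP (the x86 size) at 0x1000, a range ending at 0x1000 or starting at 0x1004 conflicts, while one starting at 0x1005 does not.
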
131static int __jump_label_text_reserved(struct jump_entry *iter_start,
132		struct jump_entry *iter_stop, void *start, void *end)
133{
134	struct jump_entry *iter;
135
136	iter = iter_start;
137	while (iter < iter_stop) {
138		if (addr_conflict(iter, start, end))
139			return 1;
140		iter++;
141	}
142
143	return 0;
144}
145
146/*
147 * Update code which is definitely not currently executing.
148 * Architectures which need heavyweight synchronization to modify
149 * running code can override this to make the non-live update case
150 * cheaper.
151 */
152void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
153					    enum jump_label_type type)
154{
155	arch_jump_label_transform(entry, type);
156}
157
158static void __jump_label_update(struct static_key *key,
159				struct jump_entry *entry,
160				struct jump_entry *stop, int enable)
161{
162	for (; (entry < stop) &&
163	      (entry->key == (jump_label_t)(unsigned long)key);
164	      entry++) {
165		/*
166		 * entry->code set to 0 invalidates module init text sections
167		 * kernel_text_address() verifies we are not in core kernel
168		 * init code, see jump_label_invalidate_module_init().
169		 */
170		if (entry->code && kernel_text_address(entry->code))
171			arch_jump_label_transform(entry, enable);
172	}
173}
174
175static enum jump_label_type jump_label_type(struct static_key *key)
176{
177	bool true_branch = jump_label_get_branch_default(key);
178	bool state = static_key_enabled(key);
179
180	if ((!true_branch && state) || (true_branch && !state))
181		return JUMP_LABEL_ENABLE;
182
183	return JUMP_LABEL_DISABLE;
184}
185
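
The two-clause condition above is simply exclusive-or: the branch must be patched to its non-default state exactly when the key's compile-time default and its run-time state disagree. An equivalent formulation (not the code used here, though later kernels adopt the XOR form):

	return (true_branch ^ state) ? JUMP_LABEL_ENABLE : JUMP_LABEL_DISABLE;
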
186void __init jump_label_init(void)
187{
188	struct jump_entry *iter_start = __start___jump_table;
189	struct jump_entry *iter_stop = __stop___jump_table;
190	struct static_key *key = NULL;
191	struct jump_entry *iter;
192
193	jump_label_lock();
194	jump_label_sort_entries(iter_start, iter_stop);
195
196	for (iter = iter_start; iter < iter_stop; iter++) {
197		struct static_key *iterk;
198
199		iterk = (struct static_key *)(unsigned long)iter->key;
200		arch_jump_label_transform_static(iter, jump_label_type(iterk));
201		if (iterk == key)
202			continue;
203
204		key = iterk;
205		/*
206		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
207		 */
208		*((unsigned long *)&key->entries) += (unsigned long)iter;
209#ifdef CONFIG_MODULES
210		key->next = NULL;
211#endif
212	}
213	jump_label_unlock();
214}
215
216#ifdef CONFIG_MODULES
217
218struct static_key_mod {
219	struct static_key_mod *next;
220	struct jump_entry *entries;
221	struct module *mod;
222};
223
224static int __jump_label_mod_text_reserved(void *start, void *end)
225{
226	struct module *mod;
227
228	mod = __module_text_address((unsigned long)start);
229	if (!mod)
230		return 0;
231
232	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
233
234	return __jump_label_text_reserved(mod->jump_entries,
235				mod->jump_entries + mod->num_jump_entries,
236				start, end);
237}
238
239static void __jump_label_mod_update(struct static_key *key, int enable)
240{
241	struct static_key_mod *mod = key->next;
242
243	while (mod) {
244		struct module *m = mod->mod;
245
246		__jump_label_update(key, mod->entries,
247				    m->jump_entries + m->num_jump_entries,
248				    enable);
249		mod = mod->next;
250	}
251}
252
253/***
254 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
255 * @mod: module to patch
256 *
257 * Allow for run-time selection of the optimal nops. Before the module
258 * loads, patch these with arch_get_jump_label_nop(), which is specified by
259 * the arch specific jump label code.
260 */
261void jump_label_apply_nops(struct module *mod)
262{
263	struct jump_entry *iter_start = mod->jump_entries;
264	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
265	struct jump_entry *iter;
266
267	/* if the module doesn't have jump label entries, just return */
268	if (iter_start == iter_stop)
269		return;
270
271	for (iter = iter_start; iter < iter_stop; iter++) {
272		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
273	}
274}
275
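
This runs while the module is still being loaded, before any of its code can execute. On x86 the call site is module_finalize() in arch/x86/kernel/module.c; a condensed sketch (error handling and the other patching passes omitted):

	int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
			    struct module *me)
	{
		/* ... alternatives and paravirt patching happen first ... */
		jump_label_apply_nops(me);	/* rewrite sites to ideal NOPs */
		return 0;
	}
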
276static int jump_label_add_module(struct module *mod)
277{
278	struct jump_entry *iter_start = mod->jump_entries;
279	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
280	struct jump_entry *iter;
281	struct static_key *key = NULL;
282	struct static_key_mod *jlm;
283
284	/* if the module doesn't have jump label entries, just return */
285	if (iter_start == iter_stop)
286		return 0;
287
288	jump_label_sort_entries(iter_start, iter_stop);
289
290	for (iter = iter_start; iter < iter_stop; iter++) {
291		struct static_key *iterk;
292
293		iterk = (struct static_key *)(unsigned long)iter->key;
294		if (iterk == key)
295			continue;
296
297		key = iterk;
298		if (__module_address(iter->key) == mod) {
299			/*
300			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
301			 */
302			*((unsigned long *)&key->entries) += (unsigned long)iter;
303			key->next = NULL;
304			continue;
305		}
306		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
307		if (!jlm)
308			return -ENOMEM;
309		jlm->mod = mod;
310		jlm->entries = iter;
311		jlm->next = key->next;
312		key->next = jlm;
313
314		if (jump_label_type(key) == JUMP_LABEL_ENABLE)
315			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
316	}
317
318	return 0;
319}
320
321static void jump_label_del_module(struct module *mod)
322{
323	struct jump_entry *iter_start = mod->jump_entries;
324	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
325	struct jump_entry *iter;
326	struct static_key *key = NULL;
327	struct static_key_mod *jlm, **prev;
328
329	for (iter = iter_start; iter < iter_stop; iter++) {
330		if (iter->key == (jump_label_t)(unsigned long)key)
331			continue;
332
333		key = (struct static_key *)(unsigned long)iter->key;
334
335		if (__module_address(iter->key) == mod)
336			continue;
337
338		prev = &key->next;
339		jlm = key->next;
340
341		while (jlm && jlm->mod != mod) {
342			prev = &jlm->next;
343			jlm = jlm->next;
344		}
345
346		if (jlm) {
347			*prev = jlm->next;
348			kfree(jlm);
349		}
350	}
351}
352
353static void jump_label_invalidate_module_init(struct module *mod)
354{
355	struct jump_entry *iter_start = mod->jump_entries;
356	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
357	struct jump_entry *iter;
358
359	for (iter = iter_start; iter < iter_stop; iter++) {
360		if (within_module_init(iter->code, mod))
361			iter->code = 0;
362	}
363}
364
365static int
366jump_label_module_notify(struct notifier_block *self, unsigned long val,
367			 void *data)
368{
369	struct module *mod = data;
370	int ret = 0;
371
372	switch (val) {
373	case MODULE_STATE_COMING:
374		jump_label_lock();
375		ret = jump_label_add_module(mod);
376		if (ret)
377			jump_label_del_module(mod);
378		jump_label_unlock();
379		break;
380	case MODULE_STATE_GOING:
381		jump_label_lock();
382		jump_label_del_module(mod);
383		jump_label_unlock();
384		break;
385	case MODULE_STATE_LIVE:
386		jump_label_lock();
387		jump_label_invalidate_module_init(mod);
388		jump_label_unlock();
389		break;
390	}
391
392	return notifier_from_errno(ret);
393}
394
395struct notifier_block jump_label_module_nb = {
396	.notifier_call = jump_label_module_notify,
397	.priority = 1, /* higher than tracepoints */
398};
399
400static __init int jump_label_init_module(void)
401{
402	return register_module_notifier(&jump_label_module_nb);
403}
404early_initcall(jump_label_init_module);
405
406#endif /* CONFIG_MODULES */
407
408/***
409 * jump_label_text_reserved - check if addr range is reserved
410 * @start: start text addr
411 * @end: end text addr
412 *
413 * checks if the text addr located between @start and @end
414 * overlaps with any of the jump label patch addresses. Code
415 * that wants to modify kernel text should first verify that
416 * it does not overlap with any of the jump label addresses.
417 * Caller must hold jump_label_mutex.
418 *
419 * returns 1 if there is an overlap, 0 otherwise
420 */
421int jump_label_text_reserved(void *start, void *end)
422{
423	int ret = __jump_label_text_reserved(__start___jump_table,
424			__stop___jump_table, start, end);
425
426	if (ret)
427		return ret;
428
429#ifdef CONFIG_MODULES
430	ret = __jump_label_mod_text_reserved(start, end);
431#endif
432	return ret;
433}
434
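
Code patchers consult this before touching kernel text. A sketch of the caller-side pattern, modeled on the kprobes registration path (names abbreviated):

	jump_label_lock();
	if (jump_label_text_reserved(p->addr, p->addr)) {
		ret = -EBUSY;	/* would clobber a jump-label patch site */
		goto out;
	}
	/* ... arm the probe ... */
out:
	jump_label_unlock();
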
435static void jump_label_update(struct static_key *key, int enable)
436{
437	struct jump_entry *stop = __stop___jump_table;
438	struct jump_entry *entry = jump_label_get_entries(key);
439
440#ifdef CONFIG_MODULES
441	struct module *mod = __module_address((unsigned long)key);
442
443	__jump_label_mod_update(key, enable);
444
445	if (mod)
446		stop = mod->jump_entries + mod->num_jump_entries;
447#endif
448	/* if there are no users, entry can be NULL */
449	if (entry)
450		__jump_label_update(key, entry, stop, enable);
451}
452
453#endif /* HAVE_JUMP_LABEL */
 
v4.10.11
  1/*
  2 * jump label support
  3 *
  4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
  5 * Copyright (C) 2011 Peter Zijlstra
  6 *
  7 */
  8#include <linux/memory.h>
  9#include <linux/uaccess.h>
 10#include <linux/module.h>
 11#include <linux/list.h>
 12#include <linux/slab.h>
 13#include <linux/sort.h>
 14#include <linux/err.h>
 15#include <linux/static_key.h>
 16#include <linux/jump_label_ratelimit.h>
 17#include <linux/bug.h>
 18
 19#ifdef HAVE_JUMP_LABEL
 20
 21/* mutex to protect coming/going of the jump_label table */
 22static DEFINE_MUTEX(jump_label_mutex);
 23
 24void jump_label_lock(void)
 25{
 26	mutex_lock(&jump_label_mutex);
 27}
 28
 29void jump_label_unlock(void)
 30{
 31	mutex_unlock(&jump_label_mutex);
 32}
 33
 34static int jump_label_cmp(const void *a, const void *b)
 35{
 36	const struct jump_entry *jea = a;
 37	const struct jump_entry *jeb = b;
 38
 39	if (jea->key < jeb->key)
 40		return -1;
 41
 42	if (jea->key > jeb->key)
 43		return 1;
 44
 45	return 0;
 46}
 47
 48static void
 49jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 50{
 51	unsigned long size;
 52
 53	size = (((unsigned long)stop - (unsigned long)start)
 54					/ sizeof(struct jump_entry));
 55	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 56}
 57
 58static void jump_label_update(struct static_key *key);
 59
 60/*
 61 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 62 * The use of 'atomic_read()' requires atomic.h, and it's problematic for some
 63 * kernel headers such as kernel.h and others. Since static_key_count() is not
 64 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's ok
 65 * to have it be a function here. Similarly, for 'static_key_enable()' and
 66 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 67 * to be included from most/all places for HAVE_JUMP_LABEL.
 68 */
 69int static_key_count(struct static_key *key)
 70{
 71	/*
 72	 * -1 means the first static_key_slow_inc() is in progress.
 73	 *  static_key_enabled() must return true, so return 1 here.
 74	 */
 75	int n = atomic_read(&key->enabled);
 76
 77	return n >= 0 ? n : 1;
 78}
 79EXPORT_SYMBOL_GPL(static_key_count);
 80
 81void static_key_enable(struct static_key *key)
 82{
 83	int count = static_key_count(key);
 84
 85	WARN_ON_ONCE(count < 0 || count > 1);
 86
 87	if (!count)
 88		static_key_slow_inc(key);
 89}
 90EXPORT_SYMBOL_GPL(static_key_enable);
 91
 92void static_key_disable(struct static_key *key)
 93{
 94	int count = static_key_count(key);
 95
 96	WARN_ON_ONCE(count < 0 || count > 1);
 97
 98	if (count)
 99		static_key_slow_dec(key);
100}
101EXPORT_SYMBOL_GPL(static_key_disable);
102
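
static_key_enable()/static_key_disable() give boolean on/off semantics on top of the reference-counted inc/dec, and pair with the static_branch interface that newer code prefers. A minimal sketch (the key and helper names are hypothetical):

	static DEFINE_STATIC_KEY_FALSE(fastpath_key);

	static void rx_path(void)
	{
		/* Out of line unless the key has been switched on. */
		if (static_branch_unlikely(&fastpath_key))
			handle_fastpath();		/* hypothetical */
	}

	static void fastpath_set(bool on)
	{
		if (on)
			static_branch_enable(&fastpath_key);	/* wraps static_key_enable() */
		else
			static_branch_disable(&fastpath_key);
	}
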
103void static_key_slow_inc(struct static_key *key)
104{
105	int v, v1;
106
107	STATIC_KEY_CHECK_USE();
108
109	/*
110	 * Careful if we get concurrent static_key_slow_inc() calls;
111	 * later calls must wait for the first one to _finish_ the
112	 * jump_label_update() process.  At the same time, however,
113	 * the jump_label_update() call below wants to see
114	 * static_key_enabled(&key) for jumps to be updated properly.
115	 *
116	 * So give a special meaning to negative key->enabled: it sends
117	 * static_key_slow_inc() down the slow path, and it is non-zero
118	 * so it counts as "enabled" in jump_label_update().  Note that
119	 * atomic_inc_unless_negative() checks >= 0, so roll our own.
120	 */
121	for (v = atomic_read(&key->enabled); v > 0; v = v1) {
122		v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
123		if (likely(v1 == v))
124			return;
125	}
126
127	jump_label_lock();
128	if (atomic_read(&key->enabled) == 0) {
129		atomic_set(&key->enabled, -1);
130		jump_label_update(key);
131		atomic_set(&key->enabled, 1);
132	} else {
133		atomic_inc(&key->enabled);
134	}
135	jump_label_unlock();
136}
137EXPORT_SYMBOL_GPL(static_key_slow_inc);
138
139static void __static_key_slow_dec(struct static_key *key,
140		unsigned long rate_limit, struct delayed_work *work)
141{
142	/*
143	 * The negative count check is valid even when a negative
144	 * key->enabled is in use by static_key_slow_inc(); a
145	 * __static_key_slow_dec() before the first static_key_slow_inc()
146	 * returns is unbalanced, because all other static_key_slow_inc()
147	 * instances block while the update is in progress.
148	 */
149	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
150		WARN(atomic_read(&key->enabled) < 0,
151		     "jump label: negative count!\n");
152		return;
153	}
154
155	if (rate_limit) {
156		atomic_inc(&key->enabled);
157		schedule_delayed_work(work, rate_limit);
158	} else {
159		jump_label_update(key);
160	}
161	jump_label_unlock();
162}
163
164static void jump_label_update_timeout(struct work_struct *work)
165{
166	struct static_key_deferred *key =
167		container_of(work, struct static_key_deferred, work.work);
168	__static_key_slow_dec(&key->key, 0, NULL);
169}
170
171void static_key_slow_dec(struct static_key *key)
172{
173	STATIC_KEY_CHECK_USE();
174	__static_key_slow_dec(key, 0, NULL);
175}
176EXPORT_SYMBOL_GPL(static_key_slow_dec);
177
178void static_key_slow_dec_deferred(struct static_key_deferred *key)
179{
180	STATIC_KEY_CHECK_USE();
181	__static_key_slow_dec(&key->key, key->timeout, &key->work);
182}
183EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
184
185void static_key_deferred_flush(struct static_key_deferred *key)
186{
187	STATIC_KEY_CHECK_USE();
188	flush_delayed_work(&key->work);
189}
190EXPORT_SYMBOL_GPL(static_key_deferred_flush);
191
192void jump_label_rate_limit(struct static_key_deferred *key,
193		unsigned long rl)
194{
195	STATIC_KEY_CHECK_USE();
196	key->timeout = rl;
197	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
198}
199EXPORT_SYMBOL_GPL(jump_label_rate_limit);
200
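
static_key_deferred_flush() is new relative to v3.5: teardown code uses it to wait out a still-queued deferred decrement so that no text patching can happen after the key's user is gone. A hedged sketch:

	static void subsys_teardown(struct static_key_deferred *key)
	{
		/* Wait for any pending rate-limited static_key_slow_dec(). */
		static_key_deferred_flush(key);
	}
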
201static int addr_conflict(struct jump_entry *entry, void *start, void *end)
202{
203	if (entry->code <= (unsigned long)end &&
204		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
205		return 1;
206
207	return 0;
208}
209
210static int __jump_label_text_reserved(struct jump_entry *iter_start,
211		struct jump_entry *iter_stop, void *start, void *end)
212{
213	struct jump_entry *iter;
214
215	iter = iter_start;
216	while (iter < iter_stop) {
217		if (addr_conflict(iter, start, end))
218			return 1;
219		iter++;
220	}
221
222	return 0;
223}
224
225/*
226 * Update code which is definitely not currently executing.
227 * Architectures which need heavyweight synchronization to modify
228 * running code can override this to make the non-live update case
229 * cheaper.
230 */
231void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
232					    enum jump_label_type type)
233{
234	arch_jump_label_transform(entry, type);
235}
236
237static inline struct jump_entry *static_key_entries(struct static_key *key)
238{
239	return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
240}
241
242static inline bool static_key_type(struct static_key *key)
243{
244	return (unsigned long)key->entries & JUMP_TYPE_MASK;
245}
246
247static inline struct static_key *jump_entry_key(struct jump_entry *entry)
248{
249	return (struct static_key *)((unsigned long)entry->key & ~1UL);
250}
251
252static bool jump_entry_branch(struct jump_entry *entry)
253{
254	return (unsigned long)entry->key & 1UL;
255}
256
257static enum jump_label_type jump_label_type(struct jump_entry *entry)
258{
259	struct static_key *key = jump_entry_key(entry);
260	bool enabled = static_key_enabled(key);
261	bool branch = jump_entry_branch(entry);
262
263	/* See the comment in linux/jump_label.h */
264	return enabled ^ branch;
265}
266
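
Here `branch` is the low bit of entry->key: set for static_branch_likely() sites, clear for static_branch_unlikely() ones. The XOR therefore reproduces the truth table documented in linux/jump_label.h:

	enabled  branch   result
	   0       0      JUMP_LABEL_NOP  (fall through to the inline false path)
	   0       1      JUMP_LABEL_JMP  (jump away from the inline true path)
	   1       0      JUMP_LABEL_JMP
	   1       1      JUMP_LABEL_NOP
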
267static void __jump_label_update(struct static_key *key,
268				struct jump_entry *entry,
269				struct jump_entry *stop)
270{
271	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
272		/*
273		 * entry->code set to 0 invalidates module init text sections
274		 * kernel_text_address() verifies we are not in core kernel
275		 * init code, see jump_label_invalidate_module_init().
276		 */
277		if (entry->code && kernel_text_address(entry->code))
278			arch_jump_label_transform(entry, jump_label_type(entry));
279	}
280}
281
282void __init jump_label_init(void)
283{
284	struct jump_entry *iter_start = __start___jump_table;
285	struct jump_entry *iter_stop = __stop___jump_table;
286	struct static_key *key = NULL;
287	struct jump_entry *iter;
288
289	/*
290	 * Since we are initializing the static_key.enabled field
291	 * with the 'raw' int values (to avoid pulling in atomic.h) in
292	 * jump_label.h, let's make sure that is safe. There are only two
293	 * cases to check since we initialize to 0 or 1.
294	 */
295	BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
296	BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);
297
298	if (static_key_initialized)
299		return;
300
301	jump_label_lock();
302	jump_label_sort_entries(iter_start, iter_stop);
303
304	for (iter = iter_start; iter < iter_stop; iter++) {
305		struct static_key *iterk;
306
307		/* rewrite NOPs */
308		if (jump_label_type(iter) == JUMP_LABEL_NOP)
309			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
310
311		iterk = jump_entry_key(iter);
312		if (iterk == key)
313			continue;
314
315		key = iterk;
316		/*
317		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
318		 */
319		*((unsigned long *)&key->entries) += (unsigned long)iter;
320#ifdef CONFIG_MODULES
321		key->next = NULL;
322#endif
323	}
324	static_key_initialized = true;
325	jump_label_unlock();
326}
327
328#ifdef CONFIG_MODULES
329
330static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
331{
332	struct static_key *key = jump_entry_key(entry);
333	bool type = static_key_type(key);
334	bool branch = jump_entry_branch(entry);
335
336	/* See the comment in linux/jump_label.h */
337	return type ^ branch;
338}
339
340struct static_key_mod {
341	struct static_key_mod *next;
342	struct jump_entry *entries;
343	struct module *mod;
344};
345
346static int __jump_label_mod_text_reserved(void *start, void *end)
347{
348	struct module *mod;
349
350	preempt_disable();
351	mod = __module_text_address((unsigned long)start);
352	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
353	preempt_enable();
354
355	if (!mod)
356		return 0;
357
358
359	return __jump_label_text_reserved(mod->jump_entries,
360				mod->jump_entries + mod->num_jump_entries,
361				start, end);
362}
363
364static void __jump_label_mod_update(struct static_key *key)
365{
366	struct static_key_mod *mod;
367
368	for (mod = key->next; mod; mod = mod->next) {
369		struct module *m = mod->mod;
370
371		__jump_label_update(key, mod->entries,
372				    m->jump_entries + m->num_jump_entries);
373	}
374}
375
376/***
377 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
378 * @mod: module to patch
379 *
380 * Allow for run-time selection of the optimal nops. Before the module
381 * loads, patch these with arch_get_jump_label_nop(), which is specified by
382 * the arch specific jump label code.
383 */
384void jump_label_apply_nops(struct module *mod)
385{
386	struct jump_entry *iter_start = mod->jump_entries;
387	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
388	struct jump_entry *iter;
389
390	/* if the module doesn't have jump label entries, just return */
391	if (iter_start == iter_stop)
392		return;
393
394	for (iter = iter_start; iter < iter_stop; iter++) {
395		/* Only write NOPs for arch_branch_static(). */
396		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
397			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
398	}
399}
400
401static int jump_label_add_module(struct module *mod)
402{
403	struct jump_entry *iter_start = mod->jump_entries;
404	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
405	struct jump_entry *iter;
406	struct static_key *key = NULL;
407	struct static_key_mod *jlm;
408
409	/* if the module doesn't have jump label entries, just return */
410	if (iter_start == iter_stop)
411		return 0;
412
413	jump_label_sort_entries(iter_start, iter_stop);
414
415	for (iter = iter_start; iter < iter_stop; iter++) {
416		struct static_key *iterk;
417
418		iterk = jump_entry_key(iter);
419		if (iterk == key)
420			continue;
421
422		key = iterk;
423		if (within_module(iter->key, mod)) {
424			/*
425			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
426			 */
427			*((unsigned long *)&key->entries) += (unsigned long)iter;
428			key->next = NULL;
429			continue;
430		}
431		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
432		if (!jlm)
433			return -ENOMEM;
434		jlm->mod = mod;
435		jlm->entries = iter;
436		jlm->next = key->next;
437		key->next = jlm;
438
439		/* Only update if we've changed from our initial state */
440		if (jump_label_type(iter) != jump_label_init_type(iter))
441			__jump_label_update(key, iter, iter_stop);
442	}
443
444	return 0;
445}
446
447static void jump_label_del_module(struct module *mod)
448{
449	struct jump_entry *iter_start = mod->jump_entries;
450	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
451	struct jump_entry *iter;
452	struct static_key *key = NULL;
453	struct static_key_mod *jlm, **prev;
454
455	for (iter = iter_start; iter < iter_stop; iter++) {
456		if (jump_entry_key(iter) == key)
457			continue;
458
459		key = jump_entry_key(iter);
460
461		if (within_module(iter->key, mod))
462			continue;
463
464		prev = &key->next;
465		jlm = key->next;
466
467		while (jlm && jlm->mod != mod) {
468			prev = &jlm->next;
469			jlm = jlm->next;
470		}
471
472		if (jlm) {
473			*prev = jlm->next;
474			kfree(jlm);
475		}
476	}
477}
478
479static void jump_label_invalidate_module_init(struct module *mod)
480{
481	struct jump_entry *iter_start = mod->jump_entries;
482	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
483	struct jump_entry *iter;
484
485	for (iter = iter_start; iter < iter_stop; iter++) {
486		if (within_module_init(iter->code, mod))
487			iter->code = 0;
488	}
489}
490
491static int
492jump_label_module_notify(struct notifier_block *self, unsigned long val,
493			 void *data)
494{
495	struct module *mod = data;
496	int ret = 0;
497
498	switch (val) {
499	case MODULE_STATE_COMING:
500		jump_label_lock();
501		ret = jump_label_add_module(mod);
502		if (ret)
503			jump_label_del_module(mod);
504		jump_label_unlock();
505		break;
506	case MODULE_STATE_GOING:
507		jump_label_lock();
508		jump_label_del_module(mod);
509		jump_label_unlock();
510		break;
511	case MODULE_STATE_LIVE:
512		jump_label_lock();
513		jump_label_invalidate_module_init(mod);
514		jump_label_unlock();
515		break;
516	}
517
518	return notifier_from_errno(ret);
519}
520
521static struct notifier_block jump_label_module_nb = {
522	.notifier_call = jump_label_module_notify,
523	.priority = 1, /* higher than tracepoints */
524};
525
526static __init int jump_label_init_module(void)
527{
528	return register_module_notifier(&jump_label_module_nb);
529}
530early_initcall(jump_label_init_module);
531
532#endif /* CONFIG_MODULES */
533
534/***
535 * jump_label_text_reserved - check if addr range is reserved
536 * @start: start text addr
537 * @end: end text addr
538 *
539 * checks if the text addr located between @start and @end
540 * overlaps with any of the jump label patch addresses. Code
541 * that wants to modify kernel text should first verify that
542 * it does not overlap with any of the jump label addresses.
543 * Caller must hold jump_label_mutex.
544 *
545 * returns 1 if there is an overlap, 0 otherwise
546 */
547int jump_label_text_reserved(void *start, void *end)
548{
549	int ret = __jump_label_text_reserved(__start___jump_table,
550			__stop___jump_table, start, end);
551
552	if (ret)
553		return ret;
554
555#ifdef CONFIG_MODULES
556	ret = __jump_label_mod_text_reserved(start, end);
557#endif
558	return ret;
559}
560
561static void jump_label_update(struct static_key *key)
562{
563	struct jump_entry *stop = __stop___jump_table;
564	struct jump_entry *entry = static_key_entries(key);
565#ifdef CONFIG_MODULES
566	struct module *mod;
567
568	__jump_label_mod_update(key);
569
570	preempt_disable();
571	mod = __module_address((unsigned long)key);
572	if (mod)
573		stop = mod->jump_entries + mod->num_jump_entries;
574	preempt_enable();
575#endif
576	/* if there are no users, entry can be NULL */
577	if (entry)
578		__jump_label_update(key, entry, stop);
579}
580
581#ifdef CONFIG_STATIC_KEYS_SELFTEST
582static DEFINE_STATIC_KEY_TRUE(sk_true);
583static DEFINE_STATIC_KEY_FALSE(sk_false);
584
585static __init int jump_label_test(void)
586{
587	int i;
588
589	for (i = 0; i < 2; i++) {
590		WARN_ON(static_key_enabled(&sk_true.key) != true);
591		WARN_ON(static_key_enabled(&sk_false.key) != false);
592
593		WARN_ON(!static_branch_likely(&sk_true));
594		WARN_ON(!static_branch_unlikely(&sk_true));
595		WARN_ON(static_branch_likely(&sk_false));
596		WARN_ON(static_branch_unlikely(&sk_false));
597
598		static_branch_disable(&sk_true);
599		static_branch_enable(&sk_false);
600
601		WARN_ON(static_key_enabled(&sk_true.key) == true);
602		WARN_ON(static_key_enabled(&sk_false.key) == false);
603
604		WARN_ON(static_branch_likely(&sk_true));
605		WARN_ON(static_branch_unlikely(&sk_true));
606		WARN_ON(!static_branch_likely(&sk_false));
607		WARN_ON(!static_branch_unlikely(&sk_false));
608
609		static_branch_enable(&sk_true);
610		static_branch_disable(&sk_false);
611	}
612
613	return 0;
614}
615late_initcall(jump_label_test);
616#endif /* STATIC_KEYS_SELFTEST */
617
618#endif /* HAVE_JUMP_LABEL */