v3.15: kernel/jump_label.c
  1/*
  2 * jump label support
  3 *
  4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
  5 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
  6 *
  7 */
  8#include <linux/memory.h>
  9#include <linux/uaccess.h>
 10#include <linux/module.h>
 11#include <linux/list.h>
 12#include <linux/slab.h>
 13#include <linux/sort.h>
 14#include <linux/err.h>
 15#include <linux/static_key.h>
 16#include <linux/jump_label_ratelimit.h>
 17
 18#ifdef HAVE_JUMP_LABEL
 19
 20/* mutex to protect coming/going of the jump_label table */
 21static DEFINE_MUTEX(jump_label_mutex);
 22
 23void jump_label_lock(void)
 24{
 25	mutex_lock(&jump_label_mutex);
 26}
 27
 28void jump_label_unlock(void)
 29{
 30	mutex_unlock(&jump_label_mutex);
 31}
 32
 33static int jump_label_cmp(const void *a, const void *b)
 34{
 35	const struct jump_entry *jea = a;
 36	const struct jump_entry *jeb = b;
 37
 38	if (jea->key < jeb->key)
 39		return -1;
 40
 41	if (jea->key > jeb->key)
 42		return 1;
 43
 44	return 0;
 45}
 46
 47static void
 48jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 49{
 50	unsigned long size;
 51
 52	size = (((unsigned long)stop - (unsigned long)start)
 53					/ sizeof(struct jump_entry));
 54	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 55}
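
Sorting by key groups each key's patch sites contiguously in the table; that
ordering is what lets __jump_label_update() below walk forward from a key's
first entry and stop at the first entry that belongs to a different key.
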
 56
 57static void jump_label_update(struct static_key *key, int enable);
 58
 59void static_key_slow_inc(struct static_key *key)
 60{
 61	STATIC_KEY_CHECK_USE();
 62	if (atomic_inc_not_zero(&key->enabled))
 63		return;
 64
 65	jump_label_lock();
 66	if (atomic_read(&key->enabled) == 0) {
 67		if (!jump_label_get_branch_default(key))
 68			jump_label_update(key, JUMP_LABEL_ENABLE);
 69		else
 70			jump_label_update(key, JUMP_LABEL_DISABLE);
 71	}
 72	atomic_inc(&key->enabled);
 73	jump_label_unlock();
 74}
 75EXPORT_SYMBOL_GPL(static_key_slow_inc);
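
A typical consumer of this v3.15-era API keeps a default-false key in its hot
path and flips it from slow-path control code. A minimal sketch, assuming
built-in kernel code; the my_* names are hypothetical, not from this file:

#include <linux/static_key.h>

static struct static_key my_feature_key = STATIC_KEY_INIT_FALSE;

static void my_do_rare_work(void)
{
	/* hypothetical slow path */
}

void my_hot_path(void)
{
	/* Compiled as a NOP until the key is enabled, then as a jump. */
	if (static_key_false(&my_feature_key))
		my_do_rare_work();
}

void my_feature_set(bool on)
{
	if (on)
		static_key_slow_inc(&my_feature_key);	/* first user patches NOP -> jump */
	else
		static_key_slow_dec(&my_feature_key);	/* last user patches jump -> NOP */
}
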
 76
 77static void __static_key_slow_dec(struct static_key *key,
 78		unsigned long rate_limit, struct delayed_work *work)
 79{
 80	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 81		WARN(atomic_read(&key->enabled) < 0,
 82		     "jump label: negative count!\n");
 83		return;
 84	}
 85
 86	if (rate_limit) {
 87		atomic_inc(&key->enabled);
 88		schedule_delayed_work(work, rate_limit);
 89	} else {
 90		if (!jump_label_get_branch_default(key))
 91			jump_label_update(key, JUMP_LABEL_DISABLE);
 92		else
 93			jump_label_update(key, JUMP_LABEL_ENABLE);
 94	}
 95	jump_label_unlock();
 96}
 97
 98static void jump_label_update_timeout(struct work_struct *work)
 99{
100	struct static_key_deferred *key =
101		container_of(work, struct static_key_deferred, work.work);
102	__static_key_slow_dec(&key->key, 0, NULL);
103}
104
105void static_key_slow_dec(struct static_key *key)
106{
107	STATIC_KEY_CHECK_USE();
108	__static_key_slow_dec(key, 0, NULL);
109}
110EXPORT_SYMBOL_GPL(static_key_slow_dec);
111
112void static_key_slow_dec_deferred(struct static_key_deferred *key)
113{
114	STATIC_KEY_CHECK_USE();
115	__static_key_slow_dec(&key->key, key->timeout, &key->work);
116}
117EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
118
119void jump_label_rate_limit(struct static_key_deferred *key,
120		unsigned long rl)
121{
122	STATIC_KEY_CHECK_USE();
123	key->timeout = rl;
124	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
125}
126EXPORT_SYMBOL_GPL(jump_label_rate_limit);
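
The deferred flavour is for keys that can toggle rapidly (perf's scheduling
events are the classic user): the final decrement is routed through the
delayed work above, so text patching happens at most once per timeout window.
A minimal sketch along the same lines; my_def_key and the my_* functions are
hypothetical:

#include <linux/jump_label_ratelimit.h>

static struct static_key_deferred my_def_key;

static int __init my_setup(void)
{
	/* Defer the real static_key_slow_dec() by at least HZ jiffies. */
	jump_label_rate_limit(&my_def_key, HZ);
	return 0;
}

static void my_event_start(void)
{
	static_key_slow_inc(&my_def_key.key);		/* enables immediately */
}

static void my_event_stop(void)
{
	static_key_slow_dec_deferred(&my_def_key);	/* disable lands later, batched */
}
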
127
128static int addr_conflict(struct jump_entry *entry, void *start, void *end)
129{
130	if (entry->code <= (unsigned long)end &&
131		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
132		return 1;
133
134	return 0;
135}
136
137static int __jump_label_text_reserved(struct jump_entry *iter_start,
138		struct jump_entry *iter_stop, void *start, void *end)
139{
140	struct jump_entry *iter;
141
142	iter = iter_start;
143	while (iter < iter_stop) {
144		if (addr_conflict(iter, start, end))
145			return 1;
146		iter++;
147	}
148
149	return 0;
150}
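
Concretely, with JUMP_LABEL_NOP_SIZE == 5 as on x86, a patch site at 0x1000
occupies [0x1000, 0x1004], and addr_conflict() flags any range with
start <= 0x1004 and end >= 0x1000; e.g. start = 0x1002, end = 0x1003
conflicts because 0x1000 <= 0x1003 and 0x1000 + 5 > 0x1002.
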
151
152/* 
153 * Update code which is definitely not currently executing.
154 * Architectures which need heavyweight synchronization to modify
155 * running code can override this to make the non-live update case
156 * cheaper.
157 */
158void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
159					    enum jump_label_type type)
160{
161	arch_jump_label_transform(entry, type);
162}
163
164static void __jump_label_update(struct static_key *key,
165				struct jump_entry *entry,
166				struct jump_entry *stop, int enable)
167{
168	for (; (entry < stop) &&
169	      (entry->key == (jump_label_t)(unsigned long)key);
170	      entry++) {
171		/*
172		 * entry->code set to 0 invalidates module init text sections;
173		 * kernel_text_address() verifies we are not in core kernel
174		 * init code, see jump_label_invalidate_module_init().
175		 */
176		if (entry->code && kernel_text_address(entry->code))
177			arch_jump_label_transform(entry, enable);
178	}
179}
180
181static enum jump_label_type jump_label_type(struct static_key *key)
182{
183	bool true_branch = jump_label_get_branch_default(key);
184	bool state = static_key_enabled(key);
185
186	if ((!true_branch && state) || (true_branch && !state))
187		return JUMP_LABEL_ENABLE;
188
189	return JUMP_LABEL_DISABLE;
190}
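
The compound condition is an exclusive-or: the site takes its jump form
exactly when the branch default and the current key state disagree.

	true_branch  state  ->  jump_label_type()
	false        false      JUMP_LABEL_DISABLE
	false        true       JUMP_LABEL_ENABLE
	true         false      JUMP_LABEL_ENABLE
	true         true       JUMP_LABEL_DISABLE

The v4.6 version below writes the same thing per-entry as enabled ^ branch.
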
191
192void __init jump_label_init(void)
193{
194	struct jump_entry *iter_start = __start___jump_table;
195	struct jump_entry *iter_stop = __stop___jump_table;
196	struct static_key *key = NULL;
197	struct jump_entry *iter;
198
199	jump_label_lock();
200	jump_label_sort_entries(iter_start, iter_stop);
201
202	for (iter = iter_start; iter < iter_stop; iter++) {
203		struct static_key *iterk;
204
205		iterk = (struct static_key *)(unsigned long)iter->key;
206		arch_jump_label_transform_static(iter, jump_label_type(iterk));
207		if (iterk == key)
208			continue;
209
210		key = iterk;
211		/*
212		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
213		 */
214		*((unsigned long *)&key->entries) += (unsigned long)iter;
215#ifdef CONFIG_MODULES
216		key->next = NULL;
217#endif
218	}
219	static_key_initialized = true;
220	jump_label_unlock();
221}
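
The += above works because jump_entry tables are word-aligned, so the low bit
of key->entries is free to carry the JUMP_LABEL_TRUE_BRANCH flag that the
static key definition macros seed into the field; adding the (even) table
address leaves that bit intact. The unpacking side lives in
include/linux/jump_label.h as jump_label_get_entries() and
jump_label_get_branch_default(); roughly (a sketch, with the flag mask
written as a literal 1UL):

static inline struct jump_entry *jump_label_get_entries(struct static_key *key)
{
	return (struct jump_entry *)((unsigned long)key->entries & ~1UL);
}

static inline bool jump_label_get_branch_default(struct static_key *key)
{
	return (unsigned long)key->entries & 1UL;
}
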
222
223#ifdef CONFIG_MODULES
224
225struct static_key_mod {
226	struct static_key_mod *next;
227	struct jump_entry *entries;
228	struct module *mod;
229};
230
231static int __jump_label_mod_text_reserved(void *start, void *end)
232{
233	struct module *mod;
234
235	mod = __module_text_address((unsigned long)start);
236	if (!mod)
237		return 0;
238
239	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
240
241	return __jump_label_text_reserved(mod->jump_entries,
242				mod->jump_entries + mod->num_jump_entries,
243				start, end);
244}
245
246static void __jump_label_mod_update(struct static_key *key, int enable)
247{
248	struct static_key_mod *mod = key->next;
249
250	while (mod) {
251		struct module *m = mod->mod;
252
253		__jump_label_update(key, mod->entries,
254				    m->jump_entries + m->num_jump_entries,
255				    enable);
256		mod = mod->next;
257	}
258}
259
260/***
261 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
262 * @mod: module to patch
263 *
264 * Allow for run-time selection of the optimal nops. Before the module
265 * loads patch these with arch_get_jump_label_nop(), which is specified by
266 * the arch specific jump label code.
267 */
268void jump_label_apply_nops(struct module *mod)
269{
270	struct jump_entry *iter_start = mod->jump_entries;
271	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
272	struct jump_entry *iter;
273
274	/* if the module doesn't have jump label entries, just return */
275	if (iter_start == iter_stop)
276		return;
277
278	for (iter = iter_start; iter < iter_stop; iter++) {
279		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
280	}
281}
282
283static int jump_label_add_module(struct module *mod)
284{
285	struct jump_entry *iter_start = mod->jump_entries;
286	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
287	struct jump_entry *iter;
288	struct static_key *key = NULL;
289	struct static_key_mod *jlm;
290
291	/* if the module doesn't have jump label entries, just return */
292	if (iter_start == iter_stop)
293		return 0;
294
295	jump_label_sort_entries(iter_start, iter_stop);
296
297	for (iter = iter_start; iter < iter_stop; iter++) {
298		struct static_key *iterk;
299
300		iterk = (struct static_key *)(unsigned long)iter->key;
301		if (iterk == key)
302			continue;
303
304		key = iterk;
305		if (__module_address(iter->key) == mod) {
306			/*
307			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
308			 */
309			*((unsigned long *)&key->entries) += (unsigned long)iter;
310			key->next = NULL;
311			continue;
312		}
313		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
314		if (!jlm)
315			return -ENOMEM;
316		jlm->mod = mod;
317		jlm->entries = iter;
318		jlm->next = key->next;
319		key->next = jlm;
320
321		if (jump_label_type(key) == JUMP_LABEL_ENABLE)
322			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
323	}
324
325	return 0;
326}
327
328static void jump_label_del_module(struct module *mod)
329{
330	struct jump_entry *iter_start = mod->jump_entries;
331	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
332	struct jump_entry *iter;
333	struct static_key *key = NULL;
334	struct static_key_mod *jlm, **prev;
335
336	for (iter = iter_start; iter < iter_stop; iter++) {
337		if (iter->key == (jump_label_t)(unsigned long)key)
338			continue;
339
340		key = (struct static_key *)(unsigned long)iter->key;
341
342		if (__module_address(iter->key) == mod)
343			continue;
344
345		prev = &key->next;
346		jlm = key->next;
347
348		while (jlm && jlm->mod != mod) {
349			prev = &jlm->next;
350			jlm = jlm->next;
351		}
352
353		if (jlm) {
354			*prev = jlm->next;
355			kfree(jlm);
356		}
357	}
358}
359
360static void jump_label_invalidate_module_init(struct module *mod)
361{
362	struct jump_entry *iter_start = mod->jump_entries;
363	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
364	struct jump_entry *iter;
365
366	for (iter = iter_start; iter < iter_stop; iter++) {
367		if (within_module_init(iter->code, mod))
368			iter->code = 0;
369	}
370}
371
372static int
373jump_label_module_notify(struct notifier_block *self, unsigned long val,
374			 void *data)
375{
376	struct module *mod = data;
377	int ret = 0;
378
379	switch (val) {
380	case MODULE_STATE_COMING:
381		jump_label_lock();
382		ret = jump_label_add_module(mod);
383		if (ret)
384			jump_label_del_module(mod);
385		jump_label_unlock();
386		break;
387	case MODULE_STATE_GOING:
388		jump_label_lock();
389		jump_label_del_module(mod);
390		jump_label_unlock();
391		break;
392	case MODULE_STATE_LIVE:
393		jump_label_lock();
394		jump_label_invalidate_module_init(mod);
395		jump_label_unlock();
396		break;
397	}
398
399	return notifier_from_errno(ret);
400}
401
402struct notifier_block jump_label_module_nb = {
403	.notifier_call = jump_label_module_notify,
404	.priority = 1, /* higher than tracepoints */
405};
406
407static __init int jump_label_init_module(void)
408{
409	return register_module_notifier(&jump_label_module_nb);
410}
411early_initcall(jump_label_init_module);
412
413#endif /* CONFIG_MODULES */
414
415/***
416 * jump_label_text_reserved - check if addr range is reserved
417 * @start: start text addr
418 * @end: end text addr
419 *
420 * checks if the text addr located between @start and @end
421 * overlaps with any of the jump label patch addresses. Code
422 * that wants to modify kernel text should first verify that
423 * it does not overlap with any of the jump label addresses.
424 * Caller must hold jump_label_mutex.
425 *
426 * returns 1 if there is an overlap, 0 otherwise
427 */
428int jump_label_text_reserved(void *start, void *end)
429{
430	int ret = __jump_label_text_reserved(__start___jump_table,
431			__stop___jump_table, start, end);
432
433	if (ret)
434		return ret;
435
436#ifdef CONFIG_MODULES
437	ret = __jump_label_mod_text_reserved(start, end);
438#endif
439	return ret;
440}
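
Code that patches kernel text is expected to probe this under the same mutex
before writing. A hedged sketch of the calling convention; patch_my_insn() is
a hypothetical patcher, not a kernel API:

static int patch_my_insn(void *addr, const void *insn, size_t len);

static int my_patch_text(void *addr, const void *insn, size_t len)
{
	int ret;

	jump_label_lock();
	if (jump_label_text_reserved(addr, addr + len - 1))
		ret = -EBUSY;	/* range belongs to a jump label site */
	else
		ret = patch_my_insn(addr, insn, len);
	jump_label_unlock();

	return ret;
}
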
441
442static void jump_label_update(struct static_key *key, int enable)
443{
444	struct jump_entry *stop = __stop___jump_table;
445	struct jump_entry *entry = jump_label_get_entries(key);
446
447#ifdef CONFIG_MODULES
448	struct module *mod = __module_address((unsigned long)key);
449
450	__jump_label_mod_update(key, enable);
451
452	if (mod)
453		stop = mod->jump_entries + mod->num_jump_entries;
454#endif
455	/* if there are no users, entry can be NULL */
456	if (entry)
457		__jump_label_update(key, entry, stop, enable);
458}
459
460#endif
 
v4.6: kernel/jump_label.c
  1/*
  2 * jump label support
  3 *
  4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
  5 * Copyright (C) 2011 Peter Zijlstra
  6 *
  7 */
  8#include <linux/memory.h>
  9#include <linux/uaccess.h>
 10#include <linux/module.h>
 11#include <linux/list.h>
 12#include <linux/slab.h>
 13#include <linux/sort.h>
 14#include <linux/err.h>
 15#include <linux/static_key.h>
 16#include <linux/jump_label_ratelimit.h>
 17
 18#ifdef HAVE_JUMP_LABEL
 19
 20/* mutex to protect coming/going of the jump_label table */
 21static DEFINE_MUTEX(jump_label_mutex);
 22
 23void jump_label_lock(void)
 24{
 25	mutex_lock(&jump_label_mutex);
 26}
 27
 28void jump_label_unlock(void)
 29{
 30	mutex_unlock(&jump_label_mutex);
 31}
 32
 33static int jump_label_cmp(const void *a, const void *b)
 34{
 35	const struct jump_entry *jea = a;
 36	const struct jump_entry *jeb = b;
 37
 38	if (jea->key < jeb->key)
 39		return -1;
 40
 41	if (jea->key > jeb->key)
 42		return 1;
 43
 44	return 0;
 45}
 46
 47static void
 48jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
 49{
 50	unsigned long size;
 51
 52	size = (((unsigned long)stop - (unsigned long)start)
 53					/ sizeof(struct jump_entry));
 54	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
 55}
 56
 57static void jump_label_update(struct static_key *key);
 58
 59void static_key_slow_inc(struct static_key *key)
 60{
 61	STATIC_KEY_CHECK_USE();
 62	if (atomic_inc_not_zero(&key->enabled))
 63		return;
 64
 65	jump_label_lock();
 66	if (atomic_inc_return(&key->enabled) == 1)
 67		jump_label_update(key);
 68	jump_label_unlock();
 69}
 70EXPORT_SYMBOL_GPL(static_key_slow_inc);
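
In v4.6 these primitives usually sit underneath the static_branch_likely()/
static_branch_unlikely() wrappers exercised by the self-test at the bottom of
this file. A minimal sketch of a consumer; the my_* names are hypothetical:

#include <linux/jump_label.h>

static DEFINE_STATIC_KEY_FALSE(my_key);

static void my_do_rare_work(void)
{
	/* hypothetical slow path */
}

void my_hot_path(void)
{
	/* Out-of-line (NOP at the site) until the key is enabled. */
	if (static_branch_unlikely(&my_key))
		my_do_rare_work();
}

void my_feature_set(bool on)
{
	if (on)
		static_branch_enable(&my_key);	/* wraps static_key_enable() */
	else
		static_branch_disable(&my_key);
}
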
 71
 72static void __static_key_slow_dec(struct static_key *key,
 73		unsigned long rate_limit, struct delayed_work *work)
 74{
 75	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
 76		WARN(atomic_read(&key->enabled) < 0,
 77		     "jump label: negative count!\n");
 78		return;
 79	}
 80
 81	if (rate_limit) {
 82		atomic_inc(&key->enabled);
 83		schedule_delayed_work(work, rate_limit);
 84	} else {
 85		jump_label_update(key);
 86	}
 87	jump_label_unlock();
 88}
 89
 90static void jump_label_update_timeout(struct work_struct *work)
 91{
 92	struct static_key_deferred *key =
 93		container_of(work, struct static_key_deferred, work.work);
 94	__static_key_slow_dec(&key->key, 0, NULL);
 95}
 96
 97void static_key_slow_dec(struct static_key *key)
 98{
 99	STATIC_KEY_CHECK_USE();
100	__static_key_slow_dec(key, 0, NULL);
101}
102EXPORT_SYMBOL_GPL(static_key_slow_dec);
103
104void static_key_slow_dec_deferred(struct static_key_deferred *key)
105{
106	STATIC_KEY_CHECK_USE();
107	__static_key_slow_dec(&key->key, key->timeout, &key->work);
108}
109EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
110
111void jump_label_rate_limit(struct static_key_deferred *key,
112		unsigned long rl)
113{
114	STATIC_KEY_CHECK_USE();
115	key->timeout = rl;
116	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
117}
118EXPORT_SYMBOL_GPL(jump_label_rate_limit);
119
120static int addr_conflict(struct jump_entry *entry, void *start, void *end)
121{
122	if (entry->code <= (unsigned long)end &&
123		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
124		return 1;
125
126	return 0;
127}
128
129static int __jump_label_text_reserved(struct jump_entry *iter_start,
130		struct jump_entry *iter_stop, void *start, void *end)
131{
132	struct jump_entry *iter;
133
134	iter = iter_start;
135	while (iter < iter_stop) {
136		if (addr_conflict(iter, start, end))
137			return 1;
138		iter++;
139	}
140
141	return 0;
142}
143
144/*
145 * Update code which is definitely not currently executing.
146 * Architectures which need heavyweight synchronization to modify
147 * running code can override this to make the non-live update case
148 * cheaper.
149 */
150void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
151					    enum jump_label_type type)
152{
153	arch_jump_label_transform(entry, type);
154}
155
156static inline struct jump_entry *static_key_entries(struct static_key *key)
157{
158	return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
159}
160
161static inline bool static_key_type(struct static_key *key)
162{
163	return (unsigned long)key->entries & JUMP_TYPE_MASK;
164}
165
166static inline struct static_key *jump_entry_key(struct jump_entry *entry)
167{
168	return (struct static_key *)((unsigned long)entry->key & ~1UL);
169}
170
171static bool jump_entry_branch(struct jump_entry *entry)
172{
173	return (unsigned long)entry->key & 1UL;
174}
175
176static enum jump_label_type jump_label_type(struct jump_entry *entry)
177{
178	struct static_key *key = jump_entry_key(entry);
179	bool enabled = static_key_enabled(key);
180	bool branch = jump_entry_branch(entry);
181
182	/* See the comment in linux/jump_label.h */
183	return enabled ^ branch;
184}
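
With JUMP_LABEL_NOP = 0 and JUMP_LABEL_JMP = 1, enabled ^ branch picks the
jump form exactly when the key's state disagrees with the branch's inline
default:

	enabled  branch  ->  jump_label_type()
	false    false       JUMP_LABEL_NOP
	false    true        JUMP_LABEL_JMP
	true     false       JUMP_LABEL_JMP
	true     true        JUMP_LABEL_NOP
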
185
186static void __jump_label_update(struct static_key *key,
187				struct jump_entry *entry,
188				struct jump_entry *stop)
189{
190	for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
191		/*
192		 * entry->code set to 0 invalidates module init text sections;
193		 * kernel_text_address() verifies we are not in core kernel
194		 * init code, see jump_label_invalidate_module_init().
195		 */
196		if (entry->code && kernel_text_address(entry->code))
197			arch_jump_label_transform(entry, jump_label_type(entry));
198	}
199}
200
201void __init jump_label_init(void)
202{
203	struct jump_entry *iter_start = __start___jump_table;
204	struct jump_entry *iter_stop = __stop___jump_table;
205	struct static_key *key = NULL;
206	struct jump_entry *iter;
207
208	jump_label_lock();
209	jump_label_sort_entries(iter_start, iter_stop);
210
211	for (iter = iter_start; iter < iter_stop; iter++) {
212		struct static_key *iterk;
213
214		/* rewrite NOPs */
215		if (jump_label_type(iter) == JUMP_LABEL_NOP)
216			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
217
218		iterk = jump_entry_key(iter);
219		if (iterk == key)
220			continue;
221
222		key = iterk;
223		/*
224		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
225		 */
226		*((unsigned long *)&key->entries) += (unsigned long)iter;
227#ifdef CONFIG_MODULES
228		key->next = NULL;
229#endif
230	}
231	static_key_initialized = true;
232	jump_label_unlock();
233}
234
235#ifdef CONFIG_MODULES
236
237static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
238{
239	struct static_key *key = jump_entry_key(entry);
240	bool type = static_key_type(key);
241	bool branch = jump_entry_branch(entry);
242
243	/* See the comment in linux/jump_label.h */
244	return type ^ branch;
245}
246
247struct static_key_mod {
248	struct static_key_mod *next;
249	struct jump_entry *entries;
250	struct module *mod;
251};
252
253static int __jump_label_mod_text_reserved(void *start, void *end)
254{
255	struct module *mod;
256
257	mod = __module_text_address((unsigned long)start);
258	if (!mod)
259		return 0;
260
261	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
262
263	return __jump_label_text_reserved(mod->jump_entries,
264				mod->jump_entries + mod->num_jump_entries,
265				start, end);
266}
267
268static void __jump_label_mod_update(struct static_key *key)
269{
270	struct static_key_mod *mod;
271
272	for (mod = key->next; mod; mod = mod->next) {
273		struct module *m = mod->mod;
274
275		__jump_label_update(key, mod->entries,
276				    m->jump_entries + m->num_jump_entries);
277	}
278}
279
280/***
281 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
282 * @mod: module to patch
283 *
284 * Allow for run-time selection of the optimal nops. Before the module
285 * loads patch these with arch_get_jump_label_nop(), which is specified by
286 * the arch specific jump label code.
287 */
288void jump_label_apply_nops(struct module *mod)
289{
290	struct jump_entry *iter_start = mod->jump_entries;
291	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
292	struct jump_entry *iter;
293
294	/* if the module doesn't have jump label entries, just return */
295	if (iter_start == iter_stop)
296		return;
297
298	for (iter = iter_start; iter < iter_stop; iter++) {
299		/* Only write NOPs for arch_branch_static(). */
300		if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
301			arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
302	}
303}
304
305static int jump_label_add_module(struct module *mod)
306{
307	struct jump_entry *iter_start = mod->jump_entries;
308	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
309	struct jump_entry *iter;
310	struct static_key *key = NULL;
311	struct static_key_mod *jlm;
312
313	/* if the module doesn't have jump label entries, just return */
314	if (iter_start == iter_stop)
315		return 0;
316
317	jump_label_sort_entries(iter_start, iter_stop);
318
319	for (iter = iter_start; iter < iter_stop; iter++) {
320		struct static_key *iterk;
321
322		iterk = jump_entry_key(iter);
323		if (iterk == key)
324			continue;
325
326		key = iterk;
327		if (within_module(iter->key, mod)) {
328			/*
329			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
330			 */
331			*((unsigned long *)&key->entries) += (unsigned long)iter;
332			key->next = NULL;
333			continue;
334		}
335		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
336		if (!jlm)
337			return -ENOMEM;
338		jlm->mod = mod;
339		jlm->entries = iter;
340		jlm->next = key->next;
341		key->next = jlm;
342
343		/* Only update if we've changed from our initial state */
344		if (jump_label_type(iter) != jump_label_init_type(iter))
345			__jump_label_update(key, iter, iter_stop);
346	}
347
348	return 0;
349}
350
351static void jump_label_del_module(struct module *mod)
352{
353	struct jump_entry *iter_start = mod->jump_entries;
354	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
355	struct jump_entry *iter;
356	struct static_key *key = NULL;
357	struct static_key_mod *jlm, **prev;
358
359	for (iter = iter_start; iter < iter_stop; iter++) {
360		if (jump_entry_key(iter) == key)
361			continue;
362
363		key = jump_entry_key(iter);
364
365		if (within_module(iter->key, mod))
366			continue;
367
368		prev = &key->next;
369		jlm = key->next;
370
371		while (jlm && jlm->mod != mod) {
372			prev = &jlm->next;
373			jlm = jlm->next;
374		}
375
376		if (jlm) {
377			*prev = jlm->next;
378			kfree(jlm);
379		}
380	}
381}
382
383static void jump_label_invalidate_module_init(struct module *mod)
384{
385	struct jump_entry *iter_start = mod->jump_entries;
386	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
387	struct jump_entry *iter;
388
389	for (iter = iter_start; iter < iter_stop; iter++) {
390		if (within_module_init(iter->code, mod))
391			iter->code = 0;
392	}
393}
394
395static int
396jump_label_module_notify(struct notifier_block *self, unsigned long val,
397			 void *data)
398{
399	struct module *mod = data;
400	int ret = 0;
401
402	switch (val) {
403	case MODULE_STATE_COMING:
404		jump_label_lock();
405		ret = jump_label_add_module(mod);
406		if (ret)
407			jump_label_del_module(mod);
408		jump_label_unlock();
409		break;
410	case MODULE_STATE_GOING:
411		jump_label_lock();
412		jump_label_del_module(mod);
413		jump_label_unlock();
414		break;
415	case MODULE_STATE_LIVE:
416		jump_label_lock();
417		jump_label_invalidate_module_init(mod);
418		jump_label_unlock();
419		break;
420	}
421
422	return notifier_from_errno(ret);
423}
424
425struct notifier_block jump_label_module_nb = {
426	.notifier_call = jump_label_module_notify,
427	.priority = 1, /* higher than tracepoints */
428};
429
430static __init int jump_label_init_module(void)
431{
432	return register_module_notifier(&jump_label_module_nb);
433}
434early_initcall(jump_label_init_module);
435
436#endif /* CONFIG_MODULES */
437
438/***
439 * jump_label_text_reserved - check if addr range is reserved
440 * @start: start text addr
441 * @end: end text addr
442 *
443 * checks if the text addr located between @start and @end
444 * overlaps with any of the jump label patch addresses. Code
445 * that wants to modify kernel text should first verify that
446 * it does not overlap with any of the jump label addresses.
447 * Caller must hold jump_label_mutex.
448 *
449 * returns 1 if there is an overlap, 0 otherwise
450 */
451int jump_label_text_reserved(void *start, void *end)
452{
453	int ret = __jump_label_text_reserved(__start___jump_table,
454			__stop___jump_table, start, end);
455
456	if (ret)
457		return ret;
458
459#ifdef CONFIG_MODULES
460	ret = __jump_label_mod_text_reserved(start, end);
461#endif
462	return ret;
463}
464
465static void jump_label_update(struct static_key *key)
466{
467	struct jump_entry *stop = __stop___jump_table;
468	struct jump_entry *entry = static_key_entries(key);
469#ifdef CONFIG_MODULES
470	struct module *mod;
471
472	__jump_label_mod_update(key);
473
474	preempt_disable();
475	mod = __module_address((unsigned long)key);
476	if (mod)
477		stop = mod->jump_entries + mod->num_jump_entries;
478	preempt_enable();
479#endif
480	/* if there are no users, entry can be NULL */
481	if (entry)
482		__jump_label_update(key, entry, stop);
483}
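
Note the change from v3.15: __module_address() is now bracketed by
preempt_disable()/preempt_enable(), since module lookup relies on RCU-sched
protection of the module list and must not race with module unload.
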
484
485#ifdef CONFIG_STATIC_KEYS_SELFTEST
486static DEFINE_STATIC_KEY_TRUE(sk_true);
487static DEFINE_STATIC_KEY_FALSE(sk_false);
488
489static __init int jump_label_test(void)
490{
491	int i;
492
493	for (i = 0; i < 2; i++) {
494		WARN_ON(static_key_enabled(&sk_true.key) != true);
495		WARN_ON(static_key_enabled(&sk_false.key) != false);
496
497		WARN_ON(!static_branch_likely(&sk_true));
498		WARN_ON(!static_branch_unlikely(&sk_true));
499		WARN_ON(static_branch_likely(&sk_false));
500		WARN_ON(static_branch_unlikely(&sk_false));
501
502		static_branch_disable(&sk_true);
503		static_branch_enable(&sk_false);
504
505		WARN_ON(static_key_enabled(&sk_true.key) == true);
506		WARN_ON(static_key_enabled(&sk_false.key) == false);
507
508		WARN_ON(static_branch_likely(&sk_true));
509		WARN_ON(static_branch_unlikely(&sk_true));
510		WARN_ON(!static_branch_likely(&sk_false));
511		WARN_ON(!static_branch_unlikely(&sk_false));
512
513		static_branch_enable(&sk_true);
514		static_branch_disable(&sk_false);
515	}
516
517	return 0;
518}
519late_initcall(jump_label_test);
520#endif /* STATIC_KEYS_SELFTEST */
521
522#endif /* HAVE_JUMP_LABEL */