/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jea->key < jeb->key)
                return -1;

        if (jea->key > jeb->key)
                return 1;

        return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;

        size = (((unsigned long)stop - (unsigned long)start)
                / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, which is problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it is
 * OK to have it be a function here. Similarly for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
        /*
         * -1 means the first static_key_slow_inc() is in progress.
         * static_key_enabled() must return true, so return 1 here.
         */
        int n = atomic_read(&key->enabled);

        return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_enable(struct static_key *key)
{
        int count = static_key_count(key);

        WARN_ON_ONCE(count < 0 || count > 1);

        if (!count)
                static_key_slow_inc(key);
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable(struct static_key *key)
{
        int count = static_key_count(key);

        WARN_ON_ONCE(count < 0 || count > 1);

        if (count)
                static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_disable);
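
/*
 * Illustrative usage sketch (not part of this file; 'my_feature' and
 * my_feature_set() are hypothetical):
 *
 *      static struct static_key my_feature = STATIC_KEY_INIT_FALSE;
 *
 *      void my_feature_set(bool on)
 *      {
 *              if (on)
 *                      static_key_enable(&my_feature);
 *              else
 *                      static_key_disable(&my_feature);
 *      }
 *
 * Unlike static_key_slow_inc()/static_key_slow_dec(), which maintain a
 * reference count, enable/disable force the count to exactly 1 or 0 and
 * are idempotent; the WARN_ON_ONCE() above fires if a key is already
 * reference-counted past 1.
 */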

void static_key_slow_inc(struct static_key *key)
{
        int v, v1;

        STATIC_KEY_CHECK_USE();

        /*
         * Careful if we get concurrent static_key_slow_inc() calls;
         * later calls must wait for the first one to _finish_ the
         * jump_label_update() process. At the same time, however,
         * the jump_label_update() call below wants to see
         * static_key_enabled(&key) for jumps to be updated properly.
         *
         * So give a special meaning to negative key->enabled: it sends
         * static_key_slow_inc() down the slow path, and it is non-zero
         * so it counts as "enabled" in jump_label_update(). Note that
         * atomic_inc_unless_negative() checks >= 0, so roll our own.
         */
        for (v = atomic_read(&key->enabled); v > 0; v = v1) {
                v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
                if (likely(v1 == v))
                        return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                atomic_set(&key->enabled, 1);
        } else {
                atomic_inc(&key->enabled);
        }
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
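
/*
 * Hypothetical caller, for illustration only: a reference-counted user
 * (a tracepoint behaves roughly like this) registers and unregisters
 * interest in the key:
 *
 *      static struct static_key tp_key = STATIC_KEY_INIT_FALSE;
 *
 *      void tp_register(void)   { static_key_slow_inc(&tp_key); }
 *      void tp_unregister(void) { static_key_slow_dec(&tp_key); }
 *
 * Only the 0 -> 1 and 1 -> 0 transitions patch any code; the cmpxchg
 * loop above lets the 1 -> 2, 2 -> 3, ... increments proceed without
 * taking jump_label_mutex.
 */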

static void __static_key_slow_dec(struct static_key *key,
                unsigned long rate_limit, struct delayed_work *work)
{
        /*
         * The negative count check is valid even when a negative
         * key->enabled is in use by static_key_slow_inc(); a
         * __static_key_slow_dec() before the first static_key_slow_inc()
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                jump_label_update(key);
        }
        jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
        __static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        STATIC_KEY_CHECK_USE();
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
        STATIC_KEY_CHECK_USE();
        flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
                unsigned long rl)
{
        STATIC_KEY_CHECK_USE();
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
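
/*
 * Sketch of the deferred/rate-limited API (illustrative; the names below
 * are made up). Users that toggle a key at high frequency, such as perf
 * events, can batch the expensive disable-side patching by deferring it,
 * here by roughly 100ms worth of jiffies:
 *
 *      static struct static_key_deferred my_dkey;
 *
 *      void my_init(void)
 *      {
 *              jump_label_rate_limit(&my_dkey, HZ / 10);
 *      }
 *
 *      void my_get(void) { static_key_slow_inc(&my_dkey.key); }
 *      void my_put(void) { static_key_slow_dec_deferred(&my_dkey); }
 *
 * __static_key_slow_dec() re-increments the count and queues the delayed
 * work, so the real decrement (and any code patching) happens at most
 * once per 'timeout' via jump_label_update_timeout().
 */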

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (entry->code <= (unsigned long)end &&
            entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}
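
/*
 * Worked example (assuming JUMP_LABEL_NOP_SIZE == 5, as on x86): an entry
 * with code == 0x1000 occupies bytes [0x1000, 0x1005), so it conflicts
 * with any range whose start is below 0x1005 and whose (inclusive) end is
 * at or above 0x1000:
 *
 *      addr_conflict(entry, (void *)0x1004, (void *)0x1008) == 1
 *      addr_conflict(entry, (void *)0x1005, (void *)0x1008) == 0
 */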

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                                              enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
        return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
        return (unsigned long)key->entries & JUMP_TYPE_MASK;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
        return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
        return (unsigned long)entry->key & 1UL;
}
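
/*
 * Note on the tagging above: both schemes rely on sufficient alignment,
 * which leaves the low pointer bits free to carry flags. key->entries
 * carries the key's initial-true type in JUMP_TYPE_MASK, and entry->key
 * carries the branch (likely/unlikely) flag in bit 0. For example (with
 * a hypothetical address), entry->key == 0xffff880012345679 names the
 * static_key at 0xffff880012345678 with the branch bit set.
 */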

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool enabled = static_key_enabled(key);
        bool branch = jump_entry_branch(entry);

        /* See the comment in linux/jump_label.h */
        return enabled ^ branch;
}
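
/*
 * Spelled out, enabled ^ branch evaluates to:
 *
 *      enabled  branch  ->  result
 *      false    false       JUMP_LABEL_NOP
 *      false    true        JUMP_LABEL_JMP
 *      true     false       JUMP_LABEL_JMP
 *      true     true        JUMP_LABEL_NOP
 *
 * i.e. a "branch" (likely) entry uses the inverted instruction for the
 * same key state; see the full table in linux/jump_label.h.
 */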

static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                /*
                 * entry->code set to 0 invalidates module init text sections.
                 * kernel_text_address() verifies we are not in core kernel
                 * init code, see jump_label_invalidate_module_init().
                 */
                if (entry->code && kernel_text_address(entry->code))
                        arch_jump_label_transform(entry, jump_label_type(entry));
        }
}

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        /*
         * Since we are initializing the static_key.enabled field with the
         * 'raw' int values (to avoid pulling in atomic.h) in jump_label.h,
         * let's make sure that is safe. There are only two cases to check
         * since we initialize to 0 or 1.
         */
        BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
        BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

        if (static_key_initialized)
                return;

        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                /* rewrite NOPs */
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                /*
                 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
                 */
                *((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
                key->next = NULL;
#endif
        }
        static_key_initialized = true;
        jump_label_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
        bool branch = jump_entry_branch(entry);

        /* See the comment in linux/jump_label.h */
        return type ^ branch;
}

struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        preempt_disable();
        mod = __module_text_address((unsigned long)start);
        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
        preempt_enable();

        if (!mod)
                return 0;

        return __jump_label_text_reserved(mod->jump_entries,
                mod->jump_entries + mod->num_jump_entries,
                start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
        struct static_key_mod *mod;

        for (mod = key->next; mod; mod = mod->next) {
                struct module *m = mod->mod;

                __jump_label_update(key, mod->entries,
                                    m->jump_entries + m->num_jump_entries);
        }
}
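
/*
 * The chain walked above is built by jump_label_add_module(): for a key
 * defined in one module (or the core kernel) but used by others, key->next
 * links one struct static_key_mod per using module, e.g.
 *
 *      key->next -> { mod = A, entries = A's entries for key }
 *                -> { mod = B, entries = B's entries for key }
 *                -> NULL
 *
 * so a key update can reach entries living in every module's jump table.
 */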

/***
 * jump_label_apply_nops - patch module jump labels with arch_jump_label_transform_static()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_jump_label_transform_static(), which is
 * specified by the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                /* Only write NOPs for arch_branch_static(). */
                if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
        }
}

static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                if (within_module(iter->key, mod)) {
                        /*
                         * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
                         */
                        *((unsigned long *)&key->entries) += (unsigned long)iter;
                        key->next = NULL;
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = key->next;
                key->next = jlm;

                /* Only update if we've changed from our initial state */
                if (jump_label_type(iter) != jump_label_init_type(iter))
                        __jump_label_update(key, iter, iter_stop);
        }

        return 0;
}

static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (jump_entry_key(iter) == key)
                        continue;

                key = jump_entry_key(iter);

                if (within_module(iter->key, mod))
                        continue;

                prev = &key->next;
                jlm = key->next;

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                if (jlm) {
                        *prev = jlm->next;
                        kfree(jlm);
                }
        }
}

static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        switch (val) {
        case MODULE_STATE_COMING:
                jump_label_lock();
                ret = jump_label_add_module(mod);
                if (ret)
                        jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_GOING:
                jump_label_lock();
                jump_label_del_module(mod);
                jump_label_unlock();
                break;
        case MODULE_STATE_LIVE:
                jump_label_lock();
                jump_label_invalidate_module_init(mod);
                jump_label_unlock();
                break;
        }

        return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks if the text addresses between @start and @end overlap with
 * any of the jump label patch addresses. Code that wants to modify
 * kernel text should first verify that it does not overlap with any
 * of the jump label addresses. Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}
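
/*
 * Typical caller pattern (a sketch; kprobes and ftrace perform a check
 * along these lines before patching text):
 *
 *      jump_label_lock();
 *      if (jump_label_text_reserved(addr, addr + size - 1))
 *              ret = -EBUSY;
 *      jump_label_unlock();
 *
 * Note that @end is treated as inclusive by addr_conflict().
 */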

static void jump_label_update(struct static_key *key)
{
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry = static_key_entries(key);
#ifdef CONFIG_MODULES
        struct module *mod;

        __jump_label_mod_update(key);

        preempt_disable();
        mod = __module_address((unsigned long)key);
        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
        preempt_enable();
#endif
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                WARN_ON(static_key_enabled(&sk_true.key) != true);
                WARN_ON(static_key_enabled(&sk_false.key) != false);

                WARN_ON(!static_branch_likely(&sk_true));
                WARN_ON(!static_branch_unlikely(&sk_true));
                WARN_ON(static_branch_likely(&sk_false));
                WARN_ON(static_branch_unlikely(&sk_false));

                static_branch_disable(&sk_true);
                static_branch_enable(&sk_false);

                WARN_ON(static_key_enabled(&sk_true.key) == true);
                WARN_ON(static_key_enabled(&sk_false.key) == false);

                WARN_ON(static_branch_likely(&sk_true));
                WARN_ON(static_branch_unlikely(&sk_true));
                WARN_ON(!static_branch_likely(&sk_false));
                WARN_ON(!static_branch_unlikely(&sk_false));

                static_branch_enable(&sk_true);
                static_branch_disable(&sk_false);
        }

        return 0;
}
late_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */