Loading...
1/*
2 * jump label support
3 *
4 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
5 * Copyright (C) 2011 Peter Zijlstra
6 *
7 */
8#include <linux/memory.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/list.h>
12#include <linux/slab.h>
13#include <linux/sort.h>
14#include <linux/err.h>
15#include <linux/static_key.h>
16#include <linux/jump_label_ratelimit.h>
17#include <linux/bug.h>
18
19#ifdef HAVE_JUMP_LABEL
20
21/* mutex to protect coming/going of the the jump_label table */
22static DEFINE_MUTEX(jump_label_mutex);
23
24void jump_label_lock(void)
25{
26 mutex_lock(&jump_label_mutex);
27}
28
29void jump_label_unlock(void)
30{
31 mutex_unlock(&jump_label_mutex);
32}
33
34static int jump_label_cmp(const void *a, const void *b)
35{
36 const struct jump_entry *jea = a;
37 const struct jump_entry *jeb = b;
38
39 if (jea->key < jeb->key)
40 return -1;
41
42 if (jea->key > jeb->key)
43 return 1;
44
45 return 0;
46}
47
48static void
49jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
50{
51 unsigned long size;
52
53 size = (((unsigned long)stop - (unsigned long)start)
54 / sizeof(struct jump_entry));
55 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
56}
57
58static void jump_label_update(struct static_key *key);
59
60/*
61 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
62 * The use of 'atomic_read()' requires atomic.h and its problematic for some
63 * kernel headers such as kernel.h and others. Since static_key_count() is not
64 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case its ok
65 * to have it be a function here. Similarly, for 'static_key_enable()' and
66 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
67 * to be included from most/all places for HAVE_JUMP_LABEL.
68 */
69int static_key_count(struct static_key *key)
70{
71 /*
72 * -1 means the first static_key_slow_inc() is in progress.
73 * static_key_enabled() must return true, so return 1 here.
74 */
75 int n = atomic_read(&key->enabled);
76
77 return n >= 0 ? n : 1;
78}
79EXPORT_SYMBOL_GPL(static_key_count);
80
81void static_key_enable(struct static_key *key)
82{
83 int count = static_key_count(key);
84
85 WARN_ON_ONCE(count < 0 || count > 1);
86
87 if (!count)
88 static_key_slow_inc(key);
89}
90EXPORT_SYMBOL_GPL(static_key_enable);
91
92void static_key_disable(struct static_key *key)
93{
94 int count = static_key_count(key);
95
96 WARN_ON_ONCE(count < 0 || count > 1);
97
98 if (count)
99 static_key_slow_dec(key);
100}
101EXPORT_SYMBOL_GPL(static_key_disable);
102
103void static_key_slow_inc(struct static_key *key)
104{
105 int v, v1;
106
107 STATIC_KEY_CHECK_USE();
108
109 /*
110 * Careful if we get concurrent static_key_slow_inc() calls;
111 * later calls must wait for the first one to _finish_ the
112 * jump_label_update() process. At the same time, however,
113 * the jump_label_update() call below wants to see
114 * static_key_enabled(&key) for jumps to be updated properly.
115 *
116 * So give a special meaning to negative key->enabled: it sends
117 * static_key_slow_inc() down the slow path, and it is non-zero
118 * so it counts as "enabled" in jump_label_update(). Note that
119 * atomic_inc_unless_negative() checks >= 0, so roll our own.
120 */
121 for (v = atomic_read(&key->enabled); v > 0; v = v1) {
122 v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
123 if (likely(v1 == v))
124 return;
125 }
126
127 jump_label_lock();
128 if (atomic_read(&key->enabled) == 0) {
129 atomic_set(&key->enabled, -1);
130 jump_label_update(key);
131 atomic_set(&key->enabled, 1);
132 } else {
133 atomic_inc(&key->enabled);
134 }
135 jump_label_unlock();
136}
137EXPORT_SYMBOL_GPL(static_key_slow_inc);
138
139static void __static_key_slow_dec(struct static_key *key,
140 unsigned long rate_limit, struct delayed_work *work)
141{
142 /*
143 * The negative count check is valid even when a negative
144 * key->enabled is in use by static_key_slow_inc(); a
145 * __static_key_slow_dec() before the first static_key_slow_inc()
146 * returns is unbalanced, because all other static_key_slow_inc()
147 * instances block while the update is in progress.
148 */
149 if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
150 WARN(atomic_read(&key->enabled) < 0,
151 "jump label: negative count!\n");
152 return;
153 }
154
155 if (rate_limit) {
156 atomic_inc(&key->enabled);
157 schedule_delayed_work(work, rate_limit);
158 } else {
159 jump_label_update(key);
160 }
161 jump_label_unlock();
162}
163
164static void jump_label_update_timeout(struct work_struct *work)
165{
166 struct static_key_deferred *key =
167 container_of(work, struct static_key_deferred, work.work);
168 __static_key_slow_dec(&key->key, 0, NULL);
169}
170
171void static_key_slow_dec(struct static_key *key)
172{
173 STATIC_KEY_CHECK_USE();
174 __static_key_slow_dec(key, 0, NULL);
175}
176EXPORT_SYMBOL_GPL(static_key_slow_dec);
177
178void static_key_slow_dec_deferred(struct static_key_deferred *key)
179{
180 STATIC_KEY_CHECK_USE();
181 __static_key_slow_dec(&key->key, key->timeout, &key->work);
182}
183EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);
184
185void static_key_deferred_flush(struct static_key_deferred *key)
186{
187 STATIC_KEY_CHECK_USE();
188 flush_delayed_work(&key->work);
189}
190EXPORT_SYMBOL_GPL(static_key_deferred_flush);
191
192void jump_label_rate_limit(struct static_key_deferred *key,
193 unsigned long rl)
194{
195 STATIC_KEY_CHECK_USE();
196 key->timeout = rl;
197 INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
198}
199EXPORT_SYMBOL_GPL(jump_label_rate_limit);
200
201static int addr_conflict(struct jump_entry *entry, void *start, void *end)
202{
203 if (entry->code <= (unsigned long)end &&
204 entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
205 return 1;
206
207 return 0;
208}
209
210static int __jump_label_text_reserved(struct jump_entry *iter_start,
211 struct jump_entry *iter_stop, void *start, void *end)
212{
213 struct jump_entry *iter;
214
215 iter = iter_start;
216 while (iter < iter_stop) {
217 if (addr_conflict(iter, start, end))
218 return 1;
219 iter++;
220 }
221
222 return 0;
223}
224
225/*
226 * Update code which is definitely not currently executing.
227 * Architectures which need heavyweight synchronization to modify
228 * running code can override this to make the non-live update case
229 * cheaper.
230 */
231void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
232 enum jump_label_type type)
233{
234 arch_jump_label_transform(entry, type);
235}
236
237static inline struct jump_entry *static_key_entries(struct static_key *key)
238{
239 return (struct jump_entry *)((unsigned long)key->entries & ~JUMP_TYPE_MASK);
240}
241
242static inline bool static_key_type(struct static_key *key)
243{
244 return (unsigned long)key->entries & JUMP_TYPE_MASK;
245}
246
247static inline struct static_key *jump_entry_key(struct jump_entry *entry)
248{
249 return (struct static_key *)((unsigned long)entry->key & ~1UL);
250}
251
252static bool jump_entry_branch(struct jump_entry *entry)
253{
254 return (unsigned long)entry->key & 1UL;
255}
256
257static enum jump_label_type jump_label_type(struct jump_entry *entry)
258{
259 struct static_key *key = jump_entry_key(entry);
260 bool enabled = static_key_enabled(key);
261 bool branch = jump_entry_branch(entry);
262
263 /* See the comment in linux/jump_label.h */
264 return enabled ^ branch;
265}
266
267static void __jump_label_update(struct static_key *key,
268 struct jump_entry *entry,
269 struct jump_entry *stop)
270{
271 for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
272 /*
273 * entry->code set to 0 invalidates module init text sections
274 * kernel_text_address() verifies we are not in core kernel
275 * init code, see jump_label_invalidate_module_init().
276 */
277 if (entry->code && kernel_text_address(entry->code))
278 arch_jump_label_transform(entry, jump_label_type(entry));
279 }
280}
281
282void __init jump_label_init(void)
283{
284 struct jump_entry *iter_start = __start___jump_table;
285 struct jump_entry *iter_stop = __stop___jump_table;
286 struct static_key *key = NULL;
287 struct jump_entry *iter;
288
289 /*
290 * Since we are initializing the static_key.enabled field with
291 * with the 'raw' int values (to avoid pulling in atomic.h) in
292 * jump_label.h, let's make sure that is safe. There are only two
293 * cases to check since we initialize to 0 or 1.
294 */
295 BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
296 BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);
297
298 if (static_key_initialized)
299 return;
300
301 jump_label_lock();
302 jump_label_sort_entries(iter_start, iter_stop);
303
304 for (iter = iter_start; iter < iter_stop; iter++) {
305 struct static_key *iterk;
306
307 /* rewrite NOPs */
308 if (jump_label_type(iter) == JUMP_LABEL_NOP)
309 arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
310
311 iterk = jump_entry_key(iter);
312 if (iterk == key)
313 continue;
314
315 key = iterk;
316 /*
317 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
318 */
319 *((unsigned long *)&key->entries) += (unsigned long)iter;
320#ifdef CONFIG_MODULES
321 key->next = NULL;
322#endif
323 }
324 static_key_initialized = true;
325 jump_label_unlock();
326}
327
328#ifdef CONFIG_MODULES
329
330static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
331{
332 struct static_key *key = jump_entry_key(entry);
333 bool type = static_key_type(key);
334 bool branch = jump_entry_branch(entry);
335
336 /* See the comment in linux/jump_label.h */
337 return type ^ branch;
338}
339
340struct static_key_mod {
341 struct static_key_mod *next;
342 struct jump_entry *entries;
343 struct module *mod;
344};
345
346static int __jump_label_mod_text_reserved(void *start, void *end)
347{
348 struct module *mod;
349
350 preempt_disable();
351 mod = __module_text_address((unsigned long)start);
352 WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
353 preempt_enable();
354
355 if (!mod)
356 return 0;
357
358
359 return __jump_label_text_reserved(mod->jump_entries,
360 mod->jump_entries + mod->num_jump_entries,
361 start, end);
362}
363
364static void __jump_label_mod_update(struct static_key *key)
365{
366 struct static_key_mod *mod;
367
368 for (mod = key->next; mod; mod = mod->next) {
369 struct module *m = mod->mod;
370
371 __jump_label_update(key, mod->entries,
372 m->jump_entries + m->num_jump_entries);
373 }
374}
375
376/***
377 * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
378 * @mod: module to patch
379 *
380 * Allow for run-time selection of the optimal nops. Before the module
381 * loads patch these with arch_get_jump_label_nop(), which is specified by
382 * the arch specific jump label code.
383 */
384void jump_label_apply_nops(struct module *mod)
385{
386 struct jump_entry *iter_start = mod->jump_entries;
387 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
388 struct jump_entry *iter;
389
390 /* if the module doesn't have jump label entries, just return */
391 if (iter_start == iter_stop)
392 return;
393
394 for (iter = iter_start; iter < iter_stop; iter++) {
395 /* Only write NOPs for arch_branch_static(). */
396 if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
397 arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
398 }
399}
400
401static int jump_label_add_module(struct module *mod)
402{
403 struct jump_entry *iter_start = mod->jump_entries;
404 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
405 struct jump_entry *iter;
406 struct static_key *key = NULL;
407 struct static_key_mod *jlm;
408
409 /* if the module doesn't have jump label entries, just return */
410 if (iter_start == iter_stop)
411 return 0;
412
413 jump_label_sort_entries(iter_start, iter_stop);
414
415 for (iter = iter_start; iter < iter_stop; iter++) {
416 struct static_key *iterk;
417
418 iterk = jump_entry_key(iter);
419 if (iterk == key)
420 continue;
421
422 key = iterk;
423 if (within_module(iter->key, mod)) {
424 /*
425 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
426 */
427 *((unsigned long *)&key->entries) += (unsigned long)iter;
428 key->next = NULL;
429 continue;
430 }
431 jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
432 if (!jlm)
433 return -ENOMEM;
434 jlm->mod = mod;
435 jlm->entries = iter;
436 jlm->next = key->next;
437 key->next = jlm;
438
439 /* Only update if we've changed from our initial state */
440 if (jump_label_type(iter) != jump_label_init_type(iter))
441 __jump_label_update(key, iter, iter_stop);
442 }
443
444 return 0;
445}
446
447static void jump_label_del_module(struct module *mod)
448{
449 struct jump_entry *iter_start = mod->jump_entries;
450 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
451 struct jump_entry *iter;
452 struct static_key *key = NULL;
453 struct static_key_mod *jlm, **prev;
454
455 for (iter = iter_start; iter < iter_stop; iter++) {
456 if (jump_entry_key(iter) == key)
457 continue;
458
459 key = jump_entry_key(iter);
460
461 if (within_module(iter->key, mod))
462 continue;
463
464 prev = &key->next;
465 jlm = key->next;
466
467 while (jlm && jlm->mod != mod) {
468 prev = &jlm->next;
469 jlm = jlm->next;
470 }
471
472 if (jlm) {
473 *prev = jlm->next;
474 kfree(jlm);
475 }
476 }
477}
478
479static void jump_label_invalidate_module_init(struct module *mod)
480{
481 struct jump_entry *iter_start = mod->jump_entries;
482 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
483 struct jump_entry *iter;
484
485 for (iter = iter_start; iter < iter_stop; iter++) {
486 if (within_module_init(iter->code, mod))
487 iter->code = 0;
488 }
489}
490
491static int
492jump_label_module_notify(struct notifier_block *self, unsigned long val,
493 void *data)
494{
495 struct module *mod = data;
496 int ret = 0;
497
498 switch (val) {
499 case MODULE_STATE_COMING:
500 jump_label_lock();
501 ret = jump_label_add_module(mod);
502 if (ret)
503 jump_label_del_module(mod);
504 jump_label_unlock();
505 break;
506 case MODULE_STATE_GOING:
507 jump_label_lock();
508 jump_label_del_module(mod);
509 jump_label_unlock();
510 break;
511 case MODULE_STATE_LIVE:
512 jump_label_lock();
513 jump_label_invalidate_module_init(mod);
514 jump_label_unlock();
515 break;
516 }
517
518 return notifier_from_errno(ret);
519}
520
521static struct notifier_block jump_label_module_nb = {
522 .notifier_call = jump_label_module_notify,
523 .priority = 1, /* higher than tracepoints */
524};
525
526static __init int jump_label_init_module(void)
527{
528 return register_module_notifier(&jump_label_module_nb);
529}
530early_initcall(jump_label_init_module);
531
532#endif /* CONFIG_MODULES */
533
534/***
535 * jump_label_text_reserved - check if addr range is reserved
536 * @start: start text addr
537 * @end: end text addr
538 *
539 * checks if the text addr located between @start and @end
540 * overlaps with any of the jump label patch addresses. Code
541 * that wants to modify kernel text should first verify that
542 * it does not overlap with any of the jump label addresses.
543 * Caller must hold jump_label_mutex.
544 *
545 * returns 1 if there is an overlap, 0 otherwise
546 */
547int jump_label_text_reserved(void *start, void *end)
548{
549 int ret = __jump_label_text_reserved(__start___jump_table,
550 __stop___jump_table, start, end);
551
552 if (ret)
553 return ret;
554
555#ifdef CONFIG_MODULES
556 ret = __jump_label_mod_text_reserved(start, end);
557#endif
558 return ret;
559}
560
561static void jump_label_update(struct static_key *key)
562{
563 struct jump_entry *stop = __stop___jump_table;
564 struct jump_entry *entry = static_key_entries(key);
565#ifdef CONFIG_MODULES
566 struct module *mod;
567
568 __jump_label_mod_update(key);
569
570 preempt_disable();
571 mod = __module_address((unsigned long)key);
572 if (mod)
573 stop = mod->jump_entries + mod->num_jump_entries;
574 preempt_enable();
575#endif
576 /* if there are no users, entry can be NULL */
577 if (entry)
578 __jump_label_update(key, entry, stop);
579}
580
581#ifdef CONFIG_STATIC_KEYS_SELFTEST
582static DEFINE_STATIC_KEY_TRUE(sk_true);
583static DEFINE_STATIC_KEY_FALSE(sk_false);
584
585static __init int jump_label_test(void)
586{
587 int i;
588
589 for (i = 0; i < 2; i++) {
590 WARN_ON(static_key_enabled(&sk_true.key) != true);
591 WARN_ON(static_key_enabled(&sk_false.key) != false);
592
593 WARN_ON(!static_branch_likely(&sk_true));
594 WARN_ON(!static_branch_unlikely(&sk_true));
595 WARN_ON(static_branch_likely(&sk_false));
596 WARN_ON(static_branch_unlikely(&sk_false));
597
598 static_branch_disable(&sk_true);
599 static_branch_enable(&sk_false);
600
601 WARN_ON(static_key_enabled(&sk_true.key) == true);
602 WARN_ON(static_key_enabled(&sk_false.key) == false);
603
604 WARN_ON(static_branch_likely(&sk_true));
605 WARN_ON(static_branch_unlikely(&sk_true));
606 WARN_ON(!static_branch_likely(&sk_false));
607 WARN_ON(!static_branch_unlikely(&sk_false));
608
609 static_branch_enable(&sk_true);
610 static_branch_disable(&sk_false);
611 }
612
613 return 0;
614}
615late_initcall(jump_label_test);
616#endif /* STATIC_KEYS_SELFTEST */
617
618#endif /* HAVE_JUMP_LABEL */
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * jump label support
4 *
5 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
6 * Copyright (C) 2011 Peter Zijlstra
7 *
8 */
9#include <linux/memory.h>
10#include <linux/uaccess.h>
11#include <linux/module.h>
12#include <linux/list.h>
13#include <linux/slab.h>
14#include <linux/sort.h>
15#include <linux/err.h>
16#include <linux/static_key.h>
17#include <linux/jump_label_ratelimit.h>
18#include <linux/bug.h>
19#include <linux/cpu.h>
20#include <asm/sections.h>
21
22/* mutex to protect coming/going of the the jump_label table */
23static DEFINE_MUTEX(jump_label_mutex);
24
25void jump_label_lock(void)
26{
27 mutex_lock(&jump_label_mutex);
28}
29
30void jump_label_unlock(void)
31{
32 mutex_unlock(&jump_label_mutex);
33}
34
35static int jump_label_cmp(const void *a, const void *b)
36{
37 const struct jump_entry *jea = a;
38 const struct jump_entry *jeb = b;
39
40 /*
41 * Entrires are sorted by key.
42 */
43 if (jump_entry_key(jea) < jump_entry_key(jeb))
44 return -1;
45
46 if (jump_entry_key(jea) > jump_entry_key(jeb))
47 return 1;
48
49 /*
50 * In the batching mode, entries should also be sorted by the code
51 * inside the already sorted list of entries, enabling a bsearch in
52 * the vector.
53 */
54 if (jump_entry_code(jea) < jump_entry_code(jeb))
55 return -1;
56
57 if (jump_entry_code(jea) > jump_entry_code(jeb))
58 return 1;
59
60 return 0;
61}
62
63static void jump_label_swap(void *a, void *b, int size)
64{
65 long delta = (unsigned long)a - (unsigned long)b;
66 struct jump_entry *jea = a;
67 struct jump_entry *jeb = b;
68 struct jump_entry tmp = *jea;
69
70 jea->code = jeb->code - delta;
71 jea->target = jeb->target - delta;
72 jea->key = jeb->key - delta;
73
74 jeb->code = tmp.code + delta;
75 jeb->target = tmp.target + delta;
76 jeb->key = tmp.key + delta;
77}
78
79static void
80jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
81{
82 unsigned long size;
83 void *swapfn = NULL;
84
85 if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
86 swapfn = jump_label_swap;
87
88 size = (((unsigned long)stop - (unsigned long)start)
89 / sizeof(struct jump_entry));
90 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
91}
92
93static void jump_label_update(struct static_key *key);
94
95/*
96 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
97 * The use of 'atomic_read()' requires atomic.h and its problematic for some
98 * kernel headers such as kernel.h and others. Since static_key_count() is not
99 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case its ok
100 * to have it be a function here. Similarly, for 'static_key_enable()' and
101 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
102 * to be included from most/all places for CONFIG_JUMP_LABEL.
103 */
104int static_key_count(struct static_key *key)
105{
106 /*
107 * -1 means the first static_key_slow_inc() is in progress.
108 * static_key_enabled() must return true, so return 1 here.
109 */
110 int n = atomic_read(&key->enabled);
111
112 return n >= 0 ? n : 1;
113}
114EXPORT_SYMBOL_GPL(static_key_count);
115
116void static_key_slow_inc_cpuslocked(struct static_key *key)
117{
118 int v, v1;
119
120 STATIC_KEY_CHECK_USE(key);
121 lockdep_assert_cpus_held();
122
123 /*
124 * Careful if we get concurrent static_key_slow_inc() calls;
125 * later calls must wait for the first one to _finish_ the
126 * jump_label_update() process. At the same time, however,
127 * the jump_label_update() call below wants to see
128 * static_key_enabled(&key) for jumps to be updated properly.
129 *
130 * So give a special meaning to negative key->enabled: it sends
131 * static_key_slow_inc() down the slow path, and it is non-zero
132 * so it counts as "enabled" in jump_label_update(). Note that
133 * atomic_inc_unless_negative() checks >= 0, so roll our own.
134 */
135 for (v = atomic_read(&key->enabled); v > 0; v = v1) {
136 v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
137 if (likely(v1 == v))
138 return;
139 }
140
141 jump_label_lock();
142 if (atomic_read(&key->enabled) == 0) {
143 atomic_set(&key->enabled, -1);
144 jump_label_update(key);
145 /*
146 * Ensure that if the above cmpxchg loop observes our positive
147 * value, it must also observe all the text changes.
148 */
149 atomic_set_release(&key->enabled, 1);
150 } else {
151 atomic_inc(&key->enabled);
152 }
153 jump_label_unlock();
154}
155
156void static_key_slow_inc(struct static_key *key)
157{
158 cpus_read_lock();
159 static_key_slow_inc_cpuslocked(key);
160 cpus_read_unlock();
161}
162EXPORT_SYMBOL_GPL(static_key_slow_inc);
163
164void static_key_enable_cpuslocked(struct static_key *key)
165{
166 STATIC_KEY_CHECK_USE(key);
167 lockdep_assert_cpus_held();
168
169 if (atomic_read(&key->enabled) > 0) {
170 WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
171 return;
172 }
173
174 jump_label_lock();
175 if (atomic_read(&key->enabled) == 0) {
176 atomic_set(&key->enabled, -1);
177 jump_label_update(key);
178 /*
179 * See static_key_slow_inc().
180 */
181 atomic_set_release(&key->enabled, 1);
182 }
183 jump_label_unlock();
184}
185EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);
186
187void static_key_enable(struct static_key *key)
188{
189 cpus_read_lock();
190 static_key_enable_cpuslocked(key);
191 cpus_read_unlock();
192}
193EXPORT_SYMBOL_GPL(static_key_enable);
194
195void static_key_disable_cpuslocked(struct static_key *key)
196{
197 STATIC_KEY_CHECK_USE(key);
198 lockdep_assert_cpus_held();
199
200 if (atomic_read(&key->enabled) != 1) {
201 WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
202 return;
203 }
204
205 jump_label_lock();
206 if (atomic_cmpxchg(&key->enabled, 1, 0))
207 jump_label_update(key);
208 jump_label_unlock();
209}
210EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);
211
212void static_key_disable(struct static_key *key)
213{
214 cpus_read_lock();
215 static_key_disable_cpuslocked(key);
216 cpus_read_unlock();
217}
218EXPORT_SYMBOL_GPL(static_key_disable);
219
220static bool static_key_slow_try_dec(struct static_key *key)
221{
222 int val;
223
224 val = atomic_fetch_add_unless(&key->enabled, -1, 1);
225 if (val == 1)
226 return false;
227
228 /*
229 * The negative count check is valid even when a negative
230 * key->enabled is in use by static_key_slow_inc(); a
231 * __static_key_slow_dec() before the first static_key_slow_inc()
232 * returns is unbalanced, because all other static_key_slow_inc()
233 * instances block while the update is in progress.
234 */
235 WARN(val < 0, "jump label: negative count!\n");
236 return true;
237}
238
239static void __static_key_slow_dec_cpuslocked(struct static_key *key)
240{
241 lockdep_assert_cpus_held();
242
243 if (static_key_slow_try_dec(key))
244 return;
245
246 jump_label_lock();
247 if (atomic_dec_and_test(&key->enabled))
248 jump_label_update(key);
249 jump_label_unlock();
250}
251
252static void __static_key_slow_dec(struct static_key *key)
253{
254 cpus_read_lock();
255 __static_key_slow_dec_cpuslocked(key);
256 cpus_read_unlock();
257}
258
259void jump_label_update_timeout(struct work_struct *work)
260{
261 struct static_key_deferred *key =
262 container_of(work, struct static_key_deferred, work.work);
263 __static_key_slow_dec(&key->key);
264}
265EXPORT_SYMBOL_GPL(jump_label_update_timeout);
266
267void static_key_slow_dec(struct static_key *key)
268{
269 STATIC_KEY_CHECK_USE(key);
270 __static_key_slow_dec(key);
271}
272EXPORT_SYMBOL_GPL(static_key_slow_dec);
273
274void static_key_slow_dec_cpuslocked(struct static_key *key)
275{
276 STATIC_KEY_CHECK_USE(key);
277 __static_key_slow_dec_cpuslocked(key);
278}
279
280void __static_key_slow_dec_deferred(struct static_key *key,
281 struct delayed_work *work,
282 unsigned long timeout)
283{
284 STATIC_KEY_CHECK_USE(key);
285
286 if (static_key_slow_try_dec(key))
287 return;
288
289 schedule_delayed_work(work, timeout);
290}
291EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);
292
293void __static_key_deferred_flush(void *key, struct delayed_work *work)
294{
295 STATIC_KEY_CHECK_USE(key);
296 flush_delayed_work(work);
297}
298EXPORT_SYMBOL_GPL(__static_key_deferred_flush);
299
300void jump_label_rate_limit(struct static_key_deferred *key,
301 unsigned long rl)
302{
303 STATIC_KEY_CHECK_USE(key);
304 key->timeout = rl;
305 INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
306}
307EXPORT_SYMBOL_GPL(jump_label_rate_limit);
308
309static int addr_conflict(struct jump_entry *entry, void *start, void *end)
310{
311 if (jump_entry_code(entry) <= (unsigned long)end &&
312 jump_entry_code(entry) + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
313 return 1;
314
315 return 0;
316}
317
318static int __jump_label_text_reserved(struct jump_entry *iter_start,
319 struct jump_entry *iter_stop, void *start, void *end)
320{
321 struct jump_entry *iter;
322
323 iter = iter_start;
324 while (iter < iter_stop) {
325 if (addr_conflict(iter, start, end))
326 return 1;
327 iter++;
328 }
329
330 return 0;
331}
332
333/*
334 * Update code which is definitely not currently executing.
335 * Architectures which need heavyweight synchronization to modify
336 * running code can override this to make the non-live update case
337 * cheaper.
338 */
339void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
340 enum jump_label_type type)
341{
342 arch_jump_label_transform(entry, type);
343}
344
345static inline struct jump_entry *static_key_entries(struct static_key *key)
346{
347 WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
348 return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
349}
350
351static inline bool static_key_type(struct static_key *key)
352{
353 return key->type & JUMP_TYPE_TRUE;
354}
355
356static inline bool static_key_linked(struct static_key *key)
357{
358 return key->type & JUMP_TYPE_LINKED;
359}
360
361static inline void static_key_clear_linked(struct static_key *key)
362{
363 key->type &= ~JUMP_TYPE_LINKED;
364}
365
366static inline void static_key_set_linked(struct static_key *key)
367{
368 key->type |= JUMP_TYPE_LINKED;
369}
370
371/***
372 * A 'struct static_key' uses a union such that it either points directly
373 * to a table of 'struct jump_entry' or to a linked list of modules which in
374 * turn point to 'struct jump_entry' tables.
375 *
376 * The two lower bits of the pointer are used to keep track of which pointer
377 * type is in use and to store the initial branch direction, we use an access
378 * function which preserves these bits.
379 */
380static void static_key_set_entries(struct static_key *key,
381 struct jump_entry *entries)
382{
383 unsigned long type;
384
385 WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
386 type = key->type & JUMP_TYPE_MASK;
387 key->entries = entries;
388 key->type |= type;
389}
390
391static enum jump_label_type jump_label_type(struct jump_entry *entry)
392{
393 struct static_key *key = jump_entry_key(entry);
394 bool enabled = static_key_enabled(key);
395 bool branch = jump_entry_is_branch(entry);
396
397 /* See the comment in linux/jump_label.h */
398 return enabled ^ branch;
399}
400
401static bool jump_label_can_update(struct jump_entry *entry, bool init)
402{
403 /*
404 * Cannot update code that was in an init text area.
405 */
406 if (!init && jump_entry_is_init(entry))
407 return false;
408
409 if (!kernel_text_address(jump_entry_code(entry))) {
410 WARN_ONCE(!jump_entry_is_init(entry),
411 "can't patch jump_label at %pS",
412 (void *)jump_entry_code(entry));
413 return false;
414 }
415
416 return true;
417}
418
419#ifndef HAVE_JUMP_LABEL_BATCH
420static void __jump_label_update(struct static_key *key,
421 struct jump_entry *entry,
422 struct jump_entry *stop,
423 bool init)
424{
425 for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
426 if (jump_label_can_update(entry, init))
427 arch_jump_label_transform(entry, jump_label_type(entry));
428 }
429}
430#else
431static void __jump_label_update(struct static_key *key,
432 struct jump_entry *entry,
433 struct jump_entry *stop,
434 bool init)
435{
436 for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
437
438 if (!jump_label_can_update(entry, init))
439 continue;
440
441 if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
442 /*
443 * Queue is full: Apply the current queue and try again.
444 */
445 arch_jump_label_transform_apply();
446 BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
447 }
448 }
449 arch_jump_label_transform_apply();
450}
451#endif
452
453void __init jump_label_init(void)
454{
455 struct jump_entry *iter_start = __start___jump_table;
456 struct jump_entry *iter_stop = __stop___jump_table;
457 struct static_key *key = NULL;
458 struct jump_entry *iter;
459
460 /*
461 * Since we are initializing the static_key.enabled field with
462 * with the 'raw' int values (to avoid pulling in atomic.h) in
463 * jump_label.h, let's make sure that is safe. There are only two
464 * cases to check since we initialize to 0 or 1.
465 */
466 BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
467 BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);
468
469 if (static_key_initialized)
470 return;
471
472 cpus_read_lock();
473 jump_label_lock();
474 jump_label_sort_entries(iter_start, iter_stop);
475
476 for (iter = iter_start; iter < iter_stop; iter++) {
477 struct static_key *iterk;
478
479 /* rewrite NOPs */
480 if (jump_label_type(iter) == JUMP_LABEL_NOP)
481 arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
482
483 if (init_section_contains((void *)jump_entry_code(iter), 1))
484 jump_entry_set_init(iter);
485
486 iterk = jump_entry_key(iter);
487 if (iterk == key)
488 continue;
489
490 key = iterk;
491 static_key_set_entries(key, iter);
492 }
493 static_key_initialized = true;
494 jump_label_unlock();
495 cpus_read_unlock();
496}
497
498#ifdef CONFIG_MODULES
499
500static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
501{
502 struct static_key *key = jump_entry_key(entry);
503 bool type = static_key_type(key);
504 bool branch = jump_entry_is_branch(entry);
505
506 /* See the comment in linux/jump_label.h */
507 return type ^ branch;
508}
509
510struct static_key_mod {
511 struct static_key_mod *next;
512 struct jump_entry *entries;
513 struct module *mod;
514};
515
516static inline struct static_key_mod *static_key_mod(struct static_key *key)
517{
518 WARN_ON_ONCE(!static_key_linked(key));
519 return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
520}
521
522/***
523 * key->type and key->next are the same via union.
524 * This sets key->next and preserves the type bits.
525 *
526 * See additional comments above static_key_set_entries().
527 */
528static void static_key_set_mod(struct static_key *key,
529 struct static_key_mod *mod)
530{
531 unsigned long type;
532
533 WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
534 type = key->type & JUMP_TYPE_MASK;
535 key->next = mod;
536 key->type |= type;
537}
538
539static int __jump_label_mod_text_reserved(void *start, void *end)
540{
541 struct module *mod;
542
543 preempt_disable();
544 mod = __module_text_address((unsigned long)start);
545 WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
546 preempt_enable();
547
548 if (!mod)
549 return 0;
550
551
552 return __jump_label_text_reserved(mod->jump_entries,
553 mod->jump_entries + mod->num_jump_entries,
554 start, end);
555}
556
557static void __jump_label_mod_update(struct static_key *key)
558{
559 struct static_key_mod *mod;
560
561 for (mod = static_key_mod(key); mod; mod = mod->next) {
562 struct jump_entry *stop;
563 struct module *m;
564
565 /*
566 * NULL if the static_key is defined in a module
567 * that does not use it
568 */
569 if (!mod->entries)
570 continue;
571
572 m = mod->mod;
573 if (!m)
574 stop = __stop___jump_table;
575 else
576 stop = m->jump_entries + m->num_jump_entries;
577 __jump_label_update(key, mod->entries, stop,
578 m && m->state == MODULE_STATE_COMING);
579 }
580}
581
582/***
583 * apply_jump_label_nops - patch module jump labels with arch_get_jump_label_nop()
584 * @mod: module to patch
585 *
586 * Allow for run-time selection of the optimal nops. Before the module
587 * loads patch these with arch_get_jump_label_nop(), which is specified by
588 * the arch specific jump label code.
589 */
590void jump_label_apply_nops(struct module *mod)
591{
592 struct jump_entry *iter_start = mod->jump_entries;
593 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
594 struct jump_entry *iter;
595
596 /* if the module doesn't have jump label entries, just return */
597 if (iter_start == iter_stop)
598 return;
599
600 for (iter = iter_start; iter < iter_stop; iter++) {
601 /* Only write NOPs for arch_branch_static(). */
602 if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
603 arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
604 }
605}
606
607static int jump_label_add_module(struct module *mod)
608{
609 struct jump_entry *iter_start = mod->jump_entries;
610 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
611 struct jump_entry *iter;
612 struct static_key *key = NULL;
613 struct static_key_mod *jlm, *jlm2;
614
615 /* if the module doesn't have jump label entries, just return */
616 if (iter_start == iter_stop)
617 return 0;
618
619 jump_label_sort_entries(iter_start, iter_stop);
620
621 for (iter = iter_start; iter < iter_stop; iter++) {
622 struct static_key *iterk;
623
624 if (within_module_init(jump_entry_code(iter), mod))
625 jump_entry_set_init(iter);
626
627 iterk = jump_entry_key(iter);
628 if (iterk == key)
629 continue;
630
631 key = iterk;
632 if (within_module((unsigned long)key, mod)) {
633 static_key_set_entries(key, iter);
634 continue;
635 }
636 jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
637 if (!jlm)
638 return -ENOMEM;
639 if (!static_key_linked(key)) {
640 jlm2 = kzalloc(sizeof(struct static_key_mod),
641 GFP_KERNEL);
642 if (!jlm2) {
643 kfree(jlm);
644 return -ENOMEM;
645 }
646 preempt_disable();
647 jlm2->mod = __module_address((unsigned long)key);
648 preempt_enable();
649 jlm2->entries = static_key_entries(key);
650 jlm2->next = NULL;
651 static_key_set_mod(key, jlm2);
652 static_key_set_linked(key);
653 }
654 jlm->mod = mod;
655 jlm->entries = iter;
656 jlm->next = static_key_mod(key);
657 static_key_set_mod(key, jlm);
658 static_key_set_linked(key);
659
660 /* Only update if we've changed from our initial state */
661 if (jump_label_type(iter) != jump_label_init_type(iter))
662 __jump_label_update(key, iter, iter_stop, true);
663 }
664
665 return 0;
666}
667
668static void jump_label_del_module(struct module *mod)
669{
670 struct jump_entry *iter_start = mod->jump_entries;
671 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
672 struct jump_entry *iter;
673 struct static_key *key = NULL;
674 struct static_key_mod *jlm, **prev;
675
676 for (iter = iter_start; iter < iter_stop; iter++) {
677 if (jump_entry_key(iter) == key)
678 continue;
679
680 key = jump_entry_key(iter);
681
682 if (within_module((unsigned long)key, mod))
683 continue;
684
685 /* No memory during module load */
686 if (WARN_ON(!static_key_linked(key)))
687 continue;
688
689 prev = &key->next;
690 jlm = static_key_mod(key);
691
692 while (jlm && jlm->mod != mod) {
693 prev = &jlm->next;
694 jlm = jlm->next;
695 }
696
697 /* No memory during module load */
698 if (WARN_ON(!jlm))
699 continue;
700
701 if (prev == &key->next)
702 static_key_set_mod(key, jlm->next);
703 else
704 *prev = jlm->next;
705
706 kfree(jlm);
707
708 jlm = static_key_mod(key);
709 /* if only one etry is left, fold it back into the static_key */
710 if (jlm->next == NULL) {
711 static_key_set_entries(key, jlm->entries);
712 static_key_clear_linked(key);
713 kfree(jlm);
714 }
715 }
716}
717
718static int
719jump_label_module_notify(struct notifier_block *self, unsigned long val,
720 void *data)
721{
722 struct module *mod = data;
723 int ret = 0;
724
725 cpus_read_lock();
726 jump_label_lock();
727
728 switch (val) {
729 case MODULE_STATE_COMING:
730 ret = jump_label_add_module(mod);
731 if (ret) {
732 WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
733 jump_label_del_module(mod);
734 }
735 break;
736 case MODULE_STATE_GOING:
737 jump_label_del_module(mod);
738 break;
739 }
740
741 jump_label_unlock();
742 cpus_read_unlock();
743
744 return notifier_from_errno(ret);
745}
746
747static struct notifier_block jump_label_module_nb = {
748 .notifier_call = jump_label_module_notify,
749 .priority = 1, /* higher than tracepoints */
750};
751
752static __init int jump_label_init_module(void)
753{
754 return register_module_notifier(&jump_label_module_nb);
755}
756early_initcall(jump_label_init_module);
757
758#endif /* CONFIG_MODULES */
759
760/***
761 * jump_label_text_reserved - check if addr range is reserved
762 * @start: start text addr
763 * @end: end text addr
764 *
765 * checks if the text addr located between @start and @end
766 * overlaps with any of the jump label patch addresses. Code
767 * that wants to modify kernel text should first verify that
768 * it does not overlap with any of the jump label addresses.
769 * Caller must hold jump_label_mutex.
770 *
771 * returns 1 if there is an overlap, 0 otherwise
772 */
773int jump_label_text_reserved(void *start, void *end)
774{
775 int ret = __jump_label_text_reserved(__start___jump_table,
776 __stop___jump_table, start, end);
777
778 if (ret)
779 return ret;
780
781#ifdef CONFIG_MODULES
782 ret = __jump_label_mod_text_reserved(start, end);
783#endif
784 return ret;
785}
786
787static void jump_label_update(struct static_key *key)
788{
789 struct jump_entry *stop = __stop___jump_table;
790 struct jump_entry *entry;
791#ifdef CONFIG_MODULES
792 struct module *mod;
793
794 if (static_key_linked(key)) {
795 __jump_label_mod_update(key);
796 return;
797 }
798
799 preempt_disable();
800 mod = __module_address((unsigned long)key);
801 if (mod)
802 stop = mod->jump_entries + mod->num_jump_entries;
803 preempt_enable();
804#endif
805 entry = static_key_entries(key);
806 /* if there are no users, entry can be NULL */
807 if (entry)
808 __jump_label_update(key, entry, stop,
809 system_state < SYSTEM_RUNNING);
810}
811
812#ifdef CONFIG_STATIC_KEYS_SELFTEST
813static DEFINE_STATIC_KEY_TRUE(sk_true);
814static DEFINE_STATIC_KEY_FALSE(sk_false);
815
816static __init int jump_label_test(void)
817{
818 int i;
819
820 for (i = 0; i < 2; i++) {
821 WARN_ON(static_key_enabled(&sk_true.key) != true);
822 WARN_ON(static_key_enabled(&sk_false.key) != false);
823
824 WARN_ON(!static_branch_likely(&sk_true));
825 WARN_ON(!static_branch_unlikely(&sk_true));
826 WARN_ON(static_branch_likely(&sk_false));
827 WARN_ON(static_branch_unlikely(&sk_false));
828
829 static_branch_disable(&sk_true);
830 static_branch_enable(&sk_false);
831
832 WARN_ON(static_key_enabled(&sk_true.key) == true);
833 WARN_ON(static_key_enabled(&sk_false.key) == false);
834
835 WARN_ON(static_branch_likely(&sk_true));
836 WARN_ON(static_branch_unlikely(&sk_true));
837 WARN_ON(!static_branch_likely(&sk_false));
838 WARN_ON(!static_branch_unlikely(&sk_false));
839
840 static_branch_enable(&sk_true);
841 static_branch_disable(&sk_false);
842 }
843
844 return 0;
845}
846early_initcall(jump_label_test);
847#endif /* STATIC_KEYS_SELFTEST */