/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        if (jea->key < jeb->key)
                return -1;

        if (jea->key > jeb->key)
                return 1;

        return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;

        size = (((unsigned long)stop - (unsigned long)start)
                / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !HAVE_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !HAVE_JUMP_LABEL case, it's ok
 * to have it be a function here. Similarly for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for HAVE_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
        /*
         * -1 means the first static_key_slow_inc() is in progress.
         * static_key_enabled() must return true, so return 1 here.
         */
        int n = atomic_read(&key->enabled);

        return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
        int v, v1;

        STATIC_KEY_CHECK_USE(key);

        /*
         * Careful if we get concurrent static_key_slow_inc() calls;
         * later calls must wait for the first one to _finish_ the
         * jump_label_update() process. At the same time, however,
         * the jump_label_update() call below wants to see
         * static_key_enabled(&key) for jumps to be updated properly.
         *
         * So give a special meaning to negative key->enabled: it sends
         * static_key_slow_inc() down the slow path, and it is non-zero
         * so it counts as "enabled" in jump_label_update(). Note that
         * atomic_inc_unless_negative() checks >= 0, so roll our own.
         */
        for (v = atomic_read(&key->enabled); v > 0; v = v1) {
                v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
                if (likely(v1 == v))
                        return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                /*
                 * Ensure that if the above cmpxchg loop observes our positive
                 * value, it must also observe all the text changes.
                 */
                atomic_set_release(&key->enabled, 1);
        } else {
                atomic_inc(&key->enabled);
        }
        jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
        cpus_read_lock();
        static_key_slow_inc_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

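/*
 * A minimal usage sketch (illustration only, not part of this file;
 * "my_feature_key" and do_extra_work() are hypothetical): a key that
 * starts false compiles the guarded branch to a NOP, and the first
 * static_branch_inc() patches every such site to a jump.
 */
#if 0
static DEFINE_STATIC_KEY_FALSE(my_feature_key);

static void my_fast_path(void)
{
        if (static_branch_unlikely(&my_feature_key))
                do_extra_work();        /* hypothetical slow-path hook */
}

static void my_feature_set(bool on)
{
        if (on)
                static_branch_inc(&my_feature_key);     /* wraps static_key_slow_inc() */
        else
                static_branch_dec(&my_feature_key);     /* wraps static_key_slow_dec() */
}
#endif
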
void static_key_enable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);

        if (atomic_read(&key->enabled) > 0) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
                return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                /*
                 * See static_key_slow_inc().
                 */
                atomic_set_release(&key->enabled, 1);
        }
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
        cpus_read_lock();
        static_key_enable_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);

        if (atomic_read(&key->enabled) != 1) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
                return;
        }

        jump_label_lock();
        if (atomic_cmpxchg(&key->enabled, 1, 0))
                jump_label_update(key);
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
        cpus_read_lock();
        static_key_disable_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);

static void __static_key_slow_dec_cpuslocked(struct static_key *key,
                                             unsigned long rate_limit,
                                             struct delayed_work *work)
{
        /*
         * The negative count check is valid even when a negative
         * key->enabled is in use by static_key_slow_inc(); a
         * __static_key_slow_dec() before the first static_key_slow_inc()
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
        if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
                WARN(atomic_read(&key->enabled) < 0,
                     "jump label: negative count!\n");
                return;
        }

        if (rate_limit) {
                atomic_inc(&key->enabled);
                schedule_delayed_work(work, rate_limit);
        } else {
                jump_label_update(key);
        }
        jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key,
                                  unsigned long rate_limit,
                                  struct delayed_work *work)
{
        cpus_read_lock();
        __static_key_slow_dec_cpuslocked(key, rate_limit, work);
        cpus_read_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
        __static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        __static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        __static_key_slow_dec_cpuslocked(key, 0, NULL);
}

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
        STATIC_KEY_CHECK_USE(key);
        __static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void static_key_deferred_flush(struct static_key_deferred *key)
{
        STATIC_KEY_CHECK_USE(key);
        flush_delayed_work(&key->work);
}
EXPORT_SYMBOL_GPL(static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
                           unsigned long rl)
{
        STATIC_KEY_CHECK_USE(key);
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

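/*
 * A sketch of the rate-limited flavour (illustration only; the key and
 * function names are hypothetical): users that may bounce a key at high
 * frequency, such as perf, arm a timeout once and then route decrements
 * through the delayed work set up above.
 */
#if 0
static struct static_key_deferred my_deferred_key;

static void my_setup(void)
{
        /* batch expensive disables: patch text at most once per second */
        jump_label_rate_limit(&my_deferred_key, HZ);
}

static void my_get(void)
{
        static_key_slow_inc(&my_deferred_key.key);
}

static void my_put(void)
{
        /* may merely re-arm the work; see __static_key_slow_dec() */
        static_key_slow_dec_deferred(&my_deferred_key);
}
#endif
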
static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (entry->code <= (unsigned long)end &&
            entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                                      struct jump_entry *iter_stop, void *start, void *end)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (addr_conflict(iter, start, end))
                        return 1;
                iter++;
        }

        return 0;
}

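/*
 * Worked example for addr_conflict() (assuming the x86 case, where
 * JUMP_LABEL_NOP_SIZE is 5): an entry with code == 0x1000 occupies
 * [0x1000, 0x1005). A range ending at 0x0fff fails the first test and
 * a range starting at 0x1005 fails the second, so only ranges that
 * actually intersect the patched bytes are reported as reserved.
 */
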
/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                                              enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
        WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
        return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
        return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
        return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
        key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
        key->type |= JUMP_TYPE_LINKED;
}

static inline struct static_key *jump_entry_key(struct jump_entry *entry)
{
        return (struct static_key *)((unsigned long)entry->key & ~1UL);
}

static bool jump_entry_branch(struct jump_entry *entry)
{
        return (unsigned long)entry->key & 1UL;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use access
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
                                   struct jump_entry *entries)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->entries = entries;
        key->type |= type;
}

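/*
 * A sketch of the tagging scheme (the JUMP_TYPE_* constants live in
 * linux/jump_label.h):
 *
 *      JUMP_TYPE_FALSE         0UL     initial branch direction
 *      JUMP_TYPE_TRUE          1UL
 *      JUMP_TYPE_LINKED        2UL     key->next points at a static_key_mod list
 *      JUMP_TYPE_MASK          3UL
 *
 * Both 'struct jump_entry' tables and 'struct static_key_mod' nodes are
 * at least 4-byte aligned, so the two low bits of the pointer are free
 * to carry this state; that is why the setters mask with JUMP_TYPE_MASK
 * and then or the saved bits back in.
 */
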
static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool enabled = static_key_enabled(key);
        bool branch = jump_entry_branch(entry);

        /* See the comment in linux/jump_label.h */
        return enabled ^ branch;
}

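/*
 * Truth table for the XOR above (JUMP_LABEL_NOP == 0, JUMP_LABEL_JMP == 1):
 *
 *      enabled branch  result
 *      0       0       JUMP_LABEL_NOP
 *      0       1       JUMP_LABEL_JMP
 *      1       0       JUMP_LABEL_JMP
 *      1       1       JUMP_LABEL_NOP
 *
 * The branch bit (low bit of entry->key) records a site that was
 * emitted inverted, so the same key value can demand opposite patch
 * types at different branch sites.
 */
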
static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                /*
                 * An entry->code of 0 indicates an entry which has been
                 * disabled because it was in an init text area.
                 */
                if (entry->code) {
                        if (kernel_text_address(entry->code))
                                arch_jump_label_transform(entry, jump_label_type(entry));
                        else
                                WARN_ONCE(1, "can't patch jump_label at %pS",
                                          (void *)(unsigned long)entry->code);
                }
        }
}

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        /*
         * Since we are initializing the static_key.enabled field with
         * the 'raw' int values (to avoid pulling in atomic.h) in
         * jump_label.h, let's make sure that is safe. There are only two
         * cases to check since we initialize to 0 or 1.
         */
        BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
        BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

        if (static_key_initialized)
                return;

        cpus_read_lock();
        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                /* rewrite NOPs */
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                static_key_set_entries(key, iter);
        }
        static_key_initialized = true;
        jump_label_unlock();
        cpus_read_unlock();
}

/* Disable any jump label entries in __init/__exit code */
void __init jump_label_invalidate_initmem(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (init_section_contains((void *)(unsigned long)iter->code, 1))
                        iter->code = 0;
        }
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
        bool branch = jump_entry_branch(entry);

        /* See the comment in linux/jump_label.h */
        return type ^ branch;
}

struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
        WARN_ON_ONCE(!(key->type & JUMP_TYPE_LINKED));
        return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
                               struct static_key_mod *mod)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->next = mod;
        key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;

        preempt_disable();
        mod = __module_text_address((unsigned long)start);
        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
        preempt_enable();

        if (!mod)
                return 0;

        return __jump_label_text_reserved(mod->jump_entries,
                                          mod->jump_entries + mod->num_jump_entries,
                                          start, end);
}

static void __jump_label_mod_update(struct static_key *key)
{
        struct static_key_mod *mod;

        for (mod = static_key_mod(key); mod; mod = mod->next) {
                struct jump_entry *stop;
                struct module *m;

                /*
                 * NULL if the static_key is defined in a module
                 * that does not use it
                 */
                if (!mod->entries)
                        continue;

                m = mod->mod;
                if (!m)
                        stop = __stop___jump_table;
                else
                        stop = m->jump_entries + m->num_jump_entries;
                __jump_label_update(key, mod->entries, stop);
        }
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                /* Only write NOPs for arch_branch_static(). */
                if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
        }
}

static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, *jlm2;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                if (within_module(iter->key, mod)) {
                        static_key_set_entries(key, iter);
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                if (!static_key_linked(key)) {
                        jlm2 = kzalloc(sizeof(struct static_key_mod),
                                       GFP_KERNEL);
                        if (!jlm2) {
                                kfree(jlm);
                                return -ENOMEM;
                        }
                        preempt_disable();
                        jlm2->mod = __module_address((unsigned long)key);
                        preempt_enable();
                        jlm2->entries = static_key_entries(key);
                        jlm2->next = NULL;
                        static_key_set_mod(key, jlm2);
                        static_key_set_linked(key);
                }
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = static_key_mod(key);
                static_key_set_mod(key, jlm);
                static_key_set_linked(key);

                /* Only update if we've changed from our initial state */
                if (jump_label_type(iter) != jump_label_init_type(iter))
                        __jump_label_update(key, iter, iter_stop);
        }

        return 0;
}

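/*
 * Rough sketch of the key layouts jump_label_add_module() produces: a
 * key used only from the object that defines it keeps a direct entries
 * pointer, while a key referenced from other modules is converted to a
 * JUMP_TYPE_LINKED list with one static_key_mod node per user:
 *
 *      key->entries -> [jump_entry table]                  (unlinked)
 *
 *      key->next -> jlm(mod B) -> jlm(mod A) -> jlm2(owner; entries may
 *                   be NULL if the defining object has no uses itself)
 */
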
static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (jump_entry_key(iter) == key)
                        continue;

                key = jump_entry_key(iter);

                if (within_module(iter->key, mod))
                        continue;

                /* No memory during module load */
                if (WARN_ON(!static_key_linked(key)))
                        continue;

                prev = &key->next;
                jlm = static_key_mod(key);

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                /* No memory during module load */
                if (WARN_ON(!jlm))
                        continue;

                if (prev == &key->next)
                        static_key_set_mod(key, jlm->next);
                else
                        *prev = jlm->next;

                kfree(jlm);

                jlm = static_key_mod(key);
                /* if only one entry is left, fold it back into the static_key */
                if (jlm->next == NULL) {
                        static_key_set_entries(key, jlm->entries);
                        static_key_clear_linked(key);
                        kfree(jlm);
                }
        }
}

/* Disable any jump label entries in module init code */
static void jump_label_invalidate_module_init(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (within_module_init(iter->code, mod))
                        iter->code = 0;
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        cpus_read_lock();
        jump_label_lock();

        switch (val) {
        case MODULE_STATE_COMING:
                ret = jump_label_add_module(mod);
                if (ret) {
                        WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
                        jump_label_del_module(mod);
                }
                break;
        case MODULE_STATE_GOING:
                jump_label_del_module(mod);
                break;
        case MODULE_STATE_LIVE:
                jump_label_invalidate_module_init(mod);
                break;
        }

        jump_label_unlock();
        cpus_read_unlock();

        return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
        int ret = __jump_label_text_reserved(__start___jump_table,
                                             __stop___jump_table, start, end);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

static void jump_label_update(struct static_key *key)
{
        struct jump_entry *stop = __stop___jump_table;
        struct jump_entry *entry;
#ifdef CONFIG_MODULES
        struct module *mod;

        if (static_key_linked(key)) {
                __jump_label_mod_update(key);
                return;
        }

        preempt_disable();
        mod = __module_address((unsigned long)key);
        if (mod)
                stop = mod->jump_entries + mod->num_jump_entries;
        preempt_enable();
#endif
        entry = static_key_entries(key);
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                WARN_ON(static_key_enabled(&sk_true.key) != true);
                WARN_ON(static_key_enabled(&sk_false.key) != false);

                WARN_ON(!static_branch_likely(&sk_true));
                WARN_ON(!static_branch_unlikely(&sk_true));
                WARN_ON(static_branch_likely(&sk_false));
                WARN_ON(static_branch_unlikely(&sk_false));

                static_branch_disable(&sk_true);
                static_branch_enable(&sk_false);

                WARN_ON(static_key_enabled(&sk_true.key) == true);
                WARN_ON(static_key_enabled(&sk_false.key) == false);

                WARN_ON(static_branch_likely(&sk_true));
                WARN_ON(static_branch_unlikely(&sk_true));
                WARN_ON(!static_branch_likely(&sk_false));
                WARN_ON(!static_branch_unlikely(&sk_false));

                static_branch_enable(&sk_true);
                static_branch_disable(&sk_false);
        }

        return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */

#endif /* HAVE_JUMP_LABEL */
// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        /*
         * Entries are sorted by key.
         */
        if (jump_entry_key(jea) < jump_entry_key(jeb))
                return -1;

        if (jump_entry_key(jea) > jump_entry_key(jeb))
                return 1;

        /*
         * In the batching mode, entries should also be sorted by the code
         * inside the already sorted list of entries, enabling a bsearch in
         * the vector.
         */
        if (jump_entry_code(jea) < jump_entry_code(jeb))
                return -1;

        if (jump_entry_code(jea) > jump_entry_code(jeb))
                return 1;

        return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
        long delta = (unsigned long)a - (unsigned long)b;
        struct jump_entry *jea = a;
        struct jump_entry *jeb = b;
        struct jump_entry tmp = *jea;

        jea->code = jeb->code - delta;
        jea->target = jeb->target - delta;
        jea->key = jeb->key - delta;

        jeb->code = tmp.code + delta;
        jeb->target = tmp.target + delta;
        jeb->key = tmp.key + delta;
}

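/*
 * Why the swap rebases by 'delta': with CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE
 * the code/target/key fields hold offsets relative to their own address
 * rather than absolute pointers, roughly as the accessors in
 * linux/jump_label.h do (a sketch):
 */
#if 0
static inline unsigned long jump_entry_code(const struct jump_entry *entry)
{
        return (unsigned long)&entry->code + entry->code;
}
#endif
/*
 * Moving an entry during the sort changes the base those offsets are
 * taken from, so every field must be adjusted by the distance the entry
 * moved, which is exactly what jump_label_swap() does above.
 */
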
static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;
        void *swapfn = NULL;

        if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
                swapfn = jump_label_swap;

        size = (((unsigned long)stop - (unsigned long)start)
                / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements as it is for the !CONFIG_JUMP_LABEL case, it's ok
 * to have it be a function here. Similarly for 'static_key_enable()' and
 * 'static_key_disable()', which require bug.h. This should allow jump_label.h
 * to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
        /*
         * -1 means the first static_key_slow_inc() is in progress.
         * static_key_enabled() must return true, so return 1 here.
         */
        int n = atomic_read(&key->enabled);

        return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

void static_key_slow_inc_cpuslocked(struct static_key *key)
{
        int v, v1;

        STATIC_KEY_CHECK_USE(key);
        lockdep_assert_cpus_held();

        /*
         * Careful if we get concurrent static_key_slow_inc() calls;
         * later calls must wait for the first one to _finish_ the
         * jump_label_update() process. At the same time, however,
         * the jump_label_update() call below wants to see
         * static_key_enabled(&key) for jumps to be updated properly.
         *
         * So give a special meaning to negative key->enabled: it sends
         * static_key_slow_inc() down the slow path, and it is non-zero
         * so it counts as "enabled" in jump_label_update(). Note that
         * atomic_inc_unless_negative() checks >= 0, so roll our own.
         */
        for (v = atomic_read(&key->enabled); v > 0; v = v1) {
                v1 = atomic_cmpxchg(&key->enabled, v, v + 1);
                if (likely(v1 == v))
                        return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                /*
                 * Ensure that if the above cmpxchg loop observes our positive
                 * value, it must also observe all the text changes.
                 */
                atomic_set_release(&key->enabled, 1);
        } else {
                atomic_inc(&key->enabled);
        }
        jump_label_unlock();
}

void static_key_slow_inc(struct static_key *key)
{
        cpus_read_lock();
        static_key_slow_inc_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);

void static_key_enable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        lockdep_assert_cpus_held();

        if (atomic_read(&key->enabled) > 0) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
                return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                /*
                 * See static_key_slow_inc().
                 */
                atomic_set_release(&key->enabled, 1);
        }
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
        cpus_read_lock();
        static_key_enable_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        lockdep_assert_cpus_held();

        if (atomic_read(&key->enabled) != 1) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
                return;
        }

        jump_label_lock();
        if (atomic_cmpxchg(&key->enabled, 1, 0))
                jump_label_update(key);
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
        cpus_read_lock();
        static_key_disable_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);

static bool static_key_slow_try_dec(struct static_key *key)
{
        int val;

        val = atomic_fetch_add_unless(&key->enabled, -1, 1);
        if (val == 1)
                return false;

        /*
         * The negative count check is valid even when a negative
         * key->enabled is in use by static_key_slow_inc(); a
         * __static_key_slow_dec() before the first static_key_slow_inc()
         * returns is unbalanced, because all other static_key_slow_inc()
         * instances block while the update is in progress.
         */
        WARN(val < 0, "jump label: negative count!\n");
        return true;
}

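/*
 * Example of the fast/slow split above: atomic_fetch_add_unless(&v, -1, 1)
 * returns the old value and skips the add when that value equals 1. So
 * enabled == 3 decays to 2 with no text patching (return true), while
 * enabled == 1 is left untouched and false is returned, forcing the
 * caller to take jump_label_lock() for the final 1 -> 0 transition.
 */
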
static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
        lockdep_assert_cpus_held();

        if (static_key_slow_try_dec(key))
                return;

        jump_label_lock();
        if (atomic_dec_and_test(&key->enabled))
                jump_label_update(key);
        jump_label_unlock();
}

static void __static_key_slow_dec(struct static_key *key)
{
        cpus_read_lock();
        __static_key_slow_dec_cpuslocked(key);
        cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
        __static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        __static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        __static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
                                    struct delayed_work *work,
                                    unsigned long timeout)
{
        STATIC_KEY_CHECK_USE(key);

        if (static_key_slow_try_dec(key))
                return;

        schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
        STATIC_KEY_CHECK_USE(key);
        flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
                           unsigned long rl)
{
        STATIC_KEY_CHECK_USE(key);
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (jump_entry_code(entry) <= (unsigned long)end &&
            jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                                      struct jump_entry *iter_stop, void *start, void *end, bool init)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (init || !jump_entry_is_init(iter)) {
                        if (addr_conflict(iter, start, end))
                                return 1;
                }
                iter++;
        }

        return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
                                                              enum jump_label_type type)
{
        arch_jump_label_transform(entry, type);
}

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
        WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
        return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
        return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
        return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
        key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
        key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use access
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
                                   struct jump_entry *entries)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->entries = entries;
        key->type |= type;
}

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool enabled = static_key_enabled(key);
        bool branch = jump_entry_is_branch(entry);

        /* See the comment in linux/jump_label.h */
        return enabled ^ branch;
}

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
        /*
         * Cannot update code that was in an init text area.
         */
        if (!init && jump_entry_is_init(entry))
                return false;

        if (!kernel_text_address(jump_entry_code(entry))) {
                /*
                 * This skips patching built-in __exit, which
                 * is part of init_section_contains() but is
                 * not part of kernel_text_address().
                 *
                 * Skipping built-in __exit is fine since it
                 * will never be executed.
                 */
                WARN_ONCE(!jump_entry_is_init(entry),
                          "can't patch jump_label at %pS",
                          (void *)jump_entry_code(entry));
                return false;
        }

        return true;
}

#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop,
                                bool init)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                if (jump_label_can_update(entry, init))
                        arch_jump_label_transform(entry, jump_label_type(entry));
        }
}
#else
static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop,
                                bool init)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

                if (!jump_label_can_update(entry, init))
                        continue;

                if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
                        /*
                         * Queue is full: Apply the current queue and try again.
                         */
                        arch_jump_label_transform_apply();
                        BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
                }
        }
        arch_jump_label_transform_apply();
}
#endif

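/*
 * The batch variant exists because arch_jump_label_transform() can be
 * costly per call (each poke may need its own round of CPU
 * synchronization). Queueing entries and flushing once amortizes that
 * cost across all sites attached to the key. The contract assumed here:
 *
 *      arch_jump_label_transform_queue()  - returns false when the
 *                                           arch's queue is full
 *      arch_jump_label_transform_apply()  - flushes the queue, doing
 *                                           the synchronization once
 */
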
void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        /*
         * Since we are initializing the static_key.enabled field with
         * the 'raw' int values (to avoid pulling in atomic.h) in
         * jump_label.h, let's make sure that is safe. There are only two
         * cases to check since we initialize to 0 or 1.
         */
        BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
        BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

        if (static_key_initialized)
                return;

        cpus_read_lock();
        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;
                bool in_init;

                /* rewrite NOPs */
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

                in_init = init_section_contains((void *)jump_entry_code(iter), 1);
                jump_entry_set_init(iter, in_init);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                static_key_set_entries(key, iter);
        }
        static_key_initialized = true;
        jump_label_unlock();
        cpus_read_unlock();
}

#ifdef CONFIG_MODULES

static enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
        bool branch = jump_entry_is_branch(entry);

        /* See the comment in linux/jump_label.h */
        return type ^ branch;
}

struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
        WARN_ON_ONCE(!static_key_linked(key));
        return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
                               struct static_key_mod *mod)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->next = mod;
        key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;
        int ret;

        preempt_disable();
        mod = __module_text_address((unsigned long)start);
        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
        if (!try_module_get(mod))
                mod = NULL;
        preempt_enable();

        if (!mod)
                return 0;

        ret = __jump_label_text_reserved(mod->jump_entries,
                                         mod->jump_entries + mod->num_jump_entries,
                                         start, end, mod->state == MODULE_STATE_COMING);

        module_put(mod);

        return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
        struct static_key_mod *mod;

        for (mod = static_key_mod(key); mod; mod = mod->next) {
                struct jump_entry *stop;
                struct module *m;

                /*
                 * NULL if the static_key is defined in a module
                 * that does not use it
                 */
                if (!mod->entries)
                        continue;

                m = mod->mod;
                if (!m)
                        stop = __stop___jump_table;
                else
                        stop = m->jump_entries + m->num_jump_entries;
                __jump_label_update(key, mod->entries, stop,
                                    m && m->state == MODULE_STATE_COMING);
        }
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads patch these with arch_get_jump_label_nop(), which is specified by
 * the arch specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return;

        for (iter = iter_start; iter < iter_stop; iter++) {
                /* Only write NOPs for arch_branch_static(). */
                if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);
        }
}

static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, *jlm2;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;
                bool in_init;

                in_init = within_module_init(jump_entry_code(iter), mod);
                jump_entry_set_init(iter, in_init);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                if (within_module((unsigned long)key, mod)) {
                        static_key_set_entries(key, iter);
                        continue;
                }
                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                if (!static_key_linked(key)) {
                        jlm2 = kzalloc(sizeof(struct static_key_mod),
                                       GFP_KERNEL);
                        if (!jlm2) {
                                kfree(jlm);
                                return -ENOMEM;
                        }
                        preempt_disable();
                        jlm2->mod = __module_address((unsigned long)key);
                        preempt_enable();
                        jlm2->entries = static_key_entries(key);
                        jlm2->next = NULL;
                        static_key_set_mod(key, jlm2);
                        static_key_set_linked(key);
                }
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = static_key_mod(key);
                static_key_set_mod(key, jlm);
                static_key_set_linked(key);

                /* Only update if we've changed from our initial state */
                if (jump_label_type(iter) != jump_label_init_type(iter))
                        __jump_label_update(key, iter, iter_stop, true);
        }

        return 0;
}

static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (jump_entry_key(iter) == key)
                        continue;

                key = jump_entry_key(iter);

                if (within_module((unsigned long)key, mod))
                        continue;

                /* No memory during module load */
                if (WARN_ON(!static_key_linked(key)))
                        continue;

                prev = &key->next;
                jlm = static_key_mod(key);

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                /* No memory during module load */
                if (WARN_ON(!jlm))
                        continue;

                if (prev == &key->next)
                        static_key_set_mod(key, jlm->next);
                else
                        *prev = jlm->next;

                kfree(jlm);

                jlm = static_key_mod(key);
                /* if only one entry is left, fold it back into the static_key */
                if (jlm->next == NULL) {
                        static_key_set_entries(key, jlm->entries);
                        static_key_clear_linked(key);
                        kfree(jlm);
                }
        }
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        cpus_read_lock();
        jump_label_lock();

        switch (val) {
        case MODULE_STATE_COMING:
                ret = jump_label_add_module(mod);
                if (ret) {
                        WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
                        jump_label_del_module(mod);
                }
                break;
        case MODULE_STATE_GOING:
                jump_label_del_module(mod);
                break;
        }

        jump_label_unlock();
        cpus_read_unlock();

        return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
        bool init = system_state < SYSTEM_RUNNING;
        int ret = __jump_label_text_reserved(__start___jump_table,
                                             __stop___jump_table, start, end, init);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

static void jump_label_update(struct static_key *key)
{
        struct jump_entry *stop = __stop___jump_table;
        bool init = system_state < SYSTEM_RUNNING;
        struct jump_entry *entry;
#ifdef CONFIG_MODULES
        struct module *mod;

        if (static_key_linked(key)) {
                __jump_label_mod_update(key);
                return;
        }

        preempt_disable();
        mod = __module_address((unsigned long)key);
        if (mod) {
                stop = mod->jump_entries + mod->num_jump_entries;
                init = mod->state == MODULE_STATE_COMING;
        }
        preempt_enable();
#endif
        entry = static_key_entries(key);
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop, init);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                WARN_ON(static_key_enabled(&sk_true.key) != true);
                WARN_ON(static_key_enabled(&sk_false.key) != false);

                WARN_ON(!static_branch_likely(&sk_true));
                WARN_ON(!static_branch_unlikely(&sk_true));
                WARN_ON(static_branch_likely(&sk_false));
                WARN_ON(static_branch_unlikely(&sk_false));

                static_branch_disable(&sk_true);
                static_branch_enable(&sk_false);

                WARN_ON(static_key_enabled(&sk_true.key) == true);
                WARN_ON(static_key_enabled(&sk_false.key) == false);

                WARN_ON(static_branch_likely(&sk_true));
                WARN_ON(static_branch_unlikely(&sk_true));
                WARN_ON(!static_branch_likely(&sk_false));
                WARN_ON(!static_branch_unlikely(&sk_false));

                static_branch_enable(&sk_true);
                static_branch_disable(&sk_false);
        }

        return 0;
}
early_initcall(jump_label_test);
#endif /* CONFIG_STATIC_KEYS_SELFTEST */