// SPDX-License-Identifier: GPL-2.0-only
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
#include <linux/cpu.h>
#include <asm/sections.h>

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
        mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
        mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
        const struct jump_entry *jea = a;
        const struct jump_entry *jeb = b;

        /*
         * Entries are sorted by key.
         */
        if (jump_entry_key(jea) < jump_entry_key(jeb))
                return -1;

        if (jump_entry_key(jea) > jump_entry_key(jeb))
                return 1;

        /*
         * In the batching mode, entries should also be sorted by the code
         * inside the already sorted list of entries, enabling a bsearch in
         * the vector.
         */
        if (jump_entry_code(jea) < jump_entry_code(jeb))
                return -1;

        if (jump_entry_code(jea) > jump_entry_code(jeb))
                return 1;

        return 0;
}

static void jump_label_swap(void *a, void *b, int size)
{
        long delta = (unsigned long)a - (unsigned long)b;
        struct jump_entry *jea = a;
        struct jump_entry *jeb = b;
        struct jump_entry tmp = *jea;

        jea->code = jeb->code - delta;
        jea->target = jeb->target - delta;
        jea->key = jeb->key - delta;

        jeb->code = tmp.code + delta;
        jeb->target = tmp.target + delta;
        jeb->key = tmp.key + delta;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
        unsigned long size;
        void *swapfn = NULL;

        if (IS_ENABLED(CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE))
                swapfn = jump_label_swap;

        size = (((unsigned long)stop - (unsigned long)start)
                / sizeof(struct jump_entry));
        sort(start, size, sizeof(struct jump_entry), jump_label_cmp, swapfn);
}
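
/*
 * Illustrative note, not kernel code: with
 * CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE, each jump_entry field holds an
 * offset relative to the field's own address, so physically moving an
 * entry changes the address it denotes. That is why jump_label_swap()
 * above rebases every field by 'delta'. A minimal userspace model of
 * the same idea (all names here are made up):
 *
 *        #include <stdio.h>
 *
 *        struct rel_ref {
 *                long off;        // target address minus address of 'off'
 *        };
 *
 *        static void *rel_deref(struct rel_ref *r)
 *        {
 *                return (char *)&r->off + r->off;
 *        }
 *
 *        static void rel_swap(struct rel_ref *a, struct rel_ref *b)
 *        {
 *                long delta = (char *)a - (char *)b;
 *                long tmp = a->off;
 *
 *                a->off = b->off - delta;        // b's old target, seen from a
 *                b->off = tmp + delta;           // a's old target, seen from b
 *        }
 *
 *        int main(void)
 *        {
 *                int x, y;
 *                struct rel_ref v[2] = {
 *                        { (char *)&x - (char *)&v[0].off },
 *                        { (char *)&y - (char *)&v[1].off },
 *                };
 *
 *                rel_swap(&v[0], &v[1]);
 *                printf("%d %d\n", rel_deref(&v[0]) == (void *)&y,
 *                                  rel_deref(&v[1]) == (void *)&x);  // 1 1
 *                return 0;
 *        }
 */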

static void jump_label_update(struct static_key *key);

/*
 * There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
 * The use of 'atomic_read()' requires atomic.h, and it's problematic for some
 * kernel headers such as kernel.h and others. Since static_key_count() is not
 * used in the branch statements the way it is for the !CONFIG_JUMP_LABEL case,
 * it's OK to have it be a function here. Similarly for 'static_key_enable()'
 * and 'static_key_disable()', which require bug.h. This should allow
 * jump_label.h to be included from most/all places for CONFIG_JUMP_LABEL.
 */
int static_key_count(struct static_key *key)
{
        /*
         * -1 means the first static_key_slow_inc() is in progress.
         * static_key_enabled() must return true, so return 1 here.
         */
        int n = atomic_read(&key->enabled);

        return n >= 0 ? n : 1;
}
EXPORT_SYMBOL_GPL(static_key_count);

/*
 * static_key_fast_inc_not_disabled - adds a user for a static key
 * @key: static key that must be already enabled
 *
 * The caller must make sure that the static key can't get disabled while
 * in this function. It doesn't patch jump labels, only adds a user to
 * an already enabled static key.
 *
 * Returns true if the increment was done. Unlike refcount_t the ref counter
 * is not saturated, but will fail to increment on overflow.
 */
bool static_key_fast_inc_not_disabled(struct static_key *key)
{
        int v;

        STATIC_KEY_CHECK_USE(key);
        /*
         * Negative key->enabled has a special meaning: it sends
         * static_key_slow_inc/dec() down the slow path, and it is non-zero
         * so it counts as "enabled" in jump_label_update().
         *
         * The INT_MAX overflow condition is either used by the networking
         * code to reset or detected in the slow path of
         * static_key_slow_inc_cpuslocked().
         */
        v = atomic_read(&key->enabled);
        do {
                if (v <= 0 || v == INT_MAX)
                        return false;
        } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v + 1)));

        return true;
}
EXPORT_SYMBOL_GPL(static_key_fast_inc_not_disabled);
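
/*
 * Illustrative userspace model of the loop above (a sketch, not kernel
 * code): "increment only if already positive and not saturated" as a
 * compare-exchange retry loop, written with C11 atomics. The function
 * name is made up for the example.
 *
 *        #include <limits.h>
 *        #include <stdatomic.h>
 *        #include <stdbool.h>
 *
 *        static bool inc_not_disabled(atomic_int *enabled)
 *        {
 *                int v = atomic_load(enabled);
 *
 *                do {
 *                        // 0 = disabled, negative = enable in progress,
 *                        // INT_MAX = would overflow: refuse all of them.
 *                        if (v <= 0 || v == INT_MAX)
 *                                return false;
 *                        // On failure the CAS reloads 'v' and we retry.
 *                } while (!atomic_compare_exchange_weak(enabled, &v, v + 1));
 *
 *                return true;
 *        }
 */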

bool static_key_slow_inc_cpuslocked(struct static_key *key)
{
        lockdep_assert_cpus_held();

        /*
         * Careful if we get concurrent static_key_slow_inc/dec() calls;
         * later calls must wait for the first one to _finish_ the
         * jump_label_update() process. At the same time, however,
         * the jump_label_update() call below wants to see
         * static_key_enabled(&key) for jumps to be updated properly.
         */
        if (static_key_fast_inc_not_disabled(key))
                return true;

        guard(mutex)(&jump_label_mutex);
        /* Try to mark it as 'enabling in progress'. */
        if (!atomic_cmpxchg(&key->enabled, 0, -1)) {
                jump_label_update(key);
                /*
                 * Ensure that when static_key_fast_inc_not_disabled() or
                 * static_key_dec_not_one() observe the positive value,
                 * they must also observe all the text changes.
                 */
                atomic_set_release(&key->enabled, 1);
        } else {
                /*
                 * While holding the mutex this should never observe
                 * anything else than a value >= 1 and succeed
                 */
                if (WARN_ON_ONCE(!static_key_fast_inc_not_disabled(key)))
                        return false;
        }
        return true;
}

bool static_key_slow_inc(struct static_key *key)
{
        bool ret;

        cpus_read_lock();
        ret = static_key_slow_inc_cpuslocked(key);
        cpus_read_unlock();
        return ret;
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
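
/*
 * Sketch of the enable handshake used above (userspace model with C11
 * atomics; patch_text() is a stand-in for jump_label_update()): the
 * first enabler wins the 0 -> -1 transition, patches the text, then
 * publishes 1 with a release store, so whoever observes a positive
 * count also observes the patched text.
 *
 *        #include <stdatomic.h>
 *
 *        static atomic_int enabled;        // 0: off, -1: enabling, >0: on
 *
 *        static void slow_enable(void (*patch_text)(void))
 *        {
 *                int expected = 0;
 *
 *                // Only one caller can move the count from 0 to -1.
 *                if (atomic_compare_exchange_strong(&enabled, &expected, -1)) {
 *                        patch_text();
 *                        // Release: text changes happen-before count == 1.
 *                        atomic_store_explicit(&enabled, 1,
 *                                              memory_order_release);
 *                }
 *        }
 */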

void static_key_enable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        lockdep_assert_cpus_held();

        if (atomic_read(&key->enabled) > 0) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
                return;
        }

        jump_label_lock();
        if (atomic_read(&key->enabled) == 0) {
                atomic_set(&key->enabled, -1);
                jump_label_update(key);
                /*
                 * See static_key_slow_inc().
                 */
                atomic_set_release(&key->enabled, 1);
        }
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable_cpuslocked);

void static_key_enable(struct static_key *key)
{
        cpus_read_lock();
        static_key_enable_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_enable);

void static_key_disable_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        lockdep_assert_cpus_held();

        if (atomic_read(&key->enabled) != 1) {
                WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
                return;
        }

        jump_label_lock();
        if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
                jump_label_update(key);
        jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable_cpuslocked);

void static_key_disable(struct static_key *key)
{
        cpus_read_lock();
        static_key_disable_cpuslocked(key);
        cpus_read_unlock();
}
EXPORT_SYMBOL_GPL(static_key_disable);

static bool static_key_dec_not_one(struct static_key *key)
{
        int v;

        /*
         * Go into the slow path if key::enabled is less than or equal to
         * one. One is valid to shut down the key, anything less than one
         * is an imbalance, which is handled at the call site.
         *
         * That includes the special case of '-1' which is set in
         * static_key_slow_inc_cpuslocked(), but that's harmless as it is
         * fully serialized in the slow path below. By the time this task
         * acquires the jump label lock the value is back to one and the
         * retry under the lock must succeed.
         */
        v = atomic_read(&key->enabled);
        do {
                /*
                 * Warn about the '-1' case though; since that means a
                 * decrement is concurrent with a first (0->1) increment. IOW
                 * people are trying to disable something that wasn't yet fully
                 * enabled. This suggests an ordering problem on the user side.
                 */
                WARN_ON_ONCE(v < 0);

                /*
                 * Warn about underflow, and lie about success in an attempt to
                 * not make things worse.
                 */
                if (WARN_ON_ONCE(v == 0))
                        return true;

                if (v <= 1)
                        return false;
        } while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));

        return true;
}

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
        int val;

        lockdep_assert_cpus_held();

        if (static_key_dec_not_one(key))
                return;

        guard(mutex)(&jump_label_mutex);
        val = atomic_read(&key->enabled);
        /*
         * It should be impossible to observe -1 with jump_label_mutex held,
         * see static_key_slow_inc_cpuslocked().
         */
        if (WARN_ON_ONCE(val == -1))
                return;
        /*
         * Cannot already be 0, something went sideways.
         */
        if (WARN_ON_ONCE(val == 0))
                return;

        if (atomic_dec_and_test(&key->enabled))
                jump_label_update(key);
}

static void __static_key_slow_dec(struct static_key *key)
{
        cpus_read_lock();
        __static_key_slow_dec_cpuslocked(key);
        cpus_read_unlock();
}

void jump_label_update_timeout(struct work_struct *work)
{
        struct static_key_deferred *key =
                container_of(work, struct static_key_deferred, work.work);
        __static_key_slow_dec(&key->key);
}
EXPORT_SYMBOL_GPL(jump_label_update_timeout);

void static_key_slow_dec(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        __static_key_slow_dec(key);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_cpuslocked(struct static_key *key)
{
        STATIC_KEY_CHECK_USE(key);
        __static_key_slow_dec_cpuslocked(key);
}

void __static_key_slow_dec_deferred(struct static_key *key,
                                    struct delayed_work *work,
                                    unsigned long timeout)
{
        STATIC_KEY_CHECK_USE(key);

        if (static_key_dec_not_one(key))
                return;

        schedule_delayed_work(work, timeout);
}
EXPORT_SYMBOL_GPL(__static_key_slow_dec_deferred);

void __static_key_deferred_flush(void *key, struct delayed_work *work)
{
        STATIC_KEY_CHECK_USE(key);
        flush_delayed_work(work);
}
EXPORT_SYMBOL_GPL(__static_key_deferred_flush);

void jump_label_rate_limit(struct static_key_deferred *key,
                           unsigned long rl)
{
        STATIC_KEY_CHECK_USE(key);
        key->timeout = rl;
        INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
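
/*
 * Typical usage of a rate-limited key (hypothetical caller, shown only
 * for illustration; 'my_key' and the helpers around it are made-up
 * names): the deferred variant absorbs rapid enable/disable churn by
 * delaying the expensive 1 -> 0 text patch until the timeout expires.
 *
 *        static struct static_key_deferred my_key;
 *
 *        static int my_init(void)
 *        {
 *                jump_label_rate_limit(&my_key, HZ);  // defer dec ~1 second
 *                return 0;
 *        }
 *
 *        static void my_fast_path_user(void)
 *        {
 *                static_key_slow_inc(&my_key.key);      // patch now if 0 -> 1
 *                // ... use the fast path ...
 *                static_key_slow_dec_deferred(&my_key); // may only queue work
 *        }
 */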

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
        if (jump_entry_code(entry) <= (unsigned long)end &&
            jump_entry_code(entry) + jump_entry_size(entry) > (unsigned long)start)
                return 1;

        return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
                struct jump_entry *iter_stop, void *start, void *end, bool init)
{
        struct jump_entry *iter;

        iter = iter_start;
        while (iter < iter_stop) {
                if (init || !jump_entry_is_init(iter)) {
                        if (addr_conflict(iter, start, end))
                                return 1;
                }
                iter++;
        }

        return 0;
}
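
/*
 * The conflict test above is a standard interval-overlap check:
 * [code, code + size) intersects [start, end] iff code <= end and
 * code + size > start. A self-contained sketch of the same predicate
 * (function name invented for the example):
 *
 *        #include <stdbool.h>
 *
 *        // a is half-open [a_lo, a_hi), b is inclusive [b_lo, b_hi]
 *        static bool ranges_overlap(unsigned long a_lo, unsigned long a_hi,
 *                                   unsigned long b_lo, unsigned long b_hi)
 *        {
 *                return a_lo <= b_hi && a_hi > b_lo;
 *        }
 */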

#ifndef arch_jump_label_transform_static
static void arch_jump_label_transform_static(struct jump_entry *entry,
                                             enum jump_label_type type)
{
        /* nothing to do on most architectures */
}
#endif

static inline struct jump_entry *static_key_entries(struct static_key *key)
{
        WARN_ON_ONCE(key->type & JUMP_TYPE_LINKED);
        return (struct jump_entry *)(key->type & ~JUMP_TYPE_MASK);
}

static inline bool static_key_type(struct static_key *key)
{
        return key->type & JUMP_TYPE_TRUE;
}

static inline bool static_key_linked(struct static_key *key)
{
        return key->type & JUMP_TYPE_LINKED;
}

static inline void static_key_clear_linked(struct static_key *key)
{
        key->type &= ~JUMP_TYPE_LINKED;
}

static inline void static_key_set_linked(struct static_key *key)
{
        key->type |= JUMP_TYPE_LINKED;
}

/***
 * A 'struct static_key' uses a union such that it either points directly
 * to a table of 'struct jump_entry' or to a linked list of modules which in
 * turn point to 'struct jump_entry' tables.
 *
 * The two lower bits of the pointer are used to keep track of which pointer
 * type is in use and to store the initial branch direction; we use access
 * functions which preserve these bits.
 */
static void static_key_set_entries(struct static_key *key,
                                   struct jump_entry *entries)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)entries & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->entries = entries;
        key->type |= type;
}
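
/*
 * Sketch of the low-bit pointer tagging used throughout this file
 * (userspace model; the TYPE_* values mirror the kernel's JUMP_TYPE_*
 * bits, the helper names are invented): since the pointed-to structures
 * are at least 4-byte aligned, the two low bits of the pointer are free
 * to carry state.
 *
 *        #include <stdint.h>
 *
 *        #define TYPE_TRUE        0x1UL        // initial branch direction
 *        #define TYPE_LINKED      0x2UL        // points to a module list
 *        #define TYPE_MASK        0x3UL
 *
 *        static void *tag_ptr(void *p, unsigned long bits)
 *        {
 *                return (void *)((uintptr_t)p | bits);
 *        }
 *
 *        static void *untag_ptr(void *p)
 *        {
 *                return (void *)((uintptr_t)p & ~(uintptr_t)TYPE_MASK);
 *        }
 *
 *        static unsigned long tag_bits(void *p)
 *        {
 *                return (uintptr_t)p & TYPE_MASK;
 *        }
 */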

static enum jump_label_type jump_label_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool enabled = static_key_enabled(key);
        bool branch = jump_entry_is_branch(entry);

        /* See the comment in linux/jump_label.h */
        return enabled ^ branch;
}
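
/*
 * Truth table for the XOR above, assuming JUMP_LABEL_NOP == 0 and
 * JUMP_LABEL_JMP == 1 (the full discussion lives in
 * include/linux/jump_label.h):
 *
 *        enabled  branch  | resulting instruction
 *        -----------------+----------------------
 *           0       0     | NOP
 *           0       1     | JMP
 *           1       0     | JMP
 *           1       1     | NOP
 *
 * i.e. the site gets a jump exactly when the key state disagrees with
 * the entry's compile-time branch direction.
 */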

static bool jump_label_can_update(struct jump_entry *entry, bool init)
{
        /*
         * Cannot update code that was in an init text area.
         */
        if (!init && jump_entry_is_init(entry))
                return false;

        if (!kernel_text_address(jump_entry_code(entry))) {
                /*
                 * This skips patching built-in __exit, which
                 * is part of init_section_contains() but is
                 * not part of kernel_text_address().
                 *
                 * Skipping built-in __exit is fine since it
                 * will never be executed.
                 */
                WARN_ONCE(!jump_entry_is_init(entry),
                          "can't patch jump_label at %pS",
                          (void *)jump_entry_code(entry));
                return false;
        }

        return true;
}

#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop,
                                bool init)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
                if (jump_label_can_update(entry, init))
                        arch_jump_label_transform(entry, jump_label_type(entry));
        }
}
#else
static void __jump_label_update(struct static_key *key,
                                struct jump_entry *entry,
                                struct jump_entry *stop,
                                bool init)
{
        for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {

                if (!jump_label_can_update(entry, init))
                        continue;

                if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
                        /*
                         * Queue is full: Apply the current queue and try again.
                         */
                        arch_jump_label_transform_apply();
                        BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
                }
        }
        arch_jump_label_transform_apply();
}
#endif

void __init jump_label_init(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct static_key *key = NULL;
        struct jump_entry *iter;

        /*
         * Since we are initializing the static_key.enabled field with
         * the 'raw' int values (to avoid pulling in atomic.h) in
         * jump_label.h, let's make sure that is safe. There are only two
         * cases to check since we initialize to 0 or 1.
         */
        BUILD_BUG_ON((int)ATOMIC_INIT(0) != 0);
        BUILD_BUG_ON((int)ATOMIC_INIT(1) != 1);

        if (static_key_initialized)
                return;

        cpus_read_lock();
        jump_label_lock();
        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;
                bool in_init;

                /* rewrite NOPs */
                if (jump_label_type(iter) == JUMP_LABEL_NOP)
                        arch_jump_label_transform_static(iter, JUMP_LABEL_NOP);

                in_init = init_section_contains((void *)jump_entry_code(iter), 1);
                jump_entry_set_init(iter, in_init);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                static_key_set_entries(key, iter);
        }
        static_key_initialized = true;
        jump_label_unlock();
        cpus_read_unlock();
}

static inline bool static_key_sealed(struct static_key *key)
{
        return (key->type & JUMP_TYPE_LINKED) && !(key->type & ~JUMP_TYPE_MASK);
}

static inline void static_key_seal(struct static_key *key)
{
        unsigned long type = key->type & JUMP_TYPE_TRUE;
        key->type = JUMP_TYPE_LINKED | type;
}

void jump_label_init_ro(void)
{
        struct jump_entry *iter_start = __start___jump_table;
        struct jump_entry *iter_stop = __stop___jump_table;
        struct jump_entry *iter;

        if (WARN_ON_ONCE(!static_key_initialized))
                return;

        cpus_read_lock();
        jump_label_lock();

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk = jump_entry_key(iter);

                if (!is_kernel_ro_after_init((unsigned long)iterk))
                        continue;

                if (static_key_sealed(iterk))
                        continue;

                static_key_seal(iterk);
        }

        jump_label_unlock();
        cpus_read_unlock();
}

#ifdef CONFIG_MODULES

enum jump_label_type jump_label_init_type(struct jump_entry *entry)
{
        struct static_key *key = jump_entry_key(entry);
        bool type = static_key_type(key);
        bool branch = jump_entry_is_branch(entry);

        /* See the comment in linux/jump_label.h */
        return type ^ branch;
}

struct static_key_mod {
        struct static_key_mod *next;
        struct jump_entry *entries;
        struct module *mod;
};

static inline struct static_key_mod *static_key_mod(struct static_key *key)
{
        WARN_ON_ONCE(!static_key_linked(key));
        return (struct static_key_mod *)(key->type & ~JUMP_TYPE_MASK);
}

/***
 * key->type and key->next are the same via union.
 * This sets key->next and preserves the type bits.
 *
 * See additional comments above static_key_set_entries().
 */
static void static_key_set_mod(struct static_key *key,
                               struct static_key_mod *mod)
{
        unsigned long type;

        WARN_ON_ONCE((unsigned long)mod & JUMP_TYPE_MASK);
        type = key->type & JUMP_TYPE_MASK;
        key->next = mod;
        key->type |= type;
}

static int __jump_label_mod_text_reserved(void *start, void *end)
{
        struct module *mod;
        int ret;

        preempt_disable();
        mod = __module_text_address((unsigned long)start);
        WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);
        if (!try_module_get(mod))
                mod = NULL;
        preempt_enable();

        if (!mod)
                return 0;

        ret = __jump_label_text_reserved(mod->jump_entries,
                                mod->jump_entries + mod->num_jump_entries,
                                start, end, mod->state == MODULE_STATE_COMING);

        module_put(mod);

        return ret;
}

static void __jump_label_mod_update(struct static_key *key)
{
        struct static_key_mod *mod;

        for (mod = static_key_mod(key); mod; mod = mod->next) {
                struct jump_entry *stop;
                struct module *m;

                /*
                 * NULL if the static_key is defined in a module
                 * that does not use it
                 */
                if (!mod->entries)
                        continue;

                m = mod->mod;
                if (!m)
                        stop = __stop___jump_table;
                else
                        stop = m->jump_entries + m->num_jump_entries;
                __jump_label_update(key, mod->entries, stop,
                                    m && m->state == MODULE_STATE_COMING);
        }
}

static int jump_label_add_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, *jlm2;

        /* if the module doesn't have jump label entries, just return */
        if (iter_start == iter_stop)
                return 0;

        jump_label_sort_entries(iter_start, iter_stop);

        for (iter = iter_start; iter < iter_stop; iter++) {
                struct static_key *iterk;
                bool in_init;

                in_init = within_module_init(jump_entry_code(iter), mod);
                jump_entry_set_init(iter, in_init);

                iterk = jump_entry_key(iter);
                if (iterk == key)
                        continue;

                key = iterk;
                if (within_module((unsigned long)key, mod)) {
                        static_key_set_entries(key, iter);
                        continue;
                }

                /*
                 * If the key was sealed at init, then there's no need to keep a
                 * reference to its module entries - just patch them now and be
                 * done with it.
                 */
                if (static_key_sealed(key))
                        goto do_poke;

                jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
                if (!jlm)
                        return -ENOMEM;
                if (!static_key_linked(key)) {
                        jlm2 = kzalloc(sizeof(struct static_key_mod),
                                       GFP_KERNEL);
                        if (!jlm2) {
                                kfree(jlm);
                                return -ENOMEM;
                        }
                        preempt_disable();
                        jlm2->mod = __module_address((unsigned long)key);
                        preempt_enable();
                        jlm2->entries = static_key_entries(key);
                        jlm2->next = NULL;
                        static_key_set_mod(key, jlm2);
                        static_key_set_linked(key);
                }
                jlm->mod = mod;
                jlm->entries = iter;
                jlm->next = static_key_mod(key);
                static_key_set_mod(key, jlm);
                static_key_set_linked(key);

                /* Only update if we've changed from our initial state */
do_poke:
                if (jump_label_type(iter) != jump_label_init_type(iter))
                        __jump_label_update(key, iter, iter_stop, true);
        }

        return 0;
}

static void jump_label_del_module(struct module *mod)
{
        struct jump_entry *iter_start = mod->jump_entries;
        struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
        struct jump_entry *iter;
        struct static_key *key = NULL;
        struct static_key_mod *jlm, **prev;

        for (iter = iter_start; iter < iter_stop; iter++) {
                if (jump_entry_key(iter) == key)
                        continue;

                key = jump_entry_key(iter);

                if (within_module((unsigned long)key, mod))
                        continue;

                /* No @jlm allocated because key was sealed at init. */
                if (static_key_sealed(key))
                        continue;

                /* No memory during module load */
                if (WARN_ON(!static_key_linked(key)))
                        continue;

                prev = &key->next;
                jlm = static_key_mod(key);

                while (jlm && jlm->mod != mod) {
                        prev = &jlm->next;
                        jlm = jlm->next;
                }

                /* No memory during module load */
                if (WARN_ON(!jlm))
                        continue;

                if (prev == &key->next)
                        static_key_set_mod(key, jlm->next);
                else
                        *prev = jlm->next;

                kfree(jlm);

                jlm = static_key_mod(key);
                /* if only one entry is left, fold it back into the static_key */
                if (jlm->next == NULL) {
                        static_key_set_entries(key, jlm->entries);
                        static_key_clear_linked(key);
                        kfree(jlm);
                }
        }
}
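
/*
 * jump_label_del_module() uses the classic pointer-to-pointer unlink:
 * 'prev' always points at the field that currently holds 'jlm', so
 * removal needs no head special case - except that here the head
 * pointer lives in the tagged key->next/key->type union, hence the
 * static_key_set_mod() branch above. The plain pattern, as a standalone
 * sketch (types and names invented):
 *
 *        struct node {
 *                struct node *next;
 *                int key;
 *        };
 *
 *        static void unlink(struct node **head, int key)
 *        {
 *                struct node **prev = head;
 *                struct node *n = *head;
 *
 *                while (n && n->key != key) {
 *                        prev = &n->next;
 *                        n = n->next;
 *                }
 *                if (n)
 *                        *prev = n->next;        // caller frees 'n'
 *        }
 */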

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
                         void *data)
{
        struct module *mod = data;
        int ret = 0;

        cpus_read_lock();
        jump_label_lock();

        switch (val) {
        case MODULE_STATE_COMING:
                ret = jump_label_add_module(mod);
                if (ret) {
                        WARN(1, "Failed to allocate memory: jump_label may not work properly.\n");
                        jump_label_del_module(mod);
                }
                break;
        case MODULE_STATE_GOING:
                jump_label_del_module(mod);
                break;
        }

        jump_label_unlock();
        cpus_read_unlock();

        return notifier_from_errno(ret);
}

static struct notifier_block jump_label_module_nb = {
        .notifier_call = jump_label_module_notify,
        .priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
        return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * checks if the text addr located between @start and @end
 * overlaps with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * returns 1 if there is an overlap, 0 otherwise
 */
int jump_label_text_reserved(void *start, void *end)
{
        bool init = system_state < SYSTEM_RUNNING;
        int ret = __jump_label_text_reserved(__start___jump_table,
                        __stop___jump_table, start, end, init);

        if (ret)
                return ret;

#ifdef CONFIG_MODULES
        ret = __jump_label_mod_text_reserved(start, end);
#endif
        return ret;
}

static void jump_label_update(struct static_key *key)
{
        struct jump_entry *stop = __stop___jump_table;
        bool init = system_state < SYSTEM_RUNNING;
        struct jump_entry *entry;
#ifdef CONFIG_MODULES
        struct module *mod;

        if (static_key_linked(key)) {
                __jump_label_mod_update(key);
                return;
        }

        preempt_disable();
        mod = __module_address((unsigned long)key);
        if (mod) {
                stop = mod->jump_entries + mod->num_jump_entries;
                init = mod->state == MODULE_STATE_COMING;
        }
        preempt_enable();
#endif
        entry = static_key_entries(key);
        /* if there are no users, entry can be NULL */
        if (entry)
                __jump_label_update(key, entry, stop, init);
}

#ifdef CONFIG_STATIC_KEYS_SELFTEST
static DEFINE_STATIC_KEY_TRUE(sk_true);
static DEFINE_STATIC_KEY_FALSE(sk_false);

static __init int jump_label_test(void)
{
        int i;

        for (i = 0; i < 2; i++) {
                WARN_ON(static_key_enabled(&sk_true.key) != true);
                WARN_ON(static_key_enabled(&sk_false.key) != false);

                WARN_ON(!static_branch_likely(&sk_true));
                WARN_ON(!static_branch_unlikely(&sk_true));
                WARN_ON(static_branch_likely(&sk_false));
                WARN_ON(static_branch_unlikely(&sk_false));

                static_branch_disable(&sk_true);
                static_branch_enable(&sk_false);

                WARN_ON(static_key_enabled(&sk_true.key) == true);
                WARN_ON(static_key_enabled(&sk_false.key) == false);

                WARN_ON(static_branch_likely(&sk_true));
                WARN_ON(static_branch_unlikely(&sk_true));
                WARN_ON(!static_branch_likely(&sk_false));
                WARN_ON(!static_branch_unlikely(&sk_false));

                static_branch_enable(&sk_true);
                static_branch_disable(&sk_false);
        }

        return 0;
}
early_initcall(jump_label_test);
#endif /* STATIC_KEYS_SELFTEST */