/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
			/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct static_key *key, int enable);

void static_key_slow_inc(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		if (!jump_label_get_branch_default(key))
			jump_label_update(key, JUMP_LABEL_ENABLE);
		else
			jump_label_update(key, JUMP_LABEL_DISABLE);
	}
	atomic_inc(&key->enabled);
	jump_label_unlock();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
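/*
 * Illustrative sketch (editorial, not part of this file): a typical
 * fast-path/slow-path pairing for the static_key API above, assuming the
 * STATIC_KEY_INIT_FALSE initializer and static_key_false() helper from the
 * corresponding header; my_feature, my_hot_path() and do_rare_work() are
 * hypothetical names.
 *
 *	static struct static_key my_feature = STATIC_KEY_INIT_FALSE;
 *
 *	void my_hot_path(void)
 *	{
 *		if (static_key_false(&my_feature))
 *			do_rare_work();
 *	}
 *
 *	void my_feature_enable(void)
 *	{
 *		static_key_slow_inc(&my_feature);
 *	}
 *
 * The increment/decrement pairing acts as a reference count: the branch is
 * only patched back to its default once every static_key_slow_inc() has been
 * matched by a static_key_slow_dec(). Both are slow paths that may sleep,
 * since they take jump_label_mutex.
 */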

static void __static_key_slow_dec(struct static_key *key,
		unsigned long rate_limit, struct delayed_work *work)
{
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
		WARN(atomic_read(&key->enabled) < 0,
		     "jump label: negative count!\n");
		return;
	}

	if (rate_limit) {
		atomic_inc(&key->enabled);
		schedule_delayed_work(work, rate_limit);
	} else {
		if (!jump_label_get_branch_default(key))
			jump_label_update(key, JUMP_LABEL_DISABLE);
		else
			jump_label_update(key, JUMP_LABEL_ENABLE);
	}
	jump_label_unlock();
}

static void jump_label_update_timeout(struct work_struct *work)
{
	struct static_key_deferred *key =
		container_of(work, struct static_key_deferred, work.work);
	__static_key_slow_dec(&key->key, 0, NULL);
}

void static_key_slow_dec(struct static_key *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(key, 0, NULL);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec);

void static_key_slow_dec_deferred(struct static_key_deferred *key)
{
	STATIC_KEY_CHECK_USE();
	__static_key_slow_dec(&key->key, key->timeout, &key->work);
}
EXPORT_SYMBOL_GPL(static_key_slow_dec_deferred);

void jump_label_rate_limit(struct static_key_deferred *key,
		unsigned long rl)
{
	STATIC_KEY_CHECK_USE();
	key->timeout = rl;
	INIT_DELAYED_WORK(&key->work, jump_label_update_timeout);
}
EXPORT_SYMBOL_GPL(jump_label_rate_limit);
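/*
 * Illustrative sketch (editorial, not part of this file): how a user of the
 * deferred API above might bound the rate of expensive re-patching, assuming
 * a hypothetical "my_deferred" key and a one-second timeout.
 *
 *	static struct static_key_deferred my_deferred;
 *
 *	static int __init my_init(void)
 *	{
 *		jump_label_rate_limit(&my_deferred, HZ);
 *		return 0;
 *	}
 *
 *	static void my_stop(void)
 *	{
 *		static_key_slow_dec_deferred(&my_deferred);
 *	}
 *
 * With a timeout set, a decrement that would otherwise disable the branch is
 * instead re-queued via schedule_delayed_work(), so rapid enable/disable
 * cycles do not thrash the text-patching machinery.
 */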

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}
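/*
 * Worked example for the overlap test above (editorial illustration, with a
 * hypothetical entry->code of 0x1000 and a JUMP_LABEL_NOP_SIZE of 5): the
 * patched bytes occupy [0x1000, 0x1005), so a range with start = 0x1004 and
 * end = 0x1010 conflicts (0x1000 <= 0x1010 and 0x1000 + 5 > 0x1004), while
 * start = 0x1005 does not, because the nop ends before the range begins.
 */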

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

/*
 * Update code which is definitely not currently executing.
 * Architectures which need heavyweight synchronization to modify
 * running code can override this to make the non-live update case
 * cheaper.
 */
void __weak __init_or_module arch_jump_label_transform_static(struct jump_entry *entry,
					    enum jump_label_type type)
{
	arch_jump_label_transform(entry, type);
}

static void __jump_label_update(struct static_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop, int enable)
{
	for (; (entry < stop) &&
	      (entry->key == (jump_label_t)(unsigned long)key);
	      entry++) {
		/*
		 * entry->code set to 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, enable);
	}
}

static enum jump_label_type jump_label_type(struct static_key *key)
{
	bool true_branch = jump_label_get_branch_default(key);
	bool state = static_key_enabled(key);

	if ((!true_branch && state) || (true_branch && !state))
		return JUMP_LABEL_ENABLE;

	return JUMP_LABEL_DISABLE;
}
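/*
 * Editorial note: the test above is an exclusive-or of the compile-time
 * default branch direction and the current enabled state, i.e. the result is
 * JUMP_LABEL_ENABLE exactly when the two disagree:
 *
 *	true_branch	state	result
 *	false		false	JUMP_LABEL_DISABLE
 *	false		true	JUMP_LABEL_ENABLE
 *	true		false	JUMP_LABEL_ENABLE
 *	true		true	JUMP_LABEL_DISABLE
 */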

void __init jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct static_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = (struct static_key *)(unsigned long)iter->key;
		arch_jump_label_transform_static(iter, jump_label_type(iterk));
		if (iterk == key)
			continue;

		key = iterk;
		/*
		 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
		 */
		*((unsigned long *)&key->entries) += (unsigned long)iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	static_key_initialized = true;
	jump_label_unlock();
}

#ifdef CONFIG_MODULES

struct static_key_mod {
	struct static_key_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};
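/*
 * Editorial note: jump_label_add_module() below strings one of these records
 * per module onto a key's ->next list, so a key defined in the core kernel
 * (or in one module) can still reach the jump entries that other modules
 * contributed for it:
 *
 *	key->entries -> entries in the object that defines the key
 *	key->next    -> { mod A, its entries } -> { mod B, its entries } -> NULL
 */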

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	mod = __module_text_address((unsigned long)start);
	if (!mod)
		return 0;

	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

	return __jump_label_text_reserved(mod->jump_entries,
		mod->jump_entries + mod->num_jump_entries,
		start, end);
}

static void __jump_label_mod_update(struct static_key *key, int enable)
{
	struct static_key_mod *mod = key->next;

	while (mod) {
		struct module *m = mod->mod;

		__jump_label_update(key, mod->entries,
			m->jump_entries + m->num_jump_entries,
			enable);
		mod = mod->next;
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is provided by
 * the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++) {
		arch_jump_label_transform_static(iter, JUMP_LABEL_DISABLE);
	}
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		struct static_key *iterk;

		iterk = (struct static_key *)(unsigned long)iter->key;
		if (iterk == key)
			continue;

		key = iterk;
		if (__module_address(iter->key) == mod) {
			/*
			 * Set key->entries to iter, but preserve JUMP_LABEL_TRUE_BRANCH.
			 */
			*((unsigned long *)&key->entries) += (unsigned long)iter;
			key->next = NULL;
			continue;
		}
		jlm = kzalloc(sizeof(struct static_key_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;
		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		if (jump_label_type(key) == JUMP_LABEL_ENABLE)
			__jump_label_update(key, iter, iter_stop, JUMP_LABEL_ENABLE);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct static_key *key = NULL;
	struct static_key_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct static_key *)(unsigned long)iter->key;

		if (__module_address(iter->key) == mod)
			continue;

		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks whether the text addresses between @start and @end
 * overlap with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}
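/*
 * Illustrative sketch (editorial, not part of this file): a text-patching
 * client would typically consult the check above before touching an
 * instruction, roughly along these lines (my_patch_site and my_patch_len are
 * hypothetical), holding jump_label_mutex via jump_label_lock() for the
 * duration of the check as required by the comment above:
 *
 *	jump_label_lock();
 *	if (jump_label_text_reserved(my_patch_site, my_patch_site + my_patch_len))
 *		ret = -EBUSY;
 *	jump_label_unlock();
 */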
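/*
 * Editorial note: for a key defined in a module, the key's own entries live
 * in that module's table, so the walk below is bounded by the module's
 * jump_entries array rather than by __stop___jump_table; entries contributed
 * by other modules are handled separately via __jump_label_mod_update().
 */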
static void jump_label_update(struct static_key *key, int enable)
{
	struct jump_entry *stop = __stop___jump_table;
	struct jump_entry *entry = jump_label_get_entries(key);

#ifdef CONFIG_MODULES
	struct module *mod = __module_address((unsigned long)key);

	__jump_label_mod_update(key, enable);

	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, enable);
}

#endif
/*
 * jump label support
 *
 * Copyright (C) 2009 Jason Baron <jbaron@redhat.com>
 * Copyright (C) 2011 Peter Zijlstra <pzijlstr@redhat.com>
 *
 */
#include <linux/memory.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <linux/err.h>
#include <linux/jump_label.h>

#ifdef HAVE_JUMP_LABEL

/* mutex to protect coming/going of the jump_label table */
static DEFINE_MUTEX(jump_label_mutex);

void jump_label_lock(void)
{
	mutex_lock(&jump_label_mutex);
}

void jump_label_unlock(void)
{
	mutex_unlock(&jump_label_mutex);
}

bool jump_label_enabled(struct jump_label_key *key)
{
	return !!atomic_read(&key->enabled);
}

static int jump_label_cmp(const void *a, const void *b)
{
	const struct jump_entry *jea = a;
	const struct jump_entry *jeb = b;

	if (jea->key < jeb->key)
		return -1;

	if (jea->key > jeb->key)
		return 1;

	return 0;
}

static void
jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
{
	unsigned long size;

	size = (((unsigned long)stop - (unsigned long)start)
			/ sizeof(struct jump_entry));
	sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
}

static void jump_label_update(struct jump_label_key *key, int enable);

void jump_label_inc(struct jump_label_key *key)
{
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	if (atomic_add_return(1, &key->enabled) == 1)
		jump_label_update(key, JUMP_LABEL_ENABLE);
	jump_label_unlock();
}

void jump_label_dec(struct jump_label_key *key)
{
	if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex))
		return;

	jump_label_update(key, JUMP_LABEL_DISABLE);
	jump_label_unlock();
}
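/*
 * Illustrative sketch (editorial, not part of this file): typical use of this
 * older jump_label_key interface, assuming the static_branch() helper from
 * the <linux/jump_label.h> of the same era; my_key, my_hot_path() and
 * do_rare_work() are hypothetical names.
 *
 *	static struct jump_label_key my_key;
 *
 *	void my_hot_path(void)
 *	{
 *		if (static_branch(&my_key))
 *			do_rare_work();
 *	}
 *
 * jump_label_inc(&my_key) and jump_label_dec(&my_key) act as a reference
 * count around the patched branch, just like the later static_key_slow_inc()
 * and static_key_slow_dec() pairing.
 */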

static int addr_conflict(struct jump_entry *entry, void *start, void *end)
{
	if (entry->code <= (unsigned long)end &&
		entry->code + JUMP_LABEL_NOP_SIZE > (unsigned long)start)
		return 1;

	return 0;
}

static int __jump_label_text_reserved(struct jump_entry *iter_start,
		struct jump_entry *iter_stop, void *start, void *end)
{
	struct jump_entry *iter;

	iter = iter_start;
	while (iter < iter_stop) {
		if (addr_conflict(iter, start, end))
			return 1;
		iter++;
	}

	return 0;
}

static void __jump_label_update(struct jump_label_key *key,
				struct jump_entry *entry,
				struct jump_entry *stop, int enable)
{
	for (; (entry < stop) &&
	      (entry->key == (jump_label_t)(unsigned long)key);
	      entry++) {
		/*
		 * entry->code set to 0 invalidates module init text sections;
		 * kernel_text_address() verifies we are not in core kernel
		 * init code, see jump_label_invalidate_module_init().
		 */
		if (entry->code && kernel_text_address(entry->code))
			arch_jump_label_transform(entry, enable);
	}
}

/*
 * Not all archs need this.
 */
void __weak arch_jump_label_text_poke_early(jump_label_t addr)
{
}

static __init int jump_label_init(void)
{
	struct jump_entry *iter_start = __start___jump_table;
	struct jump_entry *iter_stop = __stop___jump_table;
	struct jump_label_key *key = NULL;
	struct jump_entry *iter;

	jump_label_lock();
	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		arch_jump_label_text_poke_early(iter->code);
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct jump_label_key *)(unsigned long)iter->key;
		atomic_set(&key->enabled, 0);
		key->entries = iter;
#ifdef CONFIG_MODULES
		key->next = NULL;
#endif
	}
	jump_label_unlock();

	return 0;
}
early_initcall(jump_label_init);

#ifdef CONFIG_MODULES

struct jump_label_mod {
	struct jump_label_mod *next;
	struct jump_entry *entries;
	struct module *mod;
};

static int __jump_label_mod_text_reserved(void *start, void *end)
{
	struct module *mod;

	mod = __module_text_address((unsigned long)start);
	if (!mod)
		return 0;

	WARN_ON_ONCE(__module_text_address((unsigned long)end) != mod);

	return __jump_label_text_reserved(mod->jump_entries,
		mod->jump_entries + mod->num_jump_entries,
		start, end);
}

static void __jump_label_mod_update(struct jump_label_key *key, int enable)
{
	struct jump_label_mod *mod = key->next;

	while (mod) {
		struct module *m = mod->mod;

		__jump_label_update(key, mod->entries,
			m->jump_entries + m->num_jump_entries,
			enable);
		mod = mod->next;
	}
}

/***
 * jump_label_apply_nops - patch module jump labels with arch_get_jump_label_nop()
 * @mod: module to patch
 *
 * Allow for run-time selection of the optimal nops. Before the module
 * loads, patch these with arch_get_jump_label_nop(), which is provided by
 * the arch-specific jump label code.
 */
void jump_label_apply_nops(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return;

	for (iter = iter_start; iter < iter_stop; iter++)
		arch_jump_label_text_poke_early(iter->code);
}

static int jump_label_add_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct jump_label_key *key = NULL;
	struct jump_label_mod *jlm;

	/* if the module doesn't have jump label entries, just return */
	if (iter_start == iter_stop)
		return 0;

	jump_label_sort_entries(iter_start, iter_stop);

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct jump_label_key *)(unsigned long)iter->key;

		if (__module_address(iter->key) == mod) {
			atomic_set(&key->enabled, 0);
			key->entries = iter;
			key->next = NULL;
			continue;
		}

		jlm = kzalloc(sizeof(struct jump_label_mod), GFP_KERNEL);
		if (!jlm)
			return -ENOMEM;

		jlm->mod = mod;
		jlm->entries = iter;
		jlm->next = key->next;
		key->next = jlm;

		if (jump_label_enabled(key))
			__jump_label_update(key, iter, iter_stop,
					    JUMP_LABEL_ENABLE);
	}

	return 0;
}

static void jump_label_del_module(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;
	struct jump_label_key *key = NULL;
	struct jump_label_mod *jlm, **prev;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (iter->key == (jump_label_t)(unsigned long)key)
			continue;

		key = (struct jump_label_key *)(unsigned long)iter->key;

		if (__module_address(iter->key) == mod)
			continue;

		prev = &key->next;
		jlm = key->next;

		while (jlm && jlm->mod != mod) {
			prev = &jlm->next;
			jlm = jlm->next;
		}

		if (jlm) {
			*prev = jlm->next;
			kfree(jlm);
		}
	}
}

static void jump_label_invalidate_module_init(struct module *mod)
{
	struct jump_entry *iter_start = mod->jump_entries;
	struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
	struct jump_entry *iter;

	for (iter = iter_start; iter < iter_stop; iter++) {
		if (within_module_init(iter->code, mod))
			iter->code = 0;
	}
}

static int
jump_label_module_notify(struct notifier_block *self, unsigned long val,
			 void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		jump_label_lock();
		ret = jump_label_add_module(mod);
		if (ret)
			jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_GOING:
		jump_label_lock();
		jump_label_del_module(mod);
		jump_label_unlock();
		break;
	case MODULE_STATE_LIVE:
		jump_label_lock();
		jump_label_invalidate_module_init(mod);
		jump_label_unlock();
		break;
	}

	return notifier_from_errno(ret);
}

struct notifier_block jump_label_module_nb = {
	.notifier_call = jump_label_module_notify,
	.priority = 1, /* higher than tracepoints */
};

static __init int jump_label_init_module(void)
{
	return register_module_notifier(&jump_label_module_nb);
}
early_initcall(jump_label_init_module);

#endif /* CONFIG_MODULES */

/***
 * jump_label_text_reserved - check if addr range is reserved
 * @start: start text addr
 * @end: end text addr
 *
 * Checks whether the text addresses between @start and @end
 * overlap with any of the jump label patch addresses. Code
 * that wants to modify kernel text should first verify that
 * it does not overlap with any of the jump label addresses.
 * Caller must hold jump_label_mutex.
 *
 * Returns 1 if there is an overlap, 0 otherwise.
 */
int jump_label_text_reserved(void *start, void *end)
{
	int ret = __jump_label_text_reserved(__start___jump_table,
			__stop___jump_table, start, end);

	if (ret)
		return ret;

#ifdef CONFIG_MODULES
	ret = __jump_label_mod_text_reserved(start, end);
#endif
	return ret;
}

static void jump_label_update(struct jump_label_key *key, int enable)
{
	struct jump_entry *entry = key->entries, *stop = __stop___jump_table;

#ifdef CONFIG_MODULES
	struct module *mod = __module_address((jump_label_t)key);

	__jump_label_mod_update(key, enable);

	if (mod)
		stop = mod->jump_entries + mod->num_jump_entries;
#endif
	/* if there are no users, entry can be NULL */
	if (entry)
		__jump_label_update(key, entry, stop, enable);
}

#endif