// SPDX-License-Identifier: GPL-2.0-or-later
/* Rewritten by Rusty Russell, on the backs of many others...
   Copyright (C) 2001 Rusty Russell, 2002 Rusty Russell IBM.

*/
#include <linux/elf.h>
#include <linux/ftrace.h>
#include <linux/memory.h>
#include <linux/extable.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/init.h>
#include <linux/kprobes.h>
#include <linux/filter.h>

#include <asm/sections.h>
#include <linux/uaccess.h>

/*
 * mutex protecting text section modification (dynamic code patching).
 * some users need to sleep (allocating memory...) while they hold this lock.
 *
 * Note: Also protects SMP-alternatives modification on x86.
 *
 * NOT exported to modules - patching kernel text is a really delicate matter.
 */
DEFINE_MUTEX(text_mutex);
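
/*
 * Illustrative use only (not code from this file): patching code is
 * expected to hold this mutex across the whole rewrite, roughly:
 *
 *	mutex_lock(&text_mutex);
 *	... rewrite instructions, e.g. via an arch helper such as
 *	... x86's text_poke() ...
 *	mutex_unlock(&text_mutex);
 */
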
extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

/* Cleared by build time tools if the table is already sorted. */
u32 __initdata __visible main_extable_sort_needed = 1;

/* Sort the kernel's built-in exception table */
void __init sort_main_extable(void)
{
	if (main_extable_sort_needed &&
	    &__stop___ex_table > &__start___ex_table) {
		pr_notice("Sorting __ex_table...\n");
		sort_extable(__start___ex_table, __stop___ex_table);
	}
}

/* Given an address, look for it in the kernel exception table */
const
struct exception_table_entry *search_kernel_exception_table(unsigned long addr)
{
	return search_extable(__start___ex_table,
			      __stop___ex_table - __start___ex_table, addr);
}

/* Given an address, look for it in the exception tables. */
const struct exception_table_entry *search_exception_tables(unsigned long addr)
{
	const struct exception_table_entry *e;

	e = search_kernel_exception_table(addr);
	if (!e)
		e = search_module_extables(addr);
	if (!e)
		e = search_bpf_extables(addr);
	return e;
}
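
/*
 * Illustrative use only (not code from this file): an architecture's
 * fault handler typically consumes this lookup roughly as follows,
 * where "regs" is the faulting context's struct pt_regs:
 *
 *	const struct exception_table_entry *fixup;
 *
 *	fixup = search_exception_tables(instruction_pointer(regs));
 *	if (fixup) {
 *		... branch to the fixup code named by the entry ...
 *		return 1;	(fault handled)
 *	}
 *	return 0;		(genuine fault)
 */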
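
/*
 * core_kernel_text() - check if an address lies in the core kernel's
 * .text section. Init text also counts, but only before the kernel
 * starts freeing initmem, since those addresses stop being valid text
 * afterwards.
 */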
int notrace core_kernel_text(unsigned long addr)
{
	if (is_kernel_text(addr))
		return 1;

	if (system_state < SYSTEM_FREEING_INITMEM &&
	    is_kernel_inittext(addr))
		return 1;
	return 0;
}
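
/*
 * __kernel_text_address() - like kernel_text_address(), but also
 * accepts init text, so that init symbols in saved stacktraces can
 * still be resolved (see the comment in the body below).
 */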
int __kernel_text_address(unsigned long addr)
{
	if (kernel_text_address(addr))
		return 1;
	/*
	 * There might be init symbols in saved stacktraces.
	 * Give those symbols a chance to be printed in
	 * backtraces (such as lockdep traces).
	 *
	 * Since we are after the module-symbols check, there's
	 * no danger of address overlap:
	 */
	if (is_kernel_inittext(addr))
		return 1;
	return 0;
}
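
/*
 * kernel_text_address() - check whether an address is in any known
 * kernel text: the core kernel, loaded modules, ftrace trampolines,
 * kprobe instruction slots or BPF-generated images. Safe to call from
 * any context; if RCU is not watching, it is woken NMI-style around
 * the lookups that need it.
 */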
int kernel_text_address(unsigned long addr)
{
	bool no_rcu;
	int ret = 1;

	if (core_kernel_text(addr))
		return 1;

	/*
	 * If a stack dump happens while RCU is not watching, then
	 * RCU needs to be notified that it should start watching
	 * again. This can happen either by tracing that triggers a
	 * stack trace, or by a WARN() taken while coming back from
	 * idle, or while a CPU is coming online or going offline.
	 *
	 * is_module_text_address() as well as the kprobe slots,
	 * is_bpf_text_address() and is_bpf_image_address() require
	 * RCU to be watching.
	 */
	no_rcu = !rcu_is_watching();

	/* Treat this like an NMI as it can happen anywhere */
	if (no_rcu)
		ct_nmi_enter();

	if (is_module_text_address(addr))
		goto out;
	if (is_ftrace_trampoline(addr))
		goto out;
	if (is_kprobe_optinsn_slot(addr) || is_kprobe_insn_slot(addr))
		goto out;
	if (is_bpf_text_address(addr))
		goto out;
	ret = 0;
out:
	if (no_rcu)
		ct_nmi_exit();

	return ret;
}

/*
 * On some architectures (PPC64, IA64, PARISC) function pointers
 * are actually only tokens to some data that then holds the
 * real function address. As a result, to find if a function
 * pointer is part of the kernel text, we need to do some
 * special dereferencing first.
 */
#ifdef CONFIG_HAVE_FUNCTION_DESCRIPTORS
void *dereference_function_descriptor(void *ptr)
{
	func_desc_t *desc = ptr;
	void *p;

	if (!get_kernel_nofault(p, (void *)&desc->addr))
		ptr = p;
	return ptr;
}
EXPORT_SYMBOL_GPL(dereference_function_descriptor);
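
/*
 * Only descriptors that live in the kernel's own .opd section are
 * dereferenced; any other pointer is returned unchanged.
 */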
void *dereference_kernel_function_descriptor(void *ptr)
{
	if (ptr < (void *)__start_opd || ptr >= (void *)__end_opd)
		return ptr;

	return dereference_function_descriptor(ptr);
}
#endif
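
/*
 * func_ptr_is_kernel_text() - check whether a function pointer lands
 * in kernel or module text once any function descriptor indirection
 * has been resolved.
 */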
int func_ptr_is_kernel_text(void *ptr)
{
	unsigned long addr;

	addr = (unsigned long)dereference_function_descriptor(ptr);
	if (core_kernel_text(addr))
		return 1;
	return is_module_text_address(addr);
}
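
/*
 * Illustrative use only (not code from this file): a caller vetting a
 * callback pointer before trusting it might do the following, where
 * "nb" is a hypothetical struct notifier_block pointer:
 *
 *	if (!func_ptr_is_kernel_text(nb->notifier_call))
 *		return -EINVAL;
 */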