// SPDX-License-Identifier: GPL-2.0
#include <linux/compiler.h>
#include <linux/rbtree.h>
#include <inttypes.h>
#include <string.h>
#include <ctype.h>
#include <stdlib.h>
#include "dso.h"
#include "map.h"
#include "symbol.h"
#include <internal/lib.h> // page_size
#include "tests.h"
#include "debug.h"
#include "machine.h"

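/*
 * UM(x): convert a kallsyms map-relative address back into an absolute kernel
 * address, so it can be compared with addresses unmapped from the vmlinux map.
 */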
#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))

static bool is_ignored_symbol(const char *name, char type)
{
	/* Symbol names that exactly match the following are ignored. */
	static const char * const ignored_symbols[] = {
		/*
		 * Symbols which vary between passes. Passes 1 and 2 must have
		 * identical symbol lists. The kallsyms_* symbols below are
		 * only added after pass 1, they would be included in pass 2
		 * when --all-symbols is specified so exclude them to get a
		 * stable symbol list.
		 */
		"kallsyms_addresses",
		"kallsyms_offsets",
		"kallsyms_relative_base",
		"kallsyms_num_syms",
		"kallsyms_names",
		"kallsyms_markers",
		"kallsyms_token_table",
		"kallsyms_token_index",
		/* Exclude linker generated symbols which vary between passes */
		"_SDA_BASE_",		/* ppc */
		"_SDA2_BASE_",		/* ppc */
		NULL
	};

	/* Symbol names that begin with the following are ignored. */
	static const char * const ignored_prefixes[] = {
		"$",			/* local symbols for ARM, MIPS, etc. */
		".L",			/* local labels, .LBB,.Ltmpxxx,.L__unnamed_xx,.LASANPC, etc. */
		"__crc_",		/* modversions */
		"__efistub_",		/* arm64 EFI stub namespace */
		"__kvm_nvhe_$",		/* arm64 local symbols in non-VHE KVM namespace */
		"__kvm_nvhe_.L",	/* arm64 local symbols in non-VHE KVM namespace */
		"__AArch64ADRPThunk_",	/* arm64 lld */
		"__ARMV5PILongThunk_",	/* arm lld */
		"__ARMV7PILongThunk_",
		"__ThumbV7PILongThunk_",
		"__LA25Thunk_",		/* mips lld */
		"__microLA25Thunk_",
		NULL
	};

	/* Symbol names that end with the following are ignored. */
	static const char * const ignored_suffixes[] = {
		"_from_arm",		/* arm */
		"_from_thumb",		/* arm */
		"_veneer",		/* arm */
		NULL
	};

	/* Symbol names that contain the following are ignored. */
	static const char * const ignored_matches[] = {
		".long_branch.",	/* ppc stub */
		".plt_branch.",		/* ppc stub */
		NULL
	};

	const char * const *p;

	for (p = ignored_symbols; *p; p++)
		if (!strcmp(name, *p))
			return true;

	for (p = ignored_prefixes; *p; p++)
		if (!strncmp(name, *p, strlen(*p)))
			return true;

	for (p = ignored_suffixes; *p; p++) {
		int l = strlen(name) - strlen(*p);

		if (l >= 0 && !strcmp(name + l, *p))
			return true;
	}

	for (p = ignored_matches; *p; p++) {
		if (strstr(name, *p))
			return true;
	}

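	/* exclude undefined ('U') and unique global ('u') symbols */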
	if (type == 'U' || type == 'u')
		return true;
	/* exclude debugging symbols */
	if (type == 'N' || type == 'n')
		return true;

	if (toupper(type) == 'A') {
		/* Keep these useful absolute symbols */
		if (strcmp(name, "__kernel_syscall_via_break") &&
		    strcmp(name, "__kernel_syscall_via_epc") &&
		    strcmp(name, "__kernel_sigtramp") &&
		    strcmp(name, "__gp"))
			return true;
	}

	return false;
}

static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused,
					  int subtest __maybe_unused)
{
	int err = TEST_FAIL;
	struct rb_node *nd;
	struct symbol *sym;
	struct map *kallsyms_map, *vmlinux_map, *map;
	struct machine kallsyms, vmlinux;
	struct maps *maps;
	u64 mem_start, mem_end;
	bool header_printed;

	/*
	 * Step 1:
	 *
	 * Init the machines that will hold the kernel and the modules obtained
	 * from both vmlinux + .ko files and from /proc/kallsyms split by modules.
	 */
	machine__init(&kallsyms, "", HOST_KERNEL_ID);
	machine__init(&vmlinux, "", HOST_KERNEL_ID);

	maps = machine__kernel_maps(&vmlinux);

	/*
	 * Step 2:
	 *
	 * Create the kernel maps for kallsyms and the DSO where we will then
	 * load /proc/kallsyms. Also create the modules maps from /proc/modules
	 * and find the .ko files that match them in /lib/modules/`uname -r`/.
	 */
	if (machine__create_kernel_maps(&kallsyms) < 0) {
		pr_debug("machine__create_kernel_maps failed");
		err = TEST_SKIP;
		goto out;
	}

	/*
	 * Step 3:
	 *
	 * Load and split /proc/kallsyms into multiple maps, one per module.
	 * Do not use kcore, as this test was designed before kcore support
	 * and has parts that only make sense if using the non-kcore code.
	 * XXX: extend it to stress the kcore code as well, hint: the list
	 * of modules extracted from /proc/kcore, in its current form, can't
	 * be compared against the list of modules found in the "vmlinux"
	 * code and with the one got from /proc/modules from the "kallsyms" code.
	 */
	if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) {
		pr_debug("machine__load_kallsyms failed");
		err = TEST_SKIP;
		goto out;
	}

	/*
	 * Step 4:
	 *
	 * kallsyms will be internally sorted by name, on demand, so that we can
	 * find the reference relocation symbol, i.e. the symbol we will use
	 * to see if the running kernel was relocated by checking if it has the
	 * same value in the vmlinux file we load.
	 */
	kallsyms_map = machine__kernel_map(&kallsyms);

	/*
	 * Step 5:
	 *
	 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
	 */
	if (machine__create_kernel_maps(&vmlinux) < 0) {
		pr_info("machine__create_kernel_maps failed");
		goto out;
	}

	vmlinux_map = machine__kernel_map(&vmlinux);

	/*
	 * Step 6:
	 *
	 * Locate a vmlinux file in the vmlinux path that has a buildid that
	 * matches the one of the running kernel.
	 *
	 * While doing that, look if we find the ref reloc symbol; if we find it
	 * we'll have its ref_reloc_symbol.unrelocated_addr and then
	 * maps__reloc_vmlinux will notice and set proper ->[un]map_ip routines
	 * to fixup the symbols.
	 */
	if (machine__load_vmlinux_path(&vmlinux) <= 0) {
		pr_info("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
		err = TEST_SKIP;
		goto out;
	}

	err = 0;
	/*
	 * Step 7:
	 *
	 * Now look at the symbols in the vmlinux DSO and check if we find all of them
	 * in the kallsyms DSO. For the ones that are in both, check their names and
	 * end addresses too.
	 */
	map__for_each_symbol(vmlinux_map, sym, nd) {
		struct symbol *pair, *first_pair;

		sym = rb_entry(nd, struct symbol, rb_node);

		if (sym->start == sym->end)
			continue;

		mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start);
		mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end);

		first_pair = machine__find_kernel_symbol(&kallsyms, mem_start, NULL);
		pair = first_pair;

		if (pair && UM(pair->start) == mem_start) {
next_pair:
			if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
				/*
				 * kallsyms doesn't have the symbol end, so we
				 * set that by using the next symbol start - 1.
				 * In some cases we get this up to a page
				 * wrong; trace_kmalloc, when I was developing
				 * this code, was one such example, 2106 bytes
				 * off the real size. More than that and we
				 * _really_ have a problem.
				 */
				s64 skew = mem_end - UM(pair->end);
				if (llabs(skew) >= page_size)
					pr_debug("WARN: %#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
						 mem_start, sym->name, mem_end,
						 UM(pair->end));

				/*
				 * Do not count this as a failure, because we
				 * could really find a case where it's not
				 * possible to get a proper function end from
				 * kallsyms.
				 */
				continue;
			} else {
				pair = machine__find_kernel_symbol_by_name(&kallsyms, sym->name, NULL);
				if (pair) {
					if (UM(pair->start) == mem_start)
						goto next_pair;

					pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
						 mem_start, sym->name, pair->name);
				} else {
					pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
						 mem_start, sym->name, first_pair->name);
				}

				continue;
			}
		} else if (mem_start == kallsyms.vmlinux_map->end) {
			/*
			 * Ignore aliases to _etext, i.e. to the end of the kernel text area,
			 * such as __indirect_thunk_end.
			 */
			continue;
		} else if (is_ignored_symbol(sym->name, sym->type)) {
			/*
			 * Ignore hidden symbols, see scripts/kallsyms.c for the details
			 */
			continue;
		} else {
			pr_debug("ERR : %#" PRIx64 ": %s not on kallsyms\n",
				 mem_start, sym->name);
		}

		err = -1;
	}

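	/*
	 * The remaining checks compare kernel/module maps rather than symbols
	 * and only print warnings; they never touch err, so skip them unless
	 * running in verbose mode.
	 */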
	if (verbose <= 0)
		goto out;

	header_printed = false;

	maps__for_each_entry(maps, map) {
		struct map *
		/*
		 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
		 * the kernel will have the path for the vmlinux file being used,
		 * so use the short name, less descriptive but the same ("[kernel]")
		 * in both cases.
		 */
			pair = maps__find_by_name(kallsyms.kmaps, (map->dso->kernel ?
								map->dso->short_name :
								map->dso->name));
		if (pair) {
			pair->priv = 1;
		} else {
			if (!header_printed) {
				pr_info("WARN: Maps only in vmlinux:\n");
				header_printed = true;
			}
			map__fprintf(map, stderr);
		}
	}

	header_printed = false;

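	/*
	 * Maps that the previous loop didn't match by name but that exist in
	 * kallsyms at the same start address: report the kallsyms name and
	 * mark them (->priv) as seen.
	 */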
	maps__for_each_entry(maps, map) {
		struct map *pair;

		mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
		mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end);

		pair = maps__find(kallsyms.kmaps, mem_start);
		if (pair == NULL || pair->priv)
			continue;

		if (pair->start == mem_start) {
			if (!header_printed) {
				pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n");
				header_printed = true;
			}

			pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
				map->start, map->end, map->pgoff, map->dso->name);
			if (mem_end != pair->end)
				pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
					pair->start, pair->end, pair->pgoff);
			pr_info(" %s\n", pair->dso->name);
			pair->priv = 1;
		}
	}

	header_printed = false;

	maps = machine__kernel_maps(&kallsyms);

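	/*
	 * Whatever is left unmarked (->priv == 0) in the kallsyms maps was not
	 * matched by the previous loops, i.e. it only exists in kallsyms.
	 */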
	maps__for_each_entry(maps, map) {
		if (!map->priv) {
			if (!header_printed) {
				pr_info("WARN: Maps only in kallsyms:\n");
				header_printed = true;
			}
			map__fprintf(map, stderr);
		}
	}
out:
	machine__exit(&kallsyms);
	machine__exit(&vmlinux);
	return err;
}

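/* Define the test suite entry picked up by 'perf test'. */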
DEFINE_SUITE("vmlinux symtab matches kallsyms", vmlinux_matches_kallsyms);