1// SPDX-License-Identifier: GPL-2.0
2#include <linux/compiler.h>
3#include <linux/rbtree.h>
4#include <inttypes.h>
5#include <string.h>
6#include <stdlib.h>
7#include "dso.h"
8#include "map.h"
9#include "symbol.h"
10#include <internal/lib.h> // page_size
11#include "tests.h"
12#include "debug.h"
13#include "machine.h"
14
15#define UM(x) kallsyms_map->unmap_ip(kallsyms_map, (x))
16
17int test__vmlinux_matches_kallsyms(struct test *test __maybe_unused, int subtest __maybe_unused)
18{
19 int err = -1;
20 struct rb_node *nd;
21 struct symbol *sym;
22 struct map *kallsyms_map, *vmlinux_map, *map;
23 struct machine kallsyms, vmlinux;
24 struct maps *maps = machine__kernel_maps(&vmlinux);
25 u64 mem_start, mem_end;
26 bool header_printed;
27
28 /*
29 * Step 1:
30 *
31 * Init the machines that will hold kernel, modules obtained from
32 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
33 */
34 machine__init(&kallsyms, "", HOST_KERNEL_ID);
35 machine__init(&vmlinux, "", HOST_KERNEL_ID);
36
37 /*
38 * Step 2:
39 *
40 * Create the kernel maps for kallsyms and the DSO where we will then
41 * load /proc/kallsyms. Also create the modules maps from /proc/modules
42 * and find the .ko files that match them in /lib/modules/`uname -r`/.
43 */
44 if (machine__create_kernel_maps(&kallsyms) < 0) {
45 pr_debug("machine__create_kernel_maps ");
46 goto out;
47 }
48
49 /*
50 * Step 3:
51 *
52 * Load and split /proc/kallsyms into multiple maps, one per module.
53 * Do not use kcore, as this test was designed before kcore support
54 * and has parts that only make sense if using the non-kcore code.
55 * XXX: extend it to stress the kcore code as well, hint: the list
56 * of modules extracted from /proc/kcore, in its current form, can't
57 * be compared with the list of modules found in the "vmlinux" code
58 * or with the one obtained from /proc/modules in the "kallsyms" code.
59 */
60 if (machine__load_kallsyms(&kallsyms, "/proc/kallsyms") <= 0) {
61 pr_debug("dso__load_kallsyms ");
62 goto out;
63 }
64
65 /*
66 * Step 4:
67 *
68 * kallsyms will be sorted by name internally, on demand, so that we can
69 * find the reference relocation symbol, i.e. the symbol we will use
70 * to see if the running kernel was relocated by checking if it has the
71 * same value in the vmlinux file we load.
72 */
73 kallsyms_map = machine__kernel_map(&kallsyms);
74
75 /*
76 * Step 5:
77 *
78 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
79 */
80 if (machine__create_kernel_maps(&vmlinux) < 0) {
81 pr_debug("machine__create_kernel_maps ");
82 goto out;
83 }
84
85 vmlinux_map = machine__kernel_map(&vmlinux);
86
87 /*
88 * Step 6:
89 *
90 * Locate a vmlinux file in the vmlinux path that has a buildid that
91 * matches that of the running kernel.
92 *
93 * While doing that, look for the ref reloc symbol; if we find it we'll
94 * have its ref_reloc_symbol.unrelocated_addr, and then
95 * maps__reloc_vmlinux will notice and set the proper ->[un]map_ip
96 * routines to fix up the symbols.
97 */
98 if (machine__load_vmlinux_path(&vmlinux) <= 0) {
99 pr_debug("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
100 err = TEST_SKIP;
101 goto out;
102 }
103
104 err = 0;
105 /*
106 * Step 7:
107 *
108 * Now look at the symbols in the vmlinux DSO and check if we find all of them
109 * in the kallsyms DSO. For the ones that are in both, check their names and
110 * end addresses too.
111 */
112 map__for_each_symbol(vmlinux_map, sym, nd) {
113 struct symbol *pair, *first_pair;
114
115 sym = rb_entry(nd, struct symbol, rb_node);
116
117 if (sym->start == sym->end)
118 continue;
119
120 mem_start = vmlinux_map->unmap_ip(vmlinux_map, sym->start);
121 mem_end = vmlinux_map->unmap_ip(vmlinux_map, sym->end);
122
123 first_pair = machine__find_kernel_symbol(&kallsyms, mem_start, NULL);
124 pair = first_pair;
125
126 if (pair && UM(pair->start) == mem_start) {
127next_pair:
128 if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
129 /*
130 * kallsyms doesn't have the symbol end, so we
131 * set it to the next symbol's start - 1. In
132 * some cases this is off by up to a page:
133 * trace_kmalloc, when I was developing this
134 * code, was one such example, 2106 bytes off
135 * the real size. More than that and we
136 * _really_ have a problem.
137 */
138 s64 skew = mem_end - UM(pair->end);
139 if (llabs(skew) >= page_size)
140 pr_debug("WARN: %#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
141 mem_start, sym->name, mem_end,
142 UM(pair->end));
143
144 /*
145 * Do not count this as a failure, because we
146 * could really find a case where it's not
147 * possible to get proper function end from
148 * kallsyms.
149 */
150 continue;
151 } else {
152 pair = machine__find_kernel_symbol_by_name(&kallsyms, sym->name, NULL);
153 if (pair) {
154 if (UM(pair->start) == mem_start)
155 goto next_pair;
156
157 pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
158 mem_start, sym->name, pair->name);
159 } else {
160 pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
161 mem_start, sym->name, first_pair->name);
162 }
163
164 continue;
165 }
166 } else if (mem_start == kallsyms.vmlinux_map->end) {
167 /*
168 * Ignore aliases to _etext, i.e. to the end of the kernel text area,
169 * such as __indirect_thunk_end.
170 */
171 continue;
172 } else {
173 pr_debug("ERR : %#" PRIx64 ": %s not on kallsyms\n",
174 mem_start, sym->name);
175 }
176
177 err = -1;
178 }
179
180 if (verbose <= 0)
181 goto out;
182
183 header_printed = false;
184
185 for (map = maps__first(maps); map; map = map__next(map)) {
186 struct map *
187 /*
188 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
189 * the kernel will have the path for the vmlinux file being used,
190 * so use the short name, less descriptive but the same ("[kernel]" in
191 * both cases).
192 */
193 pair = map_groups__find_by_name(&kallsyms.kmaps,
194 (map->dso->kernel ?
195 map->dso->short_name :
196 map->dso->name));
197 if (pair) {
198 pair->priv = 1;
199 } else {
200 if (!header_printed) {
201 pr_info("WARN: Maps only in vmlinux:\n");
202 header_printed = true;
203 }
204 map__fprintf(map, stderr);
205 }
206 }
207
208 header_printed = false;
209
210 for (map = maps__first(maps); map; map = map__next(map)) {
211 struct map *pair;
212
213 mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
214 mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end);
215
216 pair = map_groups__find(&kallsyms.kmaps, mem_start);
217 if (pair == NULL || pair->priv)
218 continue;
219
220 if (pair->start == mem_start) {
221 if (!header_printed) {
222 pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n");
223 header_printed = true;
224 }
225
226 pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
227 map->start, map->end, map->pgoff, map->dso->name);
228 if (mem_end != pair->end)
229 pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
230 pair->start, pair->end, pair->pgoff);
231 pr_info(" %s\n", pair->dso->name);
232 pair->priv = 1;
233 }
234 }
235
236 header_printed = false;
237
238 maps = machine__kernel_maps(&kallsyms);
239
240 for (map = maps__first(maps); map; map = map__next(map)) {
241 if (!map->priv) {
242 if (!header_printed) {
243 pr_info("WARN: Maps only in kallsyms:\n");
244 header_printed = true;
245 }
246 map__fprintf(map, stderr);
247 }
248 }
249out:
250 machine__exit(&kallsyms);
251 machine__exit(&vmlinux);
252 return err;
253}
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/compiler.h>
3#include <linux/rbtree.h>
4#include <inttypes.h>
5#include <string.h>
6#include <ctype.h>
7#include <stdlib.h>
8#include "dso.h"
9#include "map.h"
10#include "symbol.h"
11#include <internal/lib.h> // page_size
12#include "tests.h"
13#include "debug.h"
14#include "machine.h"
15
16#define UM(x) map__unmap_ip(kallsyms_map, (x))
17
18static bool is_ignored_symbol(const char *name, char type)
19{
20 /* Symbol names that exactly match the following are ignored. */
21 static const char * const ignored_symbols[] = {
22 /*
23 * Symbols which vary between passes. Passes 1 and 2 must have
24 * identical symbol lists. The kallsyms_* symbols below are
25 * only added after pass 1, they would be included in pass 2
26 * when --all-symbols is specified so exclude them to get a
27 * stable symbol list.
28 */
29 "kallsyms_addresses",
30 "kallsyms_offsets",
31 "kallsyms_relative_base",
32 "kallsyms_num_syms",
33 "kallsyms_names",
34 "kallsyms_markers",
35 "kallsyms_token_table",
36 "kallsyms_token_index",
37 /* Exclude linker generated symbols which vary between passes */
38 "_SDA_BASE_", /* ppc */
39 "_SDA2_BASE_", /* ppc */
40 NULL
41 };
42
43 /* Symbol names that begin with the following are ignored. */
44 static const char * const ignored_prefixes[] = {
45 "$", /* local symbols for ARM, MIPS, etc. */
46 ".L", /* local labels, .LBB,.Ltmpxxx,.L__unnamed_xx,.LASANPC, etc. */
47 "__crc_", /* modversions */
48 "__efistub_", /* arm64 EFI stub namespace */
49 "__kvm_nvhe_$", /* arm64 local symbols in non-VHE KVM namespace */
50 "__kvm_nvhe_.L", /* arm64 local symbols in non-VHE KVM namespace */
51 "__AArch64ADRPThunk_", /* arm64 lld */
52 "__ARMV5PILongThunk_", /* arm lld */
53 "__ARMV7PILongThunk_",
54 "__ThumbV7PILongThunk_",
55 "__LA25Thunk_", /* mips lld */
56 "__microLA25Thunk_",
57 NULL
58 };
59
60 /* Symbol names that end with the following are ignored. */
61 static const char * const ignored_suffixes[] = {
62 "_from_arm", /* arm */
63 "_from_thumb", /* arm */
64 "_veneer", /* arm */
65 NULL
66 };
67
68 /* Symbol names that contain the following are ignored. */
69 static const char * const ignored_matches[] = {
70 ".long_branch.", /* ppc stub */
71 ".plt_branch.", /* ppc stub */
72 NULL
73 };
74
75 const char * const *p;
76
77 for (p = ignored_symbols; *p; p++)
78 if (!strcmp(name, *p))
79 return true;
80
81 for (p = ignored_prefixes; *p; p++)
82 if (!strncmp(name, *p, strlen(*p)))
83 return true;
84
85 for (p = ignored_suffixes; *p; p++) {
86 int l = strlen(name) - strlen(*p);
87
88 if (l >= 0 && !strcmp(name + l, *p))
89 return true;
90 }
91
92 for (p = ignored_matches; *p; p++) {
93 if (strstr(name, *p))
94 return true;
95 }
96
97 if (type == 'U' || type == 'u')
98 return true;
99 /* exclude debugging symbols */
100 if (type == 'N' || type == 'n')
101 return true;
102
103 if (toupper(type) == 'A') {
104 /* Keep these useful absolute symbols */
105 if (strcmp(name, "__kernel_syscall_via_break") &&
106 strcmp(name, "__kernel_syscall_via_epc") &&
107 strcmp(name, "__kernel_sigtramp") &&
108 strcmp(name, "__gp"))
109 return true;
110 }
111
112 return false;
113}
114
115struct test__vmlinux_matches_kallsyms_cb_args {
116 struct machine kallsyms;
117 struct map *vmlinux_map;
118 bool header_printed;
119};
120
121static int test__vmlinux_matches_kallsyms_cb1(struct map *map, void *data)
122{
123 struct test__vmlinux_matches_kallsyms_cb_args *args = data;
124 struct dso *dso = map__dso(map);
125 /*
126 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
127 * the kernel will have the path for the vmlinux file being used, so use
128 * the short name, less descriptive but the same ("[kernel]" in both
129 * cases).
130 */
131 struct map *pair = maps__find_by_name(args->kallsyms.kmaps,
132 (dso->kernel ? dso->short_name : dso->name));
133
134 if (pair)
135 map__set_priv(pair, 1);
136 else {
137 if (!args->header_printed) {
138 pr_info("WARN: Maps only in vmlinux:\n");
139 args->header_printed = true;
140 }
141 map__fprintf(map, stderr);
142 }
143 return 0;
144}
145
146static int test__vmlinux_matches_kallsyms_cb2(struct map *map, void *data)
147{
148 struct test__vmlinux_matches_kallsyms_cb_args *args = data;
149 struct map *pair;
150 u64 mem_start = map__unmap_ip(args->vmlinux_map, map__start(map));
151 u64 mem_end = map__unmap_ip(args->vmlinux_map, map__end(map));
152
153 pair = maps__find(args->kallsyms.kmaps, mem_start);
154 if (pair == NULL || map__priv(pair))
155 return 0;
156
157 if (map__start(pair) == mem_start) {
158 struct dso *dso = map__dso(map);
159
160 if (!args->header_printed) {
161 pr_info("WARN: Maps in vmlinux with a different name in kallsyms:\n");
162 args->header_printed = true;
163 }
164
165 pr_info("WARN: %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
166 map__start(map), map__end(map), map__pgoff(map), dso->name);
167 if (mem_end != map__end(pair))
168 pr_info(":\nWARN: *%" PRIx64 "-%" PRIx64 " %" PRIx64,
169 map__start(pair), map__end(pair), map__pgoff(pair));
170 pr_info(" %s\n", dso->name);
171 map__set_priv(pair, 1);
172 }
173 return 0;
174}
175
176static int test__vmlinux_matches_kallsyms_cb3(struct map *map, void *data)
177{
178 struct test__vmlinux_matches_kallsyms_cb_args *args = data;
179
180 if (!map__priv(map)) {
181 if (!args->header_printed) {
182 pr_info("WARN: Maps only in kallsyms:\n");
183 args->header_printed = true;
184 }
185 map__fprintf(map, stderr);
186 }
187 return 0;
188}
189
190static int test__vmlinux_matches_kallsyms(struct test_suite *test __maybe_unused,
191 int subtest __maybe_unused)
192{
193 int err = TEST_FAIL;
194 struct rb_node *nd;
195 struct symbol *sym;
196 struct map *kallsyms_map;
197 struct machine vmlinux;
198 struct maps *maps;
199 u64 mem_start, mem_end;
200 struct test__vmlinux_matches_kallsyms_cb_args args;
201
202 /*
203 * Step 1:
204 *
205 * Init the machines that will hold kernel, modules obtained from
206 * both vmlinux + .ko files and from /proc/kallsyms split by modules.
207 */
208 machine__init(&args.kallsyms, "", HOST_KERNEL_ID);
209 machine__init(&vmlinux, "", HOST_KERNEL_ID);
210
211 maps = machine__kernel_maps(&vmlinux);
212
213 /*
214 * Step 2:
215 *
216 * Create the kernel maps for kallsyms and the DSO where we will then
217 * load /proc/kallsyms. Also create the modules maps from /proc/modules
218 * and find the .ko files that match them in /lib/modules/`uname -r`/.
219 */
220 if (machine__create_kernel_maps(&args.kallsyms) < 0) {
221 pr_debug("machine__create_kernel_maps failed");
222 err = TEST_SKIP;
223 goto out;
224 }
225
226 /*
227 * Step 3:
228 *
229 * Load and split /proc/kallsyms into multiple maps, one per module.
230 * Do not use kcore, as this test was designed before kcore support
231 * and has parts that only make sense if using the non-kcore code.
232 * XXX: extend it to stress the kcore code as well, hint: the list
233 * of modules extracted from /proc/kcore, in its current form, can't
234 * be compared with the list of modules found in the "vmlinux" code
235 * or with the one obtained from /proc/modules in the "kallsyms" code.
236 */
237 if (machine__load_kallsyms(&args.kallsyms, "/proc/kallsyms") <= 0) {
238 pr_debug("machine__load_kallsyms failed");
239 err = TEST_SKIP;
240 goto out;
241 }
242
243 /*
244 * Step 4:
245 *
246 * kallsyms will be sorted by name internally, on demand, so that we can
247 * find the reference relocation symbol, i.e. the symbol we will use
248 * to see if the running kernel was relocated by checking if it has the
249 * same value in the vmlinux file we load.
250 */
251 kallsyms_map = machine__kernel_map(&args.kallsyms);
252
253 /*
254 * Step 5:
255 *
256 * Now repeat step 2, this time for the vmlinux file we'll auto-locate.
257 */
258 if (machine__create_kernel_maps(&vmlinux) < 0) {
259 pr_info("machine__create_kernel_maps failed");
260 goto out;
261 }
262
263 args.vmlinux_map = machine__kernel_map(&vmlinux);
264
265 /*
266 * Step 6:
267 *
268 * Locate a vmlinux file in the vmlinux path that has a buildid that
269 * matches that of the running kernel.
270 *
271 * While doing that, look for the ref reloc symbol; if we find it we'll
272 * have its ref_reloc_symbol.unrelocated_addr, and then
273 * maps__reloc_vmlinux will notice and set the proper ->[un]map_ip
274 * routines to fix up the symbols.
275 */
276 if (machine__load_vmlinux_path(&vmlinux) <= 0) {
277 pr_info("Couldn't find a vmlinux that matches the kernel running on this machine, skipping test\n");
278 err = TEST_SKIP;
279 goto out;
280 }
281
282 err = 0;
283 /*
284 * Step 7:
285 *
286 * Now look at the symbols in the vmlinux DSO and check if we find all of them
287 * in the kallsyms DSO. For the ones that are in both, check their names and
288 * end addresses too.
289 */
290 map__for_each_symbol(args.vmlinux_map, sym, nd) {
291 struct symbol *pair, *first_pair;
292
293 sym = rb_entry(nd, struct symbol, rb_node);
294
295 if (sym->start == sym->end)
296 continue;
297
298 mem_start = map__unmap_ip(args.vmlinux_map, sym->start);
299 mem_end = map__unmap_ip(args.vmlinux_map, sym->end);
300
301 first_pair = machine__find_kernel_symbol(&args.kallsyms, mem_start, NULL);
302 pair = first_pair;
303
304 if (pair && UM(pair->start) == mem_start) {
305next_pair:
306 if (arch__compare_symbol_names(sym->name, pair->name) == 0) {
307 /*
308 * kallsyms doesn't have the symbol end, so we
309 * set it to the next symbol's start - 1. In
310 * some cases this is off by up to a page:
311 * trace_kmalloc, when I was developing this
312 * code, was one such example, 2106 bytes off
313 * the real size. More than that and we
314 * _really_ have a problem.
315 */
316 s64 skew = mem_end - UM(pair->end);
317 if (llabs(skew) >= page_size)
318 pr_debug("WARN: %#" PRIx64 ": diff end addr for %s v: %#" PRIx64 " k: %#" PRIx64 "\n",
319 mem_start, sym->name, mem_end,
320 UM(pair->end));
321
322 /*
323 * Do not count this as a failure, because we
324 * could really find a case where it's not
325 * possible to get proper function end from
326 * kallsyms.
327 */
328 continue;
329 } else {
330 pair = machine__find_kernel_symbol_by_name(&args.kallsyms,
331 sym->name, NULL);
332 if (pair) {
333 if (UM(pair->start) == mem_start)
334 goto next_pair;
335
336 pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
337 mem_start, sym->name, pair->name);
338 } else {
339 pr_debug("WARN: %#" PRIx64 ": diff name v: %s k: %s\n",
340 mem_start, sym->name, first_pair->name);
341 }
342
343 continue;
344 }
345 } else if (mem_start == map__end(args.kallsyms.vmlinux_map)) {
346 /*
347 * Ignore aliases to _etext, i.e. to the end of the kernel text area,
348 * such as __indirect_thunk_end.
349 */
350 continue;
351 } else if (is_ignored_symbol(sym->name, sym->type)) {
352 /*
353 * Ignore hidden symbols, see scripts/kallsyms.c for the details
354 */
355 continue;
356 } else {
357 pr_debug("ERR : %#" PRIx64 ": %s not on kallsyms\n",
358 mem_start, sym->name);
359 }
360
361 err = -1;
362 }
363
364 if (verbose <= 0)
365 goto out;
366
367 args.header_printed = false;
368 maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb1, &args);
369
370 args.header_printed = false;
371 maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb2, &args);
372
373 args.header_printed = false;
374 maps = machine__kernel_maps(&args.kallsyms);
375 maps__for_each_map(maps, test__vmlinux_matches_kallsyms_cb3, &args);
376
377out:
378 machine__exit(&args.kallsyms);
379 machine__exit(&vmlinux);
380 return err;
381}
382
383DEFINE_SUITE("vmlinux symtab matches kallsyms", vmlinux_matches_kallsyms);
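
A short standalone sketch follows (not part of the file above). It illustrates the end-address skew tolerance used in the symbol loop: a kallsyms end address that is off by less than one page is accepted, anything larger is reported. The page size and the two addresses are made-up values for illustration only; the 2106-byte gap mirrors the trace_kmalloc example mentioned in the comment.

/*
 * Minimal userspace sketch of the skew tolerance check, assuming a
 * 4 KiB page size and hypothetical symbol end addresses.
 */
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const int64_t page_size = 4096;                /* assumed page size */
	uint64_t vmlinux_end  = 0xffffffff81234c3aULL; /* hypothetical vmlinux symbol end */
	uint64_t kallsyms_end = 0xffffffff81234400ULL; /* hypothetical kallsyms end, 2106 bytes short */
	int64_t skew = (int64_t)(vmlinux_end - kallsyms_end);

	if (llabs(skew) >= page_size)
		printf("diff end addr, skew = %" PRId64 " bytes\n", skew);
	else
		printf("within tolerance, skew = %" PRId64 " bytes\n", skew);

	return 0;
}

As a usage note, the map-comparison passes after the symbol loop only run when verbose is greater than zero, so the suite is typically run verbosely, e.g. with something like "perf test -v vmlinux" (the exact invocation depends on the perf version).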