Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * kallsyms.c: in-kernel printing of symbolic oopses and stack traces.
4 *
5 * Rewritten and vastly simplified by Rusty Russell for in-kernel
6 * module loader:
7 * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
8 *
9 * ChangeLog:
10 *
11 * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
12 * Changed the compression method from stem compression to "table lookup"
13 * compression (see scripts/kallsyms.c for a more complete description)
14 */
15#include <linux/kallsyms.h>
16#include <linux/init.h>
17#include <linux/seq_file.h>
18#include <linux/fs.h>
19#include <linux/kdb.h>
20#include <linux/err.h>
21#include <linux/proc_fs.h>
22#include <linux/sched.h> /* for cond_resched */
23#include <linux/ctype.h>
24#include <linux/slab.h>
25#include <linux/filter.h>
26#include <linux/ftrace.h>
27#include <linux/kprobes.h>
28#include <linux/build_bug.h>
29#include <linux/compiler.h>
30#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/bsearch.h>
33#include <linux/btf_ids.h>
34
35#include "kallsyms_internal.h"
36
37/*
38 * Expand a compressed symbol data into the resulting uncompressed string,
39 * if uncompressed string is too long (>= maxlen), it will be truncated,
40 * given the offset to where the symbol is in the compressed stream.
41 */
42static unsigned int kallsyms_expand_symbol(unsigned int off,
43 char *result, size_t maxlen)
44{
45 int len, skipped_first = 0;
46 const char *tptr;
47 const u8 *data;
48
49 /* Get the compressed symbol length from the first symbol byte. */
50 data = &kallsyms_names[off];
51 len = *data;
52 data++;
53 off++;
54
55 /* If MSB is 1, it is a "big" symbol, so needs an additional byte. */
56 if ((len & 0x80) != 0) {
57 len = (len & 0x7F) | (*data << 7);
58 data++;
59 off++;
60 }
61
62 /*
63 * Update the offset to return the offset for the next symbol on
64 * the compressed stream.
65 */
66 off += len;
67
68 /*
69 * For every byte on the compressed symbol data, copy the table
70 * entry for that byte.
71 */
72 while (len) {
73 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
74 data++;
75 len--;
76
77 while (*tptr) {
78 if (skipped_first) {
79 if (maxlen <= 1)
80 goto tail;
81 *result = *tptr;
82 result++;
83 maxlen--;
84 } else
85 skipped_first = 1;
86 tptr++;
87 }
88 }
89
90tail:
91 if (maxlen)
92 *result = '\0';
93
94 /* Return to offset to the next symbol. */
95 return off;
96}
97
98/*
99 * Get symbol type information. This is encoded as a single char at the
100 * beginning of the symbol name.
101 */
102static char kallsyms_get_symbol_type(unsigned int off)
103{
104 /*
105 * Get just the first code, look it up in the token table,
106 * and return the first char from this token.
107 */
108 return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];
109}
110
111
112/*
113 * Find the offset on the compressed stream given and index in the
114 * kallsyms array.
115 */
116static unsigned int get_symbol_offset(unsigned long pos)
117{
118 const u8 *name;
119 int i, len;
120
121 /*
122 * Use the closest marker we have. We have markers every 256 positions,
123 * so that should be close enough.
124 */
125 name = &kallsyms_names[kallsyms_markers[pos >> 8]];
126
127 /*
128 * Sequentially scan all the symbols up to the point we're searching
129 * for. Every symbol is stored in a [<len>][<len> bytes of data] format,
130 * so we just need to add the len to the current pointer for every
131 * symbol we wish to skip.
132 */
133 for (i = 0; i < (pos & 0xFF); i++) {
134 len = *name;
135
136 /*
137 * If MSB is 1, it is a "big" symbol, so we need to look into
138 * the next byte (and skip it, too).
139 */
140 if ((len & 0x80) != 0)
141 len = ((len & 0x7F) | (name[1] << 7)) + 1;
142
143 name = name + len + 1;
144 }
145
146 return name - kallsyms_names;
147}
148
149unsigned long kallsyms_sym_address(int idx)
150{
151 if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
152 return kallsyms_addresses[idx];
153
154 /* values are unsigned offsets if --absolute-percpu is not in effect */
155 if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU))
156 return kallsyms_relative_base + (u32)kallsyms_offsets[idx];
157
158 /* ...otherwise, positive offsets are absolute values */
159 if (kallsyms_offsets[idx] >= 0)
160 return kallsyms_offsets[idx];
161
162 /* ...and negative offsets are relative to kallsyms_relative_base - 1 */
163 return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
164}
165
166static bool cleanup_symbol_name(char *s)
167{
168 char *res;
169
170 if (!IS_ENABLED(CONFIG_LTO_CLANG))
171 return false;
172
173 /*
174 * LLVM appends various suffixes for local functions and variables that
175 * must be promoted to global scope as part of LTO. This can break
176 * hooking of static functions with kprobes. '.' is not a valid
177 * character in an identifier in C. Suffixes observed:
178 * - foo.llvm.[0-9a-f]+
179 * - foo.[0-9a-f]+
180 */
181 res = strchr(s, '.');
182 if (res) {
183 *res = '\0';
184 return true;
185 }
186
187 return false;
188}
189
190static int compare_symbol_name(const char *name, char *namebuf)
191{
192 int ret;
193
194 ret = strcmp(name, namebuf);
195 if (!ret)
196 return ret;
197
198 if (cleanup_symbol_name(namebuf) && !strcmp(name, namebuf))
199 return 0;
200
201 return ret;
202}
203
204static unsigned int get_symbol_seq(int index)
205{
206 unsigned int i, seq = 0;
207
208 for (i = 0; i < 3; i++)
209 seq = (seq << 8) | kallsyms_seqs_of_names[3 * index + i];
210
211 return seq;
212}
213
214static int kallsyms_lookup_names(const char *name,
215 unsigned int *start,
216 unsigned int *end)
217{
218 int ret;
219 int low, mid, high;
220 unsigned int seq, off;
221 char namebuf[KSYM_NAME_LEN];
222
223 low = 0;
224 high = kallsyms_num_syms - 1;
225
226 while (low <= high) {
227 mid = low + (high - low) / 2;
228 seq = get_symbol_seq(mid);
229 off = get_symbol_offset(seq);
230 kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
231 ret = compare_symbol_name(name, namebuf);
232 if (ret > 0)
233 low = mid + 1;
234 else if (ret < 0)
235 high = mid - 1;
236 else
237 break;
238 }
239
240 if (low > high)
241 return -ESRCH;
242
243 low = mid;
244 while (low) {
245 seq = get_symbol_seq(low - 1);
246 off = get_symbol_offset(seq);
247 kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
248 if (compare_symbol_name(name, namebuf))
249 break;
250 low--;
251 }
252 *start = low;
253
254 if (end) {
255 high = mid;
256 while (high < kallsyms_num_syms - 1) {
257 seq = get_symbol_seq(high + 1);
258 off = get_symbol_offset(seq);
259 kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
260 if (compare_symbol_name(name, namebuf))
261 break;
262 high++;
263 }
264 *end = high;
265 }
266
267 return 0;
268}
269
270/* Lookup the address for this symbol. Returns 0 if not found. */
271unsigned long kallsyms_lookup_name(const char *name)
272{
273 int ret;
274 unsigned int i;
275
276 /* Skip the search for empty string. */
277 if (!*name)
278 return 0;
279
280 ret = kallsyms_lookup_names(name, &i, NULL);
281 if (!ret)
282 return kallsyms_sym_address(get_symbol_seq(i));
283
284 return module_kallsyms_lookup_name(name);
285}
286
287/*
288 * Iterate over all symbols in vmlinux. For symbols from modules use
289 * module_kallsyms_on_each_symbol instead.
290 */
291int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
292 unsigned long),
293 void *data)
294{
295 char namebuf[KSYM_NAME_LEN];
296 unsigned long i;
297 unsigned int off;
298 int ret;
299
300 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
301 off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
302 ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
303 if (ret != 0)
304 return ret;
305 cond_resched();
306 }
307 return 0;
308}
309
310int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
311 const char *name, void *data)
312{
313 int ret;
314 unsigned int i, start, end;
315
316 ret = kallsyms_lookup_names(name, &start, &end);
317 if (ret)
318 return 0;
319
320 for (i = start; !ret && i <= end; i++) {
321 ret = fn(data, kallsyms_sym_address(get_symbol_seq(i)));
322 cond_resched();
323 }
324
325 return ret;
326}
327
328static unsigned long get_symbol_pos(unsigned long addr,
329 unsigned long *symbolsize,
330 unsigned long *offset)
331{
332 unsigned long symbol_start = 0, symbol_end = 0;
333 unsigned long i, low, high, mid;
334
335 /* This kernel should never had been booted. */
336 if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
337 BUG_ON(!kallsyms_addresses);
338 else
339 BUG_ON(!kallsyms_offsets);
340
341 /* Do a binary search on the sorted kallsyms_addresses array. */
342 low = 0;
343 high = kallsyms_num_syms;
344
345 while (high - low > 1) {
346 mid = low + (high - low) / 2;
347 if (kallsyms_sym_address(mid) <= addr)
348 low = mid;
349 else
350 high = mid;
351 }
352
353 /*
354 * Search for the first aliased symbol. Aliased
355 * symbols are symbols with the same address.
356 */
357 while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low))
358 --low;
359
360 symbol_start = kallsyms_sym_address(low);
361
362 /* Search for next non-aliased symbol. */
363 for (i = low + 1; i < kallsyms_num_syms; i++) {
364 if (kallsyms_sym_address(i) > symbol_start) {
365 symbol_end = kallsyms_sym_address(i);
366 break;
367 }
368 }
369
370 /* If we found no next symbol, we use the end of the section. */
371 if (!symbol_end) {
372 if (is_kernel_inittext(addr))
373 symbol_end = (unsigned long)_einittext;
374 else if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
375 symbol_end = (unsigned long)_end;
376 else
377 symbol_end = (unsigned long)_etext;
378 }
379
380 if (symbolsize)
381 *symbolsize = symbol_end - symbol_start;
382 if (offset)
383 *offset = addr - symbol_start;
384
385 return low;
386}
387
388/*
389 * Lookup an address but don't bother to find any names.
390 */
391int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
392 unsigned long *offset)
393{
394 char namebuf[KSYM_NAME_LEN];
395
396 if (is_ksym_addr(addr)) {
397 get_symbol_pos(addr, symbolsize, offset);
398 return 1;
399 }
400 return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) ||
401 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
402}
403
404static const char *kallsyms_lookup_buildid(unsigned long addr,
405 unsigned long *symbolsize,
406 unsigned long *offset, char **modname,
407 const unsigned char **modbuildid, char *namebuf)
408{
409 const char *ret;
410
411 namebuf[KSYM_NAME_LEN - 1] = 0;
412 namebuf[0] = 0;
413
414 if (is_ksym_addr(addr)) {
415 unsigned long pos;
416
417 pos = get_symbol_pos(addr, symbolsize, offset);
418 /* Grab name */
419 kallsyms_expand_symbol(get_symbol_offset(pos),
420 namebuf, KSYM_NAME_LEN);
421 if (modname)
422 *modname = NULL;
423 if (modbuildid)
424 *modbuildid = NULL;
425
426 ret = namebuf;
427 goto found;
428 }
429
430 /* See if it's in a module or a BPF JITed image. */
431 ret = module_address_lookup(addr, symbolsize, offset,
432 modname, modbuildid, namebuf);
433 if (!ret)
434 ret = bpf_address_lookup(addr, symbolsize,
435 offset, modname, namebuf);
436
437 if (!ret)
438 ret = ftrace_mod_address_lookup(addr, symbolsize,
439 offset, modname, namebuf);
440
441found:
442 cleanup_symbol_name(namebuf);
443 return ret;
444}
445
446/*
447 * Lookup an address
448 * - modname is set to NULL if it's in the kernel.
449 * - We guarantee that the returned name is valid until we reschedule even if.
450 * It resides in a module.
451 * - We also guarantee that modname will be valid until rescheduled.
452 */
453const char *kallsyms_lookup(unsigned long addr,
454 unsigned long *symbolsize,
455 unsigned long *offset,
456 char **modname, char *namebuf)
457{
458 return kallsyms_lookup_buildid(addr, symbolsize, offset, modname,
459 NULL, namebuf);
460}
461
462int lookup_symbol_name(unsigned long addr, char *symname)
463{
464 int res;
465
466 symname[0] = '\0';
467 symname[KSYM_NAME_LEN - 1] = '\0';
468
469 if (is_ksym_addr(addr)) {
470 unsigned long pos;
471
472 pos = get_symbol_pos(addr, NULL, NULL);
473 /* Grab name */
474 kallsyms_expand_symbol(get_symbol_offset(pos),
475 symname, KSYM_NAME_LEN);
476 goto found;
477 }
478 /* See if it's in a module. */
479 res = lookup_module_symbol_name(addr, symname);
480 if (res)
481 return res;
482
483found:
484 cleanup_symbol_name(symname);
485 return 0;
486}
487
488int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
489 unsigned long *offset, char *modname, char *name)
490{
491 int res;
492
493 name[0] = '\0';
494 name[KSYM_NAME_LEN - 1] = '\0';
495
496 if (is_ksym_addr(addr)) {
497 unsigned long pos;
498
499 pos = get_symbol_pos(addr, size, offset);
500 /* Grab name */
501 kallsyms_expand_symbol(get_symbol_offset(pos),
502 name, KSYM_NAME_LEN);
503 modname[0] = '\0';
504 goto found;
505 }
506 /* See if it's in a module. */
507 res = lookup_module_symbol_attrs(addr, size, offset, modname, name);
508 if (res)
509 return res;
510
511found:
512 cleanup_symbol_name(name);
513 return 0;
514}
515
516/* Look up a kernel symbol and return it in a text buffer. */
517static int __sprint_symbol(char *buffer, unsigned long address,
518 int symbol_offset, int add_offset, int add_buildid)
519{
520 char *modname;
521 const unsigned char *buildid;
522 const char *name;
523 unsigned long offset, size;
524 int len;
525
526 address += symbol_offset;
527 name = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid,
528 buffer);
529 if (!name)
530 return sprintf(buffer, "0x%lx", address - symbol_offset);
531
532 if (name != buffer)
533 strcpy(buffer, name);
534 len = strlen(buffer);
535 offset -= symbol_offset;
536
537 if (add_offset)
538 len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);
539
540 if (modname) {
541 len += sprintf(buffer + len, " [%s", modname);
542#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
543 if (add_buildid && buildid) {
544 /* build ID should match length of sprintf */
545#if IS_ENABLED(CONFIG_MODULES)
546 static_assert(sizeof(typeof_member(struct module, build_id)) == 20);
547#endif
548 len += sprintf(buffer + len, " %20phN", buildid);
549 }
550#endif
551 len += sprintf(buffer + len, "]");
552 }
553
554 return len;
555}
556
557/**
558 * sprint_symbol - Look up a kernel symbol and return it in a text buffer
559 * @buffer: buffer to be stored
560 * @address: address to lookup
561 *
562 * This function looks up a kernel symbol with @address and stores its name,
563 * offset, size and module name to @buffer if possible. If no symbol was found,
564 * just saves its @address as is.
565 *
566 * This function returns the number of bytes stored in @buffer.
567 */
568int sprint_symbol(char *buffer, unsigned long address)
569{
570 return __sprint_symbol(buffer, address, 0, 1, 0);
571}
572EXPORT_SYMBOL_GPL(sprint_symbol);
573
574/**
575 * sprint_symbol_build_id - Look up a kernel symbol and return it in a text buffer
576 * @buffer: buffer to be stored
577 * @address: address to lookup
578 *
579 * This function looks up a kernel symbol with @address and stores its name,
580 * offset, size, module name and module build ID to @buffer if possible. If no
581 * symbol was found, just saves its @address as is.
582 *
583 * This function returns the number of bytes stored in @buffer.
584 */
585int sprint_symbol_build_id(char *buffer, unsigned long address)
586{
587 return __sprint_symbol(buffer, address, 0, 1, 1);
588}
589EXPORT_SYMBOL_GPL(sprint_symbol_build_id);
590
591/**
592 * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
593 * @buffer: buffer to be stored
594 * @address: address to lookup
595 *
596 * This function looks up a kernel symbol with @address and stores its name
597 * and module name to @buffer if possible. If no symbol was found, just saves
598 * its @address as is.
599 *
600 * This function returns the number of bytes stored in @buffer.
601 */
602int sprint_symbol_no_offset(char *buffer, unsigned long address)
603{
604 return __sprint_symbol(buffer, address, 0, 0, 0);
605}
606EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);
607
608/**
609 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
610 * @buffer: buffer to be stored
611 * @address: address to lookup
612 *
613 * This function is for stack backtrace and does the same thing as
614 * sprint_symbol() but with modified/decreased @address. If there is a
615 * tail-call to the function marked "noreturn", gcc optimized out code after
616 * the call so that the stack-saved return address could point outside of the
617 * caller. This function ensures that kallsyms will find the original caller
618 * by decreasing @address.
619 *
620 * This function returns the number of bytes stored in @buffer.
621 */
622int sprint_backtrace(char *buffer, unsigned long address)
623{
624 return __sprint_symbol(buffer, address, -1, 1, 0);
625}
626
627/**
628 * sprint_backtrace_build_id - Look up a backtrace symbol and return it in a text buffer
629 * @buffer: buffer to be stored
630 * @address: address to lookup
631 *
632 * This function is for stack backtrace and does the same thing as
633 * sprint_symbol() but with modified/decreased @address. If there is a
634 * tail-call to the function marked "noreturn", gcc optimized out code after
635 * the call so that the stack-saved return address could point outside of the
636 * caller. This function ensures that kallsyms will find the original caller
637 * by decreasing @address. This function also appends the module build ID to
638 * the @buffer if @address is within a kernel module.
639 *
640 * This function returns the number of bytes stored in @buffer.
641 */
642int sprint_backtrace_build_id(char *buffer, unsigned long address)
643{
644 return __sprint_symbol(buffer, address, -1, 1, 1);
645}
646
647/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
648struct kallsym_iter {
649 loff_t pos;
650 loff_t pos_arch_end;
651 loff_t pos_mod_end;
652 loff_t pos_ftrace_mod_end;
653 loff_t pos_bpf_end;
654 unsigned long value;
655 unsigned int nameoff; /* If iterating in core kernel symbols. */
656 char type;
657 char name[KSYM_NAME_LEN];
658 char module_name[MODULE_NAME_LEN];
659 int exported;
660 int show_value;
661};
662
663int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
664 char *type, char *name)
665{
666 return -EINVAL;
667}
668
669static int get_ksymbol_arch(struct kallsym_iter *iter)
670{
671 int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
672 &iter->value, &iter->type,
673 iter->name);
674
675 if (ret < 0) {
676 iter->pos_arch_end = iter->pos;
677 return 0;
678 }
679
680 return 1;
681}
682
683static int get_ksymbol_mod(struct kallsym_iter *iter)
684{
685 int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
686 &iter->value, &iter->type,
687 iter->name, iter->module_name,
688 &iter->exported);
689 if (ret < 0) {
690 iter->pos_mod_end = iter->pos;
691 return 0;
692 }
693
694 return 1;
695}
696
697/*
698 * ftrace_mod_get_kallsym() may also get symbols for pages allocated for ftrace
699 * purposes. In that case "__builtin__ftrace" is used as a module name, even
700 * though "__builtin__ftrace" is not a module.
701 */
702static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
703{
704 int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
705 &iter->value, &iter->type,
706 iter->name, iter->module_name,
707 &iter->exported);
708 if (ret < 0) {
709 iter->pos_ftrace_mod_end = iter->pos;
710 return 0;
711 }
712
713 return 1;
714}
715
716static int get_ksymbol_bpf(struct kallsym_iter *iter)
717{
718 int ret;
719
720 strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
721 iter->exported = 0;
722 ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
723 &iter->value, &iter->type,
724 iter->name);
725 if (ret < 0) {
726 iter->pos_bpf_end = iter->pos;
727 return 0;
728 }
729
730 return 1;
731}
732
733/*
734 * This uses "__builtin__kprobes" as a module name for symbols for pages
735 * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a
736 * module.
737 */
738static int get_ksymbol_kprobe(struct kallsym_iter *iter)
739{
740 strlcpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN);
741 iter->exported = 0;
742 return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end,
743 &iter->value, &iter->type,
744 iter->name) < 0 ? 0 : 1;
745}
746
747/* Returns space to next name. */
748static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
749{
750 unsigned off = iter->nameoff;
751
752 iter->module_name[0] = '\0';
753 iter->value = kallsyms_sym_address(iter->pos);
754
755 iter->type = kallsyms_get_symbol_type(off);
756
757 off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name));
758
759 return off - iter->nameoff;
760}
761
762static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
763{
764 iter->name[0] = '\0';
765 iter->nameoff = get_symbol_offset(new_pos);
766 iter->pos = new_pos;
767 if (new_pos == 0) {
768 iter->pos_arch_end = 0;
769 iter->pos_mod_end = 0;
770 iter->pos_ftrace_mod_end = 0;
771 iter->pos_bpf_end = 0;
772 }
773}
774
775/*
776 * The end position (last + 1) of each additional kallsyms section is recorded
777 * in iter->pos_..._end as each section is added, and so can be used to
778 * determine which get_ksymbol_...() function to call next.
779 */
780static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
781{
782 iter->pos = pos;
783
784 if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
785 get_ksymbol_arch(iter))
786 return 1;
787
788 if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
789 get_ksymbol_mod(iter))
790 return 1;
791
792 if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
793 get_ksymbol_ftrace_mod(iter))
794 return 1;
795
796 if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) &&
797 get_ksymbol_bpf(iter))
798 return 1;
799
800 return get_ksymbol_kprobe(iter);
801}
802
803/* Returns false if pos at or past end of file. */
804static int update_iter(struct kallsym_iter *iter, loff_t pos)
805{
806 /* Module symbols can be accessed randomly. */
807 if (pos >= kallsyms_num_syms)
808 return update_iter_mod(iter, pos);
809
810 /* If we're not on the desired position, reset to new position. */
811 if (pos != iter->pos)
812 reset_iter(iter, pos);
813
814 iter->nameoff += get_ksymbol_core(iter);
815 iter->pos++;
816
817 return 1;
818}
819
820static void *s_next(struct seq_file *m, void *p, loff_t *pos)
821{
822 (*pos)++;
823
824 if (!update_iter(m->private, *pos))
825 return NULL;
826 return p;
827}
828
829static void *s_start(struct seq_file *m, loff_t *pos)
830{
831 if (!update_iter(m->private, *pos))
832 return NULL;
833 return m->private;
834}
835
836static void s_stop(struct seq_file *m, void *p)
837{
838}
839
840static int s_show(struct seq_file *m, void *p)
841{
842 void *value;
843 struct kallsym_iter *iter = m->private;
844
845 /* Some debugging symbols have no name. Ignore them. */
846 if (!iter->name[0])
847 return 0;
848
849 value = iter->show_value ? (void *)iter->value : NULL;
850
851 if (iter->module_name[0]) {
852 char type;
853
854 /*
855 * Label it "global" if it is exported,
856 * "local" if not exported.
857 */
858 type = iter->exported ? toupper(iter->type) :
859 tolower(iter->type);
860 seq_printf(m, "%px %c %s\t[%s]\n", value,
861 type, iter->name, iter->module_name);
862 } else
863 seq_printf(m, "%px %c %s\n", value,
864 iter->type, iter->name);
865 return 0;
866}
867
868static const struct seq_operations kallsyms_op = {
869 .start = s_start,
870 .next = s_next,
871 .stop = s_stop,
872 .show = s_show
873};
874
875#ifdef CONFIG_BPF_SYSCALL
876
877struct bpf_iter__ksym {
878 __bpf_md_ptr(struct bpf_iter_meta *, meta);
879 __bpf_md_ptr(struct kallsym_iter *, ksym);
880};
881
882static int ksym_prog_seq_show(struct seq_file *m, bool in_stop)
883{
884 struct bpf_iter__ksym ctx;
885 struct bpf_iter_meta meta;
886 struct bpf_prog *prog;
887
888 meta.seq = m;
889 prog = bpf_iter_get_info(&meta, in_stop);
890 if (!prog)
891 return 0;
892
893 ctx.meta = &meta;
894 ctx.ksym = m ? m->private : NULL;
895 return bpf_iter_run_prog(prog, &ctx);
896}
897
898static int bpf_iter_ksym_seq_show(struct seq_file *m, void *p)
899{
900 return ksym_prog_seq_show(m, false);
901}
902
903static void bpf_iter_ksym_seq_stop(struct seq_file *m, void *p)
904{
905 if (!p)
906 (void) ksym_prog_seq_show(m, true);
907 else
908 s_stop(m, p);
909}
910
911static const struct seq_operations bpf_iter_ksym_ops = {
912 .start = s_start,
913 .next = s_next,
914 .stop = bpf_iter_ksym_seq_stop,
915 .show = bpf_iter_ksym_seq_show,
916};
917
918static int bpf_iter_ksym_init(void *priv_data, struct bpf_iter_aux_info *aux)
919{
920 struct kallsym_iter *iter = priv_data;
921
922 reset_iter(iter, 0);
923
924 /* cache here as in kallsyms_open() case; use current process
925 * credentials to tell BPF iterators if values should be shown.
926 */
927 iter->show_value = kallsyms_show_value(current_cred());
928
929 return 0;
930}
931
932DEFINE_BPF_ITER_FUNC(ksym, struct bpf_iter_meta *meta, struct kallsym_iter *ksym)
933
934static const struct bpf_iter_seq_info ksym_iter_seq_info = {
935 .seq_ops = &bpf_iter_ksym_ops,
936 .init_seq_private = bpf_iter_ksym_init,
937 .fini_seq_private = NULL,
938 .seq_priv_size = sizeof(struct kallsym_iter),
939};
940
941static struct bpf_iter_reg ksym_iter_reg_info = {
942 .target = "ksym",
943 .feature = BPF_ITER_RESCHED,
944 .ctx_arg_info_size = 1,
945 .ctx_arg_info = {
946 { offsetof(struct bpf_iter__ksym, ksym),
947 PTR_TO_BTF_ID_OR_NULL },
948 },
949 .seq_info = &ksym_iter_seq_info,
950};
951
952BTF_ID_LIST(btf_ksym_iter_id)
953BTF_ID(struct, kallsym_iter)
954
955static int __init bpf_ksym_iter_register(void)
956{
957 ksym_iter_reg_info.ctx_arg_info[0].btf_id = *btf_ksym_iter_id;
958 return bpf_iter_reg_target(&ksym_iter_reg_info);
959}
960
961late_initcall(bpf_ksym_iter_register);
962
963#endif /* CONFIG_BPF_SYSCALL */
964
965static inline int kallsyms_for_perf(void)
966{
967#ifdef CONFIG_PERF_EVENTS
968 extern int sysctl_perf_event_paranoid;
969 if (sysctl_perf_event_paranoid <= 1)
970 return 1;
971#endif
972 return 0;
973}
974
975/*
976 * We show kallsyms information even to normal users if we've enabled
977 * kernel profiling and are explicitly not paranoid (so kptr_restrict
978 * is clear, and sysctl_perf_event_paranoid isn't set).
979 *
980 * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
981 * block even that).
982 */
983bool kallsyms_show_value(const struct cred *cred)
984{
985 switch (kptr_restrict) {
986 case 0:
987 if (kallsyms_for_perf())
988 return true;
989 fallthrough;
990 case 1:
991 if (security_capable(cred, &init_user_ns, CAP_SYSLOG,
992 CAP_OPT_NOAUDIT) == 0)
993 return true;
994 fallthrough;
995 default:
996 return false;
997 }
998}
999
1000static int kallsyms_open(struct inode *inode, struct file *file)
1001{
1002 /*
1003 * We keep iterator in m->private, since normal case is to
1004 * s_start from where we left off, so we avoid doing
1005 * using get_symbol_offset for every symbol.
1006 */
1007 struct kallsym_iter *iter;
1008 iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
1009 if (!iter)
1010 return -ENOMEM;
1011 reset_iter(iter, 0);
1012
1013 /*
1014 * Instead of checking this on every s_show() call, cache
1015 * the result here at open time.
1016 */
1017 iter->show_value = kallsyms_show_value(file->f_cred);
1018 return 0;
1019}
1020
1021#ifdef CONFIG_KGDB_KDB
1022const char *kdb_walk_kallsyms(loff_t *pos)
1023{
1024 static struct kallsym_iter kdb_walk_kallsyms_iter;
1025 if (*pos == 0) {
1026 memset(&kdb_walk_kallsyms_iter, 0,
1027 sizeof(kdb_walk_kallsyms_iter));
1028 reset_iter(&kdb_walk_kallsyms_iter, 0);
1029 }
1030 while (1) {
1031 if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
1032 return NULL;
1033 ++*pos;
1034 /* Some debugging symbols have no name. Ignore them. */
1035 if (kdb_walk_kallsyms_iter.name[0])
1036 return kdb_walk_kallsyms_iter.name;
1037 }
1038}
1039#endif /* CONFIG_KGDB_KDB */
1040
1041static const struct proc_ops kallsyms_proc_ops = {
1042 .proc_open = kallsyms_open,
1043 .proc_read = seq_read,
1044 .proc_lseek = seq_lseek,
1045 .proc_release = seq_release_private,
1046};
1047
1048static int __init kallsyms_init(void)
1049{
1050 proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops);
1051 return 0;
1052}
1053device_initcall(kallsyms_init);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * kallsyms.c: in-kernel printing of symbolic oopses and stack traces.
4 *
5 * Rewritten and vastly simplified by Rusty Russell for in-kernel
6 * module loader:
7 * Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
8 *
9 * ChangeLog:
10 *
11 * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
12 * Changed the compression method from stem compression to "table lookup"
13 * compression (see scripts/kallsyms.c for a more complete description)
14 */
15#include <linux/kallsyms.h>
16#include <linux/init.h>
17#include <linux/seq_file.h>
18#include <linux/fs.h>
19#include <linux/kdb.h>
20#include <linux/err.h>
21#include <linux/proc_fs.h>
22#include <linux/sched.h> /* for cond_resched */
23#include <linux/ctype.h>
24#include <linux/slab.h>
25#include <linux/filter.h>
26#include <linux/ftrace.h>
27#include <linux/compiler.h>
28
29/*
30 * These will be re-linked against their real values
31 * during the second link stage.
32 */
33extern const unsigned long kallsyms_addresses[] __weak;
34extern const int kallsyms_offsets[] __weak;
35extern const u8 kallsyms_names[] __weak;
36
37/*
38 * Tell the compiler that the count isn't in the small data section if the arch
39 * has one (eg: FRV).
40 */
41extern const unsigned int kallsyms_num_syms
42__attribute__((weak, section(".rodata")));
43
44extern const unsigned long kallsyms_relative_base
45__attribute__((weak, section(".rodata")));
46
47extern const u8 kallsyms_token_table[] __weak;
48extern const u16 kallsyms_token_index[] __weak;
49
50extern const unsigned int kallsyms_markers[] __weak;
51
52/*
53 * Expand a compressed symbol data into the resulting uncompressed string,
54 * if uncompressed string is too long (>= maxlen), it will be truncated,
55 * given the offset to where the symbol is in the compressed stream.
56 */
57static unsigned int kallsyms_expand_symbol(unsigned int off,
58 char *result, size_t maxlen)
59{
60 int len, skipped_first = 0;
61 const u8 *tptr, *data;
62
63 /* Get the compressed symbol length from the first symbol byte. */
64 data = &kallsyms_names[off];
65 len = *data;
66 data++;
67
68 /*
69 * Update the offset to return the offset for the next symbol on
70 * the compressed stream.
71 */
72 off += len + 1;
73
74 /*
75 * For every byte on the compressed symbol data, copy the table
76 * entry for that byte.
77 */
78 while (len) {
79 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
80 data++;
81 len--;
82
83 while (*tptr) {
84 if (skipped_first) {
85 if (maxlen <= 1)
86 goto tail;
87 *result = *tptr;
88 result++;
89 maxlen--;
90 } else
91 skipped_first = 1;
92 tptr++;
93 }
94 }
95
96tail:
97 if (maxlen)
98 *result = '\0';
99
100 /* Return to offset to the next symbol. */
101 return off;
102}
103
104/*
105 * Get symbol type information. This is encoded as a single char at the
106 * beginning of the symbol name.
107 */
108static char kallsyms_get_symbol_type(unsigned int off)
109{
110 /*
111 * Get just the first code, look it up in the token table,
112 * and return the first char from this token.
113 */
114 return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];
115}
116
117
118/*
119 * Find the offset on the compressed stream given and index in the
120 * kallsyms array.
121 */
122static unsigned int get_symbol_offset(unsigned long pos)
123{
124 const u8 *name;
125 int i;
126
127 /*
128 * Use the closest marker we have. We have markers every 256 positions,
129 * so that should be close enough.
130 */
131 name = &kallsyms_names[kallsyms_markers[pos >> 8]];
132
133 /*
134 * Sequentially scan all the symbols up to the point we're searching
135 * for. Every symbol is stored in a [<len>][<len> bytes of data] format,
136 * so we just need to add the len to the current pointer for every
137 * symbol we wish to skip.
138 */
139 for (i = 0; i < (pos & 0xFF); i++)
140 name = name + (*name) + 1;
141
142 return name - kallsyms_names;
143}
144
145static unsigned long kallsyms_sym_address(int idx)
146{
147 if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
148 return kallsyms_addresses[idx];
149
150 /* values are unsigned offsets if --absolute-percpu is not in effect */
151 if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU))
152 return kallsyms_relative_base + (u32)kallsyms_offsets[idx];
153
154 /* ...otherwise, positive offsets are absolute values */
155 if (kallsyms_offsets[idx] >= 0)
156 return kallsyms_offsets[idx];
157
158 /* ...and negative offsets are relative to kallsyms_relative_base - 1 */
159 return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
160}
161
162/* Lookup the address for this symbol. Returns 0 if not found. */
163unsigned long kallsyms_lookup_name(const char *name)
164{
165 char namebuf[KSYM_NAME_LEN];
166 unsigned long i;
167 unsigned int off;
168
169 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
170 off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
171
172 if (strcmp(namebuf, name) == 0)
173 return kallsyms_sym_address(i);
174 }
175 return module_kallsyms_lookup_name(name);
176}
177EXPORT_SYMBOL_GPL(kallsyms_lookup_name);
178
179int kallsyms_on_each_symbol(int (*fn)(void *, const char *, struct module *,
180 unsigned long),
181 void *data)
182{
183 char namebuf[KSYM_NAME_LEN];
184 unsigned long i;
185 unsigned int off;
186 int ret;
187
188 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
189 off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
190 ret = fn(data, namebuf, NULL, kallsyms_sym_address(i));
191 if (ret != 0)
192 return ret;
193 }
194 return module_kallsyms_on_each_symbol(fn, data);
195}
196EXPORT_SYMBOL_GPL(kallsyms_on_each_symbol);
197
198static unsigned long get_symbol_pos(unsigned long addr,
199 unsigned long *symbolsize,
200 unsigned long *offset)
201{
202 unsigned long symbol_start = 0, symbol_end = 0;
203 unsigned long i, low, high, mid;
204
205 /* This kernel should never had been booted. */
206 if (!IS_ENABLED(CONFIG_KALLSYMS_BASE_RELATIVE))
207 BUG_ON(!kallsyms_addresses);
208 else
209 BUG_ON(!kallsyms_offsets);
210
211 /* Do a binary search on the sorted kallsyms_addresses array. */
212 low = 0;
213 high = kallsyms_num_syms;
214
215 while (high - low > 1) {
216 mid = low + (high - low) / 2;
217 if (kallsyms_sym_address(mid) <= addr)
218 low = mid;
219 else
220 high = mid;
221 }
222
223 /*
224 * Search for the first aliased symbol. Aliased
225 * symbols are symbols with the same address.
226 */
227 while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low))
228 --low;
229
230 symbol_start = kallsyms_sym_address(low);
231
232 /* Search for next non-aliased symbol. */
233 for (i = low + 1; i < kallsyms_num_syms; i++) {
234 if (kallsyms_sym_address(i) > symbol_start) {
235 symbol_end = kallsyms_sym_address(i);
236 break;
237 }
238 }
239
240 /* If we found no next symbol, we use the end of the section. */
241 if (!symbol_end) {
242 if (is_kernel_inittext(addr))
243 symbol_end = (unsigned long)_einittext;
244 else if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
245 symbol_end = (unsigned long)_end;
246 else
247 symbol_end = (unsigned long)_etext;
248 }
249
250 if (symbolsize)
251 *symbolsize = symbol_end - symbol_start;
252 if (offset)
253 *offset = addr - symbol_start;
254
255 return low;
256}
257
258/*
259 * Lookup an address but don't bother to find any names.
260 */
261int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
262 unsigned long *offset)
263{
264 char namebuf[KSYM_NAME_LEN];
265
266 if (is_ksym_addr(addr)) {
267 get_symbol_pos(addr, symbolsize, offset);
268 return 1;
269 }
270 return !!module_address_lookup(addr, symbolsize, offset, NULL, namebuf) ||
271 !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
272}
273
274/*
275 * Lookup an address
276 * - modname is set to NULL if it's in the kernel.
277 * - We guarantee that the returned name is valid until we reschedule even if.
278 * It resides in a module.
279 * - We also guarantee that modname will be valid until rescheduled.
280 */
281const char *kallsyms_lookup(unsigned long addr,
282 unsigned long *symbolsize,
283 unsigned long *offset,
284 char **modname, char *namebuf)
285{
286 const char *ret;
287
288 namebuf[KSYM_NAME_LEN - 1] = 0;
289 namebuf[0] = 0;
290
291 if (is_ksym_addr(addr)) {
292 unsigned long pos;
293
294 pos = get_symbol_pos(addr, symbolsize, offset);
295 /* Grab name */
296 kallsyms_expand_symbol(get_symbol_offset(pos),
297 namebuf, KSYM_NAME_LEN);
298 if (modname)
299 *modname = NULL;
300 return namebuf;
301 }
302
303 /* See if it's in a module or a BPF JITed image. */
304 ret = module_address_lookup(addr, symbolsize, offset,
305 modname, namebuf);
306 if (!ret)
307 ret = bpf_address_lookup(addr, symbolsize,
308 offset, modname, namebuf);
309
310 if (!ret)
311 ret = ftrace_mod_address_lookup(addr, symbolsize,
312 offset, modname, namebuf);
313 return ret;
314}
315
316int lookup_symbol_name(unsigned long addr, char *symname)
317{
318 symname[0] = '\0';
319 symname[KSYM_NAME_LEN - 1] = '\0';
320
321 if (is_ksym_addr(addr)) {
322 unsigned long pos;
323
324 pos = get_symbol_pos(addr, NULL, NULL);
325 /* Grab name */
326 kallsyms_expand_symbol(get_symbol_offset(pos),
327 symname, KSYM_NAME_LEN);
328 return 0;
329 }
330 /* See if it's in a module. */
331 return lookup_module_symbol_name(addr, symname);
332}
333
334int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
335 unsigned long *offset, char *modname, char *name)
336{
337 name[0] = '\0';
338 name[KSYM_NAME_LEN - 1] = '\0';
339
340 if (is_ksym_addr(addr)) {
341 unsigned long pos;
342
343 pos = get_symbol_pos(addr, size, offset);
344 /* Grab name */
345 kallsyms_expand_symbol(get_symbol_offset(pos),
346 name, KSYM_NAME_LEN);
347 modname[0] = '\0';
348 return 0;
349 }
350 /* See if it's in a module. */
351 return lookup_module_symbol_attrs(addr, size, offset, modname, name);
352}
353
354/* Look up a kernel symbol and return it in a text buffer. */
355static int __sprint_symbol(char *buffer, unsigned long address,
356 int symbol_offset, int add_offset)
357{
358 char *modname;
359 const char *name;
360 unsigned long offset, size;
361 int len;
362
363 address += symbol_offset;
364 name = kallsyms_lookup(address, &size, &offset, &modname, buffer);
365 if (!name)
366 return sprintf(buffer, "0x%lx", address - symbol_offset);
367
368 if (name != buffer)
369 strcpy(buffer, name);
370 len = strlen(buffer);
371 offset -= symbol_offset;
372
373 if (add_offset)
374 len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);
375
376 if (modname)
377 len += sprintf(buffer + len, " [%s]", modname);
378
379 return len;
380}
381
382/**
383 * sprint_symbol - Look up a kernel symbol and return it in a text buffer
384 * @buffer: buffer to be stored
385 * @address: address to lookup
386 *
387 * This function looks up a kernel symbol with @address and stores its name,
388 * offset, size and module name to @buffer if possible. If no symbol was found,
389 * just saves its @address as is.
390 *
391 * This function returns the number of bytes stored in @buffer.
392 */
393int sprint_symbol(char *buffer, unsigned long address)
394{
395 return __sprint_symbol(buffer, address, 0, 1);
396}
397EXPORT_SYMBOL_GPL(sprint_symbol);
398
399/**
400 * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
401 * @buffer: buffer to be stored
402 * @address: address to lookup
403 *
404 * This function looks up a kernel symbol with @address and stores its name
405 * and module name to @buffer if possible. If no symbol was found, just saves
406 * its @address as is.
407 *
408 * This function returns the number of bytes stored in @buffer.
409 */
410int sprint_symbol_no_offset(char *buffer, unsigned long address)
411{
412 return __sprint_symbol(buffer, address, 0, 0);
413}
414EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);
415
416/**
417 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
418 * @buffer: buffer to be stored
419 * @address: address to lookup
420 *
421 * This function is for stack backtrace and does the same thing as
422 * sprint_symbol() but with modified/decreased @address. If there is a
423 * tail-call to the function marked "noreturn", gcc optimized out code after
424 * the call so that the stack-saved return address could point outside of the
425 * caller. This function ensures that kallsyms will find the original caller
426 * by decreasing @address.
427 *
428 * This function returns the number of bytes stored in @buffer.
429 */
430int sprint_backtrace(char *buffer, unsigned long address)
431{
432 return __sprint_symbol(buffer, address, -1, 1);
433}
434
435/* To avoid using get_symbol_offset for every symbol, we carry prefix along. */
436struct kallsym_iter {
437 loff_t pos;
438 loff_t pos_arch_end;
439 loff_t pos_mod_end;
440 loff_t pos_ftrace_mod_end;
441 unsigned long value;
442 unsigned int nameoff; /* If iterating in core kernel symbols. */
443 char type;
444 char name[KSYM_NAME_LEN];
445 char module_name[MODULE_NAME_LEN];
446 int exported;
447 int show_value;
448};
449
450int __weak arch_get_kallsym(unsigned int symnum, unsigned long *value,
451 char *type, char *name)
452{
453 return -EINVAL;
454}
455
456static int get_ksymbol_arch(struct kallsym_iter *iter)
457{
458 int ret = arch_get_kallsym(iter->pos - kallsyms_num_syms,
459 &iter->value, &iter->type,
460 iter->name);
461
462 if (ret < 0) {
463 iter->pos_arch_end = iter->pos;
464 return 0;
465 }
466
467 return 1;
468}
469
470static int get_ksymbol_mod(struct kallsym_iter *iter)
471{
472 int ret = module_get_kallsym(iter->pos - iter->pos_arch_end,
473 &iter->value, &iter->type,
474 iter->name, iter->module_name,
475 &iter->exported);
476 if (ret < 0) {
477 iter->pos_mod_end = iter->pos;
478 return 0;
479 }
480
481 return 1;
482}
483
484static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
485{
486 int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
487 &iter->value, &iter->type,
488 iter->name, iter->module_name,
489 &iter->exported);
490 if (ret < 0) {
491 iter->pos_ftrace_mod_end = iter->pos;
492 return 0;
493 }
494
495 return 1;
496}
497
498static int get_ksymbol_bpf(struct kallsym_iter *iter)
499{
500 strlcpy(iter->module_name, "bpf", MODULE_NAME_LEN);
501 iter->exported = 0;
502 return bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
503 &iter->value, &iter->type,
504 iter->name) < 0 ? 0 : 1;
505}
506
507/* Returns space to next name. */
508static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
509{
510 unsigned off = iter->nameoff;
511
512 iter->module_name[0] = '\0';
513 iter->value = kallsyms_sym_address(iter->pos);
514
515 iter->type = kallsyms_get_symbol_type(off);
516
517 off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name));
518
519 return off - iter->nameoff;
520}
521
522static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
523{
524 iter->name[0] = '\0';
525 iter->nameoff = get_symbol_offset(new_pos);
526 iter->pos = new_pos;
527 if (new_pos == 0) {
528 iter->pos_arch_end = 0;
529 iter->pos_mod_end = 0;
530 iter->pos_ftrace_mod_end = 0;
531 }
532}
533
534/*
535 * The end position (last + 1) of each additional kallsyms section is recorded
536 * in iter->pos_..._end as each section is added, and so can be used to
537 * determine which get_ksymbol_...() function to call next.
538 */
539static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
540{
541 iter->pos = pos;
542
543 if ((!iter->pos_arch_end || iter->pos_arch_end > pos) &&
544 get_ksymbol_arch(iter))
545 return 1;
546
547 if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
548 get_ksymbol_mod(iter))
549 return 1;
550
551 if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
552 get_ksymbol_ftrace_mod(iter))
553 return 1;
554
555 return get_ksymbol_bpf(iter);
556}
557
558/* Returns false if pos at or past end of file. */
559static int update_iter(struct kallsym_iter *iter, loff_t pos)
560{
561 /* Module symbols can be accessed randomly. */
562 if (pos >= kallsyms_num_syms)
563 return update_iter_mod(iter, pos);
564
565 /* If we're not on the desired position, reset to new position. */
566 if (pos != iter->pos)
567 reset_iter(iter, pos);
568
569 iter->nameoff += get_ksymbol_core(iter);
570 iter->pos++;
571
572 return 1;
573}
574
575static void *s_next(struct seq_file *m, void *p, loff_t *pos)
576{
577 (*pos)++;
578
579 if (!update_iter(m->private, *pos))
580 return NULL;
581 return p;
582}
583
584static void *s_start(struct seq_file *m, loff_t *pos)
585{
586 if (!update_iter(m->private, *pos))
587 return NULL;
588 return m->private;
589}
590
591static void s_stop(struct seq_file *m, void *p)
592{
593}
594
595static int s_show(struct seq_file *m, void *p)
596{
597 void *value;
598 struct kallsym_iter *iter = m->private;
599
600 /* Some debugging symbols have no name. Ignore them. */
601 if (!iter->name[0])
602 return 0;
603
604 value = iter->show_value ? (void *)iter->value : NULL;
605
606 if (iter->module_name[0]) {
607 char type;
608
609 /*
610 * Label it "global" if it is exported,
611 * "local" if not exported.
612 */
613 type = iter->exported ? toupper(iter->type) :
614 tolower(iter->type);
615 seq_printf(m, "%px %c %s\t[%s]\n", value,
616 type, iter->name, iter->module_name);
617 } else
618 seq_printf(m, "%px %c %s\n", value,
619 iter->type, iter->name);
620 return 0;
621}
622
623static const struct seq_operations kallsyms_op = {
624 .start = s_start,
625 .next = s_next,
626 .stop = s_stop,
627 .show = s_show
628};
629
630static inline int kallsyms_for_perf(void)
631{
632#ifdef CONFIG_PERF_EVENTS
633 extern int sysctl_perf_event_paranoid;
634 if (sysctl_perf_event_paranoid <= 1)
635 return 1;
636#endif
637 return 0;
638}
639
640/*
641 * We show kallsyms information even to normal users if we've enabled
642 * kernel profiling and are explicitly not paranoid (so kptr_restrict
643 * is clear, and sysctl_perf_event_paranoid isn't set).
644 *
645 * Otherwise, require CAP_SYSLOG (assuming kptr_restrict isn't set to
646 * block even that).
647 */
648int kallsyms_show_value(void)
649{
650 switch (kptr_restrict) {
651 case 0:
652 if (kallsyms_for_perf())
653 return 1;
654 /* fallthrough */
655 case 1:
656 if (has_capability_noaudit(current, CAP_SYSLOG))
657 return 1;
658 /* fallthrough */
659 default:
660 return 0;
661 }
662}
663
664static int kallsyms_open(struct inode *inode, struct file *file)
665{
666 /*
667 * We keep iterator in m->private, since normal case is to
668 * s_start from where we left off, so we avoid doing
669 * using get_symbol_offset for every symbol.
670 */
671 struct kallsym_iter *iter;
672 iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
673 if (!iter)
674 return -ENOMEM;
675 reset_iter(iter, 0);
676
677 iter->show_value = kallsyms_show_value();
678 return 0;
679}
680
681#ifdef CONFIG_KGDB_KDB
682const char *kdb_walk_kallsyms(loff_t *pos)
683{
684 static struct kallsym_iter kdb_walk_kallsyms_iter;
685 if (*pos == 0) {
686 memset(&kdb_walk_kallsyms_iter, 0,
687 sizeof(kdb_walk_kallsyms_iter));
688 reset_iter(&kdb_walk_kallsyms_iter, 0);
689 }
690 while (1) {
691 if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
692 return NULL;
693 ++*pos;
694 /* Some debugging symbols have no name. Ignore them. */
695 if (kdb_walk_kallsyms_iter.name[0])
696 return kdb_walk_kallsyms_iter.name;
697 }
698}
699#endif /* CONFIG_KGDB_KDB */
700
701static const struct file_operations kallsyms_operations = {
702 .open = kallsyms_open,
703 .read = seq_read,
704 .llseek = seq_lseek,
705 .release = seq_release_private,
706};
707
708static int __init kallsyms_init(void)
709{
710 proc_create("kallsyms", 0444, NULL, &kallsyms_operations);
711 return 0;
712}
713device_initcall(kallsyms_init);