// SPDX-License-Identifier: GPL-2.0-only
/*
 * kallsyms.c: in-kernel printing of symbolic oopses and stack traces.
 *
 * Rewritten and vastly simplified by Rusty Russell for in-kernel
 * module loader:
 *   Copyright 2002 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * ChangeLog:
 *
 * (25/Aug/2004) Paulo Marques <pmarques@grupopie.com>
 *      Changed the compression method from stem compression to "table lookup"
 *      compression (see scripts/kallsyms.c for a more complete description)
 */
#include <linux/kallsyms.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/kdb.h>
#include <linux/err.h>
#include <linux/proc_fs.h>
#include <linux/sched.h>	/* for cond_resched */
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/filter.h>
#include <linux/ftrace.h>
#include <linux/kprobes.h>
#include <linux/build_bug.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/bsearch.h>
#include <linux/btf_ids.h>

#include "kallsyms_internal.h"

/*
 * Expand compressed symbol data into the resulting uncompressed string,
 * given the offset to where the symbol is in the compressed stream.
 * If the uncompressed string is too long (>= maxlen), it will be truncated.
 */
static unsigned int kallsyms_expand_symbol(unsigned int off,
					   char *result, size_t maxlen)
{
	int len, skipped_first = 0;
	const char *tptr;
	const u8 *data;

	/* Get the compressed symbol length from the first symbol byte. */
	data = &kallsyms_names[off];
	len = *data;
	data++;
	off++;

	/* If MSB is 1, it is a "big" symbol, so it needs an additional byte. */
	if ((len & 0x80) != 0) {
		len = (len & 0x7F) | (*data << 7);
		data++;
		off++;
	}

	/*
	 * Update the offset to return the offset for the next symbol on
	 * the compressed stream.
	 */
	off += len;

	/*
	 * For every byte on the compressed symbol data, copy the table
	 * entry for that byte.
	 */
	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;

		while (*tptr) {
			if (skipped_first) {
				if (maxlen <= 1)
					goto tail;
				*result = *tptr;
				result++;
				maxlen--;
			} else
				skipped_first = 1;
			tptr++;
		}
	}

tail:
	if (maxlen)
		*result = '\0';

	/* Return the offset to the next symbol. */
	return off;
}

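/*
 * Illustrative summary of the stream kallsyms_expand_symbol() above consumes
 * (see scripts/kallsyms.c for the authoritative description): each entry in
 * kallsyms_names is [<len>][<len> bytes of token indices], where <len> grows
 * to two bytes when the MSB of the first byte is set. Every data byte selects
 * a token via kallsyms_token_index/kallsyms_token_table, and the first
 * character of the expanded string is the symbol type, which the expansion
 * above skips and kallsyms_get_symbol_type() below returns.
 */
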
/*
 * Get symbol type information. This is encoded as a single char at the
 * beginning of the symbol name.
 */
static char kallsyms_get_symbol_type(unsigned int off)
{
	/*
	 * Get just the first code, look it up in the token table,
	 * and return the first char from this token.
	 */
	return kallsyms_token_table[kallsyms_token_index[kallsyms_names[off + 1]]];
}


/*
 * Find the offset in the compressed stream given an index into the
 * kallsyms array.
 */
static unsigned int get_symbol_offset(unsigned long pos)
{
	const u8 *name;
	int i, len;

	/*
	 * Use the closest marker we have. We have markers every 256 positions,
	 * so that should be close enough.
	 */
	name = &kallsyms_names[kallsyms_markers[pos >> 8]];

	/*
	 * Sequentially scan all the symbols up to the point we're searching
	 * for. Every symbol is stored in a [<len>][<len> bytes of data] format,
	 * so we just need to add the len to the current pointer for every
	 * symbol we wish to skip.
	 */
	for (i = 0; i < (pos & 0xFF); i++) {
		len = *name;

		/*
		 * If MSB is 1, it is a "big" symbol, so we need to look into
		 * the next byte (and skip it, too).
		 */
		if ((len & 0x80) != 0)
			len = ((len & 0x7F) | (name[1] << 7)) + 1;

		name = name + len + 1;
	}

	return name - kallsyms_names;
}

unsigned long kallsyms_sym_address(int idx)
{
	/* values are unsigned offsets if --absolute-percpu is not in effect */
	if (!IS_ENABLED(CONFIG_KALLSYMS_ABSOLUTE_PERCPU))
		return kallsyms_relative_base + (u32)kallsyms_offsets[idx];

	/* ...otherwise, positive offsets are absolute values */
	if (kallsyms_offsets[idx] >= 0)
		return kallsyms_offsets[idx];

	/* ...and negative offsets are relative to kallsyms_relative_base - 1 */
	return kallsyms_relative_base - 1 - kallsyms_offsets[idx];
}

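/*
 * Worked example of the kallsyms_sym_address() encoding above (illustrative
 * numbers): with --absolute-percpu in effect, an offset of 0x1000 decodes to
 * the absolute address 0x1000, while an offset of -1 decodes to
 * kallsyms_relative_base, -2 to kallsyms_relative_base + 1, and so on.
 * Without it, every offset is a plain unsigned displacement added to
 * kallsyms_relative_base.
 */
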
static unsigned int get_symbol_seq(int index)
{
	unsigned int i, seq = 0;

	for (i = 0; i < 3; i++)
		seq = (seq << 8) | kallsyms_seqs_of_names[3 * index + i];

	return seq;
}

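/*
 * kallsyms_seqs_of_names holds one 3-byte, big-endian entry per symbol,
 * mapping name-sorted order to the address-sorted kallsyms index that
 * get_symbol_offset() and kallsyms_sym_address() expect; get_symbol_seq()
 * above decodes one entry. The binary search below runs over that
 * name-sorted view and, on a hit, widens [*start, *end] to cover every
 * symbol sharing the looked-up name.
 */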
static int kallsyms_lookup_names(const char *name,
				 unsigned int *start,
				 unsigned int *end)
{
	int ret;
	int low, mid, high;
	unsigned int seq, off;
	char namebuf[KSYM_NAME_LEN];

	low = 0;
	high = kallsyms_num_syms - 1;

	while (low <= high) {
		mid = low + (high - low) / 2;
		seq = get_symbol_seq(mid);
		off = get_symbol_offset(seq);
		kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
		ret = strcmp(name, namebuf);
		if (ret > 0)
			low = mid + 1;
		else if (ret < 0)
			high = mid - 1;
		else
			break;
	}

	if (low > high)
		return -ESRCH;

	low = mid;
	while (low) {
		seq = get_symbol_seq(low - 1);
		off = get_symbol_offset(seq);
		kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
		if (strcmp(name, namebuf))
			break;
		low--;
	}
	*start = low;

	if (end) {
		high = mid;
		while (high < kallsyms_num_syms - 1) {
			seq = get_symbol_seq(high + 1);
			off = get_symbol_offset(seq);
			kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
			if (strcmp(name, namebuf))
				break;
			high++;
		}
		*end = high;
	}

	return 0;
}

/* Look up the address for this symbol. Returns 0 if not found. */
unsigned long kallsyms_lookup_name(const char *name)
{
	int ret;
	unsigned int i;

	/* Skip the search for empty string. */
	if (!*name)
		return 0;

	ret = kallsyms_lookup_names(name, &i, NULL);
	if (!ret)
		return kallsyms_sym_address(get_symbol_seq(i));

	return module_kallsyms_lookup_name(name);
}

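/*
 * Example of the lookup above (illustrative symbol name): a call such as
 * kallsyms_lookup_name("vfs_read") resolves the name against the core
 * image via the name-sorted index, falls back to module symbols, and
 * returns 0 when no symbol with that name exists.
 */
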
/*
 * Iterate over all symbols in vmlinux. For symbols from modules use
 * module_kallsyms_on_each_symbol instead.
 */
int kallsyms_on_each_symbol(int (*fn)(void *, const char *, unsigned long),
			    void *data)
{
	char namebuf[KSYM_NAME_LEN];
	unsigned long i;
	unsigned int off;
	int ret;

	for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
		off = kallsyms_expand_symbol(off, namebuf, ARRAY_SIZE(namebuf));
		ret = fn(data, namebuf, kallsyms_sym_address(i));
		if (ret != 0)
			return ret;
		cond_resched();
	}
	return 0;
}

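/*
 * Unlike kallsyms_on_each_symbol() above, the matcher below visits only
 * the vmlinux symbols whose name equals @name, resolved through the
 * name-sorted index, and stops early as soon as @fn returns non-zero.
 */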
int kallsyms_on_each_match_symbol(int (*fn)(void *, unsigned long),
				  const char *name, void *data)
{
	int ret;
	unsigned int i, start, end;

	ret = kallsyms_lookup_names(name, &start, &end);
	if (ret)
		return 0;

	for (i = start; !ret && i <= end; i++) {
		ret = fn(data, kallsyms_sym_address(get_symbol_seq(i)));
		cond_resched();
	}

	return ret;
}

static unsigned long get_symbol_pos(unsigned long addr,
				    unsigned long *symbolsize,
				    unsigned long *offset)
{
	unsigned long symbol_start = 0, symbol_end = 0;
	unsigned long i, low, high, mid;

	/* Do a binary search on the sorted kallsyms_offsets array. */
	low = 0;
	high = kallsyms_num_syms;

	while (high - low > 1) {
		mid = low + (high - low) / 2;
		if (kallsyms_sym_address(mid) <= addr)
			low = mid;
		else
			high = mid;
	}

	/*
	 * Search for the first aliased symbol. Aliased
	 * symbols are symbols with the same address.
	 */
	while (low && kallsyms_sym_address(low-1) == kallsyms_sym_address(low))
		--low;

	symbol_start = kallsyms_sym_address(low);

	/* Search for next non-aliased symbol. */
	for (i = low + 1; i < kallsyms_num_syms; i++) {
		if (kallsyms_sym_address(i) > symbol_start) {
			symbol_end = kallsyms_sym_address(i);
			break;
		}
	}

	/* If we found no next symbol, we use the end of the section. */
	if (!symbol_end) {
		if (is_kernel_inittext(addr))
			symbol_end = (unsigned long)_einittext;
		else if (IS_ENABLED(CONFIG_KALLSYMS_ALL))
			symbol_end = (unsigned long)_end;
		else
			symbol_end = (unsigned long)_etext;
	}

	if (symbolsize)
		*symbolsize = symbol_end - symbol_start;
	if (offset)
		*offset = addr - symbol_start;

	return low;
}

/*
 * Look up an address but don't bother to find any names.
 */
int kallsyms_lookup_size_offset(unsigned long addr, unsigned long *symbolsize,
				unsigned long *offset)
{
	char namebuf[KSYM_NAME_LEN];

	if (is_ksym_addr(addr)) {
		get_symbol_pos(addr, symbolsize, offset);
		return 1;
	}
	return !!module_address_lookup(addr, symbolsize, offset, NULL, NULL, namebuf) ||
	       !!__bpf_address_lookup(addr, symbolsize, offset, namebuf);
}

static int kallsyms_lookup_buildid(unsigned long addr,
				   unsigned long *symbolsize,
				   unsigned long *offset, char **modname,
				   const unsigned char **modbuildid, char *namebuf)
{
	int ret;

	namebuf[KSYM_NAME_LEN - 1] = 0;
	namebuf[0] = 0;

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, symbolsize, offset);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       namebuf, KSYM_NAME_LEN);
		if (modname)
			*modname = NULL;
		if (modbuildid)
			*modbuildid = NULL;

		return strlen(namebuf);
	}

	/* See if it's in a module or a BPF JITed image. */
	ret = module_address_lookup(addr, symbolsize, offset,
				    modname, modbuildid, namebuf);
	if (!ret)
		ret = bpf_address_lookup(addr, symbolsize,
					 offset, modname, namebuf);

	if (!ret)
		ret = ftrace_mod_address_lookup(addr, symbolsize,
						offset, modname, namebuf);

	return ret;
}

/*
 * Look up an address.
 * - modname is set to NULL if it's in the kernel.
 * - We guarantee that the returned name is valid until we reschedule even if
 *   it resides in a module.
 * - We also guarantee that modname will be valid until rescheduled.
 */
const char *kallsyms_lookup(unsigned long addr,
			    unsigned long *symbolsize,
			    unsigned long *offset,
			    char **modname, char *namebuf)
{
	int ret = kallsyms_lookup_buildid(addr, symbolsize, offset, modname,
					  NULL, namebuf);

	if (!ret)
		return NULL;

	return namebuf;
}

int lookup_symbol_name(unsigned long addr, char *symname)
{
	symname[0] = '\0';
	symname[KSYM_NAME_LEN - 1] = '\0';

	if (is_ksym_addr(addr)) {
		unsigned long pos;

		pos = get_symbol_pos(addr, NULL, NULL);
		/* Grab name */
		kallsyms_expand_symbol(get_symbol_offset(pos),
				       symname, KSYM_NAME_LEN);
		return 0;
	}
	/* See if it's in a module. */
	return lookup_module_symbol_name(addr, symname);
}

/* Look up a kernel symbol and return it in a text buffer. */
static int __sprint_symbol(char *buffer, unsigned long address,
			   int symbol_offset, int add_offset, int add_buildid)
{
	char *modname;
	const unsigned char *buildid;
	unsigned long offset, size;
	int len;

	address += symbol_offset;
	len = kallsyms_lookup_buildid(address, &size, &offset, &modname, &buildid,
				      buffer);
	if (!len)
		return sprintf(buffer, "0x%lx", address - symbol_offset);

	offset -= symbol_offset;

	if (add_offset)
		len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);

	if (modname) {
		len += sprintf(buffer + len, " [%s", modname);
#if IS_ENABLED(CONFIG_STACKTRACE_BUILD_ID)
		if (add_buildid && buildid) {
			/* build ID should match length of sprintf */
#if IS_ENABLED(CONFIG_MODULES)
			static_assert(sizeof(typeof_member(struct module, build_id)) == 20);
#endif
			len += sprintf(buffer + len, " %20phN", buildid);
		}
#endif
		len += sprintf(buffer + len, "]");
	}

	return len;
}

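/*
 * All of the sprint_* wrappers below funnel through __sprint_symbol()
 * above, so a resolved address is rendered as (illustrative values)
 * "vfs_read+0x5c/0x1b0" for a core symbol, or "ext4_readdir+0x1c/0x5e0
 * [ext4]" for a module symbol; the *_build_id variants additionally put
 * the module build ID inside the brackets when CONFIG_STACKTRACE_BUILD_ID
 * is enabled. An unresolved address falls back to plain "0x<address>".
 */
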
/**
 * sprint_symbol - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name,
 * offset, size and module name to @buffer if possible. If no symbol was found,
 * just saves its @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 1, 0);
}
EXPORT_SYMBOL_GPL(sprint_symbol);

/**
 * sprint_symbol_build_id - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name,
 * offset, size, module name and module build ID to @buffer if possible. If no
 * symbol was found, just saves its @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol_build_id(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 1, 1);
}
EXPORT_SYMBOL_GPL(sprint_symbol_build_id);

/**
 * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function looks up a kernel symbol with @address and stores its name
 * and module name to @buffer if possible. If no symbol was found, just saves
 * its @address as is.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_symbol_no_offset(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, 0, 0, 0);
}
EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);

/**
 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function is for stack backtrace and does the same thing as
 * sprint_symbol() but with modified/decreased @address. If there is a
 * tail-call to the function marked "noreturn", gcc optimized out code after
 * the call so that the stack-saved return address could point outside of the
 * caller. This function ensures that kallsyms will find the original caller
 * by decreasing @address.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_backtrace(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, -1, 1, 0);
}

/**
 * sprint_backtrace_build_id - Look up a backtrace symbol and return it in a text buffer
 * @buffer: buffer to be stored
 * @address: address to lookup
 *
 * This function is for stack backtrace and does the same thing as
 * sprint_symbol() but with modified/decreased @address. If there is a
 * tail-call to the function marked "noreturn", gcc optimized out code after
 * the call so that the stack-saved return address could point outside of the
 * caller. This function ensures that kallsyms will find the original caller
 * by decreasing @address. This function also appends the module build ID to
 * the @buffer if @address is within a kernel module.
 *
 * This function returns the number of bytes stored in @buffer.
 */
int sprint_backtrace_build_id(char *buffer, unsigned long address)
{
	return __sprint_symbol(buffer, address, -1, 1, 1);
}

/* To avoid using get_symbol_offset for every symbol, we carry the prefix along. */
struct kallsym_iter {
	loff_t pos;
	loff_t pos_mod_end;
	loff_t pos_ftrace_mod_end;
	loff_t pos_bpf_end;
	unsigned long value;
	unsigned int nameoff; /* If iterating in core kernel symbols. */
	char type;
	char name[KSYM_NAME_LEN];
	char module_name[MODULE_NAME_LEN];
	int exported;
	int show_value;
};

static int get_ksymbol_mod(struct kallsym_iter *iter)
{
	int ret = module_get_kallsym(iter->pos - kallsyms_num_syms,
				     &iter->value, &iter->type,
				     iter->name, iter->module_name,
				     &iter->exported);
	if (ret < 0) {
		iter->pos_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

/*
 * ftrace_mod_get_kallsym() may also get symbols for pages allocated for ftrace
 * purposes. In that case "__builtin__ftrace" is used as a module name, even
 * though "__builtin__ftrace" is not a module.
 */
static int get_ksymbol_ftrace_mod(struct kallsym_iter *iter)
{
	int ret = ftrace_mod_get_kallsym(iter->pos - iter->pos_mod_end,
					 &iter->value, &iter->type,
					 iter->name, iter->module_name,
					 &iter->exported);
	if (ret < 0) {
		iter->pos_ftrace_mod_end = iter->pos;
		return 0;
	}

	return 1;
}

static int get_ksymbol_bpf(struct kallsym_iter *iter)
{
	int ret;

	strscpy(iter->module_name, "bpf", MODULE_NAME_LEN);
	iter->exported = 0;
	ret = bpf_get_kallsym(iter->pos - iter->pos_ftrace_mod_end,
			      &iter->value, &iter->type,
			      iter->name);
	if (ret < 0) {
		iter->pos_bpf_end = iter->pos;
		return 0;
	}

	return 1;
}

/*
 * This uses "__builtin__kprobes" as a module name for symbols for pages
 * allocated for kprobes' purposes, even though "__builtin__kprobes" is not a
 * module.
 */
static int get_ksymbol_kprobe(struct kallsym_iter *iter)
{
	strscpy(iter->module_name, "__builtin__kprobes", MODULE_NAME_LEN);
	iter->exported = 0;
	return kprobe_get_kallsym(iter->pos - iter->pos_bpf_end,
				  &iter->value, &iter->type,
				  iter->name) < 0 ? 0 : 1;
}

/* Returns the number of bytes to the next name. */
static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
{
	unsigned off = iter->nameoff;

	iter->module_name[0] = '\0';
	iter->value = kallsyms_sym_address(iter->pos);

	iter->type = kallsyms_get_symbol_type(off);

	off = kallsyms_expand_symbol(off, iter->name, ARRAY_SIZE(iter->name));

	return off - iter->nameoff;
}

static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
{
	iter->name[0] = '\0';
	iter->nameoff = get_symbol_offset(new_pos);
	iter->pos = new_pos;
	if (new_pos == 0) {
		iter->pos_mod_end = 0;
		iter->pos_ftrace_mod_end = 0;
		iter->pos_bpf_end = 0;
	}
}

/*
 * The end position (last + 1) of each additional kallsyms section is recorded
 * in iter->pos_..._end as each section is added, and so can be used to
 * determine which get_ksymbol_...() function to call next.
 */
static int update_iter_mod(struct kallsym_iter *iter, loff_t pos)
{
	iter->pos = pos;

	if ((!iter->pos_mod_end || iter->pos_mod_end > pos) &&
	    get_ksymbol_mod(iter))
		return 1;

	if ((!iter->pos_ftrace_mod_end || iter->pos_ftrace_mod_end > pos) &&
	    get_ksymbol_ftrace_mod(iter))
		return 1;

	if ((!iter->pos_bpf_end || iter->pos_bpf_end > pos) &&
	    get_ksymbol_bpf(iter))
		return 1;

	return get_ksymbol_kprobe(iter);
}

/* Returns false if pos at or past end of file. */
static int update_iter(struct kallsym_iter *iter, loff_t pos)
{
	/* Module symbols can be accessed randomly. */
	if (pos >= kallsyms_num_syms)
		return update_iter_mod(iter, pos);

	/* If we're not on the desired position, reset to new position. */
	if (pos != iter->pos)
		reset_iter(iter, pos);

	iter->nameoff += get_ksymbol_core(iter);
	iter->pos++;

	return 1;
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	(*pos)++;

	if (!update_iter(m->private, *pos))
		return NULL;
	return p;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
	if (!update_iter(m->private, *pos))
		return NULL;
	return m->private;
}

static void s_stop(struct seq_file *m, void *p)
{
}

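/*
 * s_show() below emits one /proc/kallsyms line: "<address> <type> <name>",
 * with a "\t[<module>]" suffix for non-core symbols. When the reader is not
 * allowed to see addresses (see kallsyms_show_value()), the value printed is
 * a NULL pointer, i.e. all zeroes.
 */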
static int s_show(struct seq_file *m, void *p)
{
	void *value;
	struct kallsym_iter *iter = m->private;

	/* Some debugging symbols have no name. Ignore them. */
	if (!iter->name[0])
		return 0;

	value = iter->show_value ? (void *)iter->value : NULL;

	if (iter->module_name[0]) {
		char type;

		/*
		 * Label it "global" if it is exported,
		 * "local" if not exported.
		 */
		type = iter->exported ? toupper(iter->type) :
					tolower(iter->type);
		seq_printf(m, "%px %c %s\t[%s]\n", value,
			   type, iter->name, iter->module_name);
	} else
		seq_printf(m, "%px %c %s\n", value,
			   iter->type, iter->name);
	return 0;
}

static const struct seq_operations kallsyms_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show
};

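/*
 * With CONFIG_BPF_SYSCALL, the same iterator is also exposed as a BPF
 * iterator target named "ksym": bpf_iter_ksym_init() reuses the
 * kallsym_iter state, and each step hands a struct bpf_iter__ksym context
 * to the attached BPF program via ksym_prog_seq_show().
 */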
#ifdef CONFIG_BPF_SYSCALL

struct bpf_iter__ksym {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct kallsym_iter *, ksym);
};

static int ksym_prog_seq_show(struct seq_file *m, bool in_stop)
{
	struct bpf_iter__ksym ctx;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;

	meta.seq = m;
	prog = bpf_iter_get_info(&meta, in_stop);
	if (!prog)
		return 0;

	ctx.meta = &meta;
	ctx.ksym = m ? m->private : NULL;
	return bpf_iter_run_prog(prog, &ctx);
}

static int bpf_iter_ksym_seq_show(struct seq_file *m, void *p)
{
	return ksym_prog_seq_show(m, false);
}

static void bpf_iter_ksym_seq_stop(struct seq_file *m, void *p)
{
	if (!p)
		(void) ksym_prog_seq_show(m, true);
	else
		s_stop(m, p);
}

static const struct seq_operations bpf_iter_ksym_ops = {
	.start = s_start,
	.next = s_next,
	.stop = bpf_iter_ksym_seq_stop,
	.show = bpf_iter_ksym_seq_show,
};

static int bpf_iter_ksym_init(void *priv_data, struct bpf_iter_aux_info *aux)
{
	struct kallsym_iter *iter = priv_data;

	reset_iter(iter, 0);

	/* cache here as in kallsyms_open() case; use current process
	 * credentials to tell BPF iterators if values should be shown.
	 */
	iter->show_value = kallsyms_show_value(current_cred());

	return 0;
}

DEFINE_BPF_ITER_FUNC(ksym, struct bpf_iter_meta *meta, struct kallsym_iter *ksym)

static const struct bpf_iter_seq_info ksym_iter_seq_info = {
	.seq_ops		= &bpf_iter_ksym_ops,
	.init_seq_private	= bpf_iter_ksym_init,
	.fini_seq_private	= NULL,
	.seq_priv_size		= sizeof(struct kallsym_iter),
};

static struct bpf_iter_reg ksym_iter_reg_info = {
	.target			= "ksym",
	.feature		= BPF_ITER_RESCHED,
	.ctx_arg_info_size	= 1,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__ksym, ksym),
		  PTR_TO_BTF_ID_OR_NULL },
	},
	.seq_info		= &ksym_iter_seq_info,
};

BTF_ID_LIST(btf_ksym_iter_id)
BTF_ID(struct, kallsym_iter)

static int __init bpf_ksym_iter_register(void)
{
	ksym_iter_reg_info.ctx_arg_info[0].btf_id = *btf_ksym_iter_id;
	return bpf_iter_reg_target(&ksym_iter_reg_info);
}

late_initcall(bpf_ksym_iter_register);

#endif /* CONFIG_BPF_SYSCALL */

static int kallsyms_open(struct inode *inode, struct file *file)
{
	/*
	 * We keep the iterator in m->private, since the normal case is to
	 * s_start from where we left off, so we avoid calling
	 * get_symbol_offset for every symbol.
	 */
	struct kallsym_iter *iter;
	iter = __seq_open_private(file, &kallsyms_op, sizeof(*iter));
	if (!iter)
		return -ENOMEM;
	reset_iter(iter, 0);

	/*
	 * Instead of checking this on every s_show() call, cache
	 * the result here at open time.
	 */
	iter->show_value = kallsyms_show_value(file->f_cred);
	return 0;
}

#ifdef CONFIG_KGDB_KDB
const char *kdb_walk_kallsyms(loff_t *pos)
{
	static struct kallsym_iter kdb_walk_kallsyms_iter;
	if (*pos == 0) {
		memset(&kdb_walk_kallsyms_iter, 0,
		       sizeof(kdb_walk_kallsyms_iter));
		reset_iter(&kdb_walk_kallsyms_iter, 0);
	}
	while (1) {
		if (!update_iter(&kdb_walk_kallsyms_iter, *pos))
			return NULL;
		++*pos;
		/* Some debugging symbols have no name. Ignore them. */
		if (kdb_walk_kallsyms_iter.name[0])
			return kdb_walk_kallsyms_iter.name;
	}
}
#endif	/* CONFIG_KGDB_KDB */

static const struct proc_ops kallsyms_proc_ops = {
	.proc_open	= kallsyms_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release_private,
};

static int __init kallsyms_init(void)
{
	proc_create("kallsyms", 0444, NULL, &kallsyms_proc_ops);
	return 0;
}
device_initcall(kallsyms_init);