Loading...
Note: File does not exist in v6.2.
1// SPDX-License-Identifier: GPL-2.0-only
2/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
3 * Copyright (c) 2016 Facebook
4 * Copyright (c) 2018 Covalent IO, Inc. http://covalent.io
5 */
6#include <uapi/linux/btf.h>
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/bpf.h>
10#include <linux/bpf_verifier.h>
11#include <linux/math64.h>
12
13#define verbose(env, fmt, args...) bpf_verifier_log_write(env, fmt, ##args)
14
15static bool bpf_verifier_log_attr_valid(const struct bpf_verifier_log *log)
16{
17 /* ubuf and len_total should both be specified (or not) together */
18 if (!!log->ubuf != !!log->len_total)
19 return false;
20 /* log buf without log_level is meaningless */
21 if (log->ubuf && log->level == 0)
22 return false;
23 if (log->level & ~BPF_LOG_MASK)
24 return false;
25 if (log->len_total > UINT_MAX >> 2)
26 return false;
27 return true;
28}
29
30int bpf_vlog_init(struct bpf_verifier_log *log, u32 log_level,
31 char __user *log_buf, u32 log_size)
32{
33 log->level = log_level;
34 log->ubuf = log_buf;
35 log->len_total = log_size;
36
37 /* log attributes have to be sane */
38 if (!bpf_verifier_log_attr_valid(log))
39 return -EINVAL;
40
41 return 0;
42}
43
44static void bpf_vlog_update_len_max(struct bpf_verifier_log *log, u32 add_len)
45{
46 /* add_len includes terminal \0, so no need for +1. */
47 u64 len = log->end_pos + add_len;
48
49 /* log->len_max could be larger than our current len due to
50 * bpf_vlog_reset() calls, so we maintain the max of any length at any
51 * previous point
52 */
53 if (len > UINT_MAX)
54 log->len_max = UINT_MAX;
55 else if (len > log->len_max)
56 log->len_max = len;
57}
58
/* Append one formatted message to the verifier log.
 *
 * The message is rendered into the small kernel-side bounce buffer
 * log->kbuf and then dispatched by mode:
 *  - BPF_LOG_KERNEL: forwarded to the kernel log via pr_err();
 *  - BPF_LOG_FIXED: log->ubuf is filled from the front; once full,
 *    further output is still accounted in len_max but dropped;
 *  - default (rotating): log->ubuf acts as a circular window that
 *    always retains the most recent log->len_total bytes.
 *
 * On a failed copy_to_user(), log->ubuf is set to NULL; the fault is
 * reported to the caller later, in bpf_vlog_finalize().
 */
void bpf_verifier_vlog(struct bpf_verifier_log *log, const char *fmt,
		       va_list args)
{
	u64 cur_pos;
	u32 new_n, n;

	n = vscnprintf(log->kbuf, BPF_VERIFIER_TMP_LOG_SIZE, fmt, args);

	if (log->level == BPF_LOG_KERNEL) {
		bool newline = n > 0 && log->kbuf[n - 1] == '\n';

		pr_err("BPF: %s%s", log->kbuf, newline ? "" : "\n");
		return;
	}

	n += 1; /* include terminating zero */
	bpf_vlog_update_len_max(log, n);

	if (log->level & BPF_LOG_FIXED) {
		/* check if we have at least something to put into user buf */
		new_n = 0;
		if (log->end_pos < log->len_total) {
			new_n = min_t(u32, log->len_total - log->end_pos, n);
			/* force NUL termination of the (possibly truncated) chunk */
			log->kbuf[new_n - 1] = '\0';
		}

		cur_pos = log->end_pos;
		log->end_pos += n - 1; /* don't count terminating '\0' */

		if (log->ubuf && new_n &&
		    copy_to_user(log->ubuf + cur_pos, log->kbuf, new_n))
			goto fail;
	} else {
		u64 new_end, new_start;
		u32 buf_start, buf_end, new_n;

		/* advance the logical window [start_pos, end_pos) */
		new_end = log->end_pos + n;
		if (new_end - log->start_pos >= log->len_total)
			new_start = new_end - log->len_total;
		else
			new_start = log->start_pos;

		log->start_pos = new_start;
		log->end_pos = new_end - 1; /* don't count terminating '\0' */

		if (!log->ubuf)
			return;

		/* copy only the tail of the message that still fits */
		new_n = min(n, log->len_total);
		cur_pos = new_end - new_n;
		div_u64_rem(cur_pos, log->len_total, &buf_start);
		div_u64_rem(new_end, log->len_total, &buf_end);
		/* new_end and buf_end are exclusive indices, so if buf_end is
		 * exactly zero, then it actually points right to the end of
		 * ubuf and there is no wrap around
		 */
		if (buf_end == 0)
			buf_end = log->len_total;

		/* if buf_start > buf_end, we wrapped around;
		 * if buf_start == buf_end, then we fill ubuf completely; we
		 * can't have buf_start == buf_end to mean that there is
		 * nothing to write, because we always write at least
		 * something, even if terminal '\0'
		 */
		if (buf_start < buf_end) {
			/* message fits within contiguous chunk of ubuf */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 buf_end - buf_start))
				goto fail;
		} else {
			/* message wraps around the end of ubuf, copy in two chunks */
			if (copy_to_user(log->ubuf + buf_start,
					 log->kbuf + n - new_n,
					 log->len_total - buf_start))
				goto fail;
			if (copy_to_user(log->ubuf,
					 log->kbuf + n - buf_end,
					 buf_end))
				goto fail;
		}
	}

	return;
fail:
	log->ubuf = NULL;
}
147
/* Roll the log back to absolute position @new_pos, discarding anything
 * logged after that point, and re-terminate the user buffer with '\0'
 * at the new end. A put_user() fault marks the log broken by clearing
 * log->ubuf.
 */
void bpf_vlog_reset(struct bpf_verifier_log *log, u64 new_pos)
{
	char zero = 0;
	u32 pos;

	/* resetting forward would fabricate log content */
	if (WARN_ON_ONCE(new_pos > log->end_pos))
		return;

	if (!bpf_verifier_log_needed(log) || log->level == BPF_LOG_KERNEL)
		return;

	/* if position to which we reset is beyond current log window,
	 * then we didn't preserve any useful content and should adjust
	 * start_pos to end up with an empty log (start_pos == end_pos)
	 */
	log->end_pos = new_pos;
	if (log->end_pos < log->start_pos)
		log->start_pos = log->end_pos;

	if (!log->ubuf)
		return;

	/* fixed mode: terminate right after the retained content;
	 * rotating mode: terminate at new_pos' offset within the window
	 */
	if (log->level & BPF_LOG_FIXED)
		pos = log->end_pos + 1;
	else
		div_u64_rem(new_pos, log->len_total, &pos);

	if (pos < log->len_total && put_user(zero, log->ubuf + pos))
		log->ubuf = NULL;
}
178
/* Reverse @len bytes of @buf in place. */
static void bpf_vlog_reverse_kbuf(char *buf, int len)
{
	char *lo = buf, *hi = buf + len - 1;

	while (lo < hi) {
		char tmp = *lo;

		*lo++ = *hi;
		*hi-- = tmp;
	}
}
186
/* Reverse bytes [start, end) of the user-space log buffer in place,
 * bouncing the data through log->kbuf (split into two halves, one per
 * end of the range). Returns 0 on success or -EFAULT if any user-space
 * copy fails.
 */
static int bpf_vlog_reverse_ubuf(struct bpf_verifier_log *log, int start, int end)
{
	/* we split log->kbuf into two equal parts for both ends of array */
	int n = sizeof(log->kbuf) / 2, nn;
	char *lbuf = log->kbuf, *rbuf = log->kbuf + n;

	/* Read ubuf's section [start, end) two chunks at a time, from left
	 * and right side; within each chunk, swap all the bytes; after that
	 * reverse the order of lbuf and rbuf and write result back to ubuf.
	 * This way we'll end up with swapped contents of specified
	 * [start, end) ubuf segment.
	 */
	while (end - start > 1) {
		/* never let the two chunks overlap in the middle */
		nn = min(n, (end - start) / 2);

		if (copy_from_user(lbuf, log->ubuf + start, nn))
			return -EFAULT;
		if (copy_from_user(rbuf, log->ubuf + end - nn, nn))
			return -EFAULT;

		bpf_vlog_reverse_kbuf(lbuf, nn);
		bpf_vlog_reverse_kbuf(rbuf, nn);

		/* we write lbuf to the right end of ubuf, while rbuf to the
		 * left one to end up with properly reversed overall ubuf
		 */
		if (copy_to_user(log->ubuf + start, rbuf, nn))
			return -EFAULT;
		if (copy_to_user(log->ubuf + end - nn, lbuf, nn))
			return -EFAULT;

		start += nn;
		end -= nn;
	}

	return 0;
}
224
/* Finish logging: rotate the user buffer (if the rotating log wrapped)
 * so it becomes one contiguous zero-terminated string starting at
 * offset 0, and report via @log_size_actual how many bytes the full log
 * would have needed. Returns 0 on success, -EFAULT if any user-space
 * access failed along the way, or -ENOSPC if the log was truncated.
 */
int bpf_vlog_finalize(struct bpf_verifier_log *log, u32 *log_size_actual)
{
	u32 sublen;
	int err;

	*log_size_actual = 0;
	if (!log || log->level == 0 || log->level == BPF_LOG_KERNEL)
		return 0;

	if (!log->ubuf)
		goto skip_log_rotate;
	/* If we never truncated log, there is nothing to move around. */
	if (log->start_pos == 0)
		goto skip_log_rotate;

	/* Otherwise we need to rotate log contents to make it start from the
	 * buffer beginning and be a continuous zero-terminated string. Note
	 * that if log->start_pos != 0 then we definitely filled up entire log
	 * buffer with no gaps, and we just need to shift buffer contents to
	 * the left by (log->start_pos % log->len_total) bytes.
	 *
	 * Unfortunately, user buffer could be huge and we don't want to
	 * allocate temporary kernel memory of the same size just to shift
	 * contents in a straightforward fashion. Instead, we'll be clever and
	 * do in-place array rotation. This is a leetcode-style problem, which
	 * could be solved by three rotations.
	 *
	 * Let's say we have log buffer that has to be shifted left by 7 bytes
	 * (spaces and vertical bar is just for demonstrative purposes):
	 *   E F G H I J K | A B C D
	 *
	 * First, we reverse entire array:
	 *   D C B A | K J I H G F E
	 *
	 * Then we rotate first 4 bytes (DCBA) and separately last 7 bytes
	 * (KJIHGFE), resulting in a properly rotated array:
	 *   A B C D | E F G H I J K
	 *
	 * We'll utilize log->kbuf to read user memory chunk by chunk, swap
	 * bytes, and write them back. Doing it byte-by-byte would be
	 * unnecessarily inefficient. Altogether we are going to read and
	 * write each byte twice, for total 4 memory copies between kernel and
	 * user space.
	 */

	/* length of the chopped off part that will be the beginning;
	 * len(ABCD) in the example above
	 */
	div_u64_rem(log->start_pos, log->len_total, &sublen);
	sublen = log->len_total - sublen;

	err = bpf_vlog_reverse_ubuf(log, 0, log->len_total);
	err = err ?: bpf_vlog_reverse_ubuf(log, 0, sublen);
	err = err ?: bpf_vlog_reverse_ubuf(log, sublen, log->len_total);
	if (err)
		log->ubuf = NULL;

skip_log_rotate:
	*log_size_actual = log->len_max;

	/* properly initialized log has either both ubuf!=NULL and len_total>0
	 * or ubuf==NULL and len_total==0, so if this condition doesn't hold,
	 * we got a fault somewhere along the way, so report it back
	 */
	if (!!log->ubuf != !!log->len_total)
		return -EFAULT;

	/* did truncation actually happen? */
	if (log->ubuf && log->len_max > log->len_total)
		return -ENOSPC;

	return 0;
}
298
299/* log_level controls verbosity level of eBPF verifier.
300 * bpf_verifier_log_write() is used to dump the verification trace to the log,
301 * so the user can figure out what's wrong with the program
302 */
303__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
304 const char *fmt, ...)
305{
306 va_list args;
307
308 if (!bpf_verifier_log_needed(&env->log))
309 return;
310
311 va_start(args, fmt);
312 bpf_verifier_vlog(&env->log, fmt, args);
313 va_end(args);
314}
315EXPORT_SYMBOL_GPL(bpf_verifier_log_write);
316
317__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
318 const char *fmt, ...)
319{
320 va_list args;
321
322 if (!bpf_verifier_log_needed(log))
323 return;
324
325 va_start(args, fmt);
326 bpf_verifier_vlog(log, fmt, args);
327 va_end(args);
328}
329EXPORT_SYMBOL_GPL(bpf_log);
330
331static const struct bpf_line_info *
332find_linfo(const struct bpf_verifier_env *env, u32 insn_off)
333{
334 const struct bpf_line_info *linfo;
335 const struct bpf_prog *prog;
336 u32 i, nr_linfo;
337
338 prog = env->prog;
339 nr_linfo = prog->aux->nr_linfo;
340
341 if (!nr_linfo || insn_off >= prog->len)
342 return NULL;
343
344 linfo = prog->aux->linfo;
345 for (i = 1; i < nr_linfo; i++)
346 if (insn_off < linfo[i].insn_off)
347 break;
348
349 return &linfo[i - 1];
350}
351
/* Skip leading whitespace; returns a pointer into the same string. */
static const char *ltrim(const char *s)
{
	for (; isspace(*s); s++)
		;

	return s;
}
359
360__printf(3, 4) void verbose_linfo(struct bpf_verifier_env *env,
361 u32 insn_off,
362 const char *prefix_fmt, ...)
363{
364 const struct bpf_line_info *linfo;
365
366 if (!bpf_verifier_log_needed(&env->log))
367 return;
368
369 linfo = find_linfo(env, insn_off);
370 if (!linfo || linfo == env->prev_linfo)
371 return;
372
373 if (prefix_fmt) {
374 va_list args;
375
376 va_start(args, prefix_fmt);
377 bpf_verifier_vlog(&env->log, prefix_fmt, args);
378 va_end(args);
379 }
380
381 verbose(env, "%s\n",
382 ltrim(btf_name_by_offset(env->prog->aux->btf,
383 linfo->line_off)));
384
385 env->prev_linfo = linfo;
386}
387
388static const char *btf_type_name(const struct btf *btf, u32 id)
389{
390 return btf_name_by_offset(btf, btf_type_by_id(btf, id)->name_off);
391}
392
393/* string representation of 'enum bpf_reg_type'
394 *
395 * Note that reg_type_str() can not appear more than once in a single verbose()
396 * statement.
397 */
398const char *reg_type_str(struct bpf_verifier_env *env, enum bpf_reg_type type)
399{
400 char postfix[16] = {0}, prefix[64] = {0};
401 static const char * const str[] = {
402 [NOT_INIT] = "?",
403 [SCALAR_VALUE] = "scalar",
404 [PTR_TO_CTX] = "ctx",
405 [CONST_PTR_TO_MAP] = "map_ptr",
406 [PTR_TO_MAP_VALUE] = "map_value",
407 [PTR_TO_STACK] = "fp",
408 [PTR_TO_PACKET] = "pkt",
409 [PTR_TO_PACKET_META] = "pkt_meta",
410 [PTR_TO_PACKET_END] = "pkt_end",
411 [PTR_TO_FLOW_KEYS] = "flow_keys",
412 [PTR_TO_SOCKET] = "sock",
413 [PTR_TO_SOCK_COMMON] = "sock_common",
414 [PTR_TO_TCP_SOCK] = "tcp_sock",
415 [PTR_TO_TP_BUFFER] = "tp_buffer",
416 [PTR_TO_XDP_SOCK] = "xdp_sock",
417 [PTR_TO_BTF_ID] = "ptr_",
418 [PTR_TO_MEM] = "mem",
419 [PTR_TO_BUF] = "buf",
420 [PTR_TO_FUNC] = "func",
421 [PTR_TO_MAP_KEY] = "map_key",
422 [CONST_PTR_TO_DYNPTR] = "dynptr_ptr",
423 };
424
425 if (type & PTR_MAYBE_NULL) {
426 if (base_type(type) == PTR_TO_BTF_ID)
427 strncpy(postfix, "or_null_", 16);
428 else
429 strncpy(postfix, "_or_null", 16);
430 }
431
432 snprintf(prefix, sizeof(prefix), "%s%s%s%s%s%s%s",
433 type & MEM_RDONLY ? "rdonly_" : "",
434 type & MEM_RINGBUF ? "ringbuf_" : "",
435 type & MEM_USER ? "user_" : "",
436 type & MEM_PERCPU ? "percpu_" : "",
437 type & MEM_RCU ? "rcu_" : "",
438 type & PTR_UNTRUSTED ? "untrusted_" : "",
439 type & PTR_TRUSTED ? "trusted_" : ""
440 );
441
442 snprintf(env->tmp_str_buf, TMP_STR_BUF_LEN, "%s%s%s",
443 prefix, str[base_type(type)], postfix);
444 return env->tmp_str_buf;
445}
446
447const char *dynptr_type_str(enum bpf_dynptr_type type)
448{
449 switch (type) {
450 case BPF_DYNPTR_TYPE_LOCAL:
451 return "local";
452 case BPF_DYNPTR_TYPE_RINGBUF:
453 return "ringbuf";
454 case BPF_DYNPTR_TYPE_SKB:
455 return "skb";
456 case BPF_DYNPTR_TYPE_XDP:
457 return "xdp";
458 case BPF_DYNPTR_TYPE_INVALID:
459 return "<invalid>";
460 default:
461 WARN_ONCE(1, "unknown dynptr type %d\n", type);
462 return "<unknown>";
463 }
464}
465
466const char *iter_type_str(const struct btf *btf, u32 btf_id)
467{
468 if (!btf || btf_id == 0)
469 return "<invalid>";
470
471 /* we already validated that type is valid and has conforming name */
472 return btf_type_name(btf, btf_id) + sizeof(ITER_PREFIX) - 1;
473}
474
475const char *iter_state_str(enum bpf_iter_state state)
476{
477 switch (state) {
478 case BPF_ITER_STATE_ACTIVE:
479 return "active";
480 case BPF_ITER_STATE_DRAINED:
481 return "drained";
482 case BPF_ITER_STATE_INVALID:
483 return "<invalid>";
484 default:
485 WARN_ONCE(1, "unknown iter state %d\n", state);
486 return "<unknown>";
487 }
488}
489
/* One-character markers for stack slot contents in verifier state dumps:
 * '?' invalid, 'r' spilled register, 'm' misc, '0' known zero,
 * 'd' dynptr, 'i' iterator.
 */
static char slot_type_char[] = {
	[STACK_INVALID]	= '?',
	[STACK_SPILL]	= 'r',
	[STACK_MISC]	= 'm',
	[STACK_ZERO]	= '0',
	[STACK_DYNPTR]	= 'd',
	[STACK_ITER]	= 'i',
};
498
499static void print_liveness(struct bpf_verifier_env *env,
500 enum bpf_reg_liveness live)
501{
502 if (live & (REG_LIVE_READ | REG_LIVE_WRITTEN | REG_LIVE_DONE))
503 verbose(env, "_");
504 if (live & REG_LIVE_READ)
505 verbose(env, "r");
506 if (live & REG_LIVE_WRITTEN)
507 verbose(env, "w");
508 if (live & REG_LIVE_DONE)
509 verbose(env, "D");
510}
511
/* Values inside these bounds are printed in decimal; anything wider is
 * printed in hex to stay readable.
 */
#define UNUM_MAX_DECIMAL U16_MAX
#define SNUM_MAX_DECIMAL S16_MAX
#define SNUM_MIN_DECIMAL S16_MIN

/* true if unsigned @num should be printed in decimal */
static bool is_unum_decimal(u64 num)
{
	return num <= UNUM_MAX_DECIMAL;
}

/* true if signed @num should be printed in decimal */
static bool is_snum_decimal(s64 num)
{
	return num >= SNUM_MIN_DECIMAL && num <= SNUM_MAX_DECIMAL;
}
525
526static void verbose_unum(struct bpf_verifier_env *env, u64 num)
527{
528 if (is_unum_decimal(num))
529 verbose(env, "%llu", num);
530 else
531 verbose(env, "%#llx", num);
532}
533
534static void verbose_snum(struct bpf_verifier_env *env, s64 num)
535{
536 if (is_snum_decimal(num))
537 verbose(env, "%lld", num);
538 else
539 verbose(env, "%#llx", num);
540}
541
542int tnum_strn(char *str, size_t size, struct tnum a)
543{
544 /* print as a constant, if tnum is fully known */
545 if (a.mask == 0) {
546 if (is_unum_decimal(a.value))
547 return snprintf(str, size, "%llu", a.value);
548 else
549 return snprintf(str, size, "%#llx", a.value);
550 }
551 return snprintf(str, size, "(%#llx; %#llx)", a.value, a.mask);
552}
553EXPORT_SYMBOL_GPL(tnum_strn);
554
/* Print the scalar bounds of @reg (smin/smax/umin/umax plus their
 * 32-bit counterparts), omitting bounds at their trivial extremes and
 * chaining fields that print identically into one "a=b=value" group.
 */
static void print_scalar_ranges(struct bpf_verifier_env *env,
				const struct bpf_reg_state *reg,
				const char **sep)
{
	/* For signed ranges, we want to unify 64-bit and 32-bit values in the
	 * output as much as possible, but there is a bit of a complication.
	 * If we choose to print values as decimals, this is natural to do,
	 * because negative 64-bit and 32-bit values >= -S32_MIN have the same
	 * representation due to sign extension. But if we choose to print
	 * them in hex format (see is_snum_decimal()), then sign extension is
	 * misleading.
	 * E.g., smin=-2 and smin32=-2 are exactly the same in decimal, but in
	 * hex they will be smin=0xfffffffffffffffe and smin32=0xfffffffe, two
	 * very different numbers.
	 * So we avoid sign extension if we choose to print values in hex.
	 */
	struct {
		const char *name;
		u64 val;
		bool omit;
	} minmaxs[] = {
		{"smin", reg->smin_value, reg->smin_value == S64_MIN},
		{"smax", reg->smax_value, reg->smax_value == S64_MAX},
		{"umin", reg->umin_value, reg->umin_value == 0},
		{"umax", reg->umax_value, reg->umax_value == U64_MAX},
		{"smin32",
		 is_snum_decimal((s64)reg->s32_min_value)
		 ? (s64)reg->s32_min_value
		 : (u32)reg->s32_min_value, reg->s32_min_value == S32_MIN},
		{"smax32",
		 is_snum_decimal((s64)reg->s32_max_value)
		 ? (s64)reg->s32_max_value
		 : (u32)reg->s32_max_value, reg->s32_max_value == S32_MAX},
		{"umin32", reg->u32_min_value, reg->u32_min_value == 0},
		{"umax32", reg->u32_max_value, reg->u32_max_value == U32_MAX},
	}, *m1, *m2, *mend = &minmaxs[ARRAY_SIZE(minmaxs)];
	bool neg1, neg2;

	for (m1 = &minmaxs[0]; m1 < mend; m1++) {
		if (m1->omit)
			continue;

		neg1 = m1->name[0] == 's' && (s64)m1->val < 0;

		verbose(env, "%s%s=", *sep, m1->name);
		*sep = ",";

		/* entries alternate min/max, so stepping by 2 only compares
		 * mins with mins and maxes with maxes
		 */
		for (m2 = m1 + 2; m2 < mend; m2 += 2) {
			if (m2->omit || m2->val != m1->val)
				continue;
			/* don't mix negatives with positives */
			neg2 = m2->name[0] == 's' && (s64)m2->val < 0;
			if (neg2 != neg1)
				continue;
			m2->omit = true;
			verbose(env, "%s=", m2->name);
		}

		if (m1->name[0] == 's')
			verbose_snum(env, m1->val);
		else
			verbose_unum(env, m1->val);
	}
}
619
620static bool type_is_map_ptr(enum bpf_reg_type t) {
621 switch (base_type(t)) {
622 case CONST_PTR_TO_MAP:
623 case PTR_TO_MAP_KEY:
624 case PTR_TO_MAP_VALUE:
625 return true;
626 default:
627 return false;
628 }
629}
630
631/*
632 * _a stands for append, was shortened to avoid multiline statements below.
633 * This macro is used to output a comma separated list of attributes.
634 */
635#define verbose_a(fmt, ...) ({ verbose(env, "%s" fmt, sep, ##__VA_ARGS__); sep = ","; })
636
/* Dump the state of a single register in the compact one-line format
 * used by verifier logs: optional 'P' precision mark, type string, then
 * a parenthesized comma-separated attribute list (built via verbose_a).
 * Known scalars and fixed-offset stack pointers short-circuit to just
 * their constant value.
 */
static void print_reg_state(struct bpf_verifier_env *env,
			    const struct bpf_func_state *state,
			    const struct bpf_reg_state *reg)
{
	enum bpf_reg_type t;
	const char *sep = "";

	t = reg->type;
	if (t == SCALAR_VALUE && reg->precise)
		verbose(env, "P");
	if (t == SCALAR_VALUE && tnum_is_const(reg->var_off)) {
		/* reg->off should be 0 for SCALAR_VALUE */
		verbose_snum(env, reg->var_off.value + reg->off);
		return;
	}

	verbose(env, "%s", reg_type_str(env, t));
	if (t == PTR_TO_STACK) {
		/* print owning frame number only when it differs from the
		 * frame being dumped
		 */
		if (state->frameno != reg->frameno)
			verbose(env, "[%d]", reg->frameno);
		if (tnum_is_const(reg->var_off)) {
			verbose_snum(env, reg->var_off.value + reg->off);
			return;
		}
	}
	if (base_type(t) == PTR_TO_BTF_ID)
		verbose(env, "%s", btf_type_name(reg->btf, reg->btf_id));
	verbose(env, "(");
	if (reg->id)
		verbose_a("id=%d", reg->id);
	if (reg->ref_obj_id)
		verbose_a("ref_obj_id=%d", reg->ref_obj_id);
	if (type_is_non_owning_ref(reg->type))
		verbose_a("%s", "non_own_ref");
	if (type_is_map_ptr(t)) {
		if (reg->map_ptr->name[0])
			verbose_a("map=%s", reg->map_ptr->name);
		verbose_a("ks=%d,vs=%d",
			  reg->map_ptr->key_size,
			  reg->map_ptr->value_size);
	}
	if (t != SCALAR_VALUE && reg->off) {
		verbose_a("off=");
		verbose_snum(env, reg->off);
	}
	if (type_is_pkt_pointer(t)) {
		verbose_a("r=");
		verbose_unum(env, reg->range);
	}
	if (base_type(t) == PTR_TO_MEM) {
		verbose_a("sz=");
		verbose_unum(env, reg->mem_size);
	}
	if (t == CONST_PTR_TO_DYNPTR)
		verbose_a("type=%s", dynptr_type_str(reg->dynptr.type));
	if (tnum_is_const(reg->var_off)) {
		/* a pointer register with fixed offset */
		if (reg->var_off.value) {
			verbose_a("imm=");
			verbose_snum(env, reg->var_off.value);
		}
	} else {
		/* not fully known: print value ranges and tnum */
		print_scalar_ranges(env, reg, &sep);
		if (!tnum_is_unknown(reg->var_off)) {
			char tn_buf[48];

			tnum_strn(tn_buf, sizeof(tn_buf), reg->var_off);
			verbose_a("var_off=%s", tn_buf);
		}
	}
	verbose(env, ")");
}
709
/* Dump the full state of one function frame: registers, stack slots,
 * acquired references and callback flags. With print_all == false only
 * registers/slots scratched since the last dump are shown and the
 * scratch marks are cleared at the end.
 */
void print_verifier_state(struct bpf_verifier_env *env, const struct bpf_func_state *state,
			  bool print_all)
{
	const struct bpf_reg_state *reg;
	int i;

	if (state->frameno)
		verbose(env, " frame%d:", state->frameno);
	for (i = 0; i < MAX_BPF_REG; i++) {
		reg = &state->regs[i];
		if (reg->type == NOT_INIT)
			continue;
		if (!print_all && !reg_scratched(env, i))
			continue;
		verbose(env, " R%d", i);
		print_liveness(env, reg->live);
		verbose(env, "=");
		print_reg_state(env, state, reg);
	}
	for (i = 0; i < state->allocated_stack / BPF_REG_SIZE; i++) {
		char types_buf[BPF_REG_SIZE + 1];
		const char *sep = "";
		bool valid = false;
		u8 slot_type;
		int j;

		if (!print_all && !stack_slot_scratched(env, i))
			continue;

		/* build a per-byte type string for this 8-byte slot and
		 * remember whether any byte is initialized at all
		 */
		for (j = 0; j < BPF_REG_SIZE; j++) {
			slot_type = state->stack[i].slot_type[j];
			if (slot_type != STACK_INVALID)
				valid = true;
			types_buf[j] = slot_type_char[slot_type];
		}
		types_buf[BPF_REG_SIZE] = 0;
		if (!valid)
			continue;

		/* dispatch on the type of the slot's last byte */
		reg = &state->stack[i].spilled_ptr;
		switch (state->stack[i].slot_type[BPF_REG_SIZE - 1]) {
		case STACK_SPILL:
			/* print MISC/ZERO/INVALID slots above subreg spill */
			for (j = 0; j < BPF_REG_SIZE; j++)
				if (state->stack[i].slot_type[j] == STACK_SPILL)
					break;
			types_buf[j] = '\0';

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=%s", types_buf);
			print_reg_state(env, state, reg);
			break;
		case STACK_DYNPTR:
			/* skip to main dynptr slot */
			i += BPF_DYNPTR_NR_SLOTS - 1;
			reg = &state->stack[i].spilled_ptr;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=dynptr_%s(", dynptr_type_str(reg->dynptr.type));
			if (reg->id)
				verbose_a("id=%d", reg->id);
			if (reg->ref_obj_id)
				verbose_a("ref_id=%d", reg->ref_obj_id);
			if (reg->dynptr_id)
				verbose_a("dynptr_id=%d", reg->dynptr_id);
			verbose(env, ")");
			break;
		case STACK_ITER:
			/* only main slot has ref_obj_id set; skip others */
			if (!reg->ref_obj_id)
				continue;

			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=iter_%s(ref_id=%d,state=%s,depth=%u)",
				iter_type_str(reg->iter.btf, reg->iter.btf_id),
				reg->ref_obj_id, iter_state_str(reg->iter.state),
				reg->iter.depth);
			break;
		case STACK_MISC:
		case STACK_ZERO:
		default:
			verbose(env, " fp%d", (-i - 1) * BPF_REG_SIZE);
			print_liveness(env, reg->live);
			verbose(env, "=%s", types_buf);
			break;
		}
	}
	if (state->acquired_refs && state->refs[0].id) {
		verbose(env, " refs=%d", state->refs[0].id);
		for (i = 1; i < state->acquired_refs; i++)
			if (state->refs[i].id)
				verbose(env, ",%d", state->refs[i].id);
	}
	if (state->in_callback_fn)
		verbose(env, " cb");
	if (state->in_async_callback_fn)
		verbose(env, " async_cb");
	verbose(env, "\n");
	if (!print_all)
		mark_verifier_state_clean(env);
}
814
815static inline u32 vlog_alignment(u32 pos)
816{
817 return round_up(max(pos + BPF_LOG_MIN_ALIGNMENT / 2, BPF_LOG_ALIGNMENT),
818 BPF_LOG_MIN_ALIGNMENT) - pos - 1;
819}
820
821void print_insn_state(struct bpf_verifier_env *env, const struct bpf_func_state *state)
822{
823 if (env->prev_log_pos && env->prev_log_pos == env->log.end_pos) {
824 /* remove new line character */
825 bpf_vlog_reset(&env->log, env->prev_log_pos - 1);
826 verbose(env, "%*c;", vlog_alignment(env->prev_insn_print_pos), ' ');
827 } else {
828 verbose(env, "%d:", env->insn_idx);
829 }
830 print_verifier_state(env, state, false);
831}