// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2023 Meta Platforms, Inc. and affiliates. */

#define _GNU_SOURCE
#include <limits.h>
#include <test_progs.h>
#include <linux/filter.h>
#include <linux/bpf.h>

/* =================================
 * SHORT AND CONSISTENT NUMBER TYPES
 * =================================
 */
#define U64_MAX ((u64)UINT64_MAX)
#define U32_MAX ((u32)UINT_MAX)
#define U16_MAX ((u32)USHRT_MAX)
#define S64_MIN ((s64)INT64_MIN)
#define S64_MAX ((s64)INT64_MAX)
#define S32_MIN ((s32)INT_MIN)
#define S32_MAX ((s32)INT_MAX)
#define S16_MIN ((s16)0x8000)
#define S16_MAX ((s16)0x7fff)

typedef unsigned long long ___u64;
typedef unsigned int ___u32;
typedef long long ___s64;
typedef int ___s32;

/* avoid conflicts with already defined types in kernel headers */
#define u64 ___u64
#define u32 ___u32
#define s64 ___s64
#define s32 ___s32

/* ==================================
 * STRING BUF ABSTRACTION AND HELPERS
 * ==================================
 */
struct strbuf {
	size_t buf_sz;
	int pos;
	char buf[0];
};

#define DEFINE_STRBUF(name, N)						\
	struct { struct strbuf buf; char data[(N)]; } ___##name;	\
	struct strbuf *name = (___##name.buf.buf_sz = (N), ___##name.buf.pos = 0, &___##name.buf)

__printf(2, 3)
static inline void snappendf(struct strbuf *s, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	s->pos += vsnprintf(s->buf + s->pos,
			    s->pos < s->buf_sz ? s->buf_sz - s->pos : 0,
			    fmt, args);
	va_end(args);
}
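
/* Usage sketch (illustrative only, not used directly below):
 *
 *	DEFINE_STRBUF(sb, 64);
 *
 *	snappendf(sb, "[%d", 1);
 *	snappendf(sb, "; %d]", 2);
 *	printf("%s\n", sb->buf); // prints "[1; 2]"
 *
 * Like vsnprintf(), snappendf() never writes past the buffer; pos keeps
 * growing logically even if output ends up truncated.
 */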

/* ==================================
 * GENERIC NUMBER TYPE AND OPERATIONS
 * ==================================
 */
enum num_t { U64, first_t = U64, U32, S64, S32, last_t = S32 };

static __always_inline u64 min_t(enum num_t t, u64 x, u64 y)
{
	switch (t) {
	case U64: return (u64)x < (u64)y ? (u64)x : (u64)y;
	case U32: return (u32)x < (u32)y ? (u32)x : (u32)y;
	case S64: return (s64)x < (s64)y ? (s64)x : (s64)y;
	case S32: return (s32)x < (s32)y ? (u32)(s32)x : (u32)(s32)y;
	default: printf("min_t!\n"); exit(1);
	}
}

static __always_inline u64 max_t(enum num_t t, u64 x, u64 y)
{
	switch (t) {
	case U64: return (u64)x > (u64)y ? (u64)x : (u64)y;
	case U32: return (u32)x > (u32)y ? (u32)x : (u32)y;
	case S64: return (s64)x > (s64)y ? (s64)x : (s64)y;
	case S32: return (s32)x > (s32)y ? (u32)(s32)x : (u32)(s32)y;
	default: printf("max_t!\n"); exit(1);
	}
}

static __always_inline u64 cast_t(enum num_t t, u64 x)
{
	switch (t) {
	case U64: return (u64)x;
	case U32: return (u32)x;
	case S64: return (s64)x;
	case S32: return (u32)(s32)x;
	default: printf("cast_t!\n"); exit(1);
	}
}
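
/* Note on the canonical representation used throughout: 32-bit values
 * (U32/S32) are kept zero-extended inside u64, so, e.g.,
 * cast_t(S32, 0xffffffffffffffff) yields 0x00000000ffffffff (s32 -1 in
 * canonical form), not a sign-extended u64.
 */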

static const char *t_str(enum num_t t)
{
	switch (t) {
	case U64: return "u64";
	case U32: return "u32";
	case S64: return "s64";
	case S32: return "s32";
	default: printf("t_str!\n"); exit(1);
	}
}
static bool t_is_32(enum num_t t)
{
	switch (t) {
	case U64: return false;
	case U32: return true;
	case S64: return false;
	case S32: return true;
	default: printf("t_is_32!\n"); exit(1);
	}
}

static enum num_t t_signed(enum num_t t)
{
	switch (t) {
	case U64: return S64;
	case U32: return S32;
	case S64: return S64;
	case S32: return S32;
	default: printf("t_signed!\n"); exit(1);
	}
}

static enum num_t t_unsigned(enum num_t t)
{
	switch (t) {
	case U64: return U64;
	case U32: return U32;
	case S64: return U64;
	case S32: return U32;
	default: printf("t_unsigned!\n"); exit(1);
	}
}

#define UNUM_MAX_DECIMAL U16_MAX
#define SNUM_MAX_DECIMAL S16_MAX
#define SNUM_MIN_DECIMAL S16_MIN

static bool num_is_small(enum num_t t, u64 x)
{
	switch (t) {
	case U64: return (u64)x <= UNUM_MAX_DECIMAL;
	case U32: return (u32)x <= UNUM_MAX_DECIMAL;
	case S64: return (s64)x >= SNUM_MIN_DECIMAL && (s64)x <= SNUM_MAX_DECIMAL;
	case S32: return (s32)x >= SNUM_MIN_DECIMAL && (s32)x <= SNUM_MAX_DECIMAL;
	default: printf("num_is_small!\n"); exit(1);
	}
}

static void snprintf_num(enum num_t t, struct strbuf *sb, u64 x)
{
	bool is_small = num_is_small(t, x);

	if (is_small) {
		switch (t) {
		case U64: return snappendf(sb, "%llu", (u64)x);
		case U32: return snappendf(sb, "%u", (u32)x);
		case S64: return snappendf(sb, "%lld", (s64)x);
		case S32: return snappendf(sb, "%d", (s32)x);
		default: printf("snprintf_num!\n"); exit(1);
		}
	} else {
		switch (t) {
		case U64:
			if (x == U64_MAX)
				return snappendf(sb, "U64_MAX");
			else if (x >= U64_MAX - 256)
				return snappendf(sb, "U64_MAX-%llu", U64_MAX - x);
			else
				return snappendf(sb, "%#llx", (u64)x);
		case U32:
			if ((u32)x == U32_MAX)
				return snappendf(sb, "U32_MAX");
			else if ((u32)x >= U32_MAX - 256)
				return snappendf(sb, "U32_MAX-%u", U32_MAX - (u32)x);
			else
				return snappendf(sb, "%#x", (u32)x);
		case S64:
			if ((s64)x == S64_MAX)
				return snappendf(sb, "S64_MAX");
			else if ((s64)x >= S64_MAX - 256)
				return snappendf(sb, "S64_MAX-%lld", S64_MAX - (s64)x);
			else if ((s64)x == S64_MIN)
				return snappendf(sb, "S64_MIN");
			else if ((s64)x <= S64_MIN + 256)
				return snappendf(sb, "S64_MIN+%lld", (s64)x - S64_MIN);
			else
				return snappendf(sb, "%#llx", (s64)x);
		case S32:
			if ((s32)x == S32_MAX)
				return snappendf(sb, "S32_MAX");
			else if ((s32)x >= S32_MAX - 256)
				return snappendf(sb, "S32_MAX-%d", S32_MAX - (s32)x);
			else if ((s32)x == S32_MIN)
				return snappendf(sb, "S32_MIN");
			else if ((s32)x <= S32_MIN + 256)
				return snappendf(sb, "S32_MIN+%d", (s32)x - S32_MIN);
			else
				return snappendf(sb, "%#x", (s32)x);
		default: printf("snprintf_num!\n"); exit(1);
		}
	}
}

/* ===================================
 * GENERIC RANGE STRUCT AND OPERATIONS
 * ===================================
 */
struct range {
	u64 a, b;
};

static void snprintf_range(enum num_t t, struct strbuf *sb, struct range x)
{
	if (x.a == x.b)
		return snprintf_num(t, sb, x.a);

	snappendf(sb, "[");
	snprintf_num(t, sb, x.a);
	snappendf(sb, "; ");
	snprintf_num(t, sb, x.b);
	snappendf(sb, "]");
}

static void print_range(enum num_t t, struct range x, const char *sfx)
{
	DEFINE_STRBUF(sb, 128);

	snprintf_range(t, sb, x);
	printf("%s%s", sb->buf, sfx);
}

static const struct range unkn[] = {
	[U64] = { 0, U64_MAX },
	[U32] = { 0, U32_MAX },
	[S64] = { (u64)S64_MIN, (u64)S64_MAX },
	[S32] = { (u64)(u32)S32_MIN, (u64)(u32)S32_MAX },
};

static struct range unkn_subreg(enum num_t t)
{
	switch (t) {
	case U64: return unkn[U32];
	case U32: return unkn[U32];
	case S64: return unkn[U32];
	case S32: return unkn[S32];
	default: printf("unkn_subreg!\n"); exit(1);
	}
}

static struct range range(enum num_t t, u64 a, u64 b)
{
	switch (t) {
	case U64: return (struct range){ (u64)a, (u64)b };
	case U32: return (struct range){ (u32)a, (u32)b };
	case S64: return (struct range){ (s64)a, (s64)b };
	case S32: return (struct range){ (u32)(s32)a, (u32)(s32)b };
	default: printf("range!\n"); exit(1);
	}
}

static __always_inline u32 sign64(u64 x) { return (x >> 63) & 1; }
static __always_inline u32 sign32(u64 x) { return ((u32)x >> 31) & 1; }
static __always_inline u32 upper32(u64 x) { return (u32)(x >> 32); }
static __always_inline u64 swap_low32(u64 x, u32 y) { return (x & 0xffffffff00000000ULL) | y; }
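
/* E.g., swap_low32(0x1234567800000000, 0xdeadbeef) == 0x12345678deadbeef;
 * this is how refined 32-bit subregister bounds get transplanted into the
 * lower half of a full 64-bit register range in range_refine() below.
 */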

static bool range_eq(struct range x, struct range y)
{
	return x.a == y.a && x.b == y.b;
}

static struct range range_cast_to_s32(struct range x)
{
	u64 a = x.a, b = x.b;

	/* if upper 32 bits are constant, lower 32 bits should form a proper
	 * s32 range to be correct
	 */
	if (upper32(a) == upper32(b) && (s32)a <= (s32)b)
		return range(S32, a, b);

	/* Special case where upper bits form a small sequence of two
	 * sequential numbers (in 32-bit unsigned space, so 0xffffffff to
	 * 0x00000000 is also valid), while lower bits form a proper s32 range
	 * going from negative numbers to positive numbers.
	 *
	 * E.g.: [0xfffffff0ffffff00; 0xfffffff100000010]. Iterating
	 * over full 64-bit numbers range will form a proper [-16, 16]
	 * ([0xffffff00; 0x00000010]) range in its lower 32 bits.
	 */
	if (upper32(a) + 1 == upper32(b) && (s32)a < 0 && (s32)b >= 0)
		return range(S32, a, b);

	/* otherwise we can't derive much meaningful information */
	return unkn[S32];
}

static struct range range_cast_u64(enum num_t to_t, struct range x)
{
	u64 a = (u64)x.a, b = (u64)x.b;

	switch (to_t) {
	case U64:
		return x;
	case U32:
		if (upper32(a) != upper32(b))
			return unkn[U32];
		return range(U32, a, b);
	case S64:
		if (sign64(a) != sign64(b))
			return unkn[S64];
		return range(S64, a, b);
	case S32:
		return range_cast_to_s32(x);
	default: printf("range_cast_u64!\n"); exit(1);
	}
}

static struct range range_cast_s64(enum num_t to_t, struct range x)
{
	s64 a = (s64)x.a, b = (s64)x.b;

	switch (to_t) {
	case U64:
		/* equivalent to (s64)a <= (s64)b check */
		if (sign64(a) != sign64(b))
			return unkn[U64];
		return range(U64, a, b);
	case U32:
		if (upper32(a) != upper32(b) || sign32(a) != sign32(b))
			return unkn[U32];
		return range(U32, a, b);
	case S64:
		return x;
	case S32:
		return range_cast_to_s32(x);
	default: printf("range_cast_s64!\n"); exit(1);
	}
}

static struct range range_cast_u32(enum num_t to_t, struct range x)
{
	u32 a = (u32)x.a, b = (u32)x.b;

	switch (to_t) {
	case U64:
	case S64:
		/* u32 is always a valid zero-extended u64/s64 */
		return range(to_t, a, b);
	case U32:
		return x;
	case S32:
		return range_cast_to_s32(range(U32, a, b));
	default: printf("range_cast_u32!\n"); exit(1);
	}
}

static struct range range_cast_s32(enum num_t to_t, struct range x)
{
	s32 a = (s32)x.a, b = (s32)x.b;

	switch (to_t) {
	case U64:
	case U32:
	case S64:
		if (sign32(a) != sign32(b))
			return unkn[to_t];
		return range(to_t, a, b);
	case S32:
		return x;
	default: printf("range_cast_s32!\n"); exit(1);
	}
}

/* Reinterpret range in *from_t* domain as a range in *to_t* domain preserving
 * all possible information. Worst case, it will be unknown range within
 * *to_t* domain, if nothing more specific can be guaranteed during the
 * conversion
 */
static struct range range_cast(enum num_t from_t, enum num_t to_t, struct range from)
{
	switch (from_t) {
	case U64: return range_cast_u64(to_t, from);
	case U32: return range_cast_u32(to_t, from);
	case S64: return range_cast_s64(to_t, from);
	case S32: return range_cast_s32(to_t, from);
	default: printf("range_cast!\n"); exit(1);
	}
}
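
/* Worked examples: u64 range [0xffffff00; 0xffffffff] has constant (zero)
 * upper 32 bits, so it survives the cast to u32 as [U32_MAX-255; U32_MAX]
 * and, because its lower 32 bits form a valid s32 range, converts to s32 as
 * [-256; -1]. By contrast, u64 [1; U64_MAX] crosses the s64 sign boundary,
 * so its s64 reinterpretation degrades to unknown: [S64_MIN; S64_MAX].
 */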

static bool is_valid_num(enum num_t t, u64 x)
{
	switch (t) {
	case U64: return true;
	case U32: return upper32(x) == 0;
	case S64: return true;
	case S32: return upper32(x) == 0;
	default: printf("is_valid_num!\n"); exit(1);
	}
}

static bool is_valid_range(enum num_t t, struct range x)
{
	if (!is_valid_num(t, x.a) || !is_valid_num(t, x.b))
		return false;

	switch (t) {
	case U64: return (u64)x.a <= (u64)x.b;
	case U32: return (u32)x.a <= (u32)x.b;
	case S64: return (s64)x.a <= (s64)x.b;
	case S32: return (s32)x.a <= (s32)x.b;
	default: printf("is_valid_range!\n"); exit(1);
	}
}

static struct range range_improve(enum num_t t, struct range old, struct range new)
{
	return range(t, max_t(t, old.a, new.a), min_t(t, old.b, new.b));
}

static struct range range_refine(enum num_t x_t, struct range x, enum num_t y_t, struct range y)
{
	struct range y_cast;

	y_cast = range_cast(y_t, x_t, y);

	/* the case when new range knowledge, *y*, is a 32-bit subregister
	 * range, while previous range knowledge, *x*, is a full register
	 * 64-bit range, needs special treatment to take into account upper 32
	 * bits of full register range
	 */
	if (t_is_32(y_t) && !t_is_32(x_t)) {
		struct range x_swap;

		/* some combinations of upper 32 bits and sign bit can lead to
		 * invalid ranges, in such cases it's easier to detect them
		 * after cast/swap than try to enumerate all the conditions
		 * under which transformation and knowledge transfer is valid
		 */
		x_swap = range(x_t, swap_low32(x.a, y_cast.a), swap_low32(x.b, y_cast.b));
		if (!is_valid_range(x_t, x_swap))
			return x;
		return range_improve(x_t, x, x_swap);
	}

	/* otherwise, plain range cast and intersection works */
	return range_improve(x_t, x, y_cast);
}
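
/* E.g., if x is u64 [0x100000000; 0x1ffffffff] (upper 32 bits fixed at 1)
 * and we learn u32 subregister range y = [0x10; 0x20], swapping y into x's
 * lower 32 bits yields the valid u64 range [0x100000010; 0x100000020],
 * which then intersects with x to produce the final refined range.
 */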

/* =======================
 * GENERIC CONDITIONAL OPS
 * =======================
 */
enum op { OP_LT, OP_LE, OP_GT, OP_GE, OP_EQ, OP_NE, first_op = OP_LT, last_op = OP_NE };

static enum op complement_op(enum op op)
{
	switch (op) {
	case OP_LT: return OP_GE;
	case OP_LE: return OP_GT;
	case OP_GT: return OP_LE;
	case OP_GE: return OP_LT;
	case OP_EQ: return OP_NE;
	case OP_NE: return OP_EQ;
	default: printf("complement_op!\n"); exit(1);
	}
}

static const char *op_str(enum op op)
{
	switch (op) {
	case OP_LT: return "<";
	case OP_LE: return "<=";
	case OP_GT: return ">";
	case OP_GE: return ">=";
	case OP_EQ: return "==";
	case OP_NE: return "!=";
	default: printf("op_str!\n"); exit(1);
	}
}

/* Can a register with range [x.a, x.b] *EVER* satisfy
 * OP (<, <=, >, >=, ==, !=) relation to
 * a register with range [y.a, y.b]
 * _in *num_t* domain_
 */
static bool range_canbe_op(enum num_t t, struct range x, struct range y, enum op op)
{
#define range_canbe(T) do {									\
	switch (op) {										\
	case OP_LT: return (T)x.a < (T)y.b;							\
	case OP_LE: return (T)x.a <= (T)y.b;							\
	case OP_GT: return (T)x.b > (T)y.a;							\
	case OP_GE: return (T)x.b >= (T)y.a;							\
	case OP_EQ: return (T)max_t(t, x.a, y.a) <= (T)min_t(t, x.b, y.b);			\
	case OP_NE: return !((T)x.a == (T)x.b && (T)y.a == (T)y.b && (T)x.a == (T)y.a);	\
	default: printf("range_canbe op %d\n", op); exit(1);					\
	}											\
} while (0)

	switch (t) {
	case U64: { range_canbe(u64); }
	case U32: { range_canbe(u32); }
	case S64: { range_canbe(s64); }
	case S32: { range_canbe(s32); }
	default: printf("range_canbe!\n"); exit(1);
	}
#undef range_canbe
}

/* Does a register with range [x.a, x.b] *ALWAYS* satisfy
 * OP (<, <=, >, >=, ==, !=) relation to
 * a register with range [y.a, y.b]
 * _in *num_t* domain_
 */
static bool range_always_op(enum num_t t, struct range x, struct range y, enum op op)
{
	/* always op <=> ! canbe complement(op) */
	return !range_canbe_op(t, x, y, complement_op(op));
}

/* Does a register with range [x.a, x.b] *NEVER* satisfy
 * OP (<, <=, >, >=, ==, !=) relation to
 * a register with range [y.a, y.b]
 * _in *num_t* domain_
 */
static bool range_never_op(enum num_t t, struct range x, struct range y, enum op op)
{
	return !range_canbe_op(t, x, y, op);
}

/* similar to verifier's is_branch_taken():
 *    1 - always taken;
 *    0 - never taken,
 *   -1 - unsure.
 */
static int range_branch_taken_op(enum num_t t, struct range x, struct range y, enum op op)
{
	if (range_always_op(t, x, y, op))
		return 1;
	if (range_never_op(t, x, y, op))
		return 0;
	return -1;
}
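
/* E.g., with u64 x = [1; 5] and y = [10; 20], x < y can always be satisfied
 * and x >= y never can, so range_branch_taken_op() returns 1 and 0,
 * respectively; widen x to [1; 15] and both outcomes become possible for
 * either op, so it returns -1.
 */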

/* What would be the new estimates for register x and y ranges assuming truthful
 * OP comparison between them. I.e., (x OP y == true) => x <- newx, y <- newy.
 *
 * We assume "interesting" cases where ranges overlap. Cases where it's
 * obvious that (x OP y) is either always true or false should be filtered with
 * range_never and range_always checks.
 */
static void range_cond(enum num_t t, struct range x, struct range y,
		       enum op op, struct range *newx, struct range *newy)
{
	if (!range_canbe_op(t, x, y, op)) {
		/* nothing to adjust, can't happen, return original values */
		*newx = x;
		*newy = y;
		return;
	}
	switch (op) {
	case OP_LT:
		*newx = range(t, x.a, min_t(t, x.b, y.b - 1));
		*newy = range(t, max_t(t, x.a + 1, y.a), y.b);
		break;
	case OP_LE:
		*newx = range(t, x.a, min_t(t, x.b, y.b));
		*newy = range(t, max_t(t, x.a, y.a), y.b);
		break;
	case OP_GT:
		*newx = range(t, max_t(t, x.a, y.a + 1), x.b);
		*newy = range(t, y.a, min_t(t, x.b - 1, y.b));
		break;
	case OP_GE:
		*newx = range(t, max_t(t, x.a, y.a), x.b);
		*newy = range(t, y.a, min_t(t, x.b, y.b));
		break;
	case OP_EQ:
		*newx = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
		*newy = range(t, max_t(t, x.a, y.a), min_t(t, x.b, y.b));
		break;
	case OP_NE:
		/* below logic is supported by the verifier now */
		if (x.a == x.b && x.a == y.a) {
			/* X is a constant matching left side of Y */
			*newx = range(t, x.a, x.b);
			*newy = range(t, y.a + 1, y.b);
		} else if (x.a == x.b && x.b == y.b) {
			/* X is a constant matching right side of Y */
			*newx = range(t, x.a, x.b);
			*newy = range(t, y.a, y.b - 1);
		} else if (y.a == y.b && x.a == y.a) {
			/* Y is a constant matching left side of X */
			*newx = range(t, x.a + 1, x.b);
			*newy = range(t, y.a, y.b);
		} else if (y.a == y.b && x.b == y.b) {
			/* Y is a constant matching right side of X */
			*newx = range(t, x.a, x.b - 1);
			*newy = range(t, y.a, y.b);
		} else {
			/* generic case, can't derive more information */
			*newx = range(t, x.a, x.b);
			*newy = range(t, y.a, y.b);
		}

		break;
	default:
		break;
	}
}
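
/* Worked example (u64 domain): for x = [0; 10], y = [5; 7], and OP_LT,
 * assuming x < y was taken:
 *	newx = [x.a; min(x.b, y.b - 1)] = [0; 6]
 *	newy = [max(x.a + 1, y.a); y.b] = [5; 7]
 */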

/* =======================
 * REGISTER STATE HANDLING
 * =======================
 */
struct reg_state {
	struct range r[4]; /* indexed by enum num_t: U64, U32, S64, S32 */
	bool valid;
};

static void print_reg_state(struct reg_state *r, const char *sfx)
{
	DEFINE_STRBUF(sb, 512);
	enum num_t t;
	int cnt = 0;

	if (!r->valid) {
		printf("<not found>%s", sfx);
		return;
	}

	snappendf(sb, "scalar(");
	for (t = first_t; t <= last_t; t++) {
		snappendf(sb, "%s%s=", cnt++ ? "," : "", t_str(t));
		snprintf_range(t, sb, r->r[t]);
	}
	snappendf(sb, ")");

	printf("%s%s", sb->buf, sfx);
}

static void print_refinement(enum num_t s_t, struct range src,
			     enum num_t d_t, struct range old, struct range new,
			     const char *ctx)
{
	printf("REFINING (%s) (%s)SRC=", ctx, t_str(s_t));
	print_range(s_t, src, "");
	printf(" (%s)DST_OLD=", t_str(d_t));
	print_range(d_t, old, "");
	printf(" (%s)DST_NEW=", t_str(d_t));
	print_range(d_t, new, "\n");
}

static void reg_state_refine(struct reg_state *r, enum num_t t, struct range x, const char *ctx)
{
	enum num_t d_t, s_t;
	struct range old;
	bool keep_going = false;

again:
	/* try to derive new knowledge from just learned range x of type t */
	for (d_t = first_t; d_t <= last_t; d_t++) {
		old = r->r[d_t];
		r->r[d_t] = range_refine(d_t, r->r[d_t], t, x);
		if (!range_eq(r->r[d_t], old)) {
			keep_going = true;
			if (env.verbosity >= VERBOSE_VERY)
				print_refinement(t, x, d_t, old, r->r[d_t], ctx);
		}
	}

	/* now see if we can derive anything new from updated reg_state's ranges */
	for (s_t = first_t; s_t <= last_t; s_t++) {
		for (d_t = first_t; d_t <= last_t; d_t++) {
			old = r->r[d_t];
			r->r[d_t] = range_refine(d_t, r->r[d_t], s_t, r->r[s_t]);
			if (!range_eq(r->r[d_t], old)) {
				keep_going = true;
				if (env.verbosity >= VERBOSE_VERY)
					print_refinement(s_t, r->r[s_t], d_t, old, r->r[d_t], ctx);
			}
		}
	}

	/* keep refining until we converge */
	if (keep_going) {
		keep_going = false;
		goto again;
	}
}

static void reg_state_set_const(struct reg_state *rs, enum num_t t, u64 val)
{
	enum num_t tt;

	rs->valid = true;
	for (tt = first_t; tt <= last_t; tt++)
		rs->r[tt] = tt == t ? range(t, val, val) : unkn[tt];

	reg_state_refine(rs, t, rs->r[t], "CONST");
}
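
/* E.g., reg_state_set_const(rs, U64, 5) seeds u64=[5; 5] and lets
 * reg_state_refine() propagate it, collapsing all domains to the same
 * constant: scalar(u64=5,u32=5,s64=5,s32=5).
 */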

static void reg_state_cond(enum num_t t, struct reg_state *x, struct reg_state *y, enum op op,
			   struct reg_state *newx, struct reg_state *newy, const char *ctx)
{
	char buf[32];
	enum num_t ts[2];
	struct reg_state xx = *x, yy = *y;
	int i, t_cnt;
	struct range z1, z2;

	if (op == OP_EQ || op == OP_NE) {
		/* OP_EQ and OP_NE are sign-agnostic, so we need to process
		 * both signed and unsigned domains at the same time
		 */
		ts[0] = t_unsigned(t);
		ts[1] = t_signed(t);
		t_cnt = 2;
	} else {
		ts[0] = t;
		t_cnt = 1;
	}

	for (i = 0; i < t_cnt; i++) {
		t = ts[i];
		z1 = x->r[t];
		z2 = y->r[t];

		range_cond(t, z1, z2, op, &z1, &z2);

		if (newx) {
			snprintf(buf, sizeof(buf), "%s R1", ctx);
			reg_state_refine(&xx, t, z1, buf);
		}
		if (newy) {
			snprintf(buf, sizeof(buf), "%s R2", ctx);
			reg_state_refine(&yy, t, z2, buf);
		}
	}

	if (newx)
		*newx = xx;
	if (newy)
		*newy = yy;
}

static int reg_state_branch_taken_op(enum num_t t, struct reg_state *x, struct reg_state *y,
				     enum op op)
{
	if (op == OP_EQ || op == OP_NE) {
		/* OP_EQ and OP_NE are sign-agnostic */
		enum num_t tu = t_unsigned(t);
		enum num_t ts = t_signed(t);
		int br_u, br_s, br;

		br_u = range_branch_taken_op(tu, x->r[tu], y->r[tu], op);
		br_s = range_branch_taken_op(ts, x->r[ts], y->r[ts], op);

		if (br_u >= 0 && br_s >= 0 && br_u != br_s)
			ASSERT_FALSE(true, "branch taken inconsistency!\n");

		/* if 64-bit ranges are indecisive, use 32-bit subranges to
		 * eliminate always/never taken branches, if possible
		 */
		if (br_u == -1 && (t == U64 || t == S64)) {
			br = range_branch_taken_op(U32, x->r[U32], y->r[U32], op);
			/* we can only reject for OP_EQ, never take branch
			 * based on lower 32 bits
			 */
			if (op == OP_EQ && br == 0)
				return 0;
			/* for OP_NE we can be conclusive only if lower 32 bits
			 * differ and thus inequality branch is always taken
			 */
			if (op == OP_NE && br == 1)
				return 1;

			br = range_branch_taken_op(S32, x->r[S32], y->r[S32], op);
			if (op == OP_EQ && br == 0)
				return 0;
			if (op == OP_NE && br == 1)
				return 1;
		}

		return br_u >= 0 ? br_u : br_s;
	}
	return range_branch_taken_op(t, x->r[t], y->r[t], op);
}
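
/* E.g., for a 64-bit OP_EQ whose u64/s64 ranges are indecisive, if the u32
 * subranges don't even overlap (say, x is [1; 5] and y is [10; 20] in u32),
 * the full 64-bit values can't possibly be equal either, so the branch is
 * known to be not taken.
 */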

/* =====================================
 * BPF PROGS GENERATION AND VERIFICATION
 * =====================================
 */
struct case_spec {
	/* whether to init full register (r1) or sub-register (w1) */
	bool init_subregs;
	/* whether to establish initial value range on full register (r1) or
	 * sub-register (w1)
	 */
	bool setup_subregs;
	/* whether to establish initial value range using signed or unsigned
	 * comparisons (i.e., initialize umin/umax or smin/smax directly)
	 */
	bool setup_signed;
	/* whether to perform comparison on full registers or sub-registers */
	bool compare_subregs;
	/* whether to perform comparison using signed or unsigned operations */
	bool compare_signed;
};
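
/* E.g., a spec with .compare_subregs = true and .compare_signed = true
 * exercises the 32-bit signed "if w6 s< w7" form of the conditional jump,
 * while an all-false spec tests the plain 64-bit unsigned "if r6 < r7"
 * variant.
 */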

/* Generate test BPF program based on provided test ranges, operation, and
 * specifications about register bitness and signedness.
 */
static int load_range_cmp_prog(struct range x, struct range y, enum op op,
			       int branch_taken, struct case_spec spec,
			       char *log_buf, size_t log_sz,
			       int *false_pos, int *true_pos)
{
#define emit(insn) ({							\
	struct bpf_insn __insns[] = { insn };				\
	int __i;							\
	for (__i = 0; __i < ARRAY_SIZE(__insns); __i++)			\
		insns[cur_pos + __i] = __insns[__i];			\
	cur_pos += __i;							\
})
#define JMP_TO(target) (target - cur_pos - 1)
	int cur_pos = 0, exit_pos, fd, op_code;
	struct bpf_insn insns[64];
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.log_level = 2,
		.log_buf = log_buf,
		.log_size = log_sz,
		.prog_flags = testing_prog_flags(),
	);

	/* ; skip exit block below
	 * goto +2;
	 */
	emit(BPF_JMP_A(2));
	exit_pos = cur_pos;
	/* ; exit block for all the preparatory conditionals
	 * out:
	 * r0 = 0;
	 * exit;
	 */
	emit(BPF_MOV64_IMM(BPF_REG_0, 0));
	emit(BPF_EXIT_INSN());
	/*
	 * ; assign r6/w6 and r7/w7 unpredictable u64/u32 value
	 * call bpf_get_current_pid_tgid;
	 * r6 = r0;               | w6 = w0;
	 * call bpf_get_current_pid_tgid;
	 * r7 = r0;               | w7 = w0;
	 */
	emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid));
	if (spec.init_subregs)
		emit(BPF_MOV32_REG(BPF_REG_6, BPF_REG_0));
	else
		emit(BPF_MOV64_REG(BPF_REG_6, BPF_REG_0));
	emit(BPF_EMIT_CALL(BPF_FUNC_get_current_pid_tgid));
	if (spec.init_subregs)
		emit(BPF_MOV32_REG(BPF_REG_7, BPF_REG_0));
	else
		emit(BPF_MOV64_REG(BPF_REG_7, BPF_REG_0));
	/* ; setup initial r6/w6 possible value range ([x.a, x.b])
	 * r1 = %[x.a] ll;        | w1 = %[x.a];
	 * r2 = %[x.b] ll;        | w2 = %[x.b];
	 * if r6 < r1 goto out;   | if w6 < w1 goto out;
	 * if r6 > r2 goto out;   | if w6 > w2 goto out;
	 */
	if (spec.setup_subregs) {
		emit(BPF_MOV32_IMM(BPF_REG_1, (s32)x.a));
		emit(BPF_MOV32_IMM(BPF_REG_2, (s32)x.b));
		emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
				   BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
		emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
				   BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
	} else {
		emit(BPF_LD_IMM64(BPF_REG_1, x.a));
		emit(BPF_LD_IMM64(BPF_REG_2, x.b));
		emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
				 BPF_REG_6, BPF_REG_1, JMP_TO(exit_pos)));
		emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
				 BPF_REG_6, BPF_REG_2, JMP_TO(exit_pos)));
	}
	/* ; setup initial r7/w7 possible value range ([y.a, y.b])
	 * r1 = %[y.a] ll;        | w1 = %[y.a];
	 * r2 = %[y.b] ll;        | w2 = %[y.b];
	 * if r7 < r1 goto out;   | if w7 < w1 goto out;
	 * if r7 > r2 goto out;   | if w7 > w2 goto out;
	 */
	if (spec.setup_subregs) {
		emit(BPF_MOV32_IMM(BPF_REG_1, (s32)y.a));
		emit(BPF_MOV32_IMM(BPF_REG_2, (s32)y.b));
		emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
				   BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
		emit(BPF_JMP32_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
				   BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
	} else {
		emit(BPF_LD_IMM64(BPF_REG_1, y.a));
		emit(BPF_LD_IMM64(BPF_REG_2, y.b));
		emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSLT : BPF_JLT,
				 BPF_REG_7, BPF_REG_1, JMP_TO(exit_pos)));
		emit(BPF_JMP_REG(spec.setup_signed ? BPF_JSGT : BPF_JGT,
				 BPF_REG_7, BPF_REG_2, JMP_TO(exit_pos)));
	}
	/* ; range test instruction
	 * if r6 <op> r7 goto +3; | if w6 <op> w7 goto +3;
	 */
	switch (op) {
	case OP_LT: op_code = spec.compare_signed ? BPF_JSLT : BPF_JLT; break;
	case OP_LE: op_code = spec.compare_signed ? BPF_JSLE : BPF_JLE; break;
	case OP_GT: op_code = spec.compare_signed ? BPF_JSGT : BPF_JGT; break;
	case OP_GE: op_code = spec.compare_signed ? BPF_JSGE : BPF_JGE; break;
	case OP_EQ: op_code = BPF_JEQ; break;
	case OP_NE: op_code = BPF_JNE; break;
	default:
		printf("unrecognized op %d\n", op);
		return -ENOTSUP;
	}
	/* ; BEFORE conditional, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
	 * ; this is used for debugging, as verifier doesn't always print
	 * ; registers states as of condition jump instruction (e.g., when
	 * ; precision marking happens)
	 * r0 = r6;               | w0 = w6;
	 * r0 = r7;               | w0 = w7;
	 */
	if (spec.compare_subregs) {
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
	} else {
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	}
	if (spec.compare_subregs)
		emit(BPF_JMP32_REG(op_code, BPF_REG_6, BPF_REG_7, 3));
	else
		emit(BPF_JMP_REG(op_code, BPF_REG_6, BPF_REG_7, 3));
	/* ; FALSE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
	 * r0 = r6;               | w0 = w6;
	 * r0 = r7;               | w0 = w7;
	 * exit;
	 */
	*false_pos = cur_pos;
	if (spec.compare_subregs) {
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
	} else {
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	}
	if (branch_taken == 1) /* false branch is never taken */
		emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */
	else
		emit(BPF_EXIT_INSN());
	/* ; TRUE branch, r0/w0 = {r6/w6,r7/w7} is to extract verifier state reliably
	 * r0 = r6;               | w0 = w6;
	 * r0 = r7;               | w0 = w7;
	 * exit;
	 */
	*true_pos = cur_pos;
	if (spec.compare_subregs) {
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV32_REG(BPF_REG_0, BPF_REG_7));
	} else {
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_6));
		emit(BPF_MOV64_REG(BPF_REG_0, BPF_REG_7));
	}
	if (branch_taken == 0) /* true branch is never taken */
		emit(BPF_EMIT_CALL(0xDEAD)); /* poison this branch */
	emit(BPF_EXIT_INSN()); /* last instruction has to be exit */

	fd = bpf_prog_load(BPF_PROG_TYPE_RAW_TRACEPOINT, "reg_bounds_test",
			   "GPL", insns, cur_pos, &opts);
	if (fd < 0)
		return fd;

	close(fd);
	return 0;
#undef emit
#undef JMP_TO
}

#define str_has_pfx(str, pfx) (strncmp(str, pfx, strlen(pfx)) == 0)

/* Parse register state from verifier log.
 * `s` should point to the start of "Rx = ..." substring in the verifier log.
 */
static int parse_reg_state(const char *s, struct reg_state *reg)
{
	/* There are two generic forms for SCALAR register:
	 * - known constant: R6_rwD=P%lld
	 * - range: R6_rwD=scalar(id=1,...), where "..." is a comma-separated
	 *   list of optional range specifiers:
	 *   - umin=%llu, if missing, assumed 0;
	 *   - umax=%llu, if missing, assumed U64_MAX;
	 *   - smin=%lld, if missing, assumed S64_MIN;
	 *   - smax=%lld, if missing, assumed S64_MAX;
	 *   - umin32=%d, if missing, assumed 0;
	 *   - umax32=%d, if missing, assumed U32_MAX;
	 *   - smin32=%d, if missing, assumed S32_MIN;
	 *   - smax32=%d, if missing, assumed S32_MAX;
	 *   - var_off=(%#llx; %#llx), tnum part, we don't care about it.
	 *
	 * If some of the values are equal, they will be grouped (but min/max
	 * are not mixed together, and similarly negative values are not
	 * grouped with non-negative ones). E.g.:
	 *
	 *   R6_w=Pscalar(smin=smin32=0, smax=umax=umax32=1000)
	 *
	 * _rwD part is optional (and any of the letters can be missing).
	 * P (precision mark) is optional as well.
	 *
	 * Anything inside scalar() is optional, including id, of course.
	 */
	struct {
		const char *pfx;
		u64 *dst, def;
		bool is_32, is_set;
	} *f, fields[8] = {
		{"smin=", &reg->r[S64].a, S64_MIN},
		{"smax=", &reg->r[S64].b, S64_MAX},
		{"umin=", &reg->r[U64].a, 0},
		{"umax=", &reg->r[U64].b, U64_MAX},
		{"smin32=", &reg->r[S32].a, (u32)S32_MIN, true},
		{"smax32=", &reg->r[S32].b, (u32)S32_MAX, true},
		{"umin32=", &reg->r[U32].a, 0, true},
		{"umax32=", &reg->r[U32].b, U32_MAX, true},
	};
	const char *p;
	int i;

	p = strchr(s, '=');
	if (!p)
		return -EINVAL;
	p++;
	if (*p == 'P')
		p++;

	if (!str_has_pfx(p, "scalar(")) {
		long long sval;
		enum num_t t;

		if (p[0] == '0' && p[1] == 'x') {
			if (sscanf(p, "%llx", &sval) != 1)
				return -EINVAL;
		} else {
			if (sscanf(p, "%lld", &sval) != 1)
				return -EINVAL;
		}

		reg->valid = true;
		for (t = first_t; t <= last_t; t++) {
			reg->r[t] = range(t, sval, sval);
		}
		return 0;
	}

	p += sizeof("scalar");
	while (p) {
		int midxs[ARRAY_SIZE(fields)], mcnt = 0;
		u64 val;

		for (i = 0; i < ARRAY_SIZE(fields); i++) {
			f = &fields[i];
			if (!str_has_pfx(p, f->pfx))
				continue;
			midxs[mcnt++] = i;
			p += strlen(f->pfx);
		}

		if (mcnt) {
			/* populate all matched fields */
			if (p[0] == '0' && p[1] == 'x') {
				if (sscanf(p, "%llx", &val) != 1)
					return -EINVAL;
			} else {
				if (sscanf(p, "%lld", &val) != 1)
					return -EINVAL;
			}

			for (i = 0; i < mcnt; i++) {
				f = &fields[midxs[i]];
				f->is_set = true;
				*f->dst = f->is_32 ? (u64)(u32)val : val;
			}
		} else if (str_has_pfx(p, "var_off")) {
			/* skip "var_off=(0x0; 0x3f)" part completely */
			p = strchr(p, ')');
			if (!p)
				return -EINVAL;
			p++;
		}

		p = strpbrk(p, ",)");
		if (!p)
			return -EINVAL;
		if (*p == ')')
			break;
		p++;
	}

	reg->valid = true;

	for (i = 0; i < ARRAY_SIZE(fields); i++) {
		f = &fields[i];
		if (!f->is_set)
			*f->dst = f->def;
	}

	return 0;
}
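
/* Example inputs parse_reg_state() handles (hypothetical log snippets):
 *   "R6_w=5"                        -> constant 5 across all domains;
 *   "R6_w=scalar(smin=-16,smax=16)" -> s64 in [-16; 16], rest defaulted;
 *   "R6=scalar(id=1,umax=255,var_off=(0x0; 0xff))" -> u64 in [0; 255].
 */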

/* Parse all register states (TRUE/FALSE branches and DST/SRC registers)
 * out of the verifier log for a corresponding test case BPF program.
 */
static int parse_range_cmp_log(const char *log_buf, struct case_spec spec,
			       int false_pos, int true_pos,
			       struct reg_state *false1_reg, struct reg_state *false2_reg,
			       struct reg_state *true1_reg, struct reg_state *true2_reg)
{
	struct {
		int insn_idx;
		int reg_idx;
		const char *reg_upper;
		struct reg_state *state;
	} specs[] = {
		{false_pos, 6, "R6=", false1_reg},
		{false_pos + 1, 7, "R7=", false2_reg},
		{true_pos, 6, "R6=", true1_reg},
		{true_pos + 1, 7, "R7=", true2_reg},
	};
	char buf[32];
	const char *p = log_buf, *q;
	int i, err;

	for (i = 0; i < 4; i++) {
		sprintf(buf, "%d: (%s) %s = %s%d", specs[i].insn_idx,
			spec.compare_subregs ? "bc" : "bf",
			spec.compare_subregs ? "w0" : "r0",
			spec.compare_subregs ? "w" : "r", specs[i].reg_idx);

		q = strstr(p, buf);
		if (!q) {
			*specs[i].state = (struct reg_state){.valid = false};
			continue;
		}
		p = strstr(q, specs[i].reg_upper);
		if (!p)
			return -EINVAL;
		err = parse_reg_state(p, specs[i].state);
		if (err)
			return -EINVAL;
	}
	return 0;
}

/* Validate ranges match, and print details if they don't */
static bool assert_range_eq(enum num_t t, struct range x, struct range y,
			    const char *ctx1, const char *ctx2)
{
	DEFINE_STRBUF(sb, 512);

	if (range_eq(x, y))
		return true;

	snappendf(sb, "MISMATCH %s.%s: ", ctx1, ctx2);
	snprintf_range(t, sb, x);
	snappendf(sb, " != ");
	snprintf_range(t, sb, y);

	printf("%s\n", sb->buf);

	return false;
}

/* Validate that register states match, and print details if they don't */
static bool assert_reg_state_eq(struct reg_state *r, struct reg_state *e, const char *ctx)
{
	bool ok = true;
	enum num_t t;

	if (r->valid != e->valid) {
		printf("MISMATCH %s: actual %s != expected %s\n", ctx,
		       r->valid ? "<valid>" : "<invalid>",
		       e->valid ? "<valid>" : "<invalid>");
		return false;
	}

	if (!r->valid)
		return true;

	for (t = first_t; t <= last_t; t++) {
		if (!assert_range_eq(t, r->r[t], e->r[t], ctx, t_str(t)))
			ok = false;
	}

	return ok;
}

/* Print verifier log, filtering out irrelevant noise */
static void print_verifier_log(const char *buf)
{
	const char *p;

	while (buf[0]) {
		p = strchrnul(buf, '\n');

		/* filter out irrelevant precision backtracking logs */
		if (str_has_pfx(buf, "mark_precise: "))
			goto skip_line;

		printf("%.*s\n", (int)(p - buf), buf);

skip_line:
		buf = *p == '\0' ? p : p + 1;
	}
}

/* Simulate provided test case purely with our own range-based logic.
 * This is done to set up expectations for verifier's branch_taken logic and
 * verifier's register states in the verifier log.
 */
static void sim_case(enum num_t init_t, enum num_t cond_t,
		     struct range x, struct range y, enum op op,
		     struct reg_state *fr1, struct reg_state *fr2,
		     struct reg_state *tr1, struct reg_state *tr2,
		     int *branch_taken)
{
	const u64 A = x.a;
	const u64 B = x.b;
	const u64 C = y.a;
	const u64 D = y.b;
	struct reg_state rc;
	enum op rev_op = complement_op(op);
	enum num_t t;

	fr1->valid = fr2->valid = true;
	tr1->valid = tr2->valid = true;
	for (t = first_t; t <= last_t; t++) {
		/* if we are initializing using 32-bit subregisters,
		 * full registers get upper 32 bits zeroed automatically
		 */
		struct range z = t_is_32(init_t) ? unkn_subreg(t) : unkn[t];

		fr1->r[t] = fr2->r[t] = tr1->r[t] = tr2->r[t] = z;
	}

	/* step 1: r1 >= A, r2 >= C */
	reg_state_set_const(&rc, init_t, A);
	reg_state_cond(init_t, fr1, &rc, OP_GE, fr1, NULL, "r1>=A");
	reg_state_set_const(&rc, init_t, C);
	reg_state_cond(init_t, fr2, &rc, OP_GE, fr2, NULL, "r2>=C");
	*tr1 = *fr1;
	*tr2 = *fr2;
	if (env.verbosity >= VERBOSE_VERY) {
		printf("STEP1 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n");
		printf("STEP1 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n");
	}

	/* step 2: r1 <= B, r2 <= D */
	reg_state_set_const(&rc, init_t, B);
	reg_state_cond(init_t, fr1, &rc, OP_LE, fr1, NULL, "r1<=B");
	reg_state_set_const(&rc, init_t, D);
	reg_state_cond(init_t, fr2, &rc, OP_LE, fr2, NULL, "r2<=D");
	*tr1 = *fr1;
	*tr2 = *fr2;
	if (env.verbosity >= VERBOSE_VERY) {
		printf("STEP2 (%s) R1: ", t_str(init_t)); print_reg_state(fr1, "\n");
		printf("STEP2 (%s) R2: ", t_str(init_t)); print_reg_state(fr2, "\n");
	}

	/* step 3: r1 <op> r2 */
	*branch_taken = reg_state_branch_taken_op(cond_t, fr1, fr2, op);
	fr1->valid = fr2->valid = false;
	tr1->valid = tr2->valid = false;
	if (*branch_taken != 1) { /* FALSE is possible */
		fr1->valid = fr2->valid = true;
		reg_state_cond(cond_t, fr1, fr2, rev_op, fr1, fr2, "FALSE");
	}
	if (*branch_taken != 0) { /* TRUE is possible */
		tr1->valid = tr2->valid = true;
		reg_state_cond(cond_t, tr1, tr2, op, tr1, tr2, "TRUE");
	}
	if (env.verbosity >= VERBOSE_VERY) {
		printf("STEP3 (%s) FALSE R1:", t_str(cond_t)); print_reg_state(fr1, "\n");
		printf("STEP3 (%s) FALSE R2:", t_str(cond_t)); print_reg_state(fr2, "\n");
		printf("STEP3 (%s) TRUE R1:", t_str(cond_t)); print_reg_state(tr1, "\n");
		printf("STEP3 (%s) TRUE R2:", t_str(cond_t)); print_reg_state(tr2, "\n");
	}
}

/* ===============================
 * HIGH-LEVEL TEST CASE VALIDATION
 * ===============================
 */
static u32 upper_seeds[] = {
	0,
	1,
	U32_MAX,
	U32_MAX - 1,
	S32_MAX,
	(u32)S32_MIN,
};

static u32 lower_seeds[] = {
	0,
	1,
	2, (u32)-2,
	255, (u32)-255,
	UINT_MAX,
	UINT_MAX - 1,
	INT_MAX,
	(u32)INT_MIN,
};

struct ctx {
	int val_cnt, subval_cnt, range_cnt, subrange_cnt;
	u64 uvals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)];
	s64 svals[ARRAY_SIZE(upper_seeds) * ARRAY_SIZE(lower_seeds)];
	u32 usubvals[ARRAY_SIZE(lower_seeds)];
	s32 ssubvals[ARRAY_SIZE(lower_seeds)];
	struct range *uranges, *sranges;
	struct range *usubranges, *ssubranges;
	int max_failure_cnt, cur_failure_cnt;
	int total_case_cnt, case_cnt;
	int rand_case_cnt;
	unsigned rand_seed;
	__u64 start_ns;
	char progress_ctx[64];
};

static void cleanup_ctx(struct ctx *ctx)
{
	free(ctx->uranges);
	free(ctx->sranges);
	free(ctx->usubranges);
	free(ctx->ssubranges);
}

struct subtest_case {
	enum num_t init_t;
	enum num_t cond_t;
	struct range x;
	struct range y;
	enum op op;
};

static void subtest_case_str(struct strbuf *sb, struct subtest_case *t, bool use_op)
{
	snappendf(sb, "(%s)", t_str(t->init_t));
	snprintf_range(t->init_t, sb, t->x);
	snappendf(sb, " (%s)%s ", t_str(t->cond_t), use_op ? op_str(t->op) : "<op>");
	snprintf_range(t->init_t, sb, t->y);
}

/* Generate and validate test case based on specific combination of setup
 * register ranges (including their expected num_t domain), and conditional
 * operation to perform (including num_t domain in which it has to be
 * performed)
 */
static int verify_case_op(enum num_t init_t, enum num_t cond_t,
			  struct range x, struct range y, enum op op)
{
	char log_buf[256 * 1024];
	size_t log_sz = sizeof(log_buf);
	int err, false_pos = 0, true_pos = 0, branch_taken;
	struct reg_state fr1, fr2, tr1, tr2;
	struct reg_state fe1, fe2, te1, te2;
	bool failed = false;
	struct case_spec spec = {
		.init_subregs = (init_t == U32 || init_t == S32),
		.setup_subregs = (init_t == U32 || init_t == S32),
		.setup_signed = (init_t == S64 || init_t == S32),
		.compare_subregs = (cond_t == U32 || cond_t == S32),
		.compare_signed = (cond_t == S64 || cond_t == S32),
	};

	log_buf[0] = '\0';

	sim_case(init_t, cond_t, x, y, op, &fe1, &fe2, &te1, &te2, &branch_taken);

	err = load_range_cmp_prog(x, y, op, branch_taken, spec,
				  log_buf, log_sz, &false_pos, &true_pos);
	if (err) {
		ASSERT_OK(err, "load_range_cmp_prog");
		failed = true;
	}

	err = parse_range_cmp_log(log_buf, spec, false_pos, true_pos,
				  &fr1, &fr2, &tr1, &tr2);
	if (err) {
		ASSERT_OK(err, "parse_range_cmp_log");
		failed = true;
	}

	if (!assert_reg_state_eq(&fr1, &fe1, "false_reg1") ||
	    !assert_reg_state_eq(&fr2, &fe2, "false_reg2") ||
	    !assert_reg_state_eq(&tr1, &te1, "true_reg1") ||
	    !assert_reg_state_eq(&tr2, &te2, "true_reg2")) {
		failed = true;
	}

	if (failed || env.verbosity >= VERBOSE_NORMAL) {
		if (failed || env.verbosity >= VERBOSE_VERY) {
			printf("VERIFIER LOG:\n========================\n");
			print_verifier_log(log_buf);
			printf("=====================\n");
		}
		printf("ACTUAL   FALSE1: "); print_reg_state(&fr1, "\n");
		printf("EXPECTED FALSE1: "); print_reg_state(&fe1, "\n");
		printf("ACTUAL   FALSE2: "); print_reg_state(&fr2, "\n");
		printf("EXPECTED FALSE2: "); print_reg_state(&fe2, "\n");
		printf("ACTUAL   TRUE1:  "); print_reg_state(&tr1, "\n");
		printf("EXPECTED TRUE1:  "); print_reg_state(&te1, "\n");
		printf("ACTUAL   TRUE2:  "); print_reg_state(&tr2, "\n");
		printf("EXPECTED TRUE2:  "); print_reg_state(&te2, "\n");

		return failed ? -EINVAL : 0;
	}

	return 0;
}

/* Given setup ranges and number types, go over all supported operations,
 * generating individual subtest for each allowed combination
 */
static int verify_case_opt(struct ctx *ctx, enum num_t init_t, enum num_t cond_t,
			   struct range x, struct range y, bool is_subtest)
{
	DEFINE_STRBUF(sb, 256);
	int err;
	struct subtest_case sub = {
		.init_t = init_t,
		.cond_t = cond_t,
		.x = x,
		.y = y,
	};

	sb->pos = 0; /* reset position in strbuf */
	subtest_case_str(sb, &sub, false /* ignore op */);
	if (is_subtest && !test__start_subtest(sb->buf))
		return 0;

	for (sub.op = first_op; sub.op <= last_op; sub.op++) {
		sb->pos = 0; /* reset position in strbuf */
		subtest_case_str(sb, &sub, true /* print op */);

		if (env.verbosity >= VERBOSE_NORMAL) /* this speeds up debugging */
			printf("TEST CASE: %s\n", sb->buf);

		err = verify_case_op(init_t, cond_t, x, y, sub.op);
		if (err || env.verbosity >= VERBOSE_NORMAL)
			ASSERT_OK(err, sb->buf);
		if (err) {
			ctx->cur_failure_cnt++;
			if (ctx->cur_failure_cnt > ctx->max_failure_cnt)
				return err;
			return 0; /* keep testing other cases */
		}
		ctx->case_cnt++;
		if ((ctx->case_cnt % 10000) == 0) {
			double progress = (ctx->case_cnt + 0.0) / ctx->total_case_cnt;
			u64 elapsed_ns = get_time_ns() - ctx->start_ns;
			double remain_ns = elapsed_ns / progress * (1 - progress);

			fprintf(env.stderr, "PROGRESS (%s): %d/%d (%.2lf%%), "
					    "elapsed %llu mins (%.2lf hrs), "
					    "ETA %.0lf mins (%.2lf hrs)\n",
				ctx->progress_ctx,
				ctx->case_cnt, ctx->total_case_cnt, 100.0 * progress,
				elapsed_ns / 1000000000 / 60,
				elapsed_ns / 1000000000.0 / 3600,
				remain_ns / 1000000000.0 / 60,
				remain_ns / 1000000000.0 / 3600);
		}
	}

	return 0;
}

static int verify_case(struct ctx *ctx, enum num_t init_t, enum num_t cond_t,
		       struct range x, struct range y)
{
	return verify_case_opt(ctx, init_t, cond_t, x, y, true /* is_subtest */);
}

/* ================================
 * GENERATED CASES FROM SEED VALUES
 * ================================
 */
static int u64_cmp(const void *p1, const void *p2)
{
	u64 x1 = *(const u64 *)p1, x2 = *(const u64 *)p2;

	return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
}

static int u32_cmp(const void *p1, const void *p2)
{
	u32 x1 = *(const u32 *)p1, x2 = *(const u32 *)p2;

	return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
}

static int s64_cmp(const void *p1, const void *p2)
{
	s64 x1 = *(const s64 *)p1, x2 = *(const s64 *)p2;

	return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
}

static int s32_cmp(const void *p1, const void *p2)
{
	s32 x1 = *(const s32 *)p1, x2 = *(const s32 *)p2;

	return x1 != x2 ? (x1 < x2 ? -1 : 1) : 0;
}

/* Generate valid unique constants from seeds, both signed and unsigned */
static void gen_vals(struct ctx *ctx)
{
	int i, j, cnt = 0;

	for (i = 0; i < ARRAY_SIZE(upper_seeds); i++) {
		for (j = 0; j < ARRAY_SIZE(lower_seeds); j++) {
			ctx->uvals[cnt++] = (((u64)upper_seeds[i]) << 32) | lower_seeds[j];
		}
	}

	/* sort and compact uvals (i.e., it's `sort | uniq`) */
	qsort(ctx->uvals, cnt, sizeof(*ctx->uvals), u64_cmp);
	for (i = 1, j = 0; i < cnt; i++) {
		if (ctx->uvals[j] == ctx->uvals[i])
			continue;
		j++;
		ctx->uvals[j] = ctx->uvals[i];
	}
	ctx->val_cnt = j + 1;

	/* we have exactly the same number of s64 values, they are just in
	 * a different order than u64s, so just sort them differently
	 */
	for (i = 0; i < ctx->val_cnt; i++)
		ctx->svals[i] = ctx->uvals[i];
	qsort(ctx->svals, ctx->val_cnt, sizeof(*ctx->svals), s64_cmp);

	if (env.verbosity >= VERBOSE_SUPER) {
		DEFINE_STRBUF(sb1, 256);
		DEFINE_STRBUF(sb2, 256);

		for (i = 0; i < ctx->val_cnt; i++) {
			sb1->pos = sb2->pos = 0;
			snprintf_num(U64, sb1, ctx->uvals[i]);
			snprintf_num(S64, sb2, ctx->svals[i]);
			printf("SEED #%d: u64=%-20s s64=%-20s\n", i, sb1->buf, sb2->buf);
		}
	}

	/* 32-bit values are generated separately */
	cnt = 0;
	for (i = 0; i < ARRAY_SIZE(lower_seeds); i++) {
		ctx->usubvals[cnt++] = lower_seeds[i];
	}

	/* sort and compact usubvals (i.e., it's `sort | uniq`) */
	qsort(ctx->usubvals, cnt, sizeof(*ctx->usubvals), u32_cmp);
	for (i = 1, j = 0; i < cnt; i++) {
		if (ctx->usubvals[j] == ctx->usubvals[i])
			continue;
		j++;
		ctx->usubvals[j] = ctx->usubvals[i];
	}
	ctx->subval_cnt = j + 1;

	for (i = 0; i < ctx->subval_cnt; i++)
		ctx->ssubvals[i] = ctx->usubvals[i];
	qsort(ctx->ssubvals, ctx->subval_cnt, sizeof(*ctx->ssubvals), s32_cmp);

	if (env.verbosity >= VERBOSE_SUPER) {
		DEFINE_STRBUF(sb1, 256);
		DEFINE_STRBUF(sb2, 256);

		for (i = 0; i < ctx->subval_cnt; i++) {
			sb1->pos = sb2->pos = 0;
			snprintf_num(U32, sb1, ctx->usubvals[i]);
			snprintf_num(S32, sb2, ctx->ssubvals[i]);
			printf("SUBSEED #%d: u32=%-10s s32=%-10s\n", i, sb1->buf, sb2->buf);
		}
	}
}

/* Generate valid ranges from upper/lower seeds */
static int gen_ranges(struct ctx *ctx)
{
	int i, j, cnt = 0;

	for (i = 0; i < ctx->val_cnt; i++) {
		for (j = i; j < ctx->val_cnt; j++) {
			if (env.verbosity >= VERBOSE_SUPER) {
				DEFINE_STRBUF(sb1, 256);
				DEFINE_STRBUF(sb2, 256);

				sb1->pos = sb2->pos = 0;
				snprintf_range(U64, sb1, range(U64, ctx->uvals[i], ctx->uvals[j]));
				snprintf_range(S64, sb2, range(S64, ctx->svals[i], ctx->svals[j]));
				printf("RANGE #%d: u64=%-40s s64=%-40s\n", cnt, sb1->buf, sb2->buf);
			}
			cnt++;
		}
	}
	ctx->range_cnt = cnt;

	ctx->uranges = calloc(ctx->range_cnt, sizeof(*ctx->uranges));
	if (!ASSERT_OK_PTR(ctx->uranges, "uranges_calloc"))
		return -EINVAL;
	ctx->sranges = calloc(ctx->range_cnt, sizeof(*ctx->sranges));
	if (!ASSERT_OK_PTR(ctx->sranges, "sranges_calloc"))
		return -EINVAL;

	cnt = 0;
	for (i = 0; i < ctx->val_cnt; i++) {
		for (j = i; j < ctx->val_cnt; j++) {
			ctx->uranges[cnt] = range(U64, ctx->uvals[i], ctx->uvals[j]);
			ctx->sranges[cnt] = range(S64, ctx->svals[i], ctx->svals[j]);
			cnt++;
		}
	}

	cnt = 0;
	for (i = 0; i < ctx->subval_cnt; i++) {
		for (j = i; j < ctx->subval_cnt; j++) {
			if (env.verbosity >= VERBOSE_SUPER) {
				DEFINE_STRBUF(sb1, 256);
				DEFINE_STRBUF(sb2, 256);

				sb1->pos = sb2->pos = 0;
				snprintf_range(U32, sb1, range(U32, ctx->usubvals[i], ctx->usubvals[j]));
				snprintf_range(S32, sb2, range(S32, ctx->ssubvals[i], ctx->ssubvals[j]));
				printf("SUBRANGE #%d: u32=%-20s s32=%-20s\n", cnt, sb1->buf, sb2->buf);
			}
			cnt++;
		}
	}
	ctx->subrange_cnt = cnt;

	ctx->usubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->usubranges));
	if (!ASSERT_OK_PTR(ctx->usubranges, "usubranges_calloc"))
		return -EINVAL;
	ctx->ssubranges = calloc(ctx->subrange_cnt, sizeof(*ctx->ssubranges));
	if (!ASSERT_OK_PTR(ctx->ssubranges, "ssubranges_calloc"))
		return -EINVAL;

	cnt = 0;
	for (i = 0; i < ctx->subval_cnt; i++) {
		for (j = i; j < ctx->subval_cnt; j++) {
			ctx->usubranges[cnt] = range(U32, ctx->usubvals[i], ctx->usubvals[j]);
			ctx->ssubranges[cnt] = range(S32, ctx->ssubvals[i], ctx->ssubvals[j]);
			cnt++;
		}
	}

	return 0;
}

static int parse_env_vars(struct ctx *ctx)
{
	const char *s;

	if ((s = getenv("REG_BOUNDS_MAX_FAILURE_CNT"))) {
		errno = 0;
		ctx->max_failure_cnt = strtol(s, NULL, 10);
		if (errno || ctx->max_failure_cnt < 0) {
			ASSERT_OK(-errno, "REG_BOUNDS_MAX_FAILURE_CNT");
			return -EINVAL;
		}
	}

	if ((s = getenv("REG_BOUNDS_RAND_CASE_CNT"))) {
		errno = 0;
		ctx->rand_case_cnt = strtol(s, NULL, 10);
		if (errno || ctx->rand_case_cnt < 0) {
			ASSERT_OK(-errno, "REG_BOUNDS_RAND_CASE_CNT");
			return -EINVAL;
		}
	}

	if ((s = getenv("REG_BOUNDS_RAND_SEED"))) {
		errno = 0;
		ctx->rand_seed = strtoul(s, NULL, 10);
		if (errno) {
			ASSERT_OK(-errno, "REG_BOUNDS_RAND_SEED");
			return -EINVAL;
		}
	}

	return 0;
}

static int prepare_gen_tests(struct ctx *ctx)
{
	const char *s;
	int err;

	if (!(s = getenv("SLOW_TESTS")) || strcmp(s, "1") != 0) {
		test__skip();
		return -ENOTSUP;
	}

	err = parse_env_vars(ctx);
	if (err)
		return err;

	gen_vals(ctx);
	err = gen_ranges(ctx);
	if (err) {
		ASSERT_OK(err, "gen_ranges");
		return err;
	}

	return 0;
}

/* Go over generated constants and ranges and validate various supported
 * combinations of them
 */
static void validate_gen_range_vs_const_64(enum num_t init_t, enum num_t cond_t)
{
	struct ctx ctx;
	struct range rconst;
	const struct range *ranges;
	const u64 *vals;
	int i, j;

	memset(&ctx, 0, sizeof(ctx));

	if (prepare_gen_tests(&ctx))
		goto cleanup;

	ranges = init_t == U64 ? ctx.uranges : ctx.sranges;
	vals = init_t == U64 ? ctx.uvals : (const u64 *)ctx.svals;

	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.range_cnt * ctx.val_cnt);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "RANGE x CONST, %s -> %s",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < ctx.val_cnt; i++) {
		for (j = 0; j < ctx.range_cnt; j++) {
			rconst = range(init_t, vals[i], vals[i]);

			/* (u64|s64)(<range> x <const>) */
			if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst))
				goto cleanup;
			/* (u64|s64)(<const> x <range>) */
			if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j]))
				goto cleanup;
		}
	}

cleanup:
	cleanup_ctx(&ctx);
}

static void validate_gen_range_vs_const_32(enum num_t init_t, enum num_t cond_t)
{
	struct ctx ctx;
	struct range rconst;
	const struct range *ranges;
	const u32 *vals;
	int i, j;

	memset(&ctx, 0, sizeof(ctx));

	if (prepare_gen_tests(&ctx))
		goto cleanup;

	ranges = init_t == U32 ? ctx.usubranges : ctx.ssubranges;
	vals = init_t == U32 ? ctx.usubvals : (const u32 *)ctx.ssubvals;

	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.subrange_cnt * ctx.subval_cnt);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "RANGE x CONST, %s -> %s",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < ctx.subval_cnt; i++) {
		for (j = 0; j < ctx.subrange_cnt; j++) {
			rconst = range(init_t, vals[i], vals[i]);

			/* (u32|s32)(<range> x <const>) */
			if (verify_case(&ctx, init_t, cond_t, ranges[j], rconst))
				goto cleanup;
			/* (u32|s32)(<const> x <range>) */
			if (verify_case(&ctx, init_t, cond_t, rconst, ranges[j]))
				goto cleanup;
		}
	}

cleanup:
	cleanup_ctx(&ctx);
}

static void validate_gen_range_vs_range(enum num_t init_t, enum num_t cond_t)
{
	struct ctx ctx;
	const struct range *ranges;
	int i, j, rcnt;

	memset(&ctx, 0, sizeof(ctx));

	if (prepare_gen_tests(&ctx))
		goto cleanup;

	switch (init_t) {
	case U64:
		ranges = ctx.uranges;
		rcnt = ctx.range_cnt;
		break;
	case U32:
		ranges = ctx.usubranges;
		rcnt = ctx.subrange_cnt;
		break;
	case S64:
		ranges = ctx.sranges;
		rcnt = ctx.range_cnt;
		break;
	case S32:
		ranges = ctx.ssubranges;
		rcnt = ctx.subrange_cnt;
		break;
	default:
		printf("validate_gen_range_vs_range!\n");
		exit(1);
	}

	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * rcnt * (rcnt + 1) / 2);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "RANGE x RANGE, %s -> %s",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < rcnt; i++) {
		for (j = i; j < rcnt; j++) {
			/* (<range> x <range>) */
			if (verify_case(&ctx, init_t, cond_t, ranges[i], ranges[j]))
				goto cleanup;
			if (verify_case(&ctx, init_t, cond_t, ranges[j], ranges[i]))
				goto cleanup;
		}
	}

cleanup:
	cleanup_ctx(&ctx);
}

/* Go over thousands of test cases generated from initial seed values.
 * Given that this takes a long time, guard it behind the SLOW_TESTS=1 envvar.
 * If the envvar is not set, this test is skipped during test_progs testing.
 *
 * We split this up into smaller subsets based on initialization and
 * conditional numeric domains to get an easy parallelization with test_progs'
 * -j argument.
 */

/* RANGE x CONST, U64 initial range */
void test_reg_bounds_gen_consts_u64_u64(void) { validate_gen_range_vs_const_64(U64, U64); }
void test_reg_bounds_gen_consts_u64_s64(void) { validate_gen_range_vs_const_64(U64, S64); }
void test_reg_bounds_gen_consts_u64_u32(void) { validate_gen_range_vs_const_64(U64, U32); }
void test_reg_bounds_gen_consts_u64_s32(void) { validate_gen_range_vs_const_64(U64, S32); }
/* RANGE x CONST, S64 initial range */
void test_reg_bounds_gen_consts_s64_u64(void) { validate_gen_range_vs_const_64(S64, U64); }
void test_reg_bounds_gen_consts_s64_s64(void) { validate_gen_range_vs_const_64(S64, S64); }
void test_reg_bounds_gen_consts_s64_u32(void) { validate_gen_range_vs_const_64(S64, U32); }
void test_reg_bounds_gen_consts_s64_s32(void) { validate_gen_range_vs_const_64(S64, S32); }
/* RANGE x CONST, U32 initial range */
void test_reg_bounds_gen_consts_u32_u64(void) { validate_gen_range_vs_const_32(U32, U64); }
void test_reg_bounds_gen_consts_u32_s64(void) { validate_gen_range_vs_const_32(U32, S64); }
void test_reg_bounds_gen_consts_u32_u32(void) { validate_gen_range_vs_const_32(U32, U32); }
void test_reg_bounds_gen_consts_u32_s32(void) { validate_gen_range_vs_const_32(U32, S32); }
/* RANGE x CONST, S32 initial range */
void test_reg_bounds_gen_consts_s32_u64(void) { validate_gen_range_vs_const_32(S32, U64); }
void test_reg_bounds_gen_consts_s32_s64(void) { validate_gen_range_vs_const_32(S32, S64); }
void test_reg_bounds_gen_consts_s32_u32(void) { validate_gen_range_vs_const_32(S32, U32); }
void test_reg_bounds_gen_consts_s32_s32(void) { validate_gen_range_vs_const_32(S32, S32); }

/* RANGE x RANGE, U64 initial range */
void test_reg_bounds_gen_ranges_u64_u64(void) { validate_gen_range_vs_range(U64, U64); }
void test_reg_bounds_gen_ranges_u64_s64(void) { validate_gen_range_vs_range(U64, S64); }
void test_reg_bounds_gen_ranges_u64_u32(void) { validate_gen_range_vs_range(U64, U32); }
void test_reg_bounds_gen_ranges_u64_s32(void) { validate_gen_range_vs_range(U64, S32); }
/* RANGE x RANGE, S64 initial range */
void test_reg_bounds_gen_ranges_s64_u64(void) { validate_gen_range_vs_range(S64, U64); }
void test_reg_bounds_gen_ranges_s64_s64(void) { validate_gen_range_vs_range(S64, S64); }
void test_reg_bounds_gen_ranges_s64_u32(void) { validate_gen_range_vs_range(S64, U32); }
void test_reg_bounds_gen_ranges_s64_s32(void) { validate_gen_range_vs_range(S64, S32); }
/* RANGE x RANGE, U32 initial range */
void test_reg_bounds_gen_ranges_u32_u64(void) { validate_gen_range_vs_range(U32, U64); }
void test_reg_bounds_gen_ranges_u32_s64(void) { validate_gen_range_vs_range(U32, S64); }
void test_reg_bounds_gen_ranges_u32_u32(void) { validate_gen_range_vs_range(U32, U32); }
void test_reg_bounds_gen_ranges_u32_s32(void) { validate_gen_range_vs_range(U32, S32); }
/* RANGE x RANGE, S32 initial range */
void test_reg_bounds_gen_ranges_s32_u64(void) { validate_gen_range_vs_range(S32, U64); }
void test_reg_bounds_gen_ranges_s32_s64(void) { validate_gen_range_vs_range(S32, S64); }
void test_reg_bounds_gen_ranges_s32_u32(void) { validate_gen_range_vs_range(S32, U32); }
void test_reg_bounds_gen_ranges_s32_s32(void) { validate_gen_range_vs_range(S32, S32); }

#define DEFAULT_RAND_CASE_CNT 100

#define RAND_21BIT_MASK ((1 << 21) - 1)

static u64 rand_u64(void)
{
	/* RAND_MAX is only guaranteed to be at least 32767, but in practice
	 * glibc's random() returns 31 random bits, so three calls are enough
	 * to cover a full u64; we use a roughly equal 22 + 21 + 21 bit split
	 * (bits 63..42, 41..21, and 20..0, respectively)
	 */
	return ((u64)random() << 42) |
	       (((u64)random() & RAND_21BIT_MASK) << 21) |
	       (random() & RAND_21BIT_MASK);
}

static u64 rand_const(enum num_t t)
{
	return cast_t(t, rand_u64());
}

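/* generate a random range in domain t; min_t()/max_t() guarantee
 * lo <= hi under t's own (signed/unsigned, 32/64-bit) ordering
 */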
static struct range rand_range(enum num_t t)
{
	u64 x = rand_const(t), y = rand_const(t);

	return range(t, min_t(t, x, y), max_t(t, x, y));
}

static void validate_rand_ranges(enum num_t init_t, enum num_t cond_t, bool const_range)
{
	struct ctx ctx;
	struct range range1, range2;
	int err, i;
	u64 t;

	memset(&ctx, 0, sizeof(ctx));

	err = parse_env_vars(&ctx);
	if (err) {
		ASSERT_OK(err, "parse_env_vars");
		return;
	}

	if (ctx.rand_case_cnt == 0)
		ctx.rand_case_cnt = DEFAULT_RAND_CASE_CNT;
	if (ctx.rand_seed == 0)
		ctx.rand_seed = (unsigned)get_time_ns();

	srandom(ctx.rand_seed);

	ctx.total_case_cnt = (last_op - first_op + 1) * (2 * ctx.rand_case_cnt);
	ctx.start_ns = get_time_ns();
	snprintf(ctx.progress_ctx, sizeof(ctx.progress_ctx),
		 "[RANDOM SEED %u] RANGE x %s, %s -> %s",
		 ctx.rand_seed, const_range ? "CONST" : "RANGE",
		 t_str(init_t), t_str(cond_t));

	for (i = 0; i < ctx.rand_case_cnt; i++) {
		range1 = rand_range(init_t);
		if (const_range) {
			t = rand_const(init_t);
			range2 = range(init_t, t, t);
		} else {
			range2 = rand_range(init_t);
		}

		/* <range1> x <range2> */
		if (verify_case_opt(&ctx, init_t, cond_t, range1, range2, false /* !is_subtest */))
			goto cleanup;
		/* <range2> x <range1> */
		if (verify_case_opt(&ctx, init_t, cond_t, range2, range1, false /* !is_subtest */))
			goto cleanup;
	}

cleanup:
	/* make sure we report the random seed so failures can be reproduced */
	ASSERT_TRUE(true, ctx.progress_ctx);
	cleanup_ctx(&ctx);
}
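
/* Note: ctx.rand_seed is initialized by parse_env_vars() (falling back to
 * the current time when unset), so re-running with the seed printed in the
 * "[RANDOM SEED %u]" log line deterministically replays the same cases.
 */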

/* [RANDOM] RANGE x CONST, U64 initial range */
void test_reg_bounds_rand_consts_u64_u64(void) { validate_rand_ranges(U64, U64, true /* const */); }
void test_reg_bounds_rand_consts_u64_s64(void) { validate_rand_ranges(U64, S64, true /* const */); }
void test_reg_bounds_rand_consts_u64_u32(void) { validate_rand_ranges(U64, U32, true /* const */); }
void test_reg_bounds_rand_consts_u64_s32(void) { validate_rand_ranges(U64, S32, true /* const */); }
/* [RANDOM] RANGE x CONST, S64 initial range */
void test_reg_bounds_rand_consts_s64_u64(void) { validate_rand_ranges(S64, U64, true /* const */); }
void test_reg_bounds_rand_consts_s64_s64(void) { validate_rand_ranges(S64, S64, true /* const */); }
void test_reg_bounds_rand_consts_s64_u32(void) { validate_rand_ranges(S64, U32, true /* const */); }
void test_reg_bounds_rand_consts_s64_s32(void) { validate_rand_ranges(S64, S32, true /* const */); }
/* [RANDOM] RANGE x CONST, U32 initial range */
void test_reg_bounds_rand_consts_u32_u64(void) { validate_rand_ranges(U32, U64, true /* const */); }
void test_reg_bounds_rand_consts_u32_s64(void) { validate_rand_ranges(U32, S64, true /* const */); }
void test_reg_bounds_rand_consts_u32_u32(void) { validate_rand_ranges(U32, U32, true /* const */); }
void test_reg_bounds_rand_consts_u32_s32(void) { validate_rand_ranges(U32, S32, true /* const */); }
/* [RANDOM] RANGE x CONST, S32 initial range */
void test_reg_bounds_rand_consts_s32_u64(void) { validate_rand_ranges(S32, U64, true /* const */); }
void test_reg_bounds_rand_consts_s32_s64(void) { validate_rand_ranges(S32, S64, true /* const */); }
void test_reg_bounds_rand_consts_s32_u32(void) { validate_rand_ranges(S32, U32, true /* const */); }
void test_reg_bounds_rand_consts_s32_s32(void) { validate_rand_ranges(S32, S32, true /* const */); }

/* [RANDOM] RANGE x RANGE, U64 initial range */
void test_reg_bounds_rand_ranges_u64_u64(void) { validate_rand_ranges(U64, U64, false /* range */); }
void test_reg_bounds_rand_ranges_u64_s64(void) { validate_rand_ranges(U64, S64, false /* range */); }
void test_reg_bounds_rand_ranges_u64_u32(void) { validate_rand_ranges(U64, U32, false /* range */); }
void test_reg_bounds_rand_ranges_u64_s32(void) { validate_rand_ranges(U64, S32, false /* range */); }
/* [RANDOM] RANGE x RANGE, S64 initial range */
void test_reg_bounds_rand_ranges_s64_u64(void) { validate_rand_ranges(S64, U64, false /* range */); }
void test_reg_bounds_rand_ranges_s64_s64(void) { validate_rand_ranges(S64, S64, false /* range */); }
void test_reg_bounds_rand_ranges_s64_u32(void) { validate_rand_ranges(S64, U32, false /* range */); }
void test_reg_bounds_rand_ranges_s64_s32(void) { validate_rand_ranges(S64, S32, false /* range */); }
/* [RANDOM] RANGE x RANGE, U32 initial range */
void test_reg_bounds_rand_ranges_u32_u64(void) { validate_rand_ranges(U32, U64, false /* range */); }
void test_reg_bounds_rand_ranges_u32_s64(void) { validate_rand_ranges(U32, S64, false /* range */); }
void test_reg_bounds_rand_ranges_u32_u32(void) { validate_rand_ranges(U32, U32, false /* range */); }
void test_reg_bounds_rand_ranges_u32_s32(void) { validate_rand_ranges(U32, S32, false /* range */); }
/* [RANDOM] RANGE x RANGE, S32 initial range */
void test_reg_bounds_rand_ranges_s32_u64(void) { validate_rand_ranges(S32, U64, false /* range */); }
void test_reg_bounds_rand_ranges_s32_s64(void) { validate_rand_ranges(S32, S64, false /* range */); }
void test_reg_bounds_rand_ranges_s32_u32(void) { validate_rand_ranges(S32, U32, false /* range */); }
void test_reg_bounds_rand_ranges_s32_s32(void) { validate_rand_ranges(S32, S32, false /* range */); }

/* A set of hard-coded "interesting" cases to validate as part of normal
 * test_progs test runs
 */
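/* each entry is {init_t, cond_t, x, y}; test_reg_bounds_crafted() below
 * checks every case in both <x> vs <y> and <y> vs <x> argument orders
 */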
static struct subtest_case crafted_cases[] = {
	{U64, U64, {0, 0xffffffff}, {0, 0}},
	{U64, U64, {0, 0x80000000}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x100000100ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x180000000ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x1ffffff00ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x1ffffff01ULL}, {0, 0}},
	{U64, U64, {0x100000000ULL, 0x1fffffffeULL}, {0, 0}},
	{U64, U64, {0x100000001ULL, 0x1000000ffULL}, {0, 0}},

	/* single point overlap, interesting BPF_EQ and BPF_NE interactions */
	{U64, U64, {0, 1}, {1, 0x80000000}},
	{U64, S64, {0, 1}, {1, 0x80000000}},
	{U64, U32, {0, 1}, {1, 0x80000000}},
	{U64, S32, {0, 1}, {1, 0x80000000}},

	{U64, S64, {0, 0xffffffff00000000ULL}, {0, 0}},
	{U64, S64, {0x7fffffffffffffffULL, 0xffffffff00000000ULL}, {0, 0}},
	{U64, S64, {0x7fffffff00000001ULL, 0xffffffff00000000ULL}, {0, 0}},
	{U64, S64, {0, 0xffffffffULL}, {1, 1}},
	{U64, S64, {0, 0xffffffffULL}, {0x7fffffff, 0x7fffffff}},

	{U64, U32, {0, 0x100000000ULL}, {0, 0}},
	{U64, U32, {0xfffffffe, 0x100000000ULL}, {0x80000000, 0x80000000}},

	{U64, S32, {0, 0xffffffff00000000ULL}, {0, 0}},
	/* tricky cases where tightening the lower 32-bit boundaries allows
	 * the 64-bit boundaries to be tightened as well
	 */
	{U64, S32, {0, 0x0ffffffffULL}, {0, 0}},
	{U64, S32, {0, 0x100000000ULL}, {0, 0}},
	{U64, S32, {0, 0x100000001ULL}, {0, 0}},
	{U64, S32, {0, 0x180000000ULL}, {0, 0}},
	{U64, S32, {0, 0x17fffffffULL}, {0, 0}},
	{U64, S32, {0, 0x180000001ULL}, {0, 0}},

	/* verifier knows about [-1, 0] range for s32 for this case already */
	{S64, S64, {0xffffffffffffffffULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}},
	/* but didn't know about these cases initially */
	{U64, U64, {0xffffffff, 0x100000000ULL}, {0, 0}}, /* s32: [-1, 0] */
	{U64, U64, {0xffffffff, 0x100000001ULL}, {0, 0}}, /* s32: [-1, 1] */

	/* longer convergence case: learning from u64 -> s64 -> u64 -> u32,
	 * arriving at u32: [1, U32_MAX] (instead of more pessimistic [0, U32_MAX])
	 */
	{S64, U64, {0xffffffff00000001ULL, 0}, {0xffffffff00000000ULL, 0xffffffff00000000ULL}},

	{U32, U32, {1, U32_MAX}, {0, 0}},

	{U32, S32, {0, U32_MAX}, {U32_MAX, U32_MAX}},

	{S32, U64, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)(s32)-255, 0}},
	{S32, S64, {(u32)S32_MIN, (u32)(s32)-255}, {(u32)(s32)-2, 0}},
	{S32, S64, {0, 1}, {(u32)S32_MIN, (u32)S32_MIN}},
	{S32, U32, {(u32)S32_MIN, (u32)S32_MIN}, {(u32)S32_MIN, (u32)S32_MIN}},

	/* edge overlap tests for BPF_NE */
	{U64, U64, {0, U64_MAX}, {U64_MAX, U64_MAX}},
	{U64, U64, {0, U64_MAX}, {0, 0}},
	{S64, U64, {S64_MIN, 0}, {S64_MIN, S64_MIN}},
	{S64, U64, {S64_MIN, 0}, {0, 0}},
	{S64, U64, {S64_MIN, S64_MAX}, {S64_MAX, S64_MAX}},
	{U32, U32, {0, U32_MAX}, {0, 0}},
	{U32, U32, {0, U32_MAX}, {U32_MAX, U32_MAX}},
	{S32, U32, {(u32)S32_MIN, 0}, {0, 0}},
	{S32, U32, {(u32)S32_MIN, 0}, {(u32)S32_MIN, (u32)S32_MIN}},
	{S32, U32, {(u32)S32_MIN, S32_MAX}, {S32_MAX, S32_MAX}},
};

/* Go over crafted hard-coded cases. This is fast, so we do it as part of
 * normal test_progs runs.
 */
void test_reg_bounds_crafted(void)
{
	struct ctx ctx;
	int i;

	memset(&ctx, 0, sizeof(ctx));

	for (i = 0; i < ARRAY_SIZE(crafted_cases); i++) {
		struct subtest_case *c = &crafted_cases[i];

		verify_case(&ctx, c->init_t, c->cond_t, c->x, c->y);
		verify_case(&ctx, c->init_t, c->cond_t, c->y, c->x);
	}

	cleanup_ctx(&ctx);
}