#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#include "../../../include/linux/filter.h"
#include "bpf_rlimit.h"
#include "bpf_util.h"

#define MAX_INSNS	512
#define MAX_MATCHES	16

struct bpf_reg_match {
	unsigned int line;
	const char *match;
};

struct bpf_align_test {
	const char *descr;
	struct bpf_insn insns[MAX_INSNS];
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result;
	enum bpf_prog_type prog_type;
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};

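/*
 * Each entry in matches[] pairs an instruction index from the verifier log
 * with a substring expected to appear on that log line; do_test_single()
 * below locates the log line with that index and checks for the substring.
 */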
static struct bpf_align_test tests[] = {
	/* Four tests of known constants. These aren't staggeringly
	 * interesting since we track exact values now.
	 */
	{
		.descr = "mov",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 16),
			BPF_MOV64_IMM(BPF_REG_3, 32),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv2"},
			{2, "R3_w=inv4"},
			{3, "R3_w=inv8"},
			{4, "R3_w=inv16"},
			{5, "R3_w=inv32"},
		},
	},
	{
		.descr = "shift",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_4, 32),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv1"},
			{2, "R3_w=inv2"},
			{3, "R3_w=inv4"},
			{4, "R3_w=inv8"},
			{5, "R3_w=inv16"},
			{6, "R3_w=inv1"},
			{7, "R4_w=inv32"},
			{8, "R4_w=inv16"},
			{9, "R4_w=inv8"},
			{10, "R4_w=inv4"},
			{11, "R4_w=inv2"},
		},
	},
	{
		.descr = "addsub",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv4"},
			{2, "R3_w=inv8"},
			{3, "R3_w=inv10"},
			{4, "R4_w=inv8"},
			{5, "R4_w=inv12"},
			{6, "R4_w=inv14"},
		},
	},
	{
		.descr = "mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv7"},
			{2, "R3_w=inv7"},
			{3, "R3_w=inv14"},
			{4, "R3_w=inv56"},
		},
	},

	/* Tests using unknown values */
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))

#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)

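/*
 * PREP_PKT_POINTERS loads the skb data and data_end pointers into R2 and R3.
 * LOAD_UNKNOWN additionally bounds-checks the first 8 bytes of the packet and
 * loads a single byte into DST_REG, so the verifier tracks DST_REG as an
 * unknown value in [0, 255], i.e. var_off=(0x0; 0xff) in the matches below.
 */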
	{
		.descr = "unknown shift",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			LOAD_UNKNOWN(BPF_REG_4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{18, "R3=pkt_end(id=0,off=0,imm=0)"},
			{18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
			{20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
		},
	},
	{
		.descr = "unknown mul",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
		},
	},
	{
		.descr = "packet const offset",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),

			BPF_MOV64_IMM(BPF_REG_0, 0),

			/* Skip over ethernet header. */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),

			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
			{5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
			{6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
			{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
			{15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
		},
	},
	{
		.descr = "packet variable offset",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),

			/* First, add a constant to the R5 packet pointer,
			 * then a variable with a known alignment.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Now, test in the other direction. Adding first
			 * the variable offset to R5, then the constant.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Test multiple accumulations of unknown values
			 * into a packet pointer.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Offset is added to packet pointer R5, resulting in
			 * known fixed offset, and variable offset from R6.
			 */
			{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total offset is NET_IP_ALIGN + reg->off (0) +
			 * reg->aux_off (14) which is 16. Then the variable
			 * offset is considered using reg->aux_off_align which
			 * is 4 and meets the load's requirements.
			 */
			{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Variable offset is added to R5 packet pointer,
			 * resulting in auxiliary alignment of 4.
			 */
			{18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
			{19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16. Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
			{23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
			{26, "R5_w=pkt(id=0,off=14,r=8"},
			/* Variable offset is added to R5, resulting in a
			 * variable offset of (4n).
			 */
			{27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant is added to R5 again, setting reg->off to 18. */
			{28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
			{29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20. Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
			{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
		},
	},
	{
		.descr = "packet variable offset 2",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			/* Make a (4n) offset from the value we just read */
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Packet pointer has (4n+2) offset */
			{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Newly read value in R6 was shifted left by 2, so has
			 * known alignment of 4.
			 */
			{18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Added (4n) to packet pointer's (4n+2) var_off, giving
			 * another (4n+2).
			 */
			{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
		},
	},
	{
		.descr = "dubious pointer arithmetic",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_IMM(BPF_REG_0, 0),
			/* (ptr - ptr) << 2 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
			/* We have a (4n) value. Let's make a packet offset
			 * out of it. First add 14, to make it a (4n+2)
			 */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			/* Then make sure it's nonnegative */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to packet pointer */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.matches = {
			{4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
			/* (ptr - ptr) << 2 == unknown, (4n) */
			{6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
			/* (4n) + 14 == (4n+2). We blow our bounds, because
			 * the add could overflow.
			 */
			{7, "R5_w=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s>=0 */
			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* packet pointer + nonnegative (4n+2) */
			{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			{13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
			 * We checked the bounds, but it might have been able
			 * to overflow if the packet pointer started in the
			 * upper half of the address space.
			 * So we did not get a 'range' on R6, and the access
			 * attempt will fail.
			 */
			{15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
		}
	},
	{
		.descr = "variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Create another unknown, (4n)-aligned, and subtract
			 * it from the first one
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
			/* Bounds-check the result */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* New unknown value in R7 is (4n) */
			{11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Subtracting it from R6 blows our unsigned bounds */
			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s >= 0 */
			{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
		},
	},
	{
		.descr = "pointer variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned and bounded
			 * to [14,74]
			 */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Subtract it from the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
			/* Create another unknown, (4n)-aligned and >= 74.
			 * That in fact means >= 76, since 74 % 4 == 2
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
			/* Adding 14 makes R6 be (4n+2) */
			{11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
			/* Subtracting from packet pointer overflows ubounds */
			{13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
			/* New unknown value in R7 is (4n), >= 76 */
			{15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
			/* Adding it to packet pointer gives nice bounds again */
			{16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2. Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
		},
	},
};

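/*
 * The insns[] arrays are zero-filled past the last real instruction, so the
 * program length is found by scanning backwards for the last non-zero insn.
 */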
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static char bpf_vlog[32768];

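/*
 * Load one test program with BPF_F_STRICT_ALIGNMENT and a verbose verifier
 * log, then walk the captured log: for every expected match, find the log
 * line whose leading instruction index equals m.line and check that it
 * contains the m.match substring.
 */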
static int do_test_single(struct bpf_align_test *test)
{
	struct bpf_insn *prog = test->insns;
	int prog_type = test->prog_type;
	char bpf_vlog_copy[32768];
	const char *line_ptr;
	int cur_line = -1;
	int prog_len, i;
	int fd_prog;
	int ret;

	prog_len = probe_filter_length(prog);
	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len, BPF_F_STRICT_ALIGNMENT,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2);
	if (fd_prog < 0 && test->result != REJECT) {
		printf("Failed to load program.\n");
		printf("%s", bpf_vlog);
		ret = 1;
	} else if (fd_prog >= 0 && test->result == REJECT) {
		printf("Unexpected success to load!\n");
		printf("%s", bpf_vlog);
		ret = 1;
		close(fd_prog);
	} else {
		ret = 0;
		/* We make a local copy so that we can strtok() it */
		strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
		line_ptr = strtok(bpf_vlog_copy, "\n");
		for (i = 0; i < MAX_MATCHES; i++) {
			struct bpf_reg_match m = test->matches[i];

			if (!m.match)
				break;
			while (line_ptr) {
				cur_line = -1;
				sscanf(line_ptr, "%u: ", &cur_line);
				if (cur_line == m.line)
					break;
				line_ptr = strtok(NULL, "\n");
			}
			if (!line_ptr) {
				printf("Failed to find line %u for match: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
			if (!strstr(line_ptr, m.match)) {
				printf("Failed to find match %u: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
		}
		if (fd_prog >= 0)
			close(fd_prog);
	}
	return ret;
}

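/* Run tests[from..to) and print per-test PASS/FAIL plus a summary line. */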
static int do_test(unsigned int from, unsigned int to)
{
	int all_pass = 0;
	int all_fail = 0;
	unsigned int i;

	for (i = from; i < to; i++) {
		struct bpf_align_test *test = &tests[i];
		int fail;

		printf("Test %3d: %s ... ",
		       i, test->descr);
		fail = do_test_single(test);
		if (fail) {
			all_fail++;
			printf("FAIL\n");
		} else {
			all_pass++;
			printf("PASS\n");
		}
	}
	printf("Results: %d pass %d fail\n",
	       all_pass, all_fail);
	return all_fail ? EXIT_FAILURE : EXIT_SUCCESS;
}

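/*
 * Usage: with no arguments run every test; one argument selects a single
 * test index; two arguments select an inclusive range of test indices.
 * Out-of-range indices fall back to running the full suite.
 */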
int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);

	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to = t + 1;
		}
	}
	return do_test(from, to);
}