// SPDX-License-Identifier: GPL-2.0
#include <test_progs.h>

#define MAX_INSNS	512
#define MAX_MATCHES	24

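/* One expected verifier-log match: @line is the instruction index
 * printed at the start of a verifier log line, and @match is a
 * substring that must appear in the register state reported for
 * that instruction.
 */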
struct bpf_reg_match {
	unsigned int line;
	const char *match;
};

struct bpf_align_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
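	/* Expected verifier verdict; UNDEF (the default) is handled
	 * like ACCEPT by do_test_single().
	 */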
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result;
	enum bpf_prog_type prog_type;
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};

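/* Table of test programs.  Each entry is loaded through the verifier
 * with strict alignment checking and a verbose log, and the log is
 * then checked against the expected register states in .matches.
 */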
static struct bpf_align_test tests[] = {
	/* Four tests of known constants.  These aren't staggeringly
	 * interesting since we track exact values now.
	 */
	{
		.descr = "mov",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 16),
			BPF_MOV64_IMM(BPF_REG_3, 32),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1=ctx(off=0,imm=0)"},
			{0, "R10=fp0"},
			{0, "R3_w=2"},
			{1, "R3_w=4"},
			{2, "R3_w=8"},
			{3, "R3_w=16"},
			{4, "R3_w=32"},
		},
	},
	{
		.descr = "shift",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_4, 32),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1=ctx(off=0,imm=0)"},
			{0, "R10=fp0"},
			{0, "R3_w=1"},
			{1, "R3_w=2"},
			{2, "R3_w=4"},
			{3, "R3_w=8"},
			{4, "R3_w=16"},
			{5, "R3_w=1"},
			{6, "R4_w=32"},
			{7, "R4_w=16"},
			{8, "R4_w=8"},
			{9, "R4_w=4"},
			{10, "R4_w=2"},
		},
	},
	{
		.descr = "addsub",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1=ctx(off=0,imm=0)"},
			{0, "R10=fp0"},
			{0, "R3_w=4"},
			{1, "R3_w=8"},
			{2, "R3_w=10"},
			{3, "R4_w=8"},
			{4, "R4_w=12"},
			{5, "R4_w=14"},
		},
	},
	{
		.descr = "mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{0, "R1=ctx(off=0,imm=0)"},
			{0, "R10=fp0"},
			{0, "R3_w=7"},
			{1, "R3_w=7"},
			{2, "R3_w=14"},
			{3, "R3_w=56"},
		},
	},

	/* Tests using unknown values */
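/* Load skb->data into R2 and skb->data_end into R3. */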
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))

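/* Bounds-check that at least 8 bytes of packet data are available,
 * then load a single unknown byte from the packet into DST_REG; the
 * verifier tracks it as a scalar with umax=255.
 */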
#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)

	{
		.descr = "unknown shift",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			LOAD_UNKNOWN(BPF_REG_4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{6, "R0_w=pkt(off=8,r=8,imm=0)"},
			{6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
			{7, "R3_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
			{8, "R3_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
			{9, "R3_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
			{10, "R3_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
			{12, "R3_w=pkt_end(off=0,imm=0)"},
			{17, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
			{18, "R4_w=scalar(umax=8160,var_off=(0x0; 0x1fe0))"},
			{19, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
			{20, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
			{21, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
			{22, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
		},
	},
	{
		.descr = "unknown mul",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{6, "R3_w=scalar(umax=255,var_off=(0x0; 0xff))"},
			{7, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
			{8, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
			{9, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
			{10, "R4_w=scalar(umax=510,var_off=(0x0; 0x1fe))"},
			{11, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
			{12, "R4_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
			{13, "R4_w=scalar(id=1,umax=255,var_off=(0x0; 0xff))"},
			{14, "R4_w=scalar(umax=2040,var_off=(0x0; 0x7f8))"},
			{15, "R4_w=scalar(umax=4080,var_off=(0x0; 0xff0))"},
		},
	},
	{
		.descr = "packet const offset",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),

			BPF_MOV64_IMM(BPF_REG_0, 0),

			/* Skip over ethernet header.  */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),

			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{2, "R5_w=pkt(off=0,r=0,imm=0)"},
			{4, "R5_w=pkt(off=14,r=0,imm=0)"},
			{5, "R4_w=pkt(off=14,r=0,imm=0)"},
			{9, "R2=pkt(off=0,r=18,imm=0)"},
			{10, "R5=pkt(off=14,r=18,imm=0)"},
			{10, "R4_w=scalar(umax=255,var_off=(0x0; 0xff))"},
			{13, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
			{14, "R4_w=scalar(umax=65535,var_off=(0x0; 0xffff))"},
		},
	},
	{
		.descr = "packet variable offset",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),

			/* First, add a constant to the R5 packet pointer,
			 * then a variable with a known alignment.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Now, test in the other direction.  Adding first
			 * the variable offset to R5, then the constant.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Test multiple accumulations of unknown values
			 * into a packet pointer.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w=pkt(off=0,r=8,imm=0)"},
			{7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
			/* Offset is added to packet pointer R5, resulting in
			 * known fixed offset, and variable offset from R6.
			 */
			{11, "R5_w=pkt(id=1,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total offset is NET_IP_ALIGN + reg->off (0) +
			 * reg->aux_off (14) which is 16.  Then the variable
			 * offset is considered using reg->aux_off_align which
			 * is 4 and meets the load's requirements.
			 */
			{15, "R4=pkt(id=1,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
			{15, "R5=pkt(id=1,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
			/* Variable offset is added to the R5 packet pointer,
			 * resulting in an auxiliary alignment of 4. To keep the
			 * BPF verifier's precision-backtracking logging from
			 * interfering, we also have a no-op R4 = R5 instruction
			 * to validate the R5 state. We also check that R4 is
			 * what it should be in such a case.
			 */
			{18, "R4_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
			{18, "R5_w=pkt(id=2,off=0,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
			{19, "R5_w=pkt(id=2,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16.  Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
			{24, "R4=pkt(id=2,off=18,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
			{24, "R5=pkt(id=2,off=14,r=18,umax=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
			{26, "R5_w=pkt(off=14,r=8"},
			/* Variable offset is added to R5, resulting in a
			 * variable offset of (4n). See comment for insn #18
			 * for R4 = R5 trick.
			 */
			{28, "R4_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
			{28, "R5_w=pkt(id=3,off=14,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
			/* Constant is added to R5 again, setting reg->off to 18. */
			{29, "R5_w=pkt(id=3,off=18,r=0,umax=1020,var_off=(0x0; 0x3fc))"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
			{31, "R4_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
			{31, "R5_w=pkt(id=4,off=18,r=0,umax=2040,var_off=(0x0; 0x7fc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20.  Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{35, "R4=pkt(id=4,off=22,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
			{35, "R5=pkt(id=4,off=18,r=22,umax=2040,var_off=(0x0; 0x7fc)"},
		},
	},
	{
		.descr = "packet variable offset 2",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			/* Make a (4n) offset from the value we just read */
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w=pkt(off=0,r=8,imm=0)"},
			{7, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{8, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
			/* Packet pointer has (4n+2) offset */
			{11, "R5_w=pkt(id=1,off=0,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
			{12, "R4=pkt(id=1,off=4,r=0,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{15, "R5=pkt(id=1,off=0,r=4,umin=14,umax=1034,var_off=(0x2; 0x7fc)"},
			/* Newly read value in R6 was shifted left by 2, so has
			 * known alignment of 4.
			 */
			{17, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
			/* Added (4n) to packet pointer's (4n+2) var_off, giving
			 * another (4n+2).
			 */
			{19, "R5_w=pkt(id=2,off=0,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
			{20, "R4=pkt(id=2,off=4,r=0,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{23, "R5=pkt(id=2,off=0,r=4,umin=14,umax=2054,var_off=(0x2; 0xffc)"},
		},
	},
	{
		.descr = "dubious pointer arithmetic",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_IMM(BPF_REG_0, 0),
			/* (ptr - ptr) << 2 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
			/* We have a (4n) value.  Let's make a packet offset
			 * out of it.  First add 14, to make it a (4n+2)
			 */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			/* Then make sure it's nonnegative */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to packet pointer */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.matches = {
			{3, "R5_w=pkt_end(off=0,imm=0)"},
			/* (ptr - ptr) << 2 == unknown, (4n) */
			{5, "R5_w=scalar(smax=9223372036854775804,umax=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc)"},
			/* (4n) + 14 == (4n+2).  We blow our bounds, because
			 * the add could overflow.
			 */
			{6, "R5_w=scalar(smin=-9223372036854775806,smax=9223372036854775806,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
			/* Checked s>=0 */
			{9, "R5=scalar(umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
			/* packet pointer + nonnegative (4n+2) */
			{11, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
			{12, "R4_w=pkt(id=1,off=4,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
			 * We checked the bounds, but it might have been able
			 * to overflow if the packet pointer started in the
			 * upper half of the address space.
			 * So we did not get a 'range' on R6, and the access
			 * attempt will fail.
			 */
			{15, "R6_w=pkt(id=1,off=0,r=0,umin=2,umax=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc)"},
		}
	},
	{
		.descr = "variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Create another unknown, (4n)-aligned, and subtract
			 * it from the first one
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
			/* Bounds-check the result */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w=pkt(off=0,r=8,imm=0)"},
			{8, "R6_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{9, "R6_w=scalar(umin=14,umax=1034,var_off=(0x2; 0x7fc))"},
			/* New unknown value in R7 is (4n) */
			{10, "R7_w=scalar(umax=1020,var_off=(0x0; 0x3fc))"},
			/* Subtracting it from R6 blows our unsigned bounds */
			{11, "R6=scalar(smin=-1006,smax=1034,umin=2,umax=18446744073709551614,var_off=(0x2; 0xfffffffffffffffc)"},
			/* Checked s>=0 */
			{14, "R6=scalar(umin=2,umax=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=2,off=0,r=4,umin=2,umax=1034,var_off=(0x2; 0x7fc)"},

		},
	},
	{
		.descr = "pointer variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned and bounded
			 * to [14,74]
			 */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Subtract it from the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
			/* Create another unknown, (4n)-aligned and >= 74.
			 * That in fact means >= 76, since 74 % 4 == 2
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{6, "R2_w=pkt(off=0,r=8,imm=0)"},
			{9, "R6_w=scalar(umax=60,var_off=(0x0; 0x3c))"},
			/* Adding 14 makes R6 be (4n+2) */
			{10, "R6_w=scalar(umin=14,umax=74,var_off=(0x2; 0x7c))"},
			/* Subtracting from packet pointer overflows ubounds */
			{13, "R5_w=pkt(id=2,off=0,r=8,umin=18446744073709551542,umax=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c)"},
			/* New unknown value in R7 is (4n), >= 76 */
			{14, "R7_w=scalar(umin=76,umax=1096,var_off=(0x0; 0x7fc))"},
			/* Adding it to packet pointer gives nice bounds again */
			{16, "R5_w=pkt(id=3,off=0,r=0,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=3,off=0,r=4,umin=2,umax=1082,var_off=(0x2; 0xfffffffc)"},
		},
	},
};

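/* Compute the program length by scanning backwards for the last
 * instruction that is not an all-zero filler entry.
 */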
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static char bpf_vlog[32768];

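/* Load one test program with BPF_F_STRICT_ALIGNMENT and a verbose
 * verifier log, check the expected load result, then walk the log
 * and verify that each expected register state string appears on
 * the log line for its instruction index.
 */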
static int do_test_single(struct bpf_align_test *test)
{
	struct bpf_insn *prog = test->insns;
	int prog_type = test->prog_type;
	char bpf_vlog_copy[32768];
	LIBBPF_OPTS(bpf_prog_load_opts, opts,
		.prog_flags = BPF_F_STRICT_ALIGNMENT,
		.log_buf = bpf_vlog,
		.log_size = sizeof(bpf_vlog),
		.log_level = 2,
	);
	const char *line_ptr;
	int cur_line = -1;
	int prog_len, i;
	int fd_prog;
	int ret;

	prog_len = probe_filter_length(prog);
	fd_prog = bpf_prog_load(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL",
				prog, prog_len, &opts);
	if (fd_prog < 0 && test->result != REJECT) {
		printf("Failed to load program.\n");
		printf("%s", bpf_vlog);
		ret = 1;
	} else if (fd_prog >= 0 && test->result == REJECT) {
		printf("Unexpected success to load!\n");
		printf("%s", bpf_vlog);
		ret = 1;
		close(fd_prog);
	} else {
		ret = 0;
		/* We make a local copy so that we can strtok() it */
		strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
		line_ptr = strtok(bpf_vlog_copy, "\n");
		for (i = 0; i < MAX_MATCHES; i++) {
			struct bpf_reg_match m = test->matches[i];
			int tmp;

			if (!m.match)
				break;
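			/* Advance to the verifier log line for this
			 * instruction index; state lines start with
			 * "N: " or "from M to N: ".
			 */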
			while (line_ptr) {
				cur_line = -1;
				sscanf(line_ptr, "%u: ", &cur_line);
				if (cur_line == -1)
					sscanf(line_ptr, "from %u to %u: ", &tmp, &cur_line);
				if (cur_line == m.line)
					break;
				line_ptr = strtok(NULL, "\n");
			}
			if (!line_ptr) {
				printf("Failed to find line %u for match: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
			/* Check the next line as well in case the previous line
			 * did not have a corresponding bpf insn. Example:
			 * func#0 @0
			 * 0: R1=ctx(off=0,imm=0) R10=fp0
			 * 0: (b7) r3 = 2                 ; R3_w=2
			 */
			if (!strstr(line_ptr, m.match)) {
				cur_line = -1;
				line_ptr = strtok(NULL, "\n");
				/* Guard against running off the end of the log */
				if (line_ptr)
					sscanf(line_ptr, "%u: ", &cur_line);
			}
			if (cur_line != m.line || !line_ptr ||
			    !strstr(line_ptr, m.match)) {
				printf("Failed to find match %u: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
		}
		if (fd_prog >= 0)
			close(fd_prog);
	}
	return ret;
}

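/* Entry point: run every alignment test case as its own subtest. */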
void test_align(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(tests); i++) {
		struct bpf_align_test *test = &tests[i];

		if (!test__start_subtest(test->descr))
			continue;

		ASSERT_OK(do_test_single(test), test->descr);
	}
}