#include <asm/types.h>
#include <linux/types.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <stddef.h>
#include <stdbool.h>

#include <linux/unistd.h>
#include <linux/filter.h>
#include <linux/bpf_perf_event.h>
#include <linux/bpf.h>

#include <bpf/bpf.h>

#include "../../../include/linux/filter.h"
#include "bpf_rlimit.h"
#include "bpf_util.h"

#define MAX_INSNS	512
#define MAX_MATCHES	16

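/* Each expected match pairs an instruction index, as printed at the start of
 * a verifier log line, with a substring that must appear in the register
 * state dumped for that instruction.
 */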
struct bpf_reg_match {
	unsigned int line;
	const char *match;
};

struct bpf_align_test {
	const char *descr;
	struct bpf_insn	insns[MAX_INSNS];
	enum {
		UNDEF,
		ACCEPT,
		REJECT
	} result;
	enum bpf_prog_type prog_type;
	/* Matches must be in order of increasing line */
	struct bpf_reg_match matches[MAX_MATCHES];
};

static struct bpf_align_test tests[] = {
	/* Four tests of known constants.  These aren't staggeringly
	 * interesting since we track exact values now.
	 */
	{
		.descr = "mov",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_3, 8),
			BPF_MOV64_IMM(BPF_REG_3, 16),
			BPF_MOV64_IMM(BPF_REG_3, 32),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv2"},
			{2, "R3_w=inv4"},
			{3, "R3_w=inv8"},
			{4, "R3_w=inv16"},
			{5, "R3_w=inv32"},
		},
	},
	{
		.descr = "shift",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_4, 32),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv1"},
			{2, "R3_w=inv2"},
			{3, "R3_w=inv4"},
			{4, "R3_w=inv8"},
			{5, "R3_w=inv16"},
			{6, "R3_w=inv1"},
			{7, "R4_w=inv32"},
			{8, "R4_w=inv16"},
			{9, "R4_w=inv8"},
			{10, "R4_w=inv4"},
			{11, "R4_w=inv2"},
		},
	},
	{
		.descr = "addsub",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_3, 2),
			BPF_MOV64_IMM(BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv4"},
			{2, "R3_w=inv8"},
			{3, "R3_w=inv10"},
			{4, "R4_w=inv8"},
			{5, "R4_w=inv12"},
			{6, "R4_w=inv14"},
		},
	},
	{
		.descr = "mul",
		.insns = {
			BPF_MOV64_IMM(BPF_REG_3, 7),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 2),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_3, 4),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{1, "R1=ctx(id=0,off=0,imm=0)"},
			{1, "R10=fp0"},
			{1, "R3_w=inv7"},
			{2, "R3_w=inv7"},
			{3, "R3_w=inv14"},
			{4, "R3_w=inv56"},
		},
	},

	/* Tests using unknown values */
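/* Load skb->data into R2 and skb->data_end into R3. */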
#define PREP_PKT_POINTERS \
	BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1, \
		    offsetof(struct __sk_buff, data)), \
	BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1, \
		    offsetof(struct __sk_buff, data_end))

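/* Bounds-check that at least 8 bytes of packet data are readable, then load
 * a single unknown byte from the start of the packet into DST_REG.
 */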
#define LOAD_UNKNOWN(DST_REG) \
	PREP_PKT_POINTERS, \
	BPF_MOV64_REG(BPF_REG_0, BPF_REG_2), \
	BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 8), \
	BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_0, 1), \
	BPF_EXIT_INSN(), \
	BPF_LDX_MEM(BPF_B, DST_REG, BPF_REG_2, 0)

	{
		.descr = "unknown shift",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_3, 1),
			LOAD_UNKNOWN(BPF_REG_4),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_4, 5),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_ALU64_IMM(BPF_RSH, BPF_REG_4, 1),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R0_w=pkt(id=0,off=8,r=8,imm=0)"},
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R3_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{9, "R3_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{10, "R3_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{11, "R3_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{18, "R3=pkt_end(id=0,off=0,imm=0)"},
			{18, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{19, "R4_w=inv(id=0,umax_value=8160,var_off=(0x0; 0x1fe0))"},
			{20, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
			{21, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{22, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
		},
	},
	{
		.descr = "unknown mul",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_3),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 1),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 4),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_3),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 8),
			BPF_ALU64_IMM(BPF_MUL, BPF_REG_4, 2),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{7, "R3_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{8, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{9, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{11, "R4_w=inv(id=0,umax_value=510,var_off=(0x0; 0x1fe))"},
			{12, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{13, "R4_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{14, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{15, "R4_w=inv(id=0,umax_value=2040,var_off=(0x0; 0x7f8))"},
			{16, "R4_w=inv(id=0,umax_value=4080,var_off=(0x0; 0xff0))"},
		},
	},
	{
		.descr = "packet const offset",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),

			BPF_MOV64_IMM(BPF_REG_0, 0),

			/* Skip over ethernet header.  */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),

			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 1),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_B, BPF_REG_4, BPF_REG_5, 3),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 0),
			BPF_LDX_MEM(BPF_H, BPF_REG_4, BPF_REG_5, 2),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			{4, "R5_w=pkt(id=0,off=0,r=0,imm=0)"},
			{5, "R5_w=pkt(id=0,off=14,r=0,imm=0)"},
			{6, "R4_w=pkt(id=0,off=14,r=0,imm=0)"},
			{10, "R2=pkt(id=0,off=0,r=18,imm=0)"},
			{10, "R5=pkt(id=0,off=14,r=18,imm=0)"},
			{10, "R4_w=inv(id=0,umax_value=255,var_off=(0x0; 0xff))"},
			{14, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
			{15, "R4_w=inv(id=0,umax_value=65535,var_off=(0x0; 0xffff))"},
		},
	},
	{
		.descr = "packet variable offset",
		.insns = {
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),

			/* First, add a constant to the R5 packet pointer,
			 * then a variable with a known alignment.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Now, test in the other direction.  Adding first
			 * the variable offset to R5, then the constant.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			/* Test multiple accumulations of unknown values
			 * into a packet pointer.
			 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 4),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_5, 0),

			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Offset is added to packet pointer R5, resulting in
			 * known fixed offset, and variable offset from R6.
			 */
			{11, "R5_w=pkt(id=1,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total offset is NET_IP_ALIGN + reg->off (0) +
			 * reg->aux_off (14) which is 16.  Then the variable
			 * offset is considered using reg->aux_off_align which
			 * is 4 and meets the load's requirements.
			 */
			{15, "R4=pkt(id=1,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{15, "R5=pkt(id=1,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Variable offset is added to R5 packet pointer,
			 * resulting in auxiliary alignment of 4.
			 */
			{18, "R5_w=pkt(id=2,off=0,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5, resulting in
			 * reg->off of 14.
			 */
			{19, "R5_w=pkt(id=2,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off
			 * (14) which is 16.  Then the variable offset is 4-byte
			 * aligned, so the total offset is 4-byte aligned and
			 * meets the load's requirements.
			 */
			{23, "R4=pkt(id=2,off=18,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			{23, "R5=pkt(id=2,off=14,r=18,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant offset is added to R5 packet pointer,
			 * resulting in reg->off value of 14.
			 */
			{26, "R5_w=pkt(id=0,off=14,r=8"},
			/* Variable offset is added to R5, resulting in a
			 * variable offset of (4n).
			 */
			{27, "R5_w=pkt(id=3,off=14,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Constant is added to R5 again, setting reg->off to 18. */
			{28, "R5_w=pkt(id=3,off=18,r=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* And once more we add a variable; resulting var_off
			 * is still (4n), fixed offset is not changed.
			 * Also, we create a new reg->id.
			 */
			{29, "R5_w=pkt(id=4,off=18,r=0,umax_value=2040,var_off=(0x0; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (18)
			 * which is 20.  Then the variable offset is (4n), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{33, "R4=pkt(id=4,off=22,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
			{33, "R5=pkt(id=4,off=18,r=22,umax_value=2040,var_off=(0x0; 0x7fc))"},
		},
	},
	{
		.descr = "packet variable offset 2",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			/* Make a (4n) offset from the value we just read */
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xff),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_MOV64_IMM(BPF_REG_0, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{8, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{8, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{9, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Packet pointer has (4n+2) offset */
			{11, "R5_w=pkt(id=1,off=0,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			{13, "R4=pkt(id=1,off=4,r=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{15, "R5=pkt(id=1,off=0,r=4,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* Newly read value in R6 was shifted left by 2, so has
			 * known alignment of 4.
			 */
			{18, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Added (4n) to packet pointer's (4n+2) var_off, giving
			 * another (4n+2).
			 */
			{19, "R5_w=pkt(id=2,off=0,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			{21, "R4=pkt(id=2,off=4,r=0,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{23, "R5=pkt(id=2,off=0,r=4,umin_value=14,umax_value=2054,var_off=(0x2; 0xffc))"},
		},
	},
	{
		.descr = "dubious pointer arithmetic",
		.insns = {
			PREP_PKT_POINTERS,
			BPF_MOV64_IMM(BPF_REG_0, 0),
			/* (ptr - ptr) << 2 */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
			/* We have a (4n) value.  Let's make a packet offset
			 * out of it.  First add 14, to make it a (4n+2)
			 */
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_5, 14),
			/* Then make sure it's nonnegative */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_5, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to packet pointer */
			BPF_MOV64_REG(BPF_REG_6, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_6, BPF_REG_5),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_6),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_4, BPF_REG_6, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.result = REJECT,
		.matches = {
			{4, "R5_w=pkt_end(id=0,off=0,imm=0)"},
			/* (ptr - ptr) << 2 == unknown, (4n) */
			{6, "R5_w=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
			/* (4n) + 14 == (4n+2).  We blow our bounds, because
			 * the add could overflow.
			 */
			{7, "R5_w=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s>=0 */
			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* packet pointer + nonnegative (4n+2) */
			{11, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			{13, "R4_w=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
			 * We checked the bounds, but it might have been able
			 * to overflow if the packet pointer started in the
			 * upper half of the address space.
			 * So we did not get a 'range' on R6, and the access
			 * attempt will fail.
			 */
			{15, "R6_w=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
		}
	},
	{
		.descr = "variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Create another unknown, (4n)-aligned, and subtract
			 * it from the first one
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_7),
			/* Bounds-check the result */
			BPF_JMP_IMM(BPF_JSGE, BPF_REG_6, 0, 1),
			BPF_EXIT_INSN(),
			/* Add it to the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_6),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{9, "R6_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Adding 14 makes R6 be (4n+2) */
			{10, "R6_w=inv(id=0,umin_value=14,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* New unknown value in R7 is (4n) */
			{11, "R7_w=inv(id=0,umax_value=1020,var_off=(0x0; 0x3fc))"},
			/* Subtracting it from R6 blows our unsigned bounds */
			{12, "R6=inv(id=0,smin_value=-1006,smax_value=1034,var_off=(0x2; 0xfffffffffffffffc))"},
			/* Checked s>=0 */
			{14, "R6=inv(id=0,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=1,off=0,r=4,umin_value=2,umax_value=1034,var_off=(0x2; 0x7fc))"},
		},
	},
	{
		.descr = "pointer variable subtraction",
		.insns = {
			/* Create an unknown offset, (4n+2)-aligned and bounded
			 * to [14,74]
			 */
			LOAD_UNKNOWN(BPF_REG_6),
			BPF_MOV64_REG(BPF_REG_7, BPF_REG_6),
			BPF_ALU64_IMM(BPF_AND, BPF_REG_6, 0xf),
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_6, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_6, 14),
			/* Subtract it from the packet pointer */
			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_6),
			/* Create another unknown, (4n)-aligned and >= 74.
			 * That in fact means >= 76, since 74 % 4 == 2
			 */
			BPF_ALU64_IMM(BPF_LSH, BPF_REG_7, 2),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_7, 76),
			/* Add it to the packet pointer */
			BPF_ALU64_REG(BPF_ADD, BPF_REG_5, BPF_REG_7),
			/* Check bounds and perform a read */
			BPF_MOV64_REG(BPF_REG_4, BPF_REG_5),
			BPF_ALU64_IMM(BPF_ADD, BPF_REG_4, 4),
			BPF_JMP_REG(BPF_JGE, BPF_REG_3, BPF_REG_4, 1),
			BPF_EXIT_INSN(),
			BPF_LDX_MEM(BPF_W, BPF_REG_6, BPF_REG_5, 0),
			BPF_EXIT_INSN(),
		},
		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
		.matches = {
			/* Calculated offset in R6 has unknown value, but known
			 * alignment of 4.
			 */
			{7, "R2_w=pkt(id=0,off=0,r=8,imm=0)"},
			{10, "R6_w=inv(id=0,umax_value=60,var_off=(0x0; 0x3c))"},
			/* Adding 14 makes R6 be (4n+2) */
			{11, "R6_w=inv(id=0,umin_value=14,umax_value=74,var_off=(0x2; 0x7c))"},
			/* Subtracting from packet pointer overflows ubounds */
			{13, "R5_w=pkt(id=1,off=0,r=8,umin_value=18446744073709551542,umax_value=18446744073709551602,var_off=(0xffffffffffffff82; 0x7c))"},
			/* New unknown value in R7 is (4n), >= 76 */
			{15, "R7_w=inv(id=0,umin_value=76,umax_value=1096,var_off=(0x0; 0x7fc))"},
			/* Adding it to packet pointer gives nice bounds again */
			{16, "R5_w=pkt(id=2,off=0,r=0,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
			/* At the time the word size load is performed from R5,
			 * its total fixed offset is NET_IP_ALIGN + reg->off (0)
			 * which is 2.  Then the variable offset is (4n+2), so
			 * the total offset is 4-byte aligned and meets the
			 * load's requirements.
			 */
			{20, "R5=pkt(id=2,off=0,r=4,umin_value=2,umax_value=1082,var_off=(0x2; 0x7fc))"},
		},
	},
};

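/* Find the effective program length: scan backwards from the end of the
 * fixed-size insns[] array for the last instruction with a non-zero opcode
 * or immediate.
 */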
static int probe_filter_length(const struct bpf_insn *fp)
{
	int len;

	for (len = MAX_INSNS - 1; len > 0; --len)
		if (fp[len].code != 0 || fp[len].imm != 0)
			break;
	return len + 1;
}

static char bpf_vlog[32768];

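/* Load one test program with BPF_F_STRICT_ALIGNMENT and a verbose verifier
 * log, check the expected accept/reject outcome, then verify each expected
 * register state against the corresponding line of the verifier log.
 */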
static int do_test_single(struct bpf_align_test *test)
{
	struct bpf_insn *prog = test->insns;
	int prog_type = test->prog_type;
	char bpf_vlog_copy[32768];
	const char *line_ptr;
	int cur_line = -1;
	int prog_len, i;
	int fd_prog;
	int ret;

	prog_len = probe_filter_length(prog);
	fd_prog = bpf_verify_program(prog_type ? : BPF_PROG_TYPE_SOCKET_FILTER,
				     prog, prog_len, BPF_F_STRICT_ALIGNMENT,
				     "GPL", 0, bpf_vlog, sizeof(bpf_vlog), 2);
	if (fd_prog < 0 && test->result != REJECT) {
		printf("Failed to load program.\n");
		printf("%s", bpf_vlog);
		ret = 1;
	} else if (fd_prog >= 0 && test->result == REJECT) {
		printf("Unexpected success to load!\n");
		printf("%s", bpf_vlog);
		ret = 1;
		close(fd_prog);
	} else {
		ret = 0;
		/* We make a local copy so that we can strtok() it */
		strncpy(bpf_vlog_copy, bpf_vlog, sizeof(bpf_vlog_copy));
		line_ptr = strtok(bpf_vlog_copy, "\n");
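		/* Walk the expected matches in order: advance through the log
		 * until the line whose leading instruction index equals
		 * m.line is found, then require the expected register state
		 * substring on that line.
		 */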
		for (i = 0; i < MAX_MATCHES; i++) {
			struct bpf_reg_match m = test->matches[i];

			if (!m.match)
				break;
			while (line_ptr) {
				cur_line = -1;
				sscanf(line_ptr, "%u: ", &cur_line);
				if (cur_line == m.line)
					break;
				line_ptr = strtok(NULL, "\n");
			}
			if (!line_ptr) {
				printf("Failed to find line %u for match: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
			if (!strstr(line_ptr, m.match)) {
				printf("Failed to find match %u: %s\n",
				       m.line, m.match);
				ret = 1;
				printf("%s", bpf_vlog);
				break;
			}
		}
		if (fd_prog >= 0)
			close(fd_prog);
	}
	return ret;
}

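/* Run tests[from..to) and print a pass/fail summary. */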
static int do_test(unsigned int from, unsigned int to)
{
	int all_pass = 0;
	int all_fail = 0;
	unsigned int i;

	for (i = from; i < to; i++) {
		struct bpf_align_test *test = &tests[i];
		int fail;

		printf("Test %3d: %s ... ",
		       i, test->descr);
		fail = do_test_single(test);
		if (fail) {
			all_fail++;
			printf("FAIL\n");
		} else {
			all_pass++;
			printf("PASS\n");
		}
	}
	printf("Results: %d pass %d fail\n",
	       all_pass, all_fail);
	return all_fail ? EXIT_FAILURE : EXIT_SUCCESS;
}

int main(int argc, char **argv)
{
	unsigned int from = 0, to = ARRAY_SIZE(tests);

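	/* Two arguments select an inclusive [lower, upper] range of test
	 * indices; one argument selects a single test.  Out-of-range values
	 * leave the default of running every test.
	 */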
	if (argc == 3) {
		unsigned int l = atoi(argv[argc - 2]);
		unsigned int u = atoi(argv[argc - 1]);

		if (l < to && u < to) {
			from = l;
			to   = u + 1;
		}
	} else if (argc == 2) {
		unsigned int t = atoi(argv[argc - 1]);

		if (t < to) {
			from = t;
			to   = t + 1;
		}
	}
	return do_test(from, to);
}