bpf-helper.h — seccomp BPF helper macros.
Two versions of the same header follow: first from kernel v6.13.7, then an
older copy from kernel v4.10.11 for comparison.
  1/* SPDX-License-Identifier: GPL-2.0 */
  2/*
  3 * Example wrapper around BPF macros.
  4 *
  5 * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org>
  6 * Author: Will Drewry <wad@chromium.org>
  7 *
  8 * The code may be used by anyone for any purpose,
  9 * and can serve as a starting point for developing
 10 * applications using prctl(PR_SET_SECCOMP, 2, ...).
 11 *
 12 * No guarantees are provided with respect to the correctness
 13 * or functionality of this code.
 14 */
 15#ifndef __BPF_HELPER_H__
 16#define __BPF_HELPER_H__
 17
 18#include <asm/bitsperlong.h>	/* for __BITS_PER_LONG */
 19#include <endian.h>
 20#include <linux/filter.h>
 21#include <linux/seccomp.h>	/* for seccomp_data */
 22#include <linux/types.h>
 23#include <linux/unistd.h>
 24#include <stddef.h>
 25
 26#define BPF_LABELS_MAX 256
 27struct bpf_labels {
 28	int count;
 29	struct __bpf_label {
 30		const char *label;
 31		__u32 location;
 32	} labels[BPF_LABELS_MAX];
 33};
 34
 35int bpf_resolve_jumps(struct bpf_labels *labels,
 36		      struct sock_filter *filter, size_t count);
 37__u32 seccomp_bpf_label(struct bpf_labels *labels, const char *label);
 38void seccomp_bpf_print(struct sock_filter *filter, size_t count);
 39
 40#define JUMP_JT 0xff
 41#define JUMP_JF 0xff
 42#define LABEL_JT 0xfe
 43#define LABEL_JF 0xfe
 44
 45#define ALLOW \
 46	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
 47#define DENY \
 48	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL)
 49#define JUMP(labels, label) \
 50	BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
 51		 JUMP_JT, JUMP_JF)
 52#define LABEL(labels, label) \
 53	BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
 54		 LABEL_JT, LABEL_JF)
 55#define SYSCALL(nr, jt) \
 56	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (nr), 0, 1), \
 57	jt
 58
 59/* Lame, but just an example */
 60#define FIND_LABEL(labels, label) seccomp_bpf_label((labels), #label)
 61
 62#define EXPAND(...) __VA_ARGS__
 63
 64/* Ensure that we load the logically correct offset. */
 65#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 66#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
 67#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 68#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
 69#else
 70#error "Unknown endianness"
 71#endif
 72
 73/* Map all width-sensitive operations */
 74#if __BITS_PER_LONG == 32
 75
 76#define JEQ(x, jt) JEQ32(x, EXPAND(jt))
 77#define JNE(x, jt) JNE32(x, EXPAND(jt))
 78#define JGT(x, jt) JGT32(x, EXPAND(jt))
 79#define JLT(x, jt) JLT32(x, EXPAND(jt))
 80#define JGE(x, jt) JGE32(x, EXPAND(jt))
 81#define JLE(x, jt) JLE32(x, EXPAND(jt))
 82#define JA(x, jt) JA32(x, EXPAND(jt))
 83#define ARG(i) ARG_32(i)
 84
 85#elif __BITS_PER_LONG == 64
 86
 87/* Ensure that we load the logically correct offset. */
 88#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
 89#define ENDIAN(_lo, _hi) _lo, _hi
 90#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
 91#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
 92#define ENDIAN(_lo, _hi) _hi, _lo
 93#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
 94#endif
 95
 96union arg64 {
 97	struct {
 98		__u32 ENDIAN(lo32, hi32);
 99	};
100	__u64 u64;
101};
102
103#define JEQ(x, jt) \
104	JEQ64(((union arg64){.u64 = (x)}).lo32, \
105	      ((union arg64){.u64 = (x)}).hi32, \
106	      EXPAND(jt))
107#define JGT(x, jt) \
108	JGT64(((union arg64){.u64 = (x)}).lo32, \
109	      ((union arg64){.u64 = (x)}).hi32, \
110	      EXPAND(jt))
111#define JGE(x, jt) \
112	JGE64(((union arg64){.u64 = (x)}).lo32, \
113	      ((union arg64){.u64 = (x)}).hi32, \
114	      EXPAND(jt))
115#define JNE(x, jt) \
116	JNE64(((union arg64){.u64 = (x)}).lo32, \
117	      ((union arg64){.u64 = (x)}).hi32, \
118	      EXPAND(jt))
119#define JLT(x, jt) \
120	JLT64(((union arg64){.u64 = (x)}).lo32, \
121	      ((union arg64){.u64 = (x)}).hi32, \
122	      EXPAND(jt))
123#define JLE(x, jt) \
124	JLE64(((union arg64){.u64 = (x)}).lo32, \
125	      ((union arg64){.u64 = (x)}).hi32, \
126	      EXPAND(jt))
127
128#define JA(x, jt) \
129	JA64(((union arg64){.u64 = (x)}).lo32, \
130	       ((union arg64){.u64 = (x)}).hi32, \
131	       EXPAND(jt))
132#define ARG(i) ARG_64(i)
133
134#else
135#error __BITS_PER_LONG value unusable.
136#endif
137
138/* Loads the arg into A */
139#define ARG_32(idx) \
140	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))
141
142/* Loads lo into M[0] and hi into M[1] and A */
143#define ARG_64(idx) \
144	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
145	BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
146	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, HI_ARG(idx)), \
147	BPF_STMT(BPF_ST, 1) /* hi -> M[1] */
148
149#define JEQ32(value, jt) \
150	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 0, 1), \
151	jt
152
153#define JNE32(value, jt) \
154	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
155	jt
156
157#define JA32(value, jt) \
158	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
159	jt
160
161#define JGE32(value, jt) \
162	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
163	jt
164
165#define JGT32(value, jt) \
166	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
167	jt
168
169#define JLE32(value, jt) \
170	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
171	jt
172
173#define JLT32(value, jt) \
174	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
175	jt
176
177/*
178 * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
179 * A and M[1]. This invariant is kept by restoring A if necessary.
180 */
181#define JEQ64(lo, hi, jt) \
182	/* if (hi != arg.hi) goto NOMATCH; */ \
183	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
184	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
185	/* if (lo != arg.lo) goto NOMATCH; */ \
186	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
187	BPF_STMT(BPF_LD+BPF_MEM, 1), \
188	jt, \
189	BPF_STMT(BPF_LD+BPF_MEM, 1)
190
191#define JNE64(lo, hi, jt) \
192	/* if (hi != arg.hi) goto MATCH; */ \
193	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
194	BPF_STMT(BPF_LD+BPF_MEM, 0), \
195	/* if (lo != arg.lo) goto MATCH; */ \
196	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
197	BPF_STMT(BPF_LD+BPF_MEM, 1), \
198	jt, \
199	BPF_STMT(BPF_LD+BPF_MEM, 1)
200
201#define JA64(lo, hi, jt) \
202	/* if (hi & arg.hi) goto MATCH; */ \
203	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
204	BPF_STMT(BPF_LD+BPF_MEM, 0), \
205	/* if (lo & arg.lo) goto MATCH; */ \
206	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
207	BPF_STMT(BPF_LD+BPF_MEM, 1), \
208	jt, \
209	BPF_STMT(BPF_LD+BPF_MEM, 1)
210
211#define JGE64(lo, hi, jt) \
212	/* if (hi > arg.hi) goto MATCH; */ \
213	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
214	/* if (hi != arg.hi) goto NOMATCH; */ \
215	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
216	BPF_STMT(BPF_LD+BPF_MEM, 0), \
217	/* if (lo >= arg.lo) goto MATCH; */ \
218	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
219	BPF_STMT(BPF_LD+BPF_MEM, 1), \
220	jt, \
221	BPF_STMT(BPF_LD+BPF_MEM, 1)
222
223#define JGT64(lo, hi, jt) \
224	/* if (hi > arg.hi) goto MATCH; */ \
225	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
226	/* if (hi != arg.hi) goto NOMATCH; */ \
227	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
228	BPF_STMT(BPF_LD+BPF_MEM, 0), \
229	/* if (lo > arg.lo) goto MATCH; */ \
230	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
231	BPF_STMT(BPF_LD+BPF_MEM, 1), \
232	jt, \
233	BPF_STMT(BPF_LD+BPF_MEM, 1)
234
235#define JLE64(lo, hi, jt) \
236	/* if (hi < arg.hi) goto MATCH; */ \
237	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
238	/* if (hi != arg.hi) goto NOMATCH; */ \
239	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
240	BPF_STMT(BPF_LD+BPF_MEM, 0), \
241	/* if (lo <= arg.lo) goto MATCH; */ \
242	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
243	BPF_STMT(BPF_LD+BPF_MEM, 1), \
244	jt, \
245	BPF_STMT(BPF_LD+BPF_MEM, 1)
246
247#define JLT64(lo, hi, jt) \
248	/* if (hi < arg.hi) goto MATCH; */ \
249	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
250	/* if (hi != arg.hi) goto NOMATCH; */ \
251	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
252	BPF_STMT(BPF_LD+BPF_MEM, 0), \
253	/* if (lo < arg.lo) goto MATCH; */ \
254	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
255	BPF_STMT(BPF_LD+BPF_MEM, 1), \
256	jt, \
257	BPF_STMT(BPF_LD+BPF_MEM, 1)
258
259#define LOAD_SYSCALL_NR \
260	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
261		 offsetof(struct seccomp_data, nr))
262
263#endif  /* __BPF_HELPER_H__ */
Older copy of the same header, from kernel v4.10.11 (differs only in using
<endian.h>'s __BYTE_ORDER macros instead of the compiler-builtin
__BYTE_ORDER__, and in lacking the SPDX tag):
  1/*
  2 * Example wrapper around BPF macros.
  3 *
  4 * Copyright (c) 2012 The Chromium OS Authors <chromium-os-dev@chromium.org>
  5 * Author: Will Drewry <wad@chromium.org>
  6 *
  7 * The code may be used by anyone for any purpose,
  8 * and can serve as a starting point for developing
  9 * applications using prctl(PR_SET_SECCOMP, 2, ...).
 10 *
 11 * No guarantees are provided with respect to the correctness
 12 * or functionality of this code.
 13 */
 14#ifndef __BPF_HELPER_H__
 15#define __BPF_HELPER_H__
 16
 17#include <asm/bitsperlong.h>	/* for __BITS_PER_LONG */
 18#include <endian.h>
 19#include <linux/filter.h>
 20#include <linux/seccomp.h>	/* for seccomp_data */
 21#include <linux/types.h>
 22#include <linux/unistd.h>
 23#include <stddef.h>
 24
 25#define BPF_LABELS_MAX 256
 26struct bpf_labels {
 27	int count;
 28	struct __bpf_label {
 29		const char *label;
 30		__u32 location;
 31	} labels[BPF_LABELS_MAX];
 32};
 33
 34int bpf_resolve_jumps(struct bpf_labels *labels,
 35		      struct sock_filter *filter, size_t count);
 36__u32 seccomp_bpf_label(struct bpf_labels *labels, const char *label);
 37void seccomp_bpf_print(struct sock_filter *filter, size_t count);
 38
 39#define JUMP_JT 0xff
 40#define JUMP_JF 0xff
 41#define LABEL_JT 0xfe
 42#define LABEL_JF 0xfe
 43
 44#define ALLOW \
 45	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
 46#define DENY \
 47	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL)
 48#define JUMP(labels, label) \
 49	BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
 50		 JUMP_JT, JUMP_JF)
 51#define LABEL(labels, label) \
 52	BPF_JUMP(BPF_JMP+BPF_JA, FIND_LABEL((labels), (label)), \
 53		 LABEL_JT, LABEL_JF)
 54#define SYSCALL(nr, jt) \
 55	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (nr), 0, 1), \
 56	jt
 57
 58/* Lame, but just an example */
 59#define FIND_LABEL(labels, label) seccomp_bpf_label((labels), #label)
 60
 61#define EXPAND(...) __VA_ARGS__
 62
 63/* Ensure that we load the logically correct offset. */
 64#if __BYTE_ORDER == __LITTLE_ENDIAN
 65#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
 66#elif __BYTE_ORDER == __BIG_ENDIAN
 67#define LO_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
 68#else
 69#error "Unknown endianness"
 70#endif
 71
 72/* Map all width-sensitive operations */
 73#if __BITS_PER_LONG == 32
 74
 75#define JEQ(x, jt) JEQ32(x, EXPAND(jt))
 76#define JNE(x, jt) JNE32(x, EXPAND(jt))
 77#define JGT(x, jt) JGT32(x, EXPAND(jt))
 78#define JLT(x, jt) JLT32(x, EXPAND(jt))
 79#define JGE(x, jt) JGE32(x, EXPAND(jt))
 80#define JLE(x, jt) JLE32(x, EXPAND(jt))
 81#define JA(x, jt) JA32(x, EXPAND(jt))
 82#define ARG(i) ARG_32(i)
 83
 84#elif __BITS_PER_LONG == 64
 85
 86/* Ensure that we load the logically correct offset. */
 87#if __BYTE_ORDER == __LITTLE_ENDIAN
 88#define ENDIAN(_lo, _hi) _lo, _hi
 89#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)]) + sizeof(__u32)
 90#elif __BYTE_ORDER == __BIG_ENDIAN
 91#define ENDIAN(_lo, _hi) _hi, _lo
 92#define HI_ARG(idx) offsetof(struct seccomp_data, args[(idx)])
 93#endif
 94
 95union arg64 {
 96	struct {
 97		__u32 ENDIAN(lo32, hi32);
 98	};
 99	__u64 u64;
100};
101
102#define JEQ(x, jt) \
103	JEQ64(((union arg64){.u64 = (x)}).lo32, \
104	      ((union arg64){.u64 = (x)}).hi32, \
105	      EXPAND(jt))
106#define JGT(x, jt) \
107	JGT64(((union arg64){.u64 = (x)}).lo32, \
108	      ((union arg64){.u64 = (x)}).hi32, \
109	      EXPAND(jt))
110#define JGE(x, jt) \
111	JGE64(((union arg64){.u64 = (x)}).lo32, \
112	      ((union arg64){.u64 = (x)}).hi32, \
113	      EXPAND(jt))
114#define JNE(x, jt) \
115	JNE64(((union arg64){.u64 = (x)}).lo32, \
116	      ((union arg64){.u64 = (x)}).hi32, \
117	      EXPAND(jt))
118#define JLT(x, jt) \
119	JLT64(((union arg64){.u64 = (x)}).lo32, \
120	      ((union arg64){.u64 = (x)}).hi32, \
121	      EXPAND(jt))
122#define JLE(x, jt) \
123	JLE64(((union arg64){.u64 = (x)}).lo32, \
124	      ((union arg64){.u64 = (x)}).hi32, \
125	      EXPAND(jt))
126
127#define JA(x, jt) \
128	JA64(((union arg64){.u64 = (x)}).lo32, \
129	       ((union arg64){.u64 = (x)}).hi32, \
130	       EXPAND(jt))
131#define ARG(i) ARG_64(i)
132
133#else
134#error __BITS_PER_LONG value unusable.
135#endif
136
137/* Loads the arg into A */
138#define ARG_32(idx) \
139	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx))
140
141/* Loads lo into M[0] and hi into M[1] and A */
142#define ARG_64(idx) \
143	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, LO_ARG(idx)), \
144	BPF_STMT(BPF_ST, 0), /* lo -> M[0] */ \
145	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, HI_ARG(idx)), \
146	BPF_STMT(BPF_ST, 1) /* hi -> M[1] */
147
148#define JEQ32(value, jt) \
149	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 0, 1), \
150	jt
151
152#define JNE32(value, jt) \
153	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (value), 1, 0), \
154	jt
155
156#define JA32(value, jt) \
157	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (value), 0, 1), \
158	jt
159
160#define JGE32(value, jt) \
161	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 0, 1), \
162	jt
163
164#define JGT32(value, jt) \
165	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 0, 1), \
166	jt
167
168#define JLE32(value, jt) \
169	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (value), 1, 0), \
170	jt
171
172#define JLT32(value, jt) \
173	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (value), 1, 0), \
174	jt
175
176/*
177 * All the JXX64 checks assume lo is saved in M[0] and hi is saved in both
178 * A and M[1]. This invariant is kept by restoring A if necessary.
179 */
180#define JEQ64(lo, hi, jt) \
181	/* if (hi != arg.hi) goto NOMATCH; */ \
182	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
183	BPF_STMT(BPF_LD+BPF_MEM, 0), /* swap in lo */ \
184	/* if (lo != arg.lo) goto NOMATCH; */ \
185	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 0, 2), \
186	BPF_STMT(BPF_LD+BPF_MEM, 1), \
187	jt, \
188	BPF_STMT(BPF_LD+BPF_MEM, 1)
189
190#define JNE64(lo, hi, jt) \
191	/* if (hi != arg.hi) goto MATCH; */ \
192	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 3), \
193	BPF_STMT(BPF_LD+BPF_MEM, 0), \
194	/* if (lo != arg.lo) goto MATCH; */ \
195	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (lo), 2, 0), \
196	BPF_STMT(BPF_LD+BPF_MEM, 1), \
197	jt, \
198	BPF_STMT(BPF_LD+BPF_MEM, 1)
199
200#define JA64(lo, hi, jt) \
201	/* if (hi & arg.hi) goto MATCH; */ \
202	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (hi), 3, 0), \
203	BPF_STMT(BPF_LD+BPF_MEM, 0), \
204	/* if (lo & arg.lo) goto MATCH; */ \
205	BPF_JUMP(BPF_JMP+BPF_JSET+BPF_K, (lo), 0, 2), \
206	BPF_STMT(BPF_LD+BPF_MEM, 1), \
207	jt, \
208	BPF_STMT(BPF_LD+BPF_MEM, 1)
209
210#define JGE64(lo, hi, jt) \
211	/* if (hi > arg.hi) goto MATCH; */ \
212	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
213	/* if (hi != arg.hi) goto NOMATCH; */ \
214	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
215	BPF_STMT(BPF_LD+BPF_MEM, 0), \
216	/* if (lo >= arg.lo) goto MATCH; */ \
217	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 0, 2), \
218	BPF_STMT(BPF_LD+BPF_MEM, 1), \
219	jt, \
220	BPF_STMT(BPF_LD+BPF_MEM, 1)
221
222#define JGT64(lo, hi, jt) \
223	/* if (hi > arg.hi) goto MATCH; */ \
224	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (hi), 4, 0), \
225	/* if (hi != arg.hi) goto NOMATCH; */ \
226	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
227	BPF_STMT(BPF_LD+BPF_MEM, 0), \
228	/* if (lo > arg.lo) goto MATCH; */ \
229	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 0, 2), \
230	BPF_STMT(BPF_LD+BPF_MEM, 1), \
231	jt, \
232	BPF_STMT(BPF_LD+BPF_MEM, 1)
233
234#define JLE64(lo, hi, jt) \
235	/* if (hi < arg.hi) goto MATCH; */ \
236	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
237	/* if (hi != arg.hi) goto NOMATCH; */ \
238	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
239	BPF_STMT(BPF_LD+BPF_MEM, 0), \
240	/* if (lo <= arg.lo) goto MATCH; */ \
241	BPF_JUMP(BPF_JMP+BPF_JGT+BPF_K, (lo), 2, 0), \
242	BPF_STMT(BPF_LD+BPF_MEM, 1), \
243	jt, \
244	BPF_STMT(BPF_LD+BPF_MEM, 1)
245
246#define JLT64(lo, hi, jt) \
247	/* if (hi < arg.hi) goto MATCH; */ \
248	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (hi), 0, 4), \
249	/* if (hi != arg.hi) goto NOMATCH; */ \
250	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, (hi), 0, 5), \
251	BPF_STMT(BPF_LD+BPF_MEM, 0), \
252	/* if (lo < arg.lo) goto MATCH; */ \
253	BPF_JUMP(BPF_JMP+BPF_JGE+BPF_K, (lo), 2, 0), \
254	BPF_STMT(BPF_LD+BPF_MEM, 1), \
255	jt, \
256	BPF_STMT(BPF_LD+BPF_MEM, 1)
257
258#define LOAD_SYSCALL_NR \
259	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, \
260		 offsetof(struct seccomp_data, nr))
261
262#endif  /* __BPF_HELPER_H__ */