v4.6
/* eBPF mini library */
#ifndef __LIBBPF_H
#define __LIBBPF_H

struct bpf_insn;

int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
		   int max_entries, int map_flags);
int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags);
int bpf_lookup_elem(int fd, void *key, void *value);
int bpf_delete_elem(int fd, void *key);
int bpf_get_next_key(int fd, void *key, void *next_key);

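These five declarations are thin wrappers around the map commands of the bpf(2) syscall. Below is a minimal, illustrative sketch of how the samples use them; it assumes <linux/bpf.h> for BPF_MAP_TYPE_HASH and BPF_ANY, and that this header is available as libbpf.h together with its .c implementation. The sizes, values and the map_demo() helper are illustrative, not part of the original code.

/* Illustrative sketch: create a hash map keyed by u32 with u64 values,
 * then exercise the element helpers.
 */
#include <stdio.h>
#include <linux/bpf.h>
#include "libbpf.h"

static void map_demo(void)
{
	int map_fd = bpf_create_map(BPF_MAP_TYPE_HASH, sizeof(__u32),
				    sizeof(__u64), 256, 0);
	__u32 key = 1, next_key;
	__u64 value = 42;

	if (map_fd < 0)
		return;

	bpf_update_elem(map_fd, &key, &value, BPF_ANY);	/* insert or overwrite */
	bpf_lookup_elem(map_fd, &key, &value);		/* read it back */

	/* iterate: a key that is not in the map starts from the first entry */
	key = 0xffffffff;
	while (bpf_get_next_key(map_fd, &key, &next_key) == 0) {
		bpf_lookup_elem(map_fd, &next_key, &value);
		printf("key %u -> %llu\n", next_key, (unsigned long long)value);
		key = next_key;
	}

	bpf_delete_elem(map_fd, &key);
}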
int bpf_prog_load(enum bpf_prog_type prog_type,
		  const struct bpf_insn *insns, int insn_len,
		  const char *license, int kern_version);

int bpf_obj_pin(int fd, const char *pathname);
int bpf_obj_get(const char *pathname);

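bpf_obj_pin() and bpf_obj_get() wrap BPF_OBJ_PIN and BPF_OBJ_GET: they bind an fd to a name on the BPF filesystem so a map or program can be shared between processes and outlive its creator. A hedged fragment continuing the sketch above; the conventional mount point /sys/fs/bpf and the pin name are illustrative.

/* Pin the map, then reopen it by path (possibly from another process). */
const char *pin_path = "/sys/fs/bpf/demo_map";	/* illustrative name */

if (bpf_obj_pin(map_fd, pin_path) < 0)
	perror("bpf_obj_pin");

int pinned_fd = bpf_obj_get(pin_path);
if (pinned_fd < 0)
	perror("bpf_obj_get");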
#define LOG_BUF_SIZE 65536
extern char bpf_log_buf[LOG_BUF_SIZE];

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD	1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)

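BPF_LD_MAP_FD() emits the two-instruction BPF_LD_IMM64 form with src_reg = BPF_PSEUDO_MAP_FD, which tells the verifier that the 64-bit immediate is a process-local map fd to be replaced with the in-kernel map pointer at load time. A hedged fragment of a struct bpf_insn array showing the usual pattern around a map lookup helper call; the register and BPF_FUNC_* constants come from <linux/bpf.h>, and map_fd is the descriptor created earlier.

/* Look up key 0 in map_fd; r10 is the frame pointer, the key lives on the stack. */
BPF_ST_MEM(BPF_W, BPF_REG_10, -4, 0),		/* *(u32 *)(fp - 4) = 0	*/
BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),		/* r2 = fp		*/
BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),		/* r2 = &key		*/
BPF_LD_MAP_FD(BPF_REG_1, map_fd),		/* r1 = map (two insns)	*/
BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 1),		/* r0 == NULL? skip load */
BPF_LDX_MEM(BPF_DW, BPF_REG_0, BPF_REG_0, 0),	/* r0 = *(u64 *)value	*/

The fd-to-pointer fixup is why the macro uses a pseudo source register: the fd only has meaning inside the loading process, so the kernel rewrites it when the program is loaded.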
/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

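BPF_LD_ABS() produces the legacy absolute packet load carried over from classic BPF: the result always lands in R0, and the verifier requires the skb context pointer to already be in R6. A short hedged fragment in the style of the socket samples, reading the IP protocol byte; ETH_HLEN comes from <linux/if_ether.h> and struct iphdr from <linux/ip.h>.

BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),	/* LD_ABS/LD_IND implicitly use r6 as the skb */
BPF_LD_ABS(BPF_B, ETH_HLEN + offsetof(struct iphdr, protocol)),	/* r0 = ip->protocol */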
/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

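With the macros above, a complete (if trivial) program can be assembled as an array and handed to bpf_prog_load(); when loading fails, the verifier log is left in bpf_log_buf. A hedged sketch, assuming the includes from the earlier fragments plus <errno.h> and <string.h>; the length argument is passed as the program size in bytes, matching how the samples invoke this wrapper.

/* Smallest program the verifier accepts: set R0 (the return value) and exit. */
struct bpf_insn prog[] = {
	BPF_MOV64_IMM(BPF_REG_0, 0),
	BPF_EXIT_INSN(),
};

int prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog, sizeof(prog),
			    "GPL", 0 /* kern_version, only kprobes need it */);
if (prog_fd < 0)
	printf("bpf_prog_load: %s\n%s\n", strerror(errno), bpf_log_buf);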
/* create RAW socket and bind to interface 'name' */
int open_raw_sock(const char *name);

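open_raw_sock(), implemented in the library's .c file, opens a packet socket bound to the named interface; the samples then attach a loaded socket-filter program to it with the SO_ATTACH_BPF socket option. A hedged fragment; the interface name is illustrative and <sys/socket.h> is assumed.

int sock = open_raw_sock("lo");		/* interface name is illustrative */

if (setsockopt(sock, SOL_SOCKET, SO_ATTACH_BPF, &prog_fd,
	       sizeof(prog_fd)) < 0)
	perror("setsockopt(SO_ATTACH_BPF)");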
struct perf_event_attr;
int perf_event_open(struct perf_event_attr *attr, int pid, int cpu,
		    int group_fd, unsigned long flags);
#endif
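perf_event_open() is declared here because glibc ships no wrapper for that syscall. The tracing samples open a tracepoint (or kprobe) perf event and attach a BPF program fd to it with the PERF_EVENT_IOC_SET_BPF ioctl. A hedged sketch: 'id' stands for the numeric event id normally read from tracefs and is a placeholder, and prog_fd must be a program of the matching type.

#include <linux/perf_event.h>
#include <sys/ioctl.h>

struct perf_event_attr attr = {
	.type		= PERF_TYPE_TRACEPOINT,
	.sample_type	= PERF_SAMPLE_RAW,
	.sample_period	= 1,
	.wakeup_events	= 1,
	.config		= id,	/* event id from tracefs (placeholder) */
};

int efd = perf_event_open(&attr, -1 /* pid */, 0 /* cpu */,
			  -1 /* group_fd */, 0 /* flags */);

ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);		/* start the event */
ioctl(efd, PERF_EVENT_IOC_SET_BPF, prog_fd);	/* run the program on each hit */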
v4.10.11
/* eBPF mini library */
#ifndef __LIBBPF_H
#define __LIBBPF_H

#include <bpf/bpf.h>
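Compared with v4.6, the syscall wrappers (bpf_create_map(), bpf_update_elem(), bpf_prog_load() and friends) are gone from this sample header; the include of <bpf/bpf.h> pulls in the equivalent functionality from tools/lib/bpf instead. A hedged example of the corresponding element access with that library's names; the exact function names and signatures are an assumption about the tools/lib/bpf API of this era.

/* Hedged: element access through the tools/lib/bpf wrappers rather than the
 * removed sample-local bpf_update_elem()/bpf_lookup_elem().
 */
__u32 key = 1;
__u64 value = 42;

bpf_map_update_elem(map_fd, &key, &value, BPF_ANY);
bpf_map_lookup_elem(map_fd, &key, &value);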

struct bpf_insn;

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD	1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)


/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

#endif