samples/bpf/libbpf.h (v4.6)
 
/* eBPF mini library */
#ifndef __LIBBPF_H
#define __LIBBPF_H

struct bpf_insn;

int bpf_create_map(enum bpf_map_type map_type, int key_size, int value_size,
		   int max_entries, int map_flags);
int bpf_update_elem(int fd, void *key, void *value, unsigned long long flags);
int bpf_lookup_elem(int fd, void *key, void *value);
int bpf_delete_elem(int fd, void *key);
int bpf_get_next_key(int fd, void *key, void *next_key);

int bpf_prog_load(enum bpf_prog_type prog_type,
		  const struct bpf_insn *insns, int insn_len,
		  const char *license, int kern_version);

int bpf_obj_pin(int fd, const char *pathname);
int bpf_obj_get(const char *pathname);

#define LOG_BUF_SIZE 65536
extern char bpf_log_buf[LOG_BUF_SIZE];

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD	1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)


/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

/* create RAW socket and bind to interface 'name' */
int open_raw_sock(const char *name);

struct perf_event_attr;
int perf_event_open(struct perf_event_attr *attr, int pid, int cpu,
		    int group_fd, unsigned long flags);
#endif
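
For orientation, here is a minimal sketch (not part of the kernel tree) of how the v4.6 wrappers and instruction macros above might be combined: create a map with bpf_create_map(), seed it with bpf_update_elem(), hand-assemble a trivial "return 0" program from the macros, and load it with bpf_prog_load(), printing bpf_log_buf if the verifier rejects it. The function name example_load() and the map parameters are illustrative assumptions, not code from the samples.

/* Illustrative sketch only, assuming the samples/bpf build environment
 * (linux/bpf.h plus this header and its libbpf.c counterpart).
 */
#include <stdio.h>
#include <linux/bpf.h>
#include "libbpf.h"

static int example_load(void)
{
	/* Hypothetical array map: 4-byte key, 8-byte value, one slot. */
	int map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				    sizeof(long long), 1, 0);
	if (map_fd < 0)
		return -1;

	/* Seed slot 0 (bpf_update_elem() wraps the BPF_MAP_UPDATE_ELEM command). */
	int key = 0;
	long long value = 0;
	if (bpf_update_elem(map_fd, &key, &value, BPF_ANY) < 0)
		return -1;

	/* Trivial socket filter: R0 = 0; exit. */
	struct bpf_insn prog[] = {
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};

	/* insn_len is a byte length here, matching how the v4.6 samples call it. */
	int prog_fd = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, prog,
				    sizeof(prog), "GPL", 0);
	if (prog_fd < 0)
		printf("verifier log:\n%s\n", bpf_log_buf);
	return prog_fd;
}
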
samples/bpf/libbpf.h (v4.17)
/* SPDX-License-Identifier: GPL-2.0 */
/* eBPF mini library */
#ifndef __LIBBPF_H
#define __LIBBPF_H

#include <bpf/bpf.h>

struct bpf_insn;

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define BPF_ALU64_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU64_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV64_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU64 | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_ALU | BPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* BPF_LD_IMM64 macro encodes single 'load 64-bit immediate' insn */
#define BPF_LD_IMM64(DST, IMM)					\
	BPF_LD_IMM64_RAW(DST, 0, IMM)

#define BPF_LD_IMM64_RAW(DST, SRC, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_DW | BPF_IMM,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = (__u32) (IMM) }),			\
	((struct bpf_insn) {					\
		.code  = 0, /* zero is reserved opcode */	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = ((__u64) (IMM)) >> 32 })

#ifndef BPF_PSEUDO_MAP_FD
# define BPF_PSEUDO_MAP_FD	1
#endif

/* pseudo BPF_LD_IMM64 insn used to refer to process-local map_fd */
#define BPF_LD_MAP_FD(DST, MAP_FD)				\
	BPF_LD_IMM64_RAW(DST, BPF_PSEUDO_MAP_FD, MAP_FD)


/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct bpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Atomic memory add, *(uint *)(dst_reg + off16) += src_reg */

#define BPF_STX_XADD(SIZE, DST, SRC, OFF)			\
	((struct bpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_XADD,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = imm32 */

#define BPF_ST_MEM(SIZE, DST, OFF, IMM)				\
	((struct bpf_insn) {					\
		.code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */

#define BPF_JMP_REG(OP, DST, SRC, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct bpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct bpf_insn) {					\
		.code  = BPF_JMP | BPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })

#endif
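
In the v4.17 version the raw syscall wrappers (bpf_create_map(), bpf_update_elem(), bpf_prog_load(), ...) are no longer declared here; they come from tools/lib/bpf via the <bpf/bpf.h> include, and only the instruction macros remain, now including BPF_MOV32_IMM and the atomic BPF_STX_XADD. Below is a hedged sketch of that newer flow (function and variable names are illustrative, not from the samples): the program embeds a map fd with BPF_LD_MAP_FD, looks up slot 0, and bumps the 8-byte value atomically with BPF_STX_XADD.

/* Illustrative sketch only, assuming tools/lib/bpf is available through
 * <bpf/bpf.h> (bpf_create_map(), bpf_load_program()) as in the v4.17 samples.
 */
#include <linux/bpf.h>
#include "libbpf.h"

static int example_counter_prog(void)
{
	/* Hypothetical array map: 4-byte key, 8-byte counter value, one slot. */
	int map_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(int),
				    sizeof(long long), 1, 0);
	if (map_fd < 0)
		return -1;

	struct bpf_insn prog[] = {
		BPF_LD_MAP_FD(BPF_REG_1, map_fd),		/* r1 = map (pseudo ld_imm64 pair) */
		BPF_MOV64_IMM(BPF_REG_2, 0),
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_2, -4),	/* key = 0 on the stack */
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),		/* r2 = &key */
		BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_map_lookup_elem),
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_0, 0, 2),		/* skip the add if lookup failed */
		BPF_MOV64_IMM(BPF_REG_1, 1),
		BPF_STX_XADD(BPF_DW, BPF_REG_0, BPF_REG_1, 0),	/* *(u64 *)value += 1, atomically */
		BPF_MOV64_IMM(BPF_REG_0, 0),
		BPF_EXIT_INSN(),
	};

	/* bpf_load_program() takes an instruction count, not a byte length. */
	char log_buf[64 * 1024];
	return bpf_load_program(BPF_PROG_TYPE_SOCKET_FILTER, prog,
				sizeof(prog) / sizeof(prog[0]), "GPL", 0,
				log_buf, sizeof(log_buf));
}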