v3.5.6
/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011 Eric Dumazet (eric.dumazet@gmail.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/moduleloader.h>
#include <asm/cacheflush.h>
#include <linux/netdevice.h>
#include <linux/filter.h>

/*
 * Conventions :
 *  EAX : BPF A accumulator
 *  EBX : BPF X accumulator
 *  RDI : pointer to skb   (first argument given to JIT function)
 *  RBP : frame pointer (even if CONFIG_FRAME_POINTER=n)
 *  ECX,EDX,ESI : scratch registers
 *  r9d : skb->len - skb->data_len (headlen)
 *  r8  : skb->data
 * -8(RBP) : saved RBX value
 * -16(RBP)..-80(RBP) : BPF_MEMWORDS values
 */
int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[], sk_load_byte_msh[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[], sk_load_byte_msh_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[], sk_load_byte_msh_negative_offset[];

static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len)	do { prog = emit_code(prog, bytes, len); } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off)	do { EMIT1(b1); EMIT(off, 4); } while (0)
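/* Example: the EMIT* macros pack up to four opcode bytes little-endian into
 * one u32, so the first argument is the first byte written to the image.
 * EMIT2(0x31, 0xc0) therefore emits the bytes 31 c0, i.e. "xor %eax,%eax"
 * (see CLEAR_A below), and EMIT1_off32(0xe9, off) emits the 5-byte
 * "jmp rel32" form used for far jumps.
 */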

#define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
#define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */

static inline bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static inline bool is_near(int offset)
{
	return offset <= 127 && offset >= -128;
}

#define EMIT_JMP(offset)						\
do {									\
	if (offset) {							\
		if (is_near(offset))					\
			EMIT2(0xeb, offset); /* jmp .+off8 */		\
		else							\
			EMIT1_off32(0xe9, offset); /* jmp .+off32 */	\
	}								\
} while (0)
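/* A near jump encodes in 2 bytes (eb XX), the far form in 5 (e9 + rel32);
 * the branch-offset fixups in bpf_jit_compile() below rely on exactly this
 * 2-vs-5 byte difference when stepping t_offset past an emitted jump.
 */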

/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77

#define EMIT_COND_JMP(op, offset)				\
do {								\
	if (is_near(offset))					\
		EMIT2(op, offset); /* jxx .+off8 */		\
	else {							\
		EMIT2(0x0f, op + 0x10);				\
		EMIT(offset, 4); /* jxx .+off32 */		\
	}							\
} while (0)

#define COND_SEL(CODE, TOP, FOP)	\
	case CODE:			\
		t_op = TOP;		\
		f_op = FOP;		\
		goto cond_branch


#define SEEN_DATAREF 1 /* might call external helpers */
#define SEEN_XREG    2 /* ebx is used */
#define SEEN_MEM     4 /* use mem[] for temporary storage */

static inline void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

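/* Note: K is a signed offset. K >= 0 takes the fast *_positive_offset
 * helper; negative K down to SKF_LL_OFF takes *_negative_offset; anything
 * more negative falls back to the generic helper, which classifies the
 * offset at run time (see arch/x86/net/bpf_jit.S).
 */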
void bpf_jit_compile(struct sk_filter *fp)
{
	u8 temp[64];
	u8 *prog;
	unsigned int proglen, oldproglen = 0;
	int ilen, i;
	int t_offset, f_offset;
	u8 t_op, f_op, seen = 0, pass;
	u8 *image = NULL;
	u8 *func;
	int pc_ret0 = -1; /* bpf index of first RET #0 instruction (if any) */
	unsigned int cleanup_addr; /* epilogue code offset */
	unsigned int *addrs;
	const struct sock_filter *filter = fp->insns;
	int flen = fp->len;

	if (!bpf_jit_enable)
		return;

	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/* Before the first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to fewer than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < flen; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	cleanup_addr = proglen; /* epilogue address */
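	/* Each pass below re-emits the whole program using the addrs[]
	 * computed by the previous pass. Branch offsets can only shrink,
	 * so the image converges, and emission stops once two passes
	 * produce the same length.
	 */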

	for (pass = 0; pass < 10; pass++) {
		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;
		/* no prologue/epilogue for trivial filters (RET something) */
		proglen = 0;
		prog = temp;

		if (seen_or_pass0) {
			EMIT4(0x55, 0x48, 0x89, 0xe5); /* push %rbp; mov %rsp,%rbp */
			EMIT4(0x48, 0x83, 0xec, 96);	/* subq  $96,%rsp	*/
			/* note : must save %rbx in case bpf_error is hit */
			if (seen_or_pass0 & (SEEN_XREG | SEEN_DATAREF))
				EMIT4(0x48, 0x89, 0x5d, 0xf8); /* mov %rbx, -8(%rbp) */
			if (seen_or_pass0 & SEEN_XREG)
				CLEAR_X(); /* make sure we don't leak kernel memory */

			/*
			 * If this filter needs to access skb data,
			 * load r9 and r8 with:
			 *  r9 = skb->len - skb->data_len
			 *  r8 = skb->data
			 */
			if (seen_or_pass0 & SEEN_DATAREF) {
				if (offsetof(struct sk_buff, len) <= 127)
					/* mov    off8(%rdi),%r9d */
					EMIT4(0x44, 0x8b, 0x4f, offsetof(struct sk_buff, len));
				else {
					/* mov    off32(%rdi),%r9d */
					EMIT3(0x44, 0x8b, 0x8f);
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				if (is_imm8(offsetof(struct sk_buff, data_len)))
					/* sub    off8(%rdi),%r9d */
					EMIT4(0x44, 0x2b, 0x4f, offsetof(struct sk_buff, data_len));
				else {
					EMIT3(0x44, 0x2b, 0x8f);
					EMIT(offsetof(struct sk_buff, data_len), 4);
				}

				if (is_imm8(offsetof(struct sk_buff, data)))
					/* mov off8(%rdi),%r8 */
					EMIT4(0x4c, 0x8b, 0x47, offsetof(struct sk_buff, data));
				else {
					/* mov off32(%rdi),%r8 */
					EMIT3(0x4c, 0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, data), 4);
				}
			}
		}

		switch (filter[0].code) {
		case BPF_S_RET_K:
		case BPF_S_LD_W_LEN:
		case BPF_S_ANC_PROTOCOL:
		case BPF_S_ANC_IFINDEX:
		case BPF_S_ANC_MARK:
		case BPF_S_ANC_RXHASH:
		case BPF_S_ANC_CPU:
		case BPF_S_ANC_QUEUE:
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
			/* first instruction sets A register (or is RET 'constant') */
			break;
		default:
			/* make sure we don't leak kernel information to user */
			CLEAR_A(); /* A = 0 */
		}

		for (i = 0; i < flen; i++) {
			unsigned int K = filter[i].k;

			switch (filter[i].code) {
			case BPF_S_ALU_ADD_X: /* A += X; */
				seen |= SEEN_XREG;
				EMIT2(0x01, 0xd8);		/* add %ebx,%eax */
				break;
			case BPF_S_ALU_ADD_K: /* A += K; */
				if (!K)
					break;
				if (is_imm8(K))
					EMIT3(0x83, 0xc0, K);	/* add imm8,%eax */
				else
					EMIT1_off32(0x05, K);	/* add imm32,%eax */
				break;
			case BPF_S_ALU_SUB_X: /* A -= X; */
				seen |= SEEN_XREG;
				EMIT2(0x29, 0xd8);		/* sub    %ebx,%eax */
				break;
			case BPF_S_ALU_SUB_K: /* A -= K */
				if (!K)
					break;
				if (is_imm8(K))
					EMIT3(0x83, 0xe8, K); /* sub imm8,%eax */
				else
					EMIT1_off32(0x2d, K); /* sub imm32,%eax */
				break;
			case BPF_S_ALU_MUL_X: /* A *= X; */
				seen |= SEEN_XREG;
				EMIT3(0x0f, 0xaf, 0xc3);	/* imul %ebx,%eax */
				break;
			case BPF_S_ALU_MUL_K: /* A *= K */
				if (is_imm8(K))
					EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
				else {
					EMIT2(0x69, 0xc0);		/* imul imm32,%eax */
					EMIT(K, 4);
				}
				break;
			case BPF_S_ALU_DIV_X: /* A /= X; */
				seen |= SEEN_XREG;
				EMIT2(0x85, 0xdb);	/* test %ebx,%ebx */
				if (pc_ret0 > 0) {
					/* addrs[pc_ret0 - 1] is start address of target
					 * (addrs[i] - 4) is the address following this jmp
					 * ("xor %edx,%edx; div %ebx" being 4 bytes long)
					 */
					EMIT_COND_JMP(X86_JE, addrs[pc_ret0 - 1] -
								(addrs[i] - 4));
				} else {
					EMIT_COND_JMP(X86_JNE, 2 + 5);
					CLEAR_A();
					EMIT1_off32(0xe9, cleanup_addr - (addrs[i] - 4)); /* jmp .+off32 */
				}
				EMIT4(0x31, 0xd2, 0xf7, 0xf3); /* xor %edx,%edx; div %ebx */
				break;
			case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
				/* i.e. A = (u32)(((u64)A * K) >> 32), where K
				 * was pre-converted by the filter checker into
				 * a multiplicative inverse, so one imul + shr
				 * replaces a slow div here.
				 */
				EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
				EMIT(K, 4);
				EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
				break;
			case BPF_S_ALU_AND_X:
				seen |= SEEN_XREG;
				EMIT2(0x21, 0xd8);		/* and %ebx,%eax */
				break;
			case BPF_S_ALU_AND_K:
				if (K >= 0xFFFFFF00) {
					EMIT2(0x24, K & 0xFF); /* and imm8,%al */
				} else if (K >= 0xFFFF0000) {
					EMIT2(0x66, 0x25);	/* and imm16,%ax */
					EMIT(K, 2);
				} else {
					EMIT1_off32(0x25, K);	/* and imm32,%eax */
				}
				break;
			case BPF_S_ALU_OR_X:
				seen |= SEEN_XREG;
				EMIT2(0x09, 0xd8);		/* or %ebx,%eax */
				break;
			case BPF_S_ALU_OR_K:
				if (is_imm8(K))
					EMIT3(0x83, 0xc8, K); /* or imm8,%eax */
				else
					EMIT1_off32(0x0d, K);	/* or imm32,%eax */
				break;
			case BPF_S_ALU_LSH_X: /* A <<= X; */
				seen |= SEEN_XREG;
				EMIT4(0x89, 0xd9, 0xd3, 0xe0);	/* mov %ebx,%ecx; shl %cl,%eax */
				break;
			case BPF_S_ALU_LSH_K:
				if (K == 0)
					break;
				else if (K == 1)
					EMIT2(0xd1, 0xe0); /* shl %eax */
				else
					EMIT3(0xc1, 0xe0, K); /* shl imm8,%eax */
				break;
			case BPF_S_ALU_RSH_X: /* A >>= X; */
				seen |= SEEN_XREG;
				EMIT4(0x89, 0xd9, 0xd3, 0xe8);	/* mov %ebx,%ecx; shr %cl,%eax */
				break;
			case BPF_S_ALU_RSH_K: /* A >>= K; */
				if (K == 0)
					break;
				else if (K == 1)
					EMIT2(0xd1, 0xe8); /* shr %eax */
				else
					EMIT3(0xc1, 0xe8, K); /* shr imm8,%eax */
				break;
			case BPF_S_ALU_NEG:
				EMIT2(0xf7, 0xd8);		/* neg %eax */
				break;
			case BPF_S_RET_K:
				if (!K) {
					if (pc_ret0 == -1)
						pc_ret0 = i;
					CLEAR_A();
				} else {
					EMIT1_off32(0xb8, K);	/* mov $imm32,%eax */
				}
				/* fall through */
			case BPF_S_RET_A:
				if (seen_or_pass0) {
					if (i != flen - 1) {
						EMIT_JMP(cleanup_addr - addrs[i]);
						break;
					}
					if (seen_or_pass0 & SEEN_XREG)
						EMIT4(0x48, 0x8b, 0x5d, 0xf8);  /* mov  -8(%rbp),%rbx */
					EMIT1(0xc9);		/* leaveq */
				}
				EMIT1(0xc3);		/* ret */
				break;
			case BPF_S_MISC_TAX: /* X = A */
				seen |= SEEN_XREG;
				EMIT2(0x89, 0xc3);	/* mov    %eax,%ebx */
				break;
			case BPF_S_MISC_TXA: /* A = X */
				seen |= SEEN_XREG;
				EMIT2(0x89, 0xd8);	/* mov    %ebx,%eax */
				break;
			case BPF_S_LD_IMM: /* A = K */
				if (!K)
					CLEAR_A();
				else
					EMIT1_off32(0xb8, K); /* mov $imm32,%eax */
				break;
			case BPF_S_LDX_IMM: /* X = K */
				seen |= SEEN_XREG;
				if (!K)
					CLEAR_X();
				else
					EMIT1_off32(0xbb, K); /* mov $imm32,%ebx */
				break;
			case BPF_S_LD_MEM: /* A = mem[K] : mov off8(%rbp),%eax */
				seen |= SEEN_MEM;
				EMIT3(0x8b, 0x45, 0xf0 - K*4);
				break;
			case BPF_S_LDX_MEM: /* X = mem[K] : mov off8(%rbp),%ebx */
				seen |= SEEN_XREG | SEEN_MEM;
				EMIT3(0x8b, 0x5d, 0xf0 - K*4);
				break;
			case BPF_S_ST: /* mem[K] = A : mov %eax,off8(%rbp) */
				seen |= SEEN_MEM;
				EMIT3(0x89, 0x45, 0xf0 - K*4);
				break;
			case BPF_S_STX: /* mem[K] = X : mov %ebx,off8(%rbp) */
				seen |= SEEN_XREG | SEEN_MEM;
				EMIT3(0x89, 0x5d, 0xf0 - K*4);
				break;
			case BPF_S_LD_W_LEN: /*	A = skb->len; */
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
				if (is_imm8(offsetof(struct sk_buff, len)))
					/* mov    off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, len));
				else {
					EMIT2(0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				break;
			case BPF_S_LDX_W_LEN: /* X = skb->len; */
				seen |= SEEN_XREG;
				if (is_imm8(offsetof(struct sk_buff, len)))
					/* mov off8(%rdi),%ebx */
					EMIT3(0x8b, 0x5f, offsetof(struct sk_buff, len));
				else {
					EMIT2(0x8b, 0x9f);
					EMIT(offsetof(struct sk_buff, len), 4);
				}
				break;
			case BPF_S_ANC_PROTOCOL: /* A = ntohs(skb->protocol); */
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, protocol) != 2);
				if (is_imm8(offsetof(struct sk_buff, protocol))) {
					/* movzwl off8(%rdi),%eax */
					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, protocol));
				} else {
					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, protocol), 4);
				}
				EMIT2(0x86, 0xc4); /* ntohs() : xchg   %al,%ah */
				break;
			case BPF_S_ANC_IFINDEX:
				if (is_imm8(offsetof(struct sk_buff, dev))) {
					/* movq off8(%rdi),%rax */
					EMIT4(0x48, 0x8b, 0x47, offsetof(struct sk_buff, dev));
				} else {
					EMIT3(0x48, 0x8b, 0x87); /* movq off32(%rdi),%rax */
					EMIT(offsetof(struct sk_buff, dev), 4);
				}
				EMIT3(0x48, 0x85, 0xc0);	/* test %rax,%rax */
				EMIT_COND_JMP(X86_JE, cleanup_addr - (addrs[i] - 6));
				BUILD_BUG_ON(FIELD_SIZEOF(struct net_device, ifindex) != 4);
				EMIT2(0x8b, 0x80);	/* mov off32(%rax),%eax */
				EMIT(offsetof(struct net_device, ifindex), 4);
				break;
			case BPF_S_ANC_MARK:
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
				if (is_imm8(offsetof(struct sk_buff, mark))) {
					/* mov off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, mark));
				} else {
					EMIT2(0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, mark), 4);
				}
				break;
			case BPF_S_ANC_RXHASH:
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
				if (is_imm8(offsetof(struct sk_buff, rxhash))) {
					/* mov off8(%rdi),%eax */
					EMIT3(0x8b, 0x47, offsetof(struct sk_buff, rxhash));
				} else {
					EMIT2(0x8b, 0x87);
					EMIT(offsetof(struct sk_buff, rxhash), 4);
				}
				break;
			case BPF_S_ANC_QUEUE:
				BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, queue_mapping) != 2);
				if (is_imm8(offsetof(struct sk_buff, queue_mapping))) {
					/* movzwl off8(%rdi),%eax */
					EMIT4(0x0f, 0xb7, 0x47, offsetof(struct sk_buff, queue_mapping));
				} else {
					EMIT3(0x0f, 0xb7, 0x87); /* movzwl off32(%rdi),%eax */
					EMIT(offsetof(struct sk_buff, queue_mapping), 4);
				}
				break;
			case BPF_S_ANC_CPU:
#ifdef CONFIG_SMP
				EMIT4(0x65, 0x8b, 0x04, 0x25); /* mov %gs:off32,%eax */
				EMIT((u32)(unsigned long)&cpu_number, 4); /* A = smp_processor_id(); */
#else
				CLEAR_A();
#endif
				break;
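			/* smp_processor_id() is open-coded above: cpu_number
			 * is a per-cpu variable and %gs points at the per-cpu
			 * area on x86-64, so a %gs-relative load of its
			 * offset fetches the current CPU's copy directly.
			 */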
			case BPF_S_LD_W_ABS:
				func = CHOOSE_LOAD_FUNC(K, sk_load_word);
common_load:			seen |= SEEN_DATAREF;
				t_offset = func - (image + addrs[i]);
				EMIT1_off32(0xbe, K); /* mov imm32,%esi */
				EMIT1_off32(0xe8, t_offset); /* call */
				break;
			case BPF_S_LD_H_ABS:
				func = CHOOSE_LOAD_FUNC(K, sk_load_half);
				goto common_load;
			case BPF_S_LD_B_ABS:
				func = CHOOSE_LOAD_FUNC(K, sk_load_byte);
				goto common_load;
			case BPF_S_LDX_B_MSH:
				func = CHOOSE_LOAD_FUNC(K, sk_load_byte_msh);
				seen |= SEEN_DATAREF | SEEN_XREG;
				t_offset = func - (image + addrs[i]);
				EMIT1_off32(0xbe, K);	/* mov imm32,%esi */
				EMIT1_off32(0xe8, t_offset); /* call sk_load_byte_msh */
				break;
			case BPF_S_LD_W_IND:
				func = sk_load_word;
common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
				t_offset = func - (image + addrs[i]);
				if (K) {
					if (is_imm8(K)) {
						EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
					} else {
						EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
						EMIT(K, 4);
					}
				} else {
					EMIT2(0x89, 0xde); /* mov %ebx,%esi */
				}
				EMIT1_off32(0xe8, t_offset);	/* call sk_load_xxx_ind */
				break;
			case BPF_S_LD_H_IND:
				func = sk_load_half;
				goto common_load_ind;
			case BPF_S_LD_B_IND:
				func = sk_load_byte;
				goto common_load_ind;
			case BPF_S_JMP_JA:
				t_offset = addrs[i + K] - addrs[i];
				EMIT_JMP(t_offset);
				break;
			COND_SEL(BPF_S_JMP_JGT_K, X86_JA, X86_JBE);
			COND_SEL(BPF_S_JMP_JGE_K, X86_JAE, X86_JB);
			COND_SEL(BPF_S_JMP_JEQ_K, X86_JE, X86_JNE);
			COND_SEL(BPF_S_JMP_JSET_K, X86_JNE, X86_JE);
			COND_SEL(BPF_S_JMP_JGT_X, X86_JA, X86_JBE);
			COND_SEL(BPF_S_JMP_JGE_X, X86_JAE, X86_JB);
			COND_SEL(BPF_S_JMP_JEQ_X, X86_JE, X86_JNE);
			COND_SEL(BPF_S_JMP_JSET_X, X86_JNE, X86_JE);

cond_branch:			f_offset = addrs[i + filter[i].jf] - addrs[i];
				t_offset = addrs[i + filter[i].jt] - addrs[i];

				/* same targets, can avoid doing the test :) */
				if (filter[i].jt == filter[i].jf) {
					EMIT_JMP(t_offset);
					break;
				}

				switch (filter[i].code) {
				case BPF_S_JMP_JGT_X:
				case BPF_S_JMP_JGE_X:
				case BPF_S_JMP_JEQ_X:
					seen |= SEEN_XREG;
					EMIT2(0x39, 0xd8); /* cmp %ebx,%eax */
					break;
				case BPF_S_JMP_JSET_X:
					seen |= SEEN_XREG;
					EMIT2(0x85, 0xd8); /* test %ebx,%eax */
					break;
				case BPF_S_JMP_JEQ_K:
					if (K == 0) {
						EMIT2(0x85, 0xc0); /* test   %eax,%eax */
						break;
					}
					/* fall through */
				case BPF_S_JMP_JGT_K:
				case BPF_S_JMP_JGE_K:
					if (K <= 127)
						EMIT3(0x83, 0xf8, K); /* cmp imm8,%eax */
					else
						EMIT1_off32(0x3d, K); /* cmp imm32,%eax */
					break;
				case BPF_S_JMP_JSET_K:
					if (K <= 0xFF)
						EMIT2(0xa8, K); /* test imm8,%al */
					else if (!(K & 0xFFFF00FF))
						EMIT3(0xf6, 0xc4, K >> 8); /* test imm8,%ah */
					else if (K <= 0xFFFF) {
						EMIT2(0x66, 0xa9); /* test imm16,%ax */
						EMIT(K, 2);
					} else {
						EMIT1_off32(0xa9, K); /* test imm32,%eax */
					}
					break;
				}
				if (filter[i].jt != 0) {
					if (filter[i].jf && f_offset)
						t_offset += is_near(f_offset) ? 2 : 5;
					EMIT_COND_JMP(t_op, t_offset);
					if (filter[i].jf)
						EMIT_JMP(f_offset);
					break;
				}
				EMIT_COND_JMP(f_op, f_offset);
				break;
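			/* In the jt != 0 path above, t_offset must also skip
			 * the unconditional jump to the false target that is
			 * emitted right after the conditional jump: 2 bytes
			 * if that jump is near (eb XX), else 5 (e9 + rel32).
			 */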
			default:
				/* hmm, too complex filter, give up with jit compiler */
				goto out;
			}
			ilen = prog - temp;
			if (image) {
				if (unlikely(proglen + ilen > oldproglen)) {
					pr_err("bpf_jit_compile fatal error\n");
					kfree(addrs);
					module_free(NULL, image);
					return;
				}
				memcpy(image + proglen, temp, ilen);
			}
			proglen += ilen;
			addrs[i] = proglen;
			prog = temp;
		}
		/* the last bpf instruction is always a RET:
		 * use it to locate the cleanup (epilogue) instructions
		 */
		cleanup_addr = proglen - 1; /* ret */
		if (seen_or_pass0)
			cleanup_addr -= 1; /* leaveq */
		if (seen_or_pass0 & SEEN_XREG)
			cleanup_addr -= 4; /* mov  -8(%rbp),%rbx */

		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n", proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			image = module_alloc(max_t(unsigned int,
						   proglen,
						   sizeof(struct work_struct)));
			if (!image)
				goto out;
		}
		oldproglen = proglen;
	}
	if (bpf_jit_enable > 1)
		pr_err("flen=%d proglen=%u pass=%d image=%p\n",
		       flen, proglen, pass, image);

	if (image) {
		if (bpf_jit_enable > 1)
			print_hex_dump(KERN_ERR, "JIT code: ", DUMP_PREFIX_ADDRESS,
				       16, 1, image, proglen, false);

		bpf_flush_icache(image, image + proglen);

		fp->bpf_func = (void *)image;
	}
out:
	kfree(addrs);
	return;
}

static void jit_free_defer(struct work_struct *arg)
{
	module_free(NULL, arg);
}

/* run from softirq, we must use a work_struct to call
 * module_free() from process context; the image itself is reused
 * as the work_struct, which is why module_alloc() above allocated
 * at least sizeof(struct work_struct)
 */
void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->bpf_func != sk_run_filter) {
		struct work_struct *work = (struct work_struct *)fp->bpf_func;

		INIT_WORK(work, jit_free_defer);
		schedule_work(work);
	}
}
v4.6
/* bpf_jit_comp.c : BPF JIT compiler
 *
 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
 * Internal BPF Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 */
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <asm/cacheflush.h>
#include <linux/bpf.h>

int bpf_jit_enable __read_mostly;

/*
 * assembly code in arch/x86/net/bpf_jit.S
 */
extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
extern u8 sk_load_word_positive_offset[], sk_load_half_positive_offset[];
extern u8 sk_load_byte_positive_offset[];
extern u8 sk_load_word_negative_offset[], sk_load_half_negative_offset[];
extern u8 sk_load_byte_negative_offset[];

static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
{
	if (len == 1)
		*ptr = bytes;
	else if (len == 2)
		*(u16 *)ptr = bytes;
	else {
		*(u32 *)ptr = bytes;
		barrier();
	}
	return ptr + len;
}

#define EMIT(bytes, len) \
	do { prog = emit_code(prog, bytes, len); cnt += len; } while (0)

#define EMIT1(b1)		EMIT(b1, 1)
#define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
#define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
#define EMIT4(b1, b2, b3, b4)   EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
#define EMIT1_off32(b1, off) \
	do { EMIT1(b1); EMIT(off, 4); } while (0)
#define EMIT2_off32(b1, b2, off) \
	do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
#define EMIT3_off32(b1, b2, b3, off) \
	do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
#define EMIT4_off32(b1, b2, b3, b4, off) \
	do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)

static bool is_imm8(int value)
{
	return value <= 127 && value >= -128;
}

static bool is_simm32(s64 value)
{
	return value == (s64) (s32) value;
}

/* mov dst, src */
#define EMIT_mov(DST, SRC) \
	do { if (DST != SRC) \
		EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
	} while (0)

static int bpf_size_to_x86_bytes(int bpf_size)
{
	if (bpf_size == BPF_W)
		return 4;
	else if (bpf_size == BPF_H)
		return 2;
	else if (bpf_size == BPF_B)
		return 1;
	else if (bpf_size == BPF_DW)
		return 4; /* imm32 */
	else
		return 0;
}

/* list of x86 cond jumps opcodes (. + s8)
 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
 */
#define X86_JB  0x72
#define X86_JAE 0x73
#define X86_JE  0x74
#define X86_JNE 0x75
#define X86_JBE 0x76
#define X86_JA  0x77
#define X86_JGE 0x7D
#define X86_JG  0x7F

static void bpf_flush_icache(void *start, void *end)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	smp_wmb();
	flush_icache_range((unsigned long)start, (unsigned long)end);
	set_fs(old_fs);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)

/* pick a register outside of BPF range for JIT internal work */
#define AUX_REG (MAX_BPF_REG + 1)

/* the following table maps BPF registers to x64 registers.
 * x64 register r12 is unused, since if used as base address register
 * in load/store instructions, it always needs an extra byte of encoding
 */
static const int reg2hex[] = {
	[BPF_REG_0] = 0,  /* rax */
	[BPF_REG_1] = 7,  /* rdi */
	[BPF_REG_2] = 6,  /* rsi */
	[BPF_REG_3] = 2,  /* rdx */
	[BPF_REG_4] = 1,  /* rcx */
	[BPF_REG_5] = 0,  /* r8 */
	[BPF_REG_6] = 3,  /* rbx callee saved */
	[BPF_REG_7] = 5,  /* r13 callee saved */
	[BPF_REG_8] = 6,  /* r14 callee saved */
	[BPF_REG_9] = 7,  /* r15 callee saved */
	[BPF_REG_FP] = 5, /* rbp readonly */
	[AUX_REG] = 3,    /* r11 temp register */
};

/* is_ereg() == true if BPF register 'reg' maps to x64 r8..r15
 * which need extra byte of encoding.
 * rax,rcx,...,rbp have simpler encoding
 */
static bool is_ereg(u32 reg)
{
	return (1 << reg) & (BIT(BPF_REG_5) |
			     BIT(AUX_REG) |
			     BIT(BPF_REG_7) |
			     BIT(BPF_REG_8) |
			     BIT(BPF_REG_9));
}

/* add modifiers if 'reg' maps to x64 registers r8..r15 */
static u8 add_1mod(u8 byte, u32 reg)
{
	if (is_ereg(reg))
		byte |= 1;
	return byte;
}

static u8 add_2mod(u8 byte, u32 r1, u32 r2)
{
	if (is_ereg(r1))
		byte |= 1;
	if (is_ereg(r2))
		byte |= 4;
	return byte;
}

/* encode 'dst_reg' register into x64 opcode 'byte' */
static u8 add_1reg(u8 byte, u32 dst_reg)
{
	return byte + reg2hex[dst_reg];
}

/* encode 'dst_reg' and 'src_reg' registers into x64 opcode 'byte' */
static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
{
	return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
}
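/* Worked example: EMIT_mov(BPF_REG_1, BPF_REG_2) expands to
 * EMIT3(add_2mod(0x48, R1, R2), 0x89, add_2reg(0xC0, R1, R2)).
 * Neither register is an ereg, so the bytes are 48 89 f7 --
 * REX.W, the mov opcode, and ModRM 0xC0 + reg2hex[BPF_REG_1] +
 * (reg2hex[BPF_REG_2] << 3) -- i.e. "mov rdi, rsi".
 */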

static void jit_fill_hole(void *area, unsigned int size)
{
	/* fill whole space with int3 instructions */
	memset(area, 0xcc, size);
}
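/* (0xcc is the one-byte int3 breakpoint: a stray jump into unused
 * space then traps immediately instead of executing leftover bytes.)
 */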

struct jit_context {
	int cleanup_addr; /* epilogue code offset */
	bool seen_ld_abs;
};

/* maximum number of bytes emitted while JITing one eBPF insn */
#define BPF_MAX_INSN_SIZE	128
#define BPF_INSN_SAFETY		64

#define STACKSIZE \
	(MAX_BPF_STACK + \
	 32 /* space for rbx, r13, r14, r15 */ + \
	 8 /* space for skb_copy_bits() buffer */)
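/* With MAX_BPF_STACK (512 in this tree) this comes to 552 bytes: the
 * program's own stack sits directly below rbp, and the qwords spilled in
 * emit_prologue() (rbx, r13, r14, r15, then the tail-call counter) sit
 * below it, at the -STACKSIZE + 0/8/16/24/32 displacements used there.
 */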

#define PROLOGUE_SIZE 48

/* emit x64 prologue code for BPF program and check its size.
 * bpf_tail_call helper will skip it while jumping into another program
 */
static void emit_prologue(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	EMIT1(0x55); /* push rbp */
	EMIT3(0x48, 0x89, 0xE5); /* mov rbp,rsp */

	/* sub rsp, STACKSIZE */
	EMIT3_off32(0x48, 0x81, 0xEC, STACKSIZE);

	/* all classic BPF filters use R6(rbx), so save it */

	/* mov qword ptr [rbp-X],rbx */
	EMIT3_off32(0x48, 0x89, 0x9D, -STACKSIZE);

	/* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
	 * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
	 * R8(r14). R9(r15) spill could be made conditional, but there is only
	 * one 'bpf_error' return path out of helper functions inside bpf_jit.S
	 * The overhead of extra spill is negligible for any filter other
	 * than synthetic ones. Therefore not worth adding complexity.
	 */

	/* mov qword ptr [rbp-X],r13 */
	EMIT3_off32(0x4C, 0x89, 0xAD, -STACKSIZE + 8);
	/* mov qword ptr [rbp-X],r14 */
	EMIT3_off32(0x4C, 0x89, 0xB5, -STACKSIZE + 16);
	/* mov qword ptr [rbp-X],r15 */
	EMIT3_off32(0x4C, 0x89, 0xBD, -STACKSIZE + 24);

	/* Clear the tail call counter (tail_call_cnt): for eBPF tail calls
	 * we need to reset the counter to 0. It's done in two instructions,
	 * resetting rax register to 0 (xor on eax gets 0 extended), and
	 * moving it to the counter location.
	 */

	/* xor eax, eax */
	EMIT2(0x31, 0xc0);
	/* mov qword ptr [rbp-X], rax */
	EMIT3_off32(0x48, 0x89, 0x85, -STACKSIZE + 32);

	BUILD_BUG_ON(cnt != PROLOGUE_SIZE);
	*pprog = prog;
}

/* generate the following code:
 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
 *   if (index >= array->map.max_entries)
 *     goto out;
 *   if (++tail_call_cnt > MAX_TAIL_CALL_CNT)
 *     goto out;
 *   prog = array->ptrs[index];
 *   if (prog == NULL)
 *     goto out;
 *   goto *(prog->bpf_func + prologue_size);
 * out:
 */
static void emit_bpf_tail_call(u8 **pprog)
{
	u8 *prog = *pprog;
	int label1, label2, label3;
	int cnt = 0;

	/* rdi - pointer to ctx
	 * rsi - pointer to bpf_array
	 * rdx - index in bpf_array
	 */

	/* if (index >= array->map.max_entries)
	 *   goto out;
	 */
	EMIT4(0x48, 0x8B, 0x46,                   /* mov rax, qword ptr [rsi + 16] */
	      offsetof(struct bpf_array, map.max_entries));
	EMIT3(0x48, 0x39, 0xD0);                  /* cmp rax, rdx */
#define OFFSET1 47 /* number of bytes to jump */
	EMIT2(X86_JBE, OFFSET1);                  /* jbe out */
	label1 = cnt;

	/* if (tail_call_cnt > MAX_TAIL_CALL_CNT)
	 *   goto out;
	 */
	EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
	EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT);     /* cmp eax, MAX_TAIL_CALL_CNT */
#define OFFSET2 36
	EMIT2(X86_JA, OFFSET2);                   /* ja out */
	label2 = cnt;
	EMIT3(0x83, 0xC0, 0x01);                  /* add eax, 1 */
	EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */

	/* prog = array->ptrs[index]; */
	EMIT4_off32(0x48, 0x8D, 0x84, 0xD6,       /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
		    offsetof(struct bpf_array, ptrs));
	EMIT3(0x48, 0x8B, 0x00);                  /* mov rax, qword ptr [rax] */

	/* if (prog == NULL)
	 *   goto out;
	 */
	EMIT4(0x48, 0x83, 0xF8, 0x00);            /* cmp rax, 0 */
#define OFFSET3 10
	EMIT2(X86_JE, OFFSET3);                   /* je out */
	label3 = cnt;

	/* goto *(prog->bpf_func + prologue_size); */
	EMIT4(0x48, 0x8B, 0x40,                   /* mov rax, qword ptr [rax + 32] */
	      offsetof(struct bpf_prog, bpf_func));
	EMIT4(0x48, 0x83, 0xC0, PROLOGUE_SIZE);   /* add rax, prologue_size */

	/* now we're ready to jump into the next BPF program
	 * rdi == ctx (1st arg)
	 * rax == prog->bpf_func + prologue_size
	 */
	EMIT2(0xFF, 0xE0);                        /* jmp rax */

	/* out: */
	BUILD_BUG_ON(cnt - label1 != OFFSET1);
	BUILD_BUG_ON(cnt - label2 != OFFSET2);
	BUILD_BUG_ON(cnt - label3 != OFFSET3);
	*pprog = prog;
}
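/* The three BUILD_BUG_ON()s above cross-check the hand-computed OFFSET1/2/3
 * displacements: label1/2/3 record 'cnt' just after each forward jump, so
 * cnt - labelN at the 'out' point must equal the displacement encoded in
 * the corresponding jump.
 */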


static void emit_load_skb_data_hlen(u8 **pprog)
{
	u8 *prog = *pprog;
	int cnt = 0;

	/* r9d = skb->len - skb->data_len (headlen)
	 * r10 = skb->data
	 */
	/* mov %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x8b, 0x8f, offsetof(struct sk_buff, len));

	/* sub %r9d, off32(%rdi) */
	EMIT3_off32(0x44, 0x2b, 0x8f, offsetof(struct sk_buff, data_len));

	/* mov %r10, off32(%rdi) */
	EMIT3_off32(0x4c, 0x8b, 0x97, offsetof(struct sk_buff, data));
	*pprog = prog;
}

static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
		  int oldproglen, struct jit_context *ctx)
{
	struct bpf_insn *insn = bpf_prog->insnsi;
	int insn_cnt = bpf_prog->len;
	bool seen_ld_abs = ctx->seen_ld_abs | (oldproglen == 0);
	bool seen_exit = false;
	u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
	int i, cnt = 0;
	int proglen = 0;
	u8 *prog = temp;

	emit_prologue(&prog);

	if (seen_ld_abs)
		emit_load_skb_data_hlen(&prog);

	for (i = 0; i < insn_cnt; i++, insn++) {
		const s32 imm32 = insn->imm;
		u32 dst_reg = insn->dst_reg;
		u32 src_reg = insn->src_reg;
		u8 b1 = 0, b2 = 0, b3 = 0;
		s64 jmp_offset;
		u8 jmp_cond;
		bool reload_skb_data;
		int ilen;
		u8 *func;

		switch (insn->code) {
			/* ALU */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU64 | BPF_ADD | BPF_X:
		case BPF_ALU64 | BPF_SUB | BPF_X:
		case BPF_ALU64 | BPF_AND | BPF_X:
		case BPF_ALU64 | BPF_OR | BPF_X:
		case BPF_ALU64 | BPF_XOR | BPF_X:
			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b2 = 0x01; break;
			case BPF_SUB: b2 = 0x29; break;
			case BPF_AND: b2 = 0x21; break;
			case BPF_OR: b2 = 0x09; break;
			case BPF_XOR: b2 = 0x31; break;
			}
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_2mod(0x48, dst_reg, src_reg));
			else if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* mov dst, src */
		case BPF_ALU64 | BPF_MOV | BPF_X:
			EMIT_mov(dst_reg, src_reg);
			break;

			/* mov32 dst, src */
		case BPF_ALU | BPF_MOV | BPF_X:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT1(add_2mod(0x40, dst_reg, src_reg));
			EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
			break;

			/* neg dst */
		case BPF_ALU | BPF_NEG:
		case BPF_ALU64 | BPF_NEG:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT2(0xF7, add_1reg(0xD8, dst_reg));
			break;

		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU64 | BPF_ADD | BPF_K:
		case BPF_ALU64 | BPF_SUB | BPF_K:
		case BPF_ALU64 | BPF_AND | BPF_K:
		case BPF_ALU64 | BPF_OR | BPF_K:
		case BPF_ALU64 | BPF_XOR | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_ADD: b3 = 0xC0; break;
			case BPF_SUB: b3 = 0xE8; break;
			case BPF_AND: b3 = 0xE0; break;
			case BPF_OR: b3 = 0xC8; break;
			case BPF_XOR: b3 = 0xF0; break;
			}

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU64 | BPF_MOV | BPF_K:
			/* optimization: if imm32 is positive,
			 * use 'mov eax, imm32' (which zero-extends imm32)
			 * to save 2 bytes
			 */
			if (imm32 < 0) {
				/* 'mov rax, imm32' sign extends imm32 */
				b1 = add_1mod(0x48, dst_reg);
				b2 = 0xC7;
				b3 = 0xC0;
				EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
				break;
			}
			/* fall through: a non-negative imm32 is emitted as the
			 * shorter zero-extending 32-bit mov below
			 */

		case BPF_ALU | BPF_MOV | BPF_K:
			/* optimization: if imm32 is zero, use 'xor <dst>,<dst>'
			 * to save 3 bytes.
			 */
			if (imm32 == 0) {
				if (is_ereg(dst_reg))
					EMIT1(add_2mod(0x40, dst_reg, dst_reg));
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
				break;
			}

			/* mov %eax, imm32 */
			if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));
			EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
			break;

		case BPF_LD | BPF_IMM | BPF_DW:
			if (insn[1].code != 0 || insn[1].src_reg != 0 ||
			    insn[1].dst_reg != 0 || insn[1].off != 0) {
				/* verifier must catch invalid insns */
				pr_err("invalid BPF_LD_IMM64 insn\n");
				return -EINVAL;
			}

			/* optimization: if imm64 is zero, use 'xor <dst>,<dst>'
			 * to save 7 bytes.
			 */
			if (insn[0].imm == 0 && insn[1].imm == 0) {
				b1 = add_2mod(0x48, dst_reg, dst_reg);
				b2 = 0x31; /* xor */
				b3 = 0xC0;
				EMIT3(b1, b2, add_2reg(b3, dst_reg, dst_reg));

				insn++;
				i++;
				break;
			}

			/* movabsq %rax, imm64 */
			EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
			EMIT(insn[0].imm, 4);
			EMIT(insn[1].imm, 4);

			insn++;
			i++;
			break;


			/* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
		case BPF_ALU | BPF_MOD | BPF_X:
		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU64 | BPF_MOD | BPF_X:
		case BPF_ALU64 | BPF_DIV | BPF_X:
		case BPF_ALU64 | BPF_MOD | BPF_K:
		case BPF_ALU64 | BPF_DIV | BPF_K:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov r11, src_reg */
				EMIT_mov(AUX_REG, src_reg);
			else
				/* mov r11, imm32 */
				EMIT3_off32(0x49, 0xC7, 0xC3, imm32);

			/* mov rax, dst_reg */
			EMIT_mov(BPF_REG_0, dst_reg);

			/* xor edx, edx
			 * equivalent to 'xor rdx, rdx', but one byte less
			 */
			EMIT2(0x31, 0xd2);

			if (BPF_SRC(insn->code) == BPF_X) {
				/* if (src_reg == 0) return 0 */

				/* cmp r11, 0 */
				EMIT4(0x49, 0x83, 0xFB, 0x00);

				/* jne .+9 (skip over pop, pop, xor and jmp) */
				EMIT2(X86_JNE, 1 + 1 + 2 + 5);
				EMIT1(0x5A); /* pop rdx */
				EMIT1(0x58); /* pop rax */
				EMIT2(0x31, 0xc0); /* xor eax, eax */

				/* jmp cleanup_addr
				 * addrs[i] - 11, because there are 11 bytes
				 * after this insn: div, mov, pop, pop, mov
				 */
				jmp_offset = ctx->cleanup_addr - (addrs[i] - 11);
				EMIT1_off32(0xE9, jmp_offset);
			}

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				/* div r11 */
				EMIT3(0x49, 0xF7, 0xF3);
			else
				/* div r11d */
				EMIT3(0x41, 0xF7, 0xF3);

			if (BPF_OP(insn->code) == BPF_MOD)
				/* mov r11, rdx */
				EMIT3(0x49, 0x89, 0xD3);
			else
				/* mov r11, rax */
				EMIT3(0x49, 0x89, 0xC3);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;
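			/* x86 div implicitly consumes rdx:rax and clobbers
			 * both, which is why rax and rdx are saved around the
			 * operation above and the result is staged in r11
			 * before landing in dst_reg.
			 */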

		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU64 | BPF_MUL | BPF_K:
		case BPF_ALU64 | BPF_MUL | BPF_X:
			EMIT1(0x50); /* push rax */
			EMIT1(0x52); /* push rdx */

			/* mov r11, dst_reg */
			EMIT_mov(AUX_REG, dst_reg);

			if (BPF_SRC(insn->code) == BPF_X)
				/* mov rax, src_reg */
				EMIT_mov(BPF_REG_0, src_reg);
			else
				/* mov rax, imm32 */
				EMIT3_off32(0x48, 0xC7, 0xC0, imm32);

			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, AUX_REG));
			else if (is_ereg(AUX_REG))
				EMIT1(add_1mod(0x40, AUX_REG));
			/* mul(q) r11 */
			EMIT2(0xF7, add_1reg(0xE0, AUX_REG));

			/* mov r11, rax */
			EMIT_mov(AUX_REG, BPF_REG_0);

			EMIT1(0x5A); /* pop rdx */
			EMIT1(0x58); /* pop rax */

			/* mov dst_reg, r11 */
			EMIT_mov(dst_reg, AUX_REG);
			break;

			/* shifts */
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_ARSH | BPF_K:
		case BPF_ALU64 | BPF_LSH | BPF_K:
		case BPF_ALU64 | BPF_RSH | BPF_K:
		case BPF_ALU64 | BPF_ARSH | BPF_K:
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
			break;

		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_ARSH | BPF_X:
		case BPF_ALU64 | BPF_LSH | BPF_X:
		case BPF_ALU64 | BPF_RSH | BPF_X:
		case BPF_ALU64 | BPF_ARSH | BPF_X:

			/* check for bad case when dst_reg == rcx */
			if (dst_reg == BPF_REG_4) {
				/* mov r11, dst_reg */
				EMIT_mov(AUX_REG, dst_reg);
				dst_reg = AUX_REG;
			}

			if (src_reg != BPF_REG_4) { /* common case */
				EMIT1(0x51); /* push rcx */

				/* mov rcx, src_reg */
				EMIT_mov(BPF_REG_4, src_reg);
			}

			/* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
			if (BPF_CLASS(insn->code) == BPF_ALU64)
				EMIT1(add_1mod(0x48, dst_reg));
			else if (is_ereg(dst_reg))
				EMIT1(add_1mod(0x40, dst_reg));

			switch (BPF_OP(insn->code)) {
			case BPF_LSH: b3 = 0xE0; break;
			case BPF_RSH: b3 = 0xE8; break;
			case BPF_ARSH: b3 = 0xF8; break;
			}
			EMIT2(0xD3, add_1reg(b3, dst_reg));

			if (src_reg != BPF_REG_4)
				EMIT1(0x59); /* pop rcx */

			if (insn->dst_reg == BPF_REG_4)
				/* mov dst_reg, r11 */
				EMIT_mov(insn->dst_reg, AUX_REG);
			break;
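			/* Variable-count shifts on x86 take the count in %cl
			 * only, hence the shuffle above: src_reg is moved
			 * into rcx, and a dst_reg that already lives in rcx
			 * (BPF_REG_4) is parked in r11 for the duration.
			 */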

		case BPF_ALU | BPF_END | BPF_FROM_BE:
			switch (imm32) {
			case 16:
				/* emit 'ror %ax, 8' to swap lower 2 bytes */
				EMIT1(0x66);
				if (is_ereg(dst_reg))
					EMIT1(0x41);
				EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);

				/* emit 'movzwl eax, ax' */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'bswap eax' to swap lower 4 bytes */
				if (is_ereg(dst_reg))
					EMIT2(0x41, 0x0F);
				else
					EMIT1(0x0F);
				EMIT1(add_1reg(0xC8, dst_reg));
				break;
			case 64:
				/* emit 'bswap rax' to swap 8 bytes */
				EMIT3(add_1mod(0x48, dst_reg), 0x0F,
				      add_1reg(0xC8, dst_reg));
				break;
			}
			break;

		case BPF_ALU | BPF_END | BPF_FROM_LE:
			switch (imm32) {
			case 16:
				/* emit 'movzwl eax, ax' to zero extend 16-bit
				 * into 64 bit
				 */
				if (is_ereg(dst_reg))
					EMIT3(0x45, 0x0F, 0xB7);
				else
					EMIT2(0x0F, 0xB7);
				EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 32:
				/* emit 'mov eax, eax' to clear upper 32-bits */
				if (is_ereg(dst_reg))
					EMIT1(0x45);
				EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
				break;
			case 64:
				/* nop */
				break;
			}
			break;

			/* ST: *(u8*)(dst_reg + off) = imm */
		case BPF_ST | BPF_MEM | BPF_B:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC6);
			else
				EMIT1(0xC6);
			goto st;
		case BPF_ST | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg))
				EMIT3(0x66, 0x41, 0xC7);
			else
				EMIT2(0x66, 0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg))
				EMIT2(0x41, 0xC7);
			else
				EMIT1(0xC7);
			goto st;
		case BPF_ST | BPF_MEM | BPF_DW:
			EMIT2(add_1mod(0x48, dst_reg), 0xC7);

st:			if (is_imm8(insn->off))
				EMIT2(add_1reg(0x40, dst_reg), insn->off);
			else
				EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);

			EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
			break;

			/* STX: *(u8*)(dst_reg + off) = src_reg */
		case BPF_STX | BPF_MEM | BPF_B:
			/* emit 'mov byte ptr [rax + off], al' */
			if (is_ereg(dst_reg) || is_ereg(src_reg) ||
			    /* have to add extra byte for x86 SIL, DIL regs */
			    src_reg == BPF_REG_1 || src_reg == BPF_REG_2)
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
			else
				EMIT1(0x88);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_H:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT2(0x66, 0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_W:
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
			else
				EMIT1(0x89);
			goto stx;
		case BPF_STX | BPF_MEM | BPF_DW:
			EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
stx:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* LDX: dst_reg = *(u8*)(src_reg + off) */
		case BPF_LDX | BPF_MEM | BPF_B:
			/* emit 'movzx rax, byte ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_H:
			/* emit 'movzx rax, word ptr [rax + off]' */
			EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_W:
			/* emit 'mov eax, dword ptr [rax+0x14]' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
			else
				EMIT1(0x8B);
			goto ldx;
		case BPF_LDX | BPF_MEM | BPF_DW:
			/* emit 'mov rax, qword ptr [rax+0x14]' */
			EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
ldx:			/* if insn->off == 0 we can save one extra byte, but
			 * special case of x86 r13 which always needs an offset
			 * is not worth the hassle
			 */
			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, src_reg, dst_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, src_reg, dst_reg),
					    insn->off);
			break;

			/* STX XADD: lock *(u32*)(dst_reg + off) += src_reg */
		case BPF_STX | BPF_XADD | BPF_W:
			/* emit 'lock add dword ptr [rax + off], eax' */
			if (is_ereg(dst_reg) || is_ereg(src_reg))
				EMIT3(0xF0, add_2mod(0x40, dst_reg, src_reg), 0x01);
			else
				EMIT2(0xF0, 0x01);
			goto xadd;
		case BPF_STX | BPF_XADD | BPF_DW:
			EMIT3(0xF0, add_2mod(0x48, dst_reg, src_reg), 0x01);
xadd:			if (is_imm8(insn->off))
				EMIT2(add_2reg(0x40, dst_reg, src_reg), insn->off);
			else
				EMIT1_off32(add_2reg(0x80, dst_reg, src_reg),
					    insn->off);
			break;

			/* call */
		case BPF_JMP | BPF_CALL:
			func = (u8 *) __bpf_call_base + imm32;
			jmp_offset = func - (image + addrs[i]);
			if (seen_ld_abs) {
				reload_skb_data = bpf_helper_changes_skb_data(func);
				if (reload_skb_data) {
					EMIT1(0x57); /* push %rdi */
					jmp_offset += 22; /* pop, mov, sub, mov */
				} else {
					EMIT2(0x41, 0x52); /* push %r10 */
					EMIT2(0x41, 0x51); /* push %r9 */
					/* need to adjust jmp offset, since
					 * pop %r9, pop %r10 take 4 bytes after call insn
					 */
					jmp_offset += 4;
				}
			}
			if (!imm32 || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			EMIT1_off32(0xE8, jmp_offset);
			if (seen_ld_abs) {
				if (reload_skb_data) {
					EMIT1(0x5F); /* pop %rdi */
					emit_load_skb_data_hlen(&prog);
				} else {
					EMIT2(0x41, 0x59); /* pop %r9 */
					EMIT2(0x41, 0x5A); /* pop %r10 */
				}
			}
			break;

		case BPF_JMP | BPF_CALL | BPF_X:
			emit_bpf_tail_call(&prog);
			break;

			/* cond jump */
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JNE | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_X:
		case BPF_JMP | BPF_JSGT | BPF_X:
		case BPF_JMP | BPF_JSGE | BPF_X:
			/* cmp dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x39,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_X:
			/* test dst_reg, src_reg */
			EMIT3(add_2mod(0x48, dst_reg, src_reg), 0x85,
			      add_2reg(0xC0, dst_reg, src_reg));
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JSET | BPF_K:
			/* test dst_reg, imm32 */
			EMIT1(add_1mod(0x48, dst_reg));
			EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
			goto emit_cond_jmp;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JNE | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JSGT | BPF_K:
		case BPF_JMP | BPF_JSGE | BPF_K:
			/* cmp dst_reg, imm8/32 */
			EMIT1(add_1mod(0x48, dst_reg));

			if (is_imm8(imm32))
				EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
			else
				EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);

emit_cond_jmp:		/* convert BPF opcode to x86 */
			switch (BPF_OP(insn->code)) {
			case BPF_JEQ:
				jmp_cond = X86_JE;
				break;
			case BPF_JSET:
			case BPF_JNE:
				jmp_cond = X86_JNE;
				break;
			case BPF_JGT:
				/* GT is unsigned '>', JA in x86 */
				jmp_cond = X86_JA;
				break;
			case BPF_JGE:
				/* GE is unsigned '>=', JAE in x86 */
				jmp_cond = X86_JAE;
				break;
			case BPF_JSGT:
				/* signed '>', GT in x86 */
				jmp_cond = X86_JG;
				break;
			case BPF_JSGE:
				/* signed '>=', GE in x86 */
				jmp_cond = X86_JGE;
				break;
			default: /* to silence gcc warning */
				return -EFAULT;
			}
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (is_imm8(jmp_offset)) {
				EMIT2(jmp_cond, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
			} else {
				pr_err("cond_jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}

			break;

		case BPF_JMP | BPF_JA:
			jmp_offset = addrs[i + insn->off] - addrs[i];
			if (!jmp_offset)
				/* optimize out nop jumps */
				break;
emit_jmp:
			if (is_imm8(jmp_offset)) {
				EMIT2(0xEB, jmp_offset);
			} else if (is_simm32(jmp_offset)) {
				EMIT1_off32(0xE9, jmp_offset);
			} else {
				pr_err("jmp gen bug %llx\n", jmp_offset);
				return -EFAULT;
			}
			break;

		case BPF_LD | BPF_IND | BPF_W:
			func = sk_load_word;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_W:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_word);
common_load:
			ctx->seen_ld_abs = seen_ld_abs = true;
			jmp_offset = func - (image + addrs[i]);
			if (!func || !is_simm32(jmp_offset)) {
				pr_err("unsupported bpf func %d addr %p image %p\n",
				       imm32, func, image);
				return -EINVAL;
			}
			if (BPF_MODE(insn->code) == BPF_ABS) {
				/* mov %esi, imm32 */
				EMIT1_off32(0xBE, imm32);
			} else {
				/* mov %rsi, src_reg */
				EMIT_mov(BPF_REG_2, src_reg);
				if (imm32) {
					if (is_imm8(imm32))
						/* add %esi, imm8 */
						EMIT3(0x83, 0xC6, imm32);
					else
						/* add %esi, imm32 */
						EMIT2_off32(0x81, 0xC6, imm32);
				}
			}
			/* skb pointer is in R6 (%rbx), it will be copied into
			 * %rdi if skb_copy_bits() call is necessary.
			 * sk_load_* helpers also use %r10 and %r9d.
			 * See bpf_jit.S
			 */
			EMIT1_off32(0xE8, jmp_offset); /* call */
			break;

		case BPF_LD | BPF_IND | BPF_H:
			func = sk_load_half;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_H:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_half);
			goto common_load;
		case BPF_LD | BPF_IND | BPF_B:
			func = sk_load_byte;
			goto common_load;
		case BPF_LD | BPF_ABS | BPF_B:
			func = CHOOSE_LOAD_FUNC(imm32, sk_load_byte);
			goto common_load;

		case BPF_JMP | BPF_EXIT:
			if (seen_exit) {
				jmp_offset = ctx->cleanup_addr - addrs[i];
				goto emit_jmp;
			}
			seen_exit = true;
			/* update cleanup_addr */
			ctx->cleanup_addr = proglen;
			/* mov rbx, qword ptr [rbp-X] */
			EMIT3_off32(0x48, 0x8B, 0x9D, -STACKSIZE);
			/* mov r13, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xAD, -STACKSIZE + 8);
			/* mov r14, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xB5, -STACKSIZE + 16);
			/* mov r15, qword ptr [rbp-X] */
			EMIT3_off32(0x4C, 0x8B, 0xBD, -STACKSIZE + 24);

			EMIT1(0xC9); /* leave */
			EMIT1(0xC3); /* ret */
			break;

		default:
			/* By design the x64 JIT should support all BPF
			 * instructions. This error will be seen if a new
			 * instruction was added to the interpreter but not
			 * to the JIT, or if there is junk in bpf_prog.
			 */
			pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
			return -EINVAL;
		}

		ilen = prog - temp;
		if (ilen > BPF_MAX_INSN_SIZE) {
			pr_err("bpf_jit_compile fatal insn size error\n");
			return -EFAULT;
		}

		if (image) {
			if (unlikely(proglen + ilen > oldproglen)) {
				pr_err("bpf_jit_compile fatal error\n");
				return -EFAULT;
			}
			memcpy(image + proglen, temp, ilen);
		}
		proglen += ilen;
		addrs[i] = proglen;
		prog = temp;
	}
	return proglen;
}

void bpf_jit_compile(struct bpf_prog *prog)
{
}

void bpf_int_jit_compile(struct bpf_prog *prog)
{
	struct bpf_binary_header *header = NULL;
	int proglen, oldproglen = 0;
	struct jit_context ctx = {};
	u8 *image = NULL;
	int *addrs;
	int pass;
	int i;

	if (!bpf_jit_enable)
		return;

	if (!prog || !prog->len)
		return;

	addrs = kmalloc(prog->len * sizeof(*addrs), GFP_KERNEL);
	if (!addrs)
		return;

	/* Before the first pass, make a rough estimation of addrs[]:
	 * each BPF instruction is translated to fewer than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < prog->len; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	ctx.cleanup_addr = proglen;

	/* JITed image shrinks with every pass and the loop iterates
	 * until the image stops shrinking. Very large bpf programs
	 * may converge on the last pass. In such case do one more
	 * pass to emit the final image
	 */
	for (pass = 0; pass < 10 || image; pass++) {
		proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
		if (proglen <= 0) {
			image = NULL;
			if (header)
				bpf_jit_binary_free(header);
			goto out;
		}
		if (image) {
			if (proglen != oldproglen) {
				pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
				       proglen, oldproglen);
				goto out;
			}
			break;
		}
		if (proglen == oldproglen) {
			header = bpf_jit_binary_alloc(proglen, &image,
						      1, jit_fill_hole);
			if (!header)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, proglen, pass + 1, image);

	if (image) {
		bpf_flush_icache(header, image + proglen);
		set_memory_ro((unsigned long)header, header->pages);
		prog->bpf_func = (void *)image;
		prog->jited = 1;
	}
out:
	kfree(addrs);
}

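/* bpf_jit_binary_alloc() places its struct bpf_binary_header at the start
 * of the allocated page(s), with the image following it, so masking
 * bpf_func with PAGE_MASK below recovers the header for freeing.
 */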
void bpf_jit_free(struct bpf_prog *fp)
{
	unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
	struct bpf_binary_header *header = (void *)addr;

	if (!fp->jited)
		goto free_filter;

	set_memory_rw(addr, header->pages);
	bpf_jit_binary_free(header);

free_filter:
	bpf_prog_unlock_free(fp);
}