arch/arm/net/bpf_jit_32.c
v3.15
  1/*
  2 * Just-In-Time compiler for BPF filters on 32bit ARM
  3 *
  4 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
  5 *
  6 * This program is free software; you can redistribute it and/or modify it
  7 * under the terms of the GNU General Public License as published by the
  8 * Free Software Foundation; version 2 of the License.
  9 */
 10
 11#include <linux/bitops.h>
 12#include <linux/compiler.h>
 13#include <linux/errno.h>
 14#include <linux/filter.h>
 15#include <linux/moduleloader.h>
 16#include <linux/netdevice.h>
 17#include <linux/string.h>
 18#include <linux/slab.h>
 19#include <linux/if_vlan.h>
 20#include <asm/cacheflush.h>
 21#include <asm/hwcap.h>
 22#include <asm/opcodes.h>
 23
 24#include "bpf_jit_32.h"
 25
 26/*
 27 * ABI:
 28 *
 29 * r0	scratch register
 30 * r4	BPF register A
 31 * r5	BPF register X
 32 * r6	pointer to the skb
 33 * r7	skb->data
 34 * r8	skb_headlen(skb)
 35 */
 36
 37#define r_scratch	ARM_R0
 38/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
 39#define r_off		ARM_R1
 40#define r_A		ARM_R4
 41#define r_X		ARM_R5
 42#define r_skb		ARM_R6
 43#define r_skb_data	ARM_R7
 44#define r_skb_hl	ARM_R8
 45
 46#define SCRATCH_SP_OFFSET	0
 47#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))
 48
 49#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
 50#define SEEN_MEM_WORD(k)	(1 << (k))
 51#define SEEN_X			(1 << BPF_MEMWORDS)
 52#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
 53#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
 54#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
 55
 56#define FLAG_NEED_X_RESET	(1 << 0)
 57
 58struct jit_ctx {
 59	const struct sk_filter *skf;
 60	unsigned idx;
 61	unsigned prologue_bytes;
 62	int ret0_fp_idx;
 63	u32 seen;
 64	u32 flags;
 65	u32 *offsets;
 66	u32 *target;
 67#if __LINUX_ARM_ARCH__ < 7
 68	u16 epilogue_bytes;
 69	u16 imm_count;
 70	u32 *imms;
 71#endif
 72};
 73
 74int bpf_jit_enable __read_mostly;
 75
 76static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
 77{
 78	u8 ret;
 79	int err;
 80
 81	err = skb_copy_bits(skb, offset, &ret, 1);
 82
 83	return (u64)err << 32 | ret;
 84}
 85
 86static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
 87{
 88	u16 ret;
 89	int err;
 90
 91	err = skb_copy_bits(skb, offset, &ret, 2);
 92
 93	return (u64)err << 32 | ntohs(ret);
 94}
 95
 96static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
 97{
 98	u32 ret;
 99	int err;
100
101	err = skb_copy_bits(skb, offset, &ret, 4);
102
103	return (u64)err << 32 | ntohl(ret);
104}
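/*
 * Note on the (u64)err << 32 | value packing used by the three helpers
 * above: on a little-endian EABI target a u64 is returned in r0/r1 with
 * the low word in r0, so the loaded value comes back in r0 and the
 * skb_copy_bits() error code in r1.  The emitted slowpaths rely on
 * exactly this, comparing r1 against 0 before moving r0 into r_A.
 */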
105
106/*
107 * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
108 * (where the assembly routines like __aeabi_uidiv could cause problems).
109 */
110static u32 jit_udiv(u32 dividend, u32 divisor)
111{
112	return dividend / divisor;
113}
114
115static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
116{
117	inst |= (cond << 28);
118	inst = __opcode_to_mem_arm(inst);
119
120	if (ctx->target != NULL)
121		ctx->target[ctx->idx] = inst;
122
123	ctx->idx++;
124}
125
126/*
127 * Emit an instruction that will be executed unconditionally.
128 */
129static inline void emit(u32 inst, struct jit_ctx *ctx)
130{
131	_emit(ARM_COND_AL, inst, ctx);
132}
133
134static u16 saved_regs(struct jit_ctx *ctx)
135{
136	u16 ret = 0;
137
138	if ((ctx->skf->len > 1) ||
139	    (ctx->skf->insns[0].code == BPF_S_RET_A))
140		ret |= 1 << r_A;
141
142#ifdef CONFIG_FRAME_POINTER
143	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
144#else
145	if (ctx->seen & SEEN_CALL)
146		ret |= 1 << ARM_LR;
147#endif
148	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
149		ret |= 1 << r_skb;
150	if (ctx->seen & SEEN_DATA)
151		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
152	if (ctx->seen & SEEN_X)
153		ret |= 1 << r_X;
154
155	return ret;
156}
157
158static inline int mem_words_used(struct jit_ctx *ctx)
159{
 160	/* yes, we do waste some stack space IF there are "holes" in the set */
161	return fls(ctx->seen & SEEN_MEM);
162}
163
164static inline bool is_load_to_a(u16 inst)
165{
166	switch (inst) {
167	case BPF_S_LD_W_LEN:
168	case BPF_S_LD_W_ABS:
169	case BPF_S_LD_H_ABS:
170	case BPF_S_LD_B_ABS:
171	case BPF_S_ANC_CPU:
172	case BPF_S_ANC_IFINDEX:
173	case BPF_S_ANC_MARK:
174	case BPF_S_ANC_PROTOCOL:
175	case BPF_S_ANC_RXHASH:
176	case BPF_S_ANC_VLAN_TAG:
177	case BPF_S_ANC_VLAN_TAG_PRESENT:
178	case BPF_S_ANC_QUEUE:
179		return true;
180	default:
181		return false;
182	}
183}
184
185static void build_prologue(struct jit_ctx *ctx)
186{
187	u16 reg_set = saved_regs(ctx);
188	u16 first_inst = ctx->skf->insns[0].code;
189	u16 off;
190
191#ifdef CONFIG_FRAME_POINTER
192	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
193	emit(ARM_PUSH(reg_set), ctx);
194	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
195#else
196	if (reg_set)
197		emit(ARM_PUSH(reg_set), ctx);
198#endif
199
200	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
201		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);
202
203	if (ctx->seen & SEEN_DATA) {
204		off = offsetof(struct sk_buff, data);
205		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
206		/* headlen = len - data_len */
207		off = offsetof(struct sk_buff, len);
208		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
209		off = offsetof(struct sk_buff, data_len);
210		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
211		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
212	}
213
214	if (ctx->flags & FLAG_NEED_X_RESET)
215		emit(ARM_MOV_I(r_X, 0), ctx);
216
217	/* do not leak kernel data to userspace */
218	if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
219		emit(ARM_MOV_I(r_A, 0), ctx);
220
221	/* stack space for the BPF_MEM words */
222	if (ctx->seen & SEEN_MEM)
223		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
224}
225
226static void build_epilogue(struct jit_ctx *ctx)
227{
228	u16 reg_set = saved_regs(ctx);
229
230	if (ctx->seen & SEEN_MEM)
231		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
232
233	reg_set &= ~(1 << ARM_LR);
234
235#ifdef CONFIG_FRAME_POINTER
236	/* the first instruction of the prologue was: mov ip, sp */
237	reg_set &= ~(1 << ARM_IP);
238	reg_set |= (1 << ARM_SP);
239	emit(ARM_LDM(ARM_SP, reg_set), ctx);
240#else
241	if (reg_set) {
242		if (ctx->seen & SEEN_CALL)
243			reg_set |= 1 << ARM_PC;
244		emit(ARM_POP(reg_set), ctx);
245	}
246
247	if (!(ctx->seen & SEEN_CALL))
248		emit(ARM_BX(ARM_LR), ctx);
249#endif
250}
251
252static int16_t imm8m(u32 x)
253{
254	u32 rot;
255
256	for (rot = 0; rot < 16; rot++)
257		if ((x & ~ror32(0xff, 2 * rot)) == 0)
258			return rol32(x, 2 * rot) | (rot << 8);
259
260	return -1;
261}
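/*
 * Worked example for imm8m(): 0x00ff0000 == ror32(0xff, 16), so the
 * loop matches at rot == 8 and returns rol32(0x00ff0000, 16) | (8 << 8)
 * == 0x8ff -- the "encoded immediate" referred to by the ARMv<6
 * emit_swap16() below.  Any value that is not an 8-bit constant rotated
 * right by an even amount yields -1 and goes through emit_mov_i_no8m().
 */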
262
263#if __LINUX_ARM_ARCH__ < 7
264
265static u16 imm_offset(u32 k, struct jit_ctx *ctx)
266{
267	unsigned i = 0, offset;
268	u16 imm;
269
270	/* on the "fake" run we just count them (duplicates included) */
271	if (ctx->target == NULL) {
272		ctx->imm_count++;
273		return 0;
274	}
275
276	while ((i < ctx->imm_count) && ctx->imms[i]) {
277		if (ctx->imms[i] == k)
278			break;
279		i++;
280	}
281
282	if (ctx->imms[i] == 0)
283		ctx->imms[i] = k;
284
285	/* constants go just after the epilogue */
286	offset =  ctx->offsets[ctx->skf->len];
287	offset += ctx->prologue_bytes;
288	offset += ctx->epilogue_bytes;
289	offset += i * 4;
290
291	ctx->target[offset / 4] = k;
292
293	/* PC in ARM mode == address of the instruction + 8 */
294	imm = offset - (8 + ctx->idx * 4);
295
296	return imm;
297}
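/*
 * Resulting image layout on ARMv<7, as implied by the offset
 * computation above:
 *
 *	[ prologue ][ body ][ epilogue ][ literal pool ]
 *
 * The pool holds one word per distinct constant (the while loop
 * de-duplicates), and the returned offset already accounts for the
 * ARM-mode PC reading as the current instruction's address + 8.
 */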
298
299#endif /* __LINUX_ARM_ARCH__ */
300
301/*
302 * Move an immediate that's not an imm8m to a core register.
303 */
304static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
305{
306#if __LINUX_ARM_ARCH__ < 7
307	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
308#else
309	emit(ARM_MOVW(rd, val & 0xffff), ctx);
310	if (val > 0xffff)
311		emit(ARM_MOVT(rd, val >> 16), ctx);
312#endif
313}
314
315static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
316{
317	int imm12 = imm8m(val);
318
319	if (imm12 >= 0)
320		emit(ARM_MOV_I(rd, imm12), ctx);
321	else
322		emit_mov_i_no8m(rd, val, ctx);
323}
324
325#if __LINUX_ARM_ARCH__ < 6
326
327static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
328{
329	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
330	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
331	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
332	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
333	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
334	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
335	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
336	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
337}
338
339static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
340{
341	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
342	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
343	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
344}
345
346static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
347{
348	/* r_dst = (r_src << 8) | (r_src >> 8) */
349	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
350	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
351
352	/*
353	 * we need to mask out the bits set in r_dst[23:16] due to
354	 * the first shift instruction.
355	 *
356	 * note that 0x8ff is the encoded immediate 0x00ff0000.
357	 */
358	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);
359}
360
361#else  /* ARMv6+ */
362
363static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
364{
365	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
366#ifdef __LITTLE_ENDIAN
367	_emit(cond, ARM_REV(r_res, r_res), ctx);
368#endif
369}
370
371static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
372{
373	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
374#ifdef __LITTLE_ENDIAN
375	_emit(cond, ARM_REV16(r_res, r_res), ctx);
376#endif
377}
378
379static inline void emit_swap16(u8 r_dst __maybe_unused,
380			       u8 r_src __maybe_unused,
381			       struct jit_ctx *ctx __maybe_unused)
382{
383#ifdef __LITTLE_ENDIAN
384	emit(ARM_REV16(r_dst, r_src), ctx);
385#endif
386}
387
388#endif /* __LINUX_ARM_ARCH__ < 6 */
389
390
391/* Compute the immediate value for a PC-relative branch. */
392static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
393{
394	u32 imm;
395
396	if (ctx->target == NULL)
397		return 0;
398	/*
399	 * BPF allows only forward jumps and the offset of the target is
400	 * still the one computed during the first pass.
401	 */
402	imm  = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);
403
404	return imm >> 2;
405}
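/*
 * Example: with a 12-byte prologue, a branch emitted at ctx->idx == 5
 * (byte 20 of the image) to a BPF instruction at body offset 32 (byte
 * 44 of the image) yields imm = 44 - (20 + 8) = 16 bytes, i.e. 4
 * words: the ARM B instruction takes its offset in words, relative to
 * a PC that points 8 bytes past the branch.
 */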
406
407#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
408	do {								\
409		imm12 = imm8m(imm_val);					\
410		if (imm12 < 0) {					\
411			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
412			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
413		} else {						\
414			emit(op ## _I((r1), (r2), imm12), ctx);		\
415		}							\
416	} while (0)
417
418static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
419{
420	if (ctx->ret0_fp_idx >= 0) {
421		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
422		/* NOP to keep the size constant between passes */
423		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
424	} else {
425		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
426		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
427	}
428}
429
430static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
431{
432#if __LINUX_ARM_ARCH__ < 5
433	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
434
435	if (elf_hwcap & HWCAP_THUMB)
436		emit(ARM_BX(tgt_reg), ctx);
437	else
438		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
439#else
440	emit(ARM_BLX_R(tgt_reg), ctx);
441#endif
442}
443
444static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
445{
446#if __LINUX_ARM_ARCH__ == 7
447	if (elf_hwcap & HWCAP_IDIVA) {
448		emit(ARM_UDIV(rd, rm, rn), ctx);
449		return;
450	}
451#endif
452	if (rm != ARM_R0)
453		emit(ARM_MOV_R(ARM_R0, rm), ctx);
454	if (rn != ARM_R1)
455		emit(ARM_MOV_R(ARM_R1, rn), ctx);
456
457	ctx->seen |= SEEN_CALL;
458	emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
459	emit_blx_r(ARM_R3, ctx);
460
461	if (rd != ARM_R0)
462		emit(ARM_MOV_R(rd, ARM_R0), ctx);
463}
464
465static inline void update_on_xread(struct jit_ctx *ctx)
466{
467	if (!(ctx->seen & SEEN_X))
468		ctx->flags |= FLAG_NEED_X_RESET;
469
470	ctx->seen |= SEEN_X;
471}
472
473static int build_body(struct jit_ctx *ctx)
474{
475	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
476	const struct sk_filter *prog = ctx->skf;
477	const struct sock_filter *inst;
478	unsigned i, load_order, off, condt;
479	int imm12;
480	u32 k;
481
482	for (i = 0; i < prog->len; i++) {
483		inst = &(prog->insns[i]);
484		/* K as an immediate value operand */
485		k = inst->k;
486
487		/* compute offsets only in the fake pass */
488		if (ctx->target == NULL)
489			ctx->offsets[i] = ctx->idx * 4;
490
491		switch (inst->code) {
492		case BPF_S_LD_IMM:
493			emit_mov_i(r_A, k, ctx);
494			break;
495		case BPF_S_LD_W_LEN:
496			ctx->seen |= SEEN_SKB;
497			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
498			emit(ARM_LDR_I(r_A, r_skb,
499				       offsetof(struct sk_buff, len)), ctx);
500			break;
501		case BPF_S_LD_MEM:
502			/* A = scratch[k] */
503			ctx->seen |= SEEN_MEM_WORD(k);
504			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
505			break;
506		case BPF_S_LD_W_ABS:
507			load_order = 2;
508			goto load;
509		case BPF_S_LD_H_ABS:
510			load_order = 1;
511			goto load;
512		case BPF_S_LD_B_ABS:
513			load_order = 0;
514load:
515			/* the interpreter will deal with the negative K */
516			if ((int)k < 0)
517				return -ENOTSUPP;
518			emit_mov_i(r_off, k, ctx);
519load_common:
520			ctx->seen |= SEEN_DATA | SEEN_CALL;
521
522			if (load_order > 0) {
523				emit(ARM_SUB_I(r_scratch, r_skb_hl,
524					       1 << load_order), ctx);
525				emit(ARM_CMP_R(r_scratch, r_off), ctx);
526				condt = ARM_COND_HS;
527			} else {
528				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
529				condt = ARM_COND_HI;
530			}
531
532			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
533			      ctx);
534
535			if (load_order == 0)
536				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
537				      ctx);
538			else if (load_order == 1)
539				emit_load_be16(condt, r_A, r_scratch, ctx);
540			else if (load_order == 2)
541				emit_load_be32(condt, r_A, r_scratch, ctx);
542
543			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);
544
545			/* the slowpath */
546			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
547			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
548			/* the offset is already in R1 */
549			emit_blx_r(ARM_R3, ctx);
550			/* check the result of skb_copy_bits */
551			emit(ARM_CMP_I(ARM_R1, 0), ctx);
552			emit_err_ret(ARM_COND_NE, ctx);
553			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
554			break;
555		case BPF_S_LD_W_IND:
556			load_order = 2;
557			goto load_ind;
558		case BPF_S_LD_H_IND:
559			load_order = 1;
560			goto load_ind;
561		case BPF_S_LD_B_IND:
562			load_order = 0;
563load_ind:
564			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
565			goto load_common;
566		case BPF_S_LDX_IMM:
567			ctx->seen |= SEEN_X;
568			emit_mov_i(r_X, k, ctx);
569			break;
570		case BPF_S_LDX_W_LEN:
571			ctx->seen |= SEEN_X | SEEN_SKB;
572			emit(ARM_LDR_I(r_X, r_skb,
573				       offsetof(struct sk_buff, len)), ctx);
574			break;
575		case BPF_S_LDX_MEM:
576			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
577			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
578			break;
579		case BPF_S_LDX_B_MSH:
580			/* x = ((*(frame + k)) & 0xf) << 2; */
581			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
582			/* the interpreter should deal with the negative K */
583			if ((int)k < 0)
584				return -1;
585			/* offset in r1: we might have to take the slow path */
586			emit_mov_i(r_off, k, ctx);
587			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
588
589			/* load in r0: common with the slowpath */
590			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
591						      ARM_R1), ctx);
592			/*
593			 * emit_mov_i() might generate one or two instructions,
594			 * the same holds for emit_blx_r()
595			 */
596			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
597
598			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
599			/* r_off is r1 */
600			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
601			emit_blx_r(ARM_R3, ctx);
602			/* check the return value of skb_copy_bits */
603			emit(ARM_CMP_I(ARM_R1, 0), ctx);
604			emit_err_ret(ARM_COND_NE, ctx);
605
606			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
607			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
608			break;
609		case BPF_S_ST:
610			ctx->seen |= SEEN_MEM_WORD(k);
611			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
612			break;
613		case BPF_S_STX:
614			update_on_xread(ctx);
615			ctx->seen |= SEEN_MEM_WORD(k);
616			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
617			break;
618		case BPF_S_ALU_ADD_K:
619			/* A += K */
620			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
621			break;
622		case BPF_S_ALU_ADD_X:
623			update_on_xread(ctx);
624			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
625			break;
626		case BPF_S_ALU_SUB_K:
627			/* A -= K */
628			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
629			break;
630		case BPF_S_ALU_SUB_X:
631			update_on_xread(ctx);
632			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
633			break;
634		case BPF_S_ALU_MUL_K:
635			/* A *= K */
636			emit_mov_i(r_scratch, k, ctx);
637			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
638			break;
639		case BPF_S_ALU_MUL_X:
640			update_on_xread(ctx);
641			emit(ARM_MUL(r_A, r_A, r_X), ctx);
642			break;
643		case BPF_S_ALU_DIV_K:
644			if (k == 1)
645				break;
646			emit_mov_i(r_scratch, k, ctx);
647			emit_udiv(r_A, r_A, r_scratch, ctx);
648			break;
649		case BPF_S_ALU_DIV_X:
650			update_on_xread(ctx);
651			emit(ARM_CMP_I(r_X, 0), ctx);
652			emit_err_ret(ARM_COND_EQ, ctx);
653			emit_udiv(r_A, r_A, r_X, ctx);
654			break;
655		case BPF_S_ALU_OR_K:
656			/* A |= K */
657			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
658			break;
659		case BPF_S_ALU_OR_X:
660			update_on_xread(ctx);
661			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
662			break;
663		case BPF_S_ALU_XOR_K:
664			/* A ^= K; */
665			OP_IMM3(ARM_EOR, r_A, r_A, k, ctx);
666			break;
667		case BPF_S_ANC_ALU_XOR_X:
668		case BPF_S_ALU_XOR_X:
669			/* A ^= X */
670			update_on_xread(ctx);
671			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
672			break;
673		case BPF_S_ALU_AND_K:
674			/* A &= K */
675			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
676			break;
677		case BPF_S_ALU_AND_X:
678			update_on_xread(ctx);
679			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
680			break;
681		case BPF_S_ALU_LSH_K:
682			if (unlikely(k > 31))
683				return -1;
684			emit(ARM_LSL_I(r_A, r_A, k), ctx);
685			break;
686		case BPF_S_ALU_LSH_X:
687			update_on_xread(ctx);
688			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
689			break;
690		case BPF_S_ALU_RSH_K:
691			if (unlikely(k > 31))
692				return -1;
693			emit(ARM_LSR_I(r_A, r_A, k), ctx);
694			break;
695		case BPF_S_ALU_RSH_X:
696			update_on_xread(ctx);
697			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
698			break;
699		case BPF_S_ALU_NEG:
700			/* A = -A */
701			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
702			break;
703		case BPF_S_JMP_JA:
704			/* pc += K */
705			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
706			break;
707		case BPF_S_JMP_JEQ_K:
708			/* pc += (A == K) ? pc->jt : pc->jf */
709			condt  = ARM_COND_EQ;
710			goto cmp_imm;
711		case BPF_S_JMP_JGT_K:
712			/* pc += (A > K) ? pc->jt : pc->jf */
713			condt  = ARM_COND_HI;
714			goto cmp_imm;
715		case BPF_S_JMP_JGE_K:
716			/* pc += (A >= K) ? pc->jt : pc->jf */
717			condt  = ARM_COND_HS;
718cmp_imm:
719			imm12 = imm8m(k);
720			if (imm12 < 0) {
721				emit_mov_i_no8m(r_scratch, k, ctx);
722				emit(ARM_CMP_R(r_A, r_scratch), ctx);
723			} else {
724				emit(ARM_CMP_I(r_A, imm12), ctx);
725			}
726cond_jump:
727			if (inst->jt)
728				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
729						   ctx)), ctx);
730			if (inst->jf)
731				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
732							     ctx)), ctx);
733			break;
734		case BPF_S_JMP_JEQ_X:
735			/* pc += (A == X) ? pc->jt : pc->jf */
736			condt   = ARM_COND_EQ;
737			goto cmp_x;
738		case BPF_S_JMP_JGT_X:
739			/* pc += (A > X) ? pc->jt : pc->jf */
740			condt   = ARM_COND_HI;
741			goto cmp_x;
742		case BPF_S_JMP_JGE_X:
743			/* pc += (A >= X) ? pc->jt : pc->jf */
744			condt   = ARM_COND_CS;
745cmp_x:
746			update_on_xread(ctx);
747			emit(ARM_CMP_R(r_A, r_X), ctx);
748			goto cond_jump;
749		case BPF_S_JMP_JSET_K:
750			/* pc += (A & K) ? pc->jt : pc->jf */
751			condt  = ARM_COND_NE;
752			/* not set iff all zeroes iff Z==1 iff EQ */
753
754			imm12 = imm8m(k);
755			if (imm12 < 0) {
756				emit_mov_i_no8m(r_scratch, k, ctx);
757				emit(ARM_TST_R(r_A, r_scratch), ctx);
758			} else {
759				emit(ARM_TST_I(r_A, imm12), ctx);
760			}
761			goto cond_jump;
762		case BPF_S_JMP_JSET_X:
763			/* pc += (A & X) ? pc->jt : pc->jf */
764			update_on_xread(ctx);
765			condt  = ARM_COND_NE;
766			emit(ARM_TST_R(r_A, r_X), ctx);
767			goto cond_jump;
768		case BPF_S_RET_A:
769			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
770			goto b_epilogue;
771		case BPF_S_RET_K:
772			if ((k == 0) && (ctx->ret0_fp_idx < 0))
773				ctx->ret0_fp_idx = i;
774			emit_mov_i(ARM_R0, k, ctx);
775b_epilogue:
776			if (i != ctx->skf->len - 1)
777				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
778			break;
779		case BPF_S_MISC_TAX:
780			/* X = A */
781			ctx->seen |= SEEN_X;
782			emit(ARM_MOV_R(r_X, r_A), ctx);
783			break;
784		case BPF_S_MISC_TXA:
785			/* A = X */
786			update_on_xread(ctx);
787			emit(ARM_MOV_R(r_A, r_X), ctx);
788			break;
789		case BPF_S_ANC_PROTOCOL:
790			/* A = ntohs(skb->protocol) */
791			ctx->seen |= SEEN_SKB;
792			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
793						  protocol) != 2);
794			off = offsetof(struct sk_buff, protocol);
795			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
796			emit_swap16(r_A, r_scratch, ctx);
797			break;
798		case BPF_S_ANC_CPU:
799			/* r_scratch = current_thread_info() */
800			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
801			/* A = current_thread_info()->cpu */
802			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
803			off = offsetof(struct thread_info, cpu);
804			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
805			break;
806		case BPF_S_ANC_IFINDEX:
807			/* A = skb->dev->ifindex */
808			ctx->seen |= SEEN_SKB;
809			off = offsetof(struct sk_buff, dev);
810			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
811
812			emit(ARM_CMP_I(r_scratch, 0), ctx);
813			emit_err_ret(ARM_COND_EQ, ctx);
814
815			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
816						  ifindex) != 4);
817			off = offsetof(struct net_device, ifindex);
818			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
819			break;
820		case BPF_S_ANC_MARK:
821			ctx->seen |= SEEN_SKB;
822			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
823			off = offsetof(struct sk_buff, mark);
824			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
825			break;
826		case BPF_S_ANC_RXHASH:
827			ctx->seen |= SEEN_SKB;
828			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
829			off = offsetof(struct sk_buff, hash);
830			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
831			break;
832		case BPF_S_ANC_VLAN_TAG:
833		case BPF_S_ANC_VLAN_TAG_PRESENT:
834			ctx->seen |= SEEN_SKB;
835			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
836			off = offsetof(struct sk_buff, vlan_tci);
837			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
838			if (inst->code == BPF_S_ANC_VLAN_TAG)
839				OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx);
840			else
841				OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx);
842			break;
843		case BPF_S_ANC_QUEUE:
844			ctx->seen |= SEEN_SKB;
845			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
846						  queue_mapping) != 2);
847			BUILD_BUG_ON(offsetof(struct sk_buff,
848					      queue_mapping) > 0xff);
849			off = offsetof(struct sk_buff, queue_mapping);
850			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
851			break;
852		default:
853			return -1;
854		}
855	}
856
857	/* compute offsets only during the first pass */
858	if (ctx->target == NULL)
859		ctx->offsets[i] = ctx->idx * 4;
860
861	return 0;
862}
863
864
865void bpf_jit_compile(struct sk_filter *fp)
866{
867	struct jit_ctx ctx;
868	unsigned tmp_idx;
869	unsigned alloc_size;
870
871	if (!bpf_jit_enable)
872		return;
873
874	memset(&ctx, 0, sizeof(ctx));
875	ctx.skf		= fp;
876	ctx.ret0_fp_idx = -1;
877
878	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
879	if (ctx.offsets == NULL)
880		return;
881
882	/* fake pass to fill in the ctx->seen */
883	if (unlikely(build_body(&ctx)))
884		goto out;
885
886	tmp_idx = ctx.idx;
887	build_prologue(&ctx);
888	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
889
890#if __LINUX_ARM_ARCH__ < 7
891	tmp_idx = ctx.idx;
892	build_epilogue(&ctx);
893	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
894
895	ctx.idx += ctx.imm_count;
896	if (ctx.imm_count) {
897		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
898		if (ctx.imms == NULL)
899			goto out;
900	}
901#else
902	/* there's nothing after the epilogue on ARMv7 */
903	build_epilogue(&ctx);
904#endif
905
906	alloc_size = 4 * ctx.idx;
907	ctx.target = module_alloc(alloc_size);
908	if (unlikely(ctx.target == NULL))
909		goto out;
910
911	ctx.idx = 0;
912	build_prologue(&ctx);
913	build_body(&ctx);
914	build_epilogue(&ctx);
915
916	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
917
918#if __LINUX_ARM_ARCH__ < 7
919	if (ctx.imm_count)
920		kfree(ctx.imms);
921#endif
922
923	if (bpf_jit_enable > 1)
924		/* there are 2 passes here */
925		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);
926
927	fp->bpf_func = (void *)ctx.target;
928	fp->jited = 1;
929out:
930	kfree(ctx.offsets);
931	return;
932}
933
934void bpf_jit_free(struct sk_filter *fp)
935{
936	if (fp->jited)
937		module_free(NULL, fp->bpf_func);
938	kfree(fp);
939}
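For context, a minimal userspace sketch (not part of either kernel listing) of the kind of classic BPF filter this JIT compiles: attaching it with SO_ATTACH_FILTER makes the kernel run the checker and then bpf_jit_compile() on it when /proc/sys/net/core/bpf_jit_enable is non-zero. The program accepts IPv4 frames and drops everything else; error handling is kept minimal and the raw socket requires CAP_NET_RAW.

#include <arpa/inet.h>
#include <stdio.h>
#include <sys/socket.h>
#include <linux/if_ether.h>
#include <linux/filter.h>

int main(void)
{
	/* ldh [12]; jeq #ETH_P_IP, 0, 1; ret #-1; ret #0 */
	struct sock_filter code[] = {
		BPF_STMT(BPF_LD  | BPF_H   | BPF_ABS, 12),
		BPF_JUMP(BPF_JMP | BPF_JEQ | BPF_K, ETH_P_IP, 0, 1),
		BPF_STMT(BPF_RET | BPF_K, 0xffffffff),	/* accept */
		BPF_STMT(BPF_RET | BPF_K, 0),		/* drop   */
	};
	struct sock_fprog prog = {
		.len	= sizeof(code) / sizeof(code[0]),
		.filter	= code,
	};
	int fd = socket(AF_PACKET, SOCK_RAW, htons(ETH_P_ALL));

	if (fd < 0 ||
	    setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER,
		       &prog, sizeof(prog)) < 0) {
		perror("bpf attach");
		return 1;
	}
	return 0;
}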
v3.5.6
  1/*
  2 * Just-In-Time compiler for BPF filters on 32bit ARM
  3 *
  4 * Copyright (c) 2011 Mircea Gherzan <mgherzan@gmail.com>
  5 *
  6 * This program is free software; you can redistribute it and/or modify it
  7 * under the terms of the GNU General Public License as published by the
  8 * Free Software Foundation; version 2 of the License.
  9 */
 10
 11#include <linux/bitops.h>
 12#include <linux/compiler.h>
 13#include <linux/errno.h>
 14#include <linux/filter.h>
 15#include <linux/moduleloader.h>
 16#include <linux/netdevice.h>
 17#include <linux/string.h>
 18#include <linux/slab.h>
 19#include <asm/cacheflush.h>
 20#include <asm/hwcap.h>
 21
 22#include "bpf_jit_32.h"
 23
 24/*
 25 * ABI:
 26 *
 27 * r0	scratch register
 28 * r4	BPF register A
 29 * r5	BPF register X
 30 * r6	pointer to the skb
 31 * r7	skb->data
 32 * r8	skb_headlen(skb)
 33 */
 34
 35#define r_scratch	ARM_R0
 36/* r1-r3 are (also) used for the unaligned loads on the non-ARMv7 slowpath */
 37#define r_off		ARM_R1
 38#define r_A		ARM_R4
 39#define r_X		ARM_R5
 40#define r_skb		ARM_R6
 41#define r_skb_data	ARM_R7
 42#define r_skb_hl	ARM_R8
 43
 44#define SCRATCH_SP_OFFSET	0
 45#define SCRATCH_OFF(k)		(SCRATCH_SP_OFFSET + 4 * (k))
 46
 47#define SEEN_MEM		((1 << BPF_MEMWORDS) - 1)
 48#define SEEN_MEM_WORD(k)	(1 << (k))
 49#define SEEN_X			(1 << BPF_MEMWORDS)
 50#define SEEN_CALL		(1 << (BPF_MEMWORDS + 1))
 51#define SEEN_SKB		(1 << (BPF_MEMWORDS + 2))
 52#define SEEN_DATA		(1 << (BPF_MEMWORDS + 3))
 53
 54#define FLAG_NEED_X_RESET	(1 << 0)
 55
 56struct jit_ctx {
 57	const struct sk_filter *skf;
 58	unsigned idx;
 59	unsigned prologue_bytes;
 60	int ret0_fp_idx;
 61	u32 seen;
 62	u32 flags;
 63	u32 *offsets;
 64	u32 *target;
 65#if __LINUX_ARM_ARCH__ < 7
 66	u16 epilogue_bytes;
 67	u16 imm_count;
 68	u32 *imms;
 69#endif
 70};
 71
 72int bpf_jit_enable __read_mostly;
 73
 74static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset)
 75{
 76	u8 ret;
 77	int err;
 78
 79	err = skb_copy_bits(skb, offset, &ret, 1);
 80
 81	return (u64)err << 32 | ret;
 82}
 83
 84static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset)
 85{
 86	u16 ret;
 87	int err;
 88
 89	err = skb_copy_bits(skb, offset, &ret, 2);
 90
 91	return (u64)err << 32 | ntohs(ret);
 92}
 93
 94static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
 95{
 96	u32 ret;
 97	int err;
 98
 99	err = skb_copy_bits(skb, offset, &ret, 4);
100
101	return (u64)err << 32 | ntohl(ret);
102}
103
104/*
105 * Wrapper that handles both OABI and EABI and assures Thumb2 interworking
106 * (where the assembly routines like __aeabi_uidiv could cause problems).
107 */
108static u32 jit_udiv(u32 dividend, u32 divisor)
109{
110	return dividend / divisor;
111}
112
113static inline void _emit(int cond, u32 inst, struct jit_ctx *ctx)
114{
115	if (ctx->target != NULL)
116		ctx->target[ctx->idx] = inst | (cond << 28);
117
118	ctx->idx++;
119}
120
121/*
122 * Emit an instruction that will be executed unconditionally.
123 */
124static inline void emit(u32 inst, struct jit_ctx *ctx)
125{
126	_emit(ARM_COND_AL, inst, ctx);
127}
128
129static u16 saved_regs(struct jit_ctx *ctx)
130{
131	u16 ret = 0;
132
133	if ((ctx->skf->len > 1) ||
134	    (ctx->skf->insns[0].code == BPF_S_RET_A))
135		ret |= 1 << r_A;
136
137#ifdef CONFIG_FRAME_POINTER
138	ret |= (1 << ARM_FP) | (1 << ARM_IP) | (1 << ARM_LR) | (1 << ARM_PC);
139#else
140	if (ctx->seen & SEEN_CALL)
141		ret |= 1 << ARM_LR;
142#endif
143	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
144		ret |= 1 << r_skb;
145	if (ctx->seen & SEEN_DATA)
146		ret |= (1 << r_skb_data) | (1 << r_skb_hl);
147	if (ctx->seen & SEEN_X)
148		ret |= 1 << r_X;
149
150	return ret;
151}
152
153static inline int mem_words_used(struct jit_ctx *ctx)
154{
 155	/* yes, we do waste some stack space IF there are "holes" in the set */
156	return fls(ctx->seen & SEEN_MEM);
157}
158
159static inline bool is_load_to_a(u16 inst)
160{
161	switch (inst) {
162	case BPF_S_LD_W_LEN:
163	case BPF_S_LD_W_ABS:
164	case BPF_S_LD_H_ABS:
165	case BPF_S_LD_B_ABS:
166	case BPF_S_ANC_CPU:
167	case BPF_S_ANC_IFINDEX:
168	case BPF_S_ANC_MARK:
169	case BPF_S_ANC_PROTOCOL:
170	case BPF_S_ANC_RXHASH:
171	case BPF_S_ANC_QUEUE:
172		return true;
173	default:
174		return false;
175	}
176}
177
178static void build_prologue(struct jit_ctx *ctx)
179{
180	u16 reg_set = saved_regs(ctx);
181	u16 first_inst = ctx->skf->insns[0].code;
182	u16 off;
183
184#ifdef CONFIG_FRAME_POINTER
185	emit(ARM_MOV_R(ARM_IP, ARM_SP), ctx);
186	emit(ARM_PUSH(reg_set), ctx);
187	emit(ARM_SUB_I(ARM_FP, ARM_IP, 4), ctx);
188#else
189	if (reg_set)
190		emit(ARM_PUSH(reg_set), ctx);
191#endif
192
193	if (ctx->seen & (SEEN_DATA | SEEN_SKB))
194		emit(ARM_MOV_R(r_skb, ARM_R0), ctx);
195
196	if (ctx->seen & SEEN_DATA) {
197		off = offsetof(struct sk_buff, data);
198		emit(ARM_LDR_I(r_skb_data, r_skb, off), ctx);
199		/* headlen = len - data_len */
200		off = offsetof(struct sk_buff, len);
201		emit(ARM_LDR_I(r_skb_hl, r_skb, off), ctx);
202		off = offsetof(struct sk_buff, data_len);
203		emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
204		emit(ARM_SUB_R(r_skb_hl, r_skb_hl, r_scratch), ctx);
205	}
206
207	if (ctx->flags & FLAG_NEED_X_RESET)
208		emit(ARM_MOV_I(r_X, 0), ctx);
209
210	/* do not leak kernel data to userspace */
211	if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
212		emit(ARM_MOV_I(r_A, 0), ctx);
213
214	/* stack space for the BPF_MEM words */
215	if (ctx->seen & SEEN_MEM)
216		emit(ARM_SUB_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
217}
218
219static void build_epilogue(struct jit_ctx *ctx)
220{
221	u16 reg_set = saved_regs(ctx);
222
223	if (ctx->seen & SEEN_MEM)
224		emit(ARM_ADD_I(ARM_SP, ARM_SP, mem_words_used(ctx) * 4), ctx);
225
226	reg_set &= ~(1 << ARM_LR);
227
228#ifdef CONFIG_FRAME_POINTER
229	/* the first instruction of the prologue was: mov ip, sp */
230	reg_set &= ~(1 << ARM_IP);
231	reg_set |= (1 << ARM_SP);
232	emit(ARM_LDM(ARM_SP, reg_set), ctx);
233#else
234	if (reg_set) {
235		if (ctx->seen & SEEN_CALL)
236			reg_set |= 1 << ARM_PC;
237		emit(ARM_POP(reg_set), ctx);
238	}
239
240	if (!(ctx->seen & SEEN_CALL))
241		emit(ARM_BX(ARM_LR), ctx);
242#endif
243}
244
245static int16_t imm8m(u32 x)
246{
247	u32 rot;
248
249	for (rot = 0; rot < 16; rot++)
250		if ((x & ~ror32(0xff, 2 * rot)) == 0)
251			return rol32(x, 2 * rot) | (rot << 8);
252
253	return -1;
254}
255
256#if __LINUX_ARM_ARCH__ < 7
257
258static u16 imm_offset(u32 k, struct jit_ctx *ctx)
259{
260	unsigned i = 0, offset;
261	u16 imm;
262
263	/* on the "fake" run we just count them (duplicates included) */
264	if (ctx->target == NULL) {
265		ctx->imm_count++;
266		return 0;
267	}
268
269	while ((i < ctx->imm_count) && ctx->imms[i]) {
270		if (ctx->imms[i] == k)
271			break;
272		i++;
273	}
274
275	if (ctx->imms[i] == 0)
276		ctx->imms[i] = k;
277
278	/* constants go just after the epilogue */
279	offset =  ctx->offsets[ctx->skf->len];
280	offset += ctx->prologue_bytes;
281	offset += ctx->epilogue_bytes;
282	offset += i * 4;
283
284	ctx->target[offset / 4] = k;
285
286	/* PC in ARM mode == address of the instruction + 8 */
287	imm = offset - (8 + ctx->idx * 4);
288
289	return imm;
290}
291
292#endif /* __LINUX_ARM_ARCH__ */
293
294/*
295 * Move an immediate that's not an imm8m to a core register.
296 */
297static inline void emit_mov_i_no8m(int rd, u32 val, struct jit_ctx *ctx)
298{
299#if __LINUX_ARM_ARCH__ < 7
300	emit(ARM_LDR_I(rd, ARM_PC, imm_offset(val, ctx)), ctx);
301#else
302	emit(ARM_MOVW(rd, val & 0xffff), ctx);
303	if (val > 0xffff)
304		emit(ARM_MOVT(rd, val >> 16), ctx);
305#endif
306}
307
308static inline void emit_mov_i(int rd, u32 val, struct jit_ctx *ctx)
309{
310	int imm12 = imm8m(val);
311
312	if (imm12 >= 0)
313		emit(ARM_MOV_I(rd, imm12), ctx);
314	else
315		emit_mov_i_no8m(rd, val, ctx);
316}
317
318#if __LINUX_ARM_ARCH__ < 6
319
320static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
321{
322	_emit(cond, ARM_LDRB_I(ARM_R3, r_addr, 1), ctx);
323	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
324	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 3), ctx);
325	_emit(cond, ARM_LSL_I(ARM_R3, ARM_R3, 16), ctx);
326	_emit(cond, ARM_LDRB_I(ARM_R0, r_addr, 2), ctx);
327	_emit(cond, ARM_ORR_S(ARM_R3, ARM_R3, ARM_R1, SRTYPE_LSL, 24), ctx);
328	_emit(cond, ARM_ORR_R(ARM_R3, ARM_R3, ARM_R2), ctx);
329	_emit(cond, ARM_ORR_S(r_res, ARM_R3, ARM_R0, SRTYPE_LSL, 8), ctx);
330}
331
332static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
333{
334	_emit(cond, ARM_LDRB_I(ARM_R1, r_addr, 0), ctx);
335	_emit(cond, ARM_LDRB_I(ARM_R2, r_addr, 1), ctx);
336	_emit(cond, ARM_ORR_S(r_res, ARM_R2, ARM_R1, SRTYPE_LSL, 8), ctx);
337}
338
339static inline void emit_swap16(u8 r_dst, u8 r_src, struct jit_ctx *ctx)
340{
 341	/* r_dst = (r_src << 8) | (r_src >> 8) */
 342	emit(ARM_LSL_I(ARM_R1, r_src, 8), ctx);
 343	emit(ARM_ORR_S(r_dst, ARM_R1, r_src, SRTYPE_LSR, 8), ctx);
 344	emit(ARM_BIC_I(r_dst, r_dst, 0x8ff), ctx);	/* clear r_dst[23:16] */
345}
346
347#else  /* ARMv6+ */
348
349static void emit_load_be32(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
350{
351	_emit(cond, ARM_LDR_I(r_res, r_addr, 0), ctx);
352#ifdef __LITTLE_ENDIAN
353	_emit(cond, ARM_REV(r_res, r_res), ctx);
354#endif
355}
356
357static void emit_load_be16(u8 cond, u8 r_res, u8 r_addr, struct jit_ctx *ctx)
358{
359	_emit(cond, ARM_LDRH_I(r_res, r_addr, 0), ctx);
360#ifdef __LITTLE_ENDIAN
361	_emit(cond, ARM_REV16(r_res, r_res), ctx);
362#endif
363}
364
365static inline void emit_swap16(u8 r_dst __maybe_unused,
366			       u8 r_src __maybe_unused,
367			       struct jit_ctx *ctx __maybe_unused)
368{
369#ifdef __LITTLE_ENDIAN
370	emit(ARM_REV16(r_dst, r_src), ctx);
371#endif
372}
373
374#endif /* __LINUX_ARM_ARCH__ < 6 */
375
376
377/* Compute the immediate value for a PC-relative branch. */
378static inline u32 b_imm(unsigned tgt, struct jit_ctx *ctx)
379{
380	u32 imm;
381
382	if (ctx->target == NULL)
383		return 0;
384	/*
385	 * BPF allows only forward jumps and the offset of the target is
386	 * still the one computed during the first pass.
387	 */
388	imm  = ctx->offsets[tgt] + ctx->prologue_bytes - (ctx->idx * 4 + 8);
389
390	return imm >> 2;
391}
392
393#define OP_IMM3(op, r1, r2, imm_val, ctx)				\
394	do {								\
395		imm12 = imm8m(imm_val);					\
396		if (imm12 < 0) {					\
397			emit_mov_i_no8m(r_scratch, imm_val, ctx);	\
398			emit(op ## _R((r1), (r2), r_scratch), ctx);	\
399		} else {						\
400			emit(op ## _I((r1), (r2), imm12), ctx);		\
401		}							\
402	} while (0)
403
404static inline void emit_err_ret(u8 cond, struct jit_ctx *ctx)
405{
406	if (ctx->ret0_fp_idx >= 0) {
407		_emit(cond, ARM_B(b_imm(ctx->ret0_fp_idx, ctx)), ctx);
408		/* NOP to keep the size constant between passes */
409		emit(ARM_MOV_R(ARM_R0, ARM_R0), ctx);
410	} else {
411		_emit(cond, ARM_MOV_I(ARM_R0, 0), ctx);
412		_emit(cond, ARM_B(b_imm(ctx->skf->len, ctx)), ctx);
413	}
414}
415
416static inline void emit_blx_r(u8 tgt_reg, struct jit_ctx *ctx)
417{
418#if __LINUX_ARM_ARCH__ < 5
419	emit(ARM_MOV_R(ARM_LR, ARM_PC), ctx);
420
421	if (elf_hwcap & HWCAP_THUMB)
422		emit(ARM_BX(tgt_reg), ctx);
423	else
424		emit(ARM_MOV_R(ARM_PC, tgt_reg), ctx);
425#else
426	emit(ARM_BLX_R(tgt_reg), ctx);
427#endif
428}
429
430static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
431{
432#if __LINUX_ARM_ARCH__ == 7
433	if (elf_hwcap & HWCAP_IDIVA) {
434		emit(ARM_UDIV(rd, rm, rn), ctx);
435		return;
436	}
437#endif
438	if (rm != ARM_R0)
439		emit(ARM_MOV_R(ARM_R0, rm), ctx);
440	if (rn != ARM_R1)
441		emit(ARM_MOV_R(ARM_R1, rn), ctx);
442
443	ctx->seen |= SEEN_CALL;
444	emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
445	emit_blx_r(ARM_R3, ctx);
446
447	if (rd != ARM_R0)
448		emit(ARM_MOV_R(rd, ARM_R0), ctx);
449}
450
451static inline void update_on_xread(struct jit_ctx *ctx)
452{
453	if (!(ctx->seen & SEEN_X))
454		ctx->flags |= FLAG_NEED_X_RESET;
455
456	ctx->seen |= SEEN_X;
457}
458
459static int build_body(struct jit_ctx *ctx)
460{
461	void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
462	const struct sk_filter *prog = ctx->skf;
463	const struct sock_filter *inst;
464	unsigned i, load_order, off, condt;
465	int imm12;
466	u32 k;
467
468	for (i = 0; i < prog->len; i++) {
469		inst = &(prog->insns[i]);
470		/* K as an immediate value operand */
471		k = inst->k;
472
473		/* compute offsets only in the fake pass */
474		if (ctx->target == NULL)
475			ctx->offsets[i] = ctx->idx * 4;
476
477		switch (inst->code) {
478		case BPF_S_LD_IMM:
479			emit_mov_i(r_A, k, ctx);
480			break;
481		case BPF_S_LD_W_LEN:
482			ctx->seen |= SEEN_SKB;
483			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
484			emit(ARM_LDR_I(r_A, r_skb,
485				       offsetof(struct sk_buff, len)), ctx);
486			break;
487		case BPF_S_LD_MEM:
488			/* A = scratch[k] */
489			ctx->seen |= SEEN_MEM_WORD(k);
490			emit(ARM_LDR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
491			break;
492		case BPF_S_LD_W_ABS:
493			load_order = 2;
494			goto load;
495		case BPF_S_LD_H_ABS:
496			load_order = 1;
497			goto load;
498		case BPF_S_LD_B_ABS:
499			load_order = 0;
500load:
501			/* the interpreter will deal with the negative K */
502			if ((int)k < 0)
503				return -ENOTSUPP;
504			emit_mov_i(r_off, k, ctx);
505load_common:
506			ctx->seen |= SEEN_DATA | SEEN_CALL;
507
508			if (load_order > 0) {
509				emit(ARM_SUB_I(r_scratch, r_skb_hl,
510					       1 << load_order), ctx);
511				emit(ARM_CMP_R(r_scratch, r_off), ctx);
512				condt = ARM_COND_HS;
513			} else {
514				emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
515				condt = ARM_COND_HI;
516			}
517
518			_emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
519			      ctx);
520
521			if (load_order == 0)
522				_emit(condt, ARM_LDRB_I(r_A, r_scratch, 0),
523				      ctx);
524			else if (load_order == 1)
525				emit_load_be16(condt, r_A, r_scratch, ctx);
526			else if (load_order == 2)
527				emit_load_be32(condt, r_A, r_scratch, ctx);
528
529			_emit(condt, ARM_B(b_imm(i + 1, ctx)), ctx);
530
531			/* the slowpath */
532			emit_mov_i(ARM_R3, (u32)load_func[load_order], ctx);
533			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
534			/* the offset is already in R1 */
535			emit_blx_r(ARM_R3, ctx);
536			/* check the result of skb_copy_bits */
537			emit(ARM_CMP_I(ARM_R1, 0), ctx);
538			emit_err_ret(ARM_COND_NE, ctx);
539			emit(ARM_MOV_R(r_A, ARM_R0), ctx);
540			break;
541		case BPF_S_LD_W_IND:
542			load_order = 2;
543			goto load_ind;
544		case BPF_S_LD_H_IND:
545			load_order = 1;
546			goto load_ind;
547		case BPF_S_LD_B_IND:
548			load_order = 0;
549load_ind:
550			OP_IMM3(ARM_ADD, r_off, r_X, k, ctx);
551			goto load_common;
552		case BPF_S_LDX_IMM:
553			ctx->seen |= SEEN_X;
554			emit_mov_i(r_X, k, ctx);
555			break;
556		case BPF_S_LDX_W_LEN:
557			ctx->seen |= SEEN_X | SEEN_SKB;
558			emit(ARM_LDR_I(r_X, r_skb,
559				       offsetof(struct sk_buff, len)), ctx);
560			break;
561		case BPF_S_LDX_MEM:
562			ctx->seen |= SEEN_X | SEEN_MEM_WORD(k);
563			emit(ARM_LDR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
564			break;
565		case BPF_S_LDX_B_MSH:
566			/* x = ((*(frame + k)) & 0xf) << 2; */
567			ctx->seen |= SEEN_X | SEEN_DATA | SEEN_CALL;
568			/* the interpreter should deal with the negative K */
 569			if ((int)k < 0)
570				return -1;
571			/* offset in r1: we might have to take the slow path */
572			emit_mov_i(r_off, k, ctx);
573			emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
574
575			/* load in r0: common with the slowpath */
576			_emit(ARM_COND_HI, ARM_LDRB_R(ARM_R0, r_skb_data,
577						      ARM_R1), ctx);
578			/*
579			 * emit_mov_i() might generate one or two instructions,
580			 * the same holds for emit_blx_r()
581			 */
582			_emit(ARM_COND_HI, ARM_B(b_imm(i + 1, ctx) - 2), ctx);
583
584			emit(ARM_MOV_R(ARM_R0, r_skb), ctx);
585			/* r_off is r1 */
586			emit_mov_i(ARM_R3, (u32)jit_get_skb_b, ctx);
587			emit_blx_r(ARM_R3, ctx);
588			/* check the return value of skb_copy_bits */
589			emit(ARM_CMP_I(ARM_R1, 0), ctx);
590			emit_err_ret(ARM_COND_NE, ctx);
591
592			emit(ARM_AND_I(r_X, ARM_R0, 0x00f), ctx);
593			emit(ARM_LSL_I(r_X, r_X, 2), ctx);
594			break;
595		case BPF_S_ST:
596			ctx->seen |= SEEN_MEM_WORD(k);
597			emit(ARM_STR_I(r_A, ARM_SP, SCRATCH_OFF(k)), ctx);
598			break;
599		case BPF_S_STX:
600			update_on_xread(ctx);
601			ctx->seen |= SEEN_MEM_WORD(k);
602			emit(ARM_STR_I(r_X, ARM_SP, SCRATCH_OFF(k)), ctx);
603			break;
604		case BPF_S_ALU_ADD_K:
605			/* A += K */
606			OP_IMM3(ARM_ADD, r_A, r_A, k, ctx);
607			break;
608		case BPF_S_ALU_ADD_X:
609			update_on_xread(ctx);
610			emit(ARM_ADD_R(r_A, r_A, r_X), ctx);
611			break;
612		case BPF_S_ALU_SUB_K:
613			/* A -= K */
614			OP_IMM3(ARM_SUB, r_A, r_A, k, ctx);
615			break;
616		case BPF_S_ALU_SUB_X:
617			update_on_xread(ctx);
618			emit(ARM_SUB_R(r_A, r_A, r_X), ctx);
619			break;
620		case BPF_S_ALU_MUL_K:
621			/* A *= K */
622			emit_mov_i(r_scratch, k, ctx);
623			emit(ARM_MUL(r_A, r_A, r_scratch), ctx);
624			break;
625		case BPF_S_ALU_MUL_X:
626			update_on_xread(ctx);
627			emit(ARM_MUL(r_A, r_A, r_X), ctx);
628			break;
629		case BPF_S_ALU_DIV_K:
630			/* current k == reciprocal_value(userspace k) */
631			emit_mov_i(r_scratch, k, ctx);
632			/* A = top 32 bits of the product */
633			emit(ARM_UMULL(r_scratch, r_A, r_A, r_scratch), ctx);
634			break;
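		/*
		 * Note on the reciprocal divide above: at filter-load
		 * time the checker (sk_chk_filter) replaced K with
		 * reciprocal_value(K) ~= 2^32 / K, so A / K is computed
		 * as the high word of the 64-bit product A * K', which
		 * UMULL writes into r_A.  E.g. for K == 10, K' ==
		 * 0x1999999a and (A * K') >> 32 is A / 10.  The rounded
		 * reciprocal is not exact for every dividend, which is
		 * why the v3.15 listing above emits a real division.
		 */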
635		case BPF_S_ALU_DIV_X:
636			update_on_xread(ctx);
637			emit(ARM_CMP_I(r_X, 0), ctx);
638			emit_err_ret(ARM_COND_EQ, ctx);
639			emit_udiv(r_A, r_A, r_X, ctx);
640			break;
641		case BPF_S_ALU_OR_K:
642			/* A |= K */
643			OP_IMM3(ARM_ORR, r_A, r_A, k, ctx);
644			break;
645		case BPF_S_ALU_OR_X:
646			update_on_xread(ctx);
647			emit(ARM_ORR_R(r_A, r_A, r_X), ctx);
648			break;
649		case BPF_S_ALU_AND_K:
650			/* A &= K */
651			OP_IMM3(ARM_AND, r_A, r_A, k, ctx);
652			break;
653		case BPF_S_ALU_AND_X:
654			update_on_xread(ctx);
655			emit(ARM_AND_R(r_A, r_A, r_X), ctx);
656			break;
657		case BPF_S_ALU_LSH_K:
658			if (unlikely(k > 31))
659				return -1;
660			emit(ARM_LSL_I(r_A, r_A, k), ctx);
661			break;
662		case BPF_S_ALU_LSH_X:
663			update_on_xread(ctx);
664			emit(ARM_LSL_R(r_A, r_A, r_X), ctx);
665			break;
666		case BPF_S_ALU_RSH_K:
667			if (unlikely(k > 31))
668				return -1;
669			emit(ARM_LSR_I(r_A, r_A, k), ctx);
670			break;
671		case BPF_S_ALU_RSH_X:
672			update_on_xread(ctx);
673			emit(ARM_LSR_R(r_A, r_A, r_X), ctx);
674			break;
675		case BPF_S_ALU_NEG:
676			/* A = -A */
677			emit(ARM_RSB_I(r_A, r_A, 0), ctx);
678			break;
679		case BPF_S_JMP_JA:
680			/* pc += K */
681			emit(ARM_B(b_imm(i + k + 1, ctx)), ctx);
682			break;
683		case BPF_S_JMP_JEQ_K:
684			/* pc += (A == K) ? pc->jt : pc->jf */
685			condt  = ARM_COND_EQ;
686			goto cmp_imm;
687		case BPF_S_JMP_JGT_K:
688			/* pc += (A > K) ? pc->jt : pc->jf */
689			condt  = ARM_COND_HI;
690			goto cmp_imm;
691		case BPF_S_JMP_JGE_K:
692			/* pc += (A >= K) ? pc->jt : pc->jf */
693			condt  = ARM_COND_HS;
694cmp_imm:
695			imm12 = imm8m(k);
696			if (imm12 < 0) {
697				emit_mov_i_no8m(r_scratch, k, ctx);
698				emit(ARM_CMP_R(r_A, r_scratch), ctx);
699			} else {
700				emit(ARM_CMP_I(r_A, imm12), ctx);
701			}
702cond_jump:
703			if (inst->jt)
704				_emit(condt, ARM_B(b_imm(i + inst->jt + 1,
705						   ctx)), ctx);
706			if (inst->jf)
707				_emit(condt ^ 1, ARM_B(b_imm(i + inst->jf + 1,
708							     ctx)), ctx);
709			break;
710		case BPF_S_JMP_JEQ_X:
711			/* pc += (A == X) ? pc->jt : pc->jf */
712			condt   = ARM_COND_EQ;
713			goto cmp_x;
714		case BPF_S_JMP_JGT_X:
715			/* pc += (A > X) ? pc->jt : pc->jf */
716			condt   = ARM_COND_HI;
717			goto cmp_x;
718		case BPF_S_JMP_JGE_X:
719			/* pc += (A >= X) ? pc->jt : pc->jf */
720			condt   = ARM_COND_CS;
721cmp_x:
722			update_on_xread(ctx);
723			emit(ARM_CMP_R(r_A, r_X), ctx);
724			goto cond_jump;
725		case BPF_S_JMP_JSET_K:
726			/* pc += (A & K) ? pc->jt : pc->jf */
727			condt  = ARM_COND_NE;
728			/* not set iff all zeroes iff Z==1 iff EQ */
729
730			imm12 = imm8m(k);
731			if (imm12 < 0) {
732				emit_mov_i_no8m(r_scratch, k, ctx);
733				emit(ARM_TST_R(r_A, r_scratch), ctx);
734			} else {
735				emit(ARM_TST_I(r_A, imm12), ctx);
736			}
737			goto cond_jump;
738		case BPF_S_JMP_JSET_X:
739			/* pc += (A & X) ? pc->jt : pc->jf */
740			update_on_xread(ctx);
741			condt  = ARM_COND_NE;
742			emit(ARM_TST_R(r_A, r_X), ctx);
743			goto cond_jump;
744		case BPF_S_RET_A:
745			emit(ARM_MOV_R(ARM_R0, r_A), ctx);
746			goto b_epilogue;
747		case BPF_S_RET_K:
748			if ((k == 0) && (ctx->ret0_fp_idx < 0))
749				ctx->ret0_fp_idx = i;
750			emit_mov_i(ARM_R0, k, ctx);
751b_epilogue:
752			if (i != ctx->skf->len - 1)
753				emit(ARM_B(b_imm(prog->len, ctx)), ctx);
754			break;
755		case BPF_S_MISC_TAX:
756			/* X = A */
757			ctx->seen |= SEEN_X;
758			emit(ARM_MOV_R(r_X, r_A), ctx);
759			break;
760		case BPF_S_MISC_TXA:
761			/* A = X */
762			update_on_xread(ctx);
763			emit(ARM_MOV_R(r_A, r_X), ctx);
764			break;
765		case BPF_S_ANC_ALU_XOR_X:
766			/* A ^= X */
767			update_on_xread(ctx);
768			emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
769			break;
770		case BPF_S_ANC_PROTOCOL:
771			/* A = ntohs(skb->protocol) */
772			ctx->seen |= SEEN_SKB;
773			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
774						  protocol) != 2);
775			off = offsetof(struct sk_buff, protocol);
776			emit(ARM_LDRH_I(r_scratch, r_skb, off), ctx);
777			emit_swap16(r_A, r_scratch, ctx);
778			break;
779		case BPF_S_ANC_CPU:
780			/* r_scratch = current_thread_info() */
781			OP_IMM3(ARM_BIC, r_scratch, ARM_SP, THREAD_SIZE - 1, ctx);
782			/* A = current_thread_info()->cpu */
783			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, cpu) != 4);
784			off = offsetof(struct thread_info, cpu);
785			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
786			break;
787		case BPF_S_ANC_IFINDEX:
788			/* A = skb->dev->ifindex */
789			ctx->seen |= SEEN_SKB;
790			off = offsetof(struct sk_buff, dev);
791			emit(ARM_LDR_I(r_scratch, r_skb, off), ctx);
792
793			emit(ARM_CMP_I(r_scratch, 0), ctx);
794			emit_err_ret(ARM_COND_EQ, ctx);
795
796			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
797						  ifindex) != 4);
798			off = offsetof(struct net_device, ifindex);
799			emit(ARM_LDR_I(r_A, r_scratch, off), ctx);
800			break;
801		case BPF_S_ANC_MARK:
802			ctx->seen |= SEEN_SKB;
803			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
804			off = offsetof(struct sk_buff, mark);
805			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
806			break;
807		case BPF_S_ANC_RXHASH:
808			ctx->seen |= SEEN_SKB;
809			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, rxhash) != 4);
810			off = offsetof(struct sk_buff, rxhash);
811			emit(ARM_LDR_I(r_A, r_skb, off), ctx);
812			break;
813		case BPF_S_ANC_QUEUE:
814			ctx->seen |= SEEN_SKB;
815			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
816						  queue_mapping) != 2);
817			BUILD_BUG_ON(offsetof(struct sk_buff,
818					      queue_mapping) > 0xff);
819			off = offsetof(struct sk_buff, queue_mapping);
820			emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
821			break;
822		default:
823			return -1;
824		}
825	}
826
827	/* compute offsets only during the first pass */
828	if (ctx->target == NULL)
829		ctx->offsets[i] = ctx->idx * 4;
830
831	return 0;
832}
833
834
835void bpf_jit_compile(struct sk_filter *fp)
836{
837	struct jit_ctx ctx;
838	unsigned tmp_idx;
839	unsigned alloc_size;
840
841	if (!bpf_jit_enable)
842		return;
843
844	memset(&ctx, 0, sizeof(ctx));
845	ctx.skf		= fp;
846	ctx.ret0_fp_idx = -1;
847
 848	ctx.offsets = kzalloc(4 * (ctx.skf->len + 1), GFP_KERNEL);
849	if (ctx.offsets == NULL)
850		return;
851
852	/* fake pass to fill in the ctx->seen */
853	if (unlikely(build_body(&ctx)))
854		goto out;
855
856	tmp_idx = ctx.idx;
857	build_prologue(&ctx);
858	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
859
860#if __LINUX_ARM_ARCH__ < 7
861	tmp_idx = ctx.idx;
862	build_epilogue(&ctx);
863	ctx.epilogue_bytes = (ctx.idx - tmp_idx) * 4;
864
865	ctx.idx += ctx.imm_count;
866	if (ctx.imm_count) {
 867		ctx.imms = kzalloc(4 * ctx.imm_count, GFP_KERNEL);
868		if (ctx.imms == NULL)
869			goto out;
870	}
871#else
872	/* there's nothing after the epilogue on ARMv7 */
873	build_epilogue(&ctx);
874#endif
875
876	alloc_size = 4 * ctx.idx;
877	ctx.target = module_alloc(max(sizeof(struct work_struct),
878				      alloc_size));
879	if (unlikely(ctx.target == NULL))
880		goto out;
881
882	ctx.idx = 0;
883	build_prologue(&ctx);
884	build_body(&ctx);
885	build_epilogue(&ctx);
886
887	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
888
889#if __LINUX_ARM_ARCH__ < 7
890	if (ctx.imm_count)
891		kfree(ctx.imms);
892#endif
893
894	if (bpf_jit_enable > 1)
895		print_hex_dump(KERN_INFO, "BPF JIT code: ",
896			       DUMP_PREFIX_ADDRESS, 16, 4, ctx.target,
897			       alloc_size, false);
898
899	fp->bpf_func = (void *)ctx.target;
900out:
901	kfree(ctx.offsets);
902	return;
903}
904
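/*
 * Freeing is deferred to a workqueue because a filter can be released
 * from atomic context (an RCU callback), where module_free() may not
 * be called.  The dead JIT image itself is recycled as the work item,
 * which is why bpf_jit_compile() above allocates at least
 * sizeof(struct work_struct).
 */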
905static void bpf_jit_free_worker(struct work_struct *work)
906{
907	module_free(NULL, work);
908}
909
910void bpf_jit_free(struct sk_filter *fp)
911{
912	struct work_struct *work;
913
914	if (fp->bpf_func != sk_run_filter) {
915		work = (struct work_struct *)fp->bpf_func;
916
917		INIT_WORK(work, bpf_jit_free_worker);
918		schedule_work(work);
919	}
920}