#include <linux/moduleloader.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/filter.h>
#include <linux/cache.h>
#include <linux/if_vlan.h>

#include <asm/cacheflush.h>
#include <asm/ptrace.h>

#include "bpf_jit.h"

int bpf_jit_enable __read_mostly;

static inline bool is_simm13(unsigned int value)
{
	return value + 0x1000 < 0x2000;
}
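/* The check above relies on unsigned wraparound: adding 0x1000 maps the
 * signed range [-0x1000, 0xfff] onto [0, 0x1fff], so a single unsigned
 * comparison tests whether 'value' fits in a signed 13-bit immediate.
 * For example, is_simm13(-4096) and is_simm13(4095) hold, while
 * is_simm13(-4097) and is_simm13(4096) do not.
 */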

static void bpf_flush_icache(void *start_, void *end_)
{
#ifdef CONFIG_SPARC64
	/* Cheetah's I-cache is fully coherent, so only spitfire
	 * chips need an explicit flush here.
	 */
	if (tlb_type == spitfire) {
		unsigned long start = (unsigned long) start_;
		unsigned long end = (unsigned long) end_;

		start &= ~7UL;
		end = (end + 7UL) & ~7UL;
		while (start < end) {
			flushi(start);
			start += 32;
		}
	}
#endif
}
 37
 38#define SEEN_DATAREF 1 /* might call external helpers */
 39#define SEEN_XREG    2 /* ebx is used */
 40#define SEEN_MEM     4 /* use mem[] for temporary storage */
 41
#define S13(X)		((X) & 0x1fff)
#define IMMED		0x00002000
#define RD(X)		((X) << 25)
#define RS1(X)		((X) << 14)
#define RS2(X)		((X))
#define OP(X)		((X) << 30)
#define OP2(X)		((X) << 22)
#define OP3(X)		((X) << 19)
#define COND(X)		((X) << 25)
#define F1(X)		OP(X)
#define F2(X, Y)	(OP(X) | OP2(Y))
#define F3(X, Y)	(OP(X) | OP3(Y))

#define CONDN		COND(0x0)
#define CONDE		COND(0x1)
#define CONDLE		COND(0x2)
#define CONDL		COND(0x3)
#define CONDLEU		COND(0x4)
#define CONDCS		COND(0x5)
#define CONDNEG		COND(0x6)
#define CONDVC		COND(0x7)
#define CONDA		COND(0x8)
#define CONDNE		COND(0x9)
#define CONDG		COND(0xa)
#define CONDGE		COND(0xb)
#define CONDGU		COND(0xc)
#define CONDCC		COND(0xd)
#define CONDPOS		COND(0xe)
#define CONDVS		COND(0xf)

#define CONDGEU		CONDCC
#define CONDLU		CONDCS

#define WDISP22(X)	(((X) >> 2) & 0x3fffff)

#define BA		(F2(0, 2) | CONDA)
#define BGU		(F2(0, 2) | CONDGU)
#define BLEU		(F2(0, 2) | CONDLEU)
#define BGEU		(F2(0, 2) | CONDGEU)
#define BLU		(F2(0, 2) | CONDLU)
#define BE		(F2(0, 2) | CONDE)
#define BNE		(F2(0, 2) | CONDNE)

#ifdef CONFIG_SPARC64
#define BNE_PTR		(F2(0, 1) | CONDNE | (2 << 20))
#else
#define BNE_PTR		BNE
#endif

#define SETHI(K, REG)	\
	(F2(0, 0x4) | RD(REG) | (((K) >> 10) & 0x3fffff))
#define OR_LO(K, REG)	\
	(F3(2, 0x02) | IMMED | RS1(REG) | ((K) & 0x3ff) | RD(REG))

#define ADD		F3(2, 0x00)
#define AND		F3(2, 0x01)
#define ANDCC		F3(2, 0x11)
#define OR		F3(2, 0x02)
#define XOR		F3(2, 0x03)
#define SUB		F3(2, 0x04)
#define SUBCC		F3(2, 0x14)
#define MUL		F3(2, 0x0a)	/* umul */
#define DIV		F3(2, 0x0e)	/* udiv */
#define SLL		F3(2, 0x25)
#define SRL		F3(2, 0x26)
#define JMPL		F3(2, 0x38)
#define CALL		F1(1)
#define BR		F2(0, 0x01)
#define RD_Y		F3(2, 0x28)
#define WR_Y		F3(2, 0x30)

#define LD32		F3(3, 0x00)
#define LD8		F3(3, 0x01)
#define LD16		F3(3, 0x02)
#define LD64		F3(3, 0x0b)
#define ST32		F3(3, 0x04)

#ifdef CONFIG_SPARC64
#define LDPTR		LD64
#define BASE_STACKFRAME	176
#else
#define LDPTR		LD32
#define BASE_STACKFRAME	96
#endif
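/* BASE_STACKFRAME is the minimal stack frame the ABI requires: 176
 * bytes on sparc64 (v9) and 96 bytes on sparc32 (v8).  The prologue
 * below carves the filter's mem[] scratch slots out above it.
 */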

#define LD32I		(LD32 | IMMED)
#define LD8I		(LD8 | IMMED)
#define LD16I		(LD16 | IMMED)
#define LD64I		(LD64 | IMMED)
#define LDPTRI		(LDPTR | IMMED)
#define ST32I		(ST32 | IMMED)

#define emit_nop()		\
do {				\
	*prog++ = SETHI(0, G0);	\
} while (0)

#define emit_neg()					\
do {	/* sub %g0, r_A, r_A */				\
	*prog++ = SUB | RS1(G0) | RS2(r_A) | RD(r_A);	\
} while (0)

#define emit_reg_move(FROM, TO)				\
do {	/* or %g0, FROM, TO */				\
	*prog++ = OR | RS1(G0) | RS2(FROM) | RD(TO);	\
} while (0)

#define emit_clear(REG)					\
do {	/* or %g0, %g0, REG */				\
	*prog++ = OR | RS1(G0) | RS2(G0) | RD(REG);	\
} while (0)

#define emit_set_const(K, REG)					\
do {	/* sethi %hi(K), REG */					\
	*prog++ = SETHI(K, REG);				\
	/* or REG, %lo(K), REG */				\
	*prog++ = OR_LO(K, REG);				\
} while (0)
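/* For example, emit_set_const(0xdeadbeef, r_TMP) emits the usual
 * two-instruction constant build:
 *
 *	sethi	%hi(0xdeadbeef), r_TMP		! top 22 bits
 *	or	r_TMP, %lo(0xdeadbeef), r_TMP	! low 10 bits
 */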

	/* Emit
	 *
	 *	OP	r_A, r_X, r_A
	 */
#define emit_alu_X(OPCODE)					\
do {								\
	seen |= SEEN_XREG;					\
	*prog++ = OPCODE | RS1(r_A) | RS2(r_X) | RD(r_A);	\
} while (0)

	/* Emit either:
	 *
	 *	OP	r_A, K, r_A
	 *
	 * or
	 *
	 *	sethi	%hi(K), r_TMP
	 *	or	r_TMP, %lo(K), r_TMP
	 *	OP	r_A, r_TMP, r_A
	 *
	 * depending upon whether K fits in a signed 13-bit
	 * immediate instruction field.  Emit nothing if K
	 * is zero.
	 */
#define emit_alu_K(OPCODE, K)					\
do {								\
	if (K) {						\
		unsigned int _insn = OPCODE;			\
		_insn |= RS1(r_A) | RD(r_A);			\
		if (is_simm13(K)) {				\
			*prog++ = _insn | IMMED | S13(K);	\
		} else {					\
			emit_set_const(K, r_TMP);		\
			*prog++ = _insn | RS2(r_TMP);		\
		}						\
	}							\
} while (0)
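/* So emit_alu_K(ADD, 4) emits a single "add r_A, 4, r_A", while
 * emit_alu_K(ADD, 0x12345) (too wide for a simm13) emits the
 * three-instruction sethi/or/add sequence through r_TMP.
 */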

#define emit_loadimm(K, DEST)						\
do {									\
	if (is_simm13(K)) {						\
		/* or %g0, K, DEST */					\
		*prog++ = OR | IMMED | RS1(G0) | S13(K) | RD(DEST);	\
	} else {							\
		emit_set_const(K, DEST);				\
	}								\
} while (0)

#define emit_loadptr(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(void *));	\
	*prog++ = LDPTRI | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define emit_load32(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u32));	\
	*prog++ = LD32I | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define emit_load16(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u16));	\
	*prog++ = LD16I | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define __emit_load8(BASE, STRUCT, FIELD, DEST)				\
do {	unsigned int _off = offsetof(STRUCT, FIELD);			\
	*prog++ = LD8I | RS1(BASE) | S13(_off) | RD(DEST);		\
} while (0)

#define emit_load8(BASE, STRUCT, FIELD, DEST)				\
do {	BUILD_BUG_ON(FIELD_SIZEOF(STRUCT, FIELD) != sizeof(u8));	\
	__emit_load8(BASE, STRUCT, FIELD, DEST);			\
} while (0)

#define emit_ldmem(OFF, DEST)					\
do {	*prog++ = LD32I | RS1(FP) | S13(-(OFF)) | RD(DEST);	\
} while (0)

#define emit_stmem(OFF, SRC)					\
do {	*prog++ = ST32I | RS1(FP) | S13(-(OFF)) | RD(SRC);	\
} while (0)

#ifdef CONFIG_SMP
#ifdef CONFIG_SPARC64
#define emit_load_cpu(REG)						\
	emit_load16(G6, struct thread_info, cpu, REG)
#else
#define emit_load_cpu(REG)						\
	emit_load32(G6, struct thread_info, cpu, REG)
#endif
#else
#define emit_load_cpu(REG)	emit_clear(REG)
#endif

#define emit_skb_loadptr(FIELD, DEST) \
	emit_loadptr(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load32(FIELD, DEST) \
	emit_load32(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load16(FIELD, DEST) \
	emit_load16(r_SKB, struct sk_buff, FIELD, DEST)
#define __emit_skb_load8(FIELD, DEST) \
	__emit_load8(r_SKB, struct sk_buff, FIELD, DEST)
#define emit_skb_load8(FIELD, DEST) \
	emit_load8(r_SKB, struct sk_buff, FIELD, DEST)

#define emit_jmpl(BASE, IMM_OFF, LREG) \
	*prog++ = (JMPL | IMMED | RS1(BASE) | S13(IMM_OFF) | RD(LREG))

#define emit_call(FUNC)					\
do {	void *_here = image + addrs[i] - 8;		\
	unsigned int _off = (void *)(FUNC) - _here;	\
	*prog++ = CALL | (((_off) >> 2) & 0x3fffffff);	\
	emit_nop();					\
} while (0)
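/* CALL encodes a PC-relative, word-aligned 30-bit displacement, hence
 * the byte offset shifted right by two.  "image + addrs[i] - 8" is the
 * address of the CALL instruction itself (see the branch offset note
 * below), and emit_nop() fills its delay slot.
 */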

#define emit_branch(BR_OPC, DEST)			\
do {	unsigned int _here = addrs[i] - 8;		\
	*prog++ = BR_OPC | WDISP22((DEST) - _here);	\
} while (0)

#define emit_branch_off(BR_OPC, OFF)			\
do {	*prog++ = BR_OPC | WDISP22(OFF);		\
} while (0)

#define emit_jump(DEST)		emit_branch(BA, DEST)

#define emit_read_y(REG)	*prog++ = RD_Y | RD(REG)
#define emit_write_y(REG)	*prog++ = WR_Y | IMMED | RS1(REG) | S13(0)

#define emit_cmp(R1, R2) \
	*prog++ = (SUBCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_cmpi(R1, IMM) \
	*prog++ = (SUBCC | IMMED | RS1(R1) | S13(IMM) | RD(G0))

#define emit_btst(R1, R2) \
	*prog++ = (ANDCC | RS1(R1) | RS2(R2) | RD(G0))

#define emit_btsti(R1, IMM) \
	*prog++ = (ANDCC | IMMED | RS1(R1) | S13(IMM) | RD(G0))

#define emit_sub(R1, R2, R3) \
	*prog++ = (SUB | RS1(R1) | RS2(R2) | RD(R3))

#define emit_subi(R1, IMM, R3) \
	*prog++ = (SUB | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_add(R1, R2, R3) \
	*prog++ = (ADD | RS1(R1) | RS2(R2) | RD(R3))

#define emit_addi(R1, IMM, R3) \
	*prog++ = (ADD | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_and(R1, R2, R3) \
	*prog++ = (AND | RS1(R1) | RS2(R2) | RD(R3))

#define emit_andi(R1, IMM, R3) \
	*prog++ = (AND | IMMED | RS1(R1) | S13(IMM) | RD(R3))

#define emit_alloc_stack(SZ) \
	*prog++ = (SUB | IMMED | RS1(SP) | S13(SZ) | RD(SP))

#define emit_release_stack(SZ) \
	*prog++ = (ADD | IMMED | RS1(SP) | S13(SZ) | RD(SP))
/* A note about branch offset calculations.  The addrs[] array,
 * indexed by BPF instruction, records the address after all the
 * sparc instructions emitted for that BPF instruction.
 *
 * The most common case is to emit a branch at the end of such
 * a code sequence.  So this would be two instructions, the
 * branch and its delay slot.
 *
 * Therefore by default the branch emitters calculate the branch
 * offset field as:
 *
 *	destination - (addrs[i] - 8)
 *
 * This "addrs[i] - 8" is the address of the branch itself or
 * what "." would be in assembler notation.  The "8" part is
 * how we take into consideration the branch and its delay
 * slot mentioned above.
 *
 * Sometimes we need to emit a branch earlier in the code
 * sequence.  And in these situations we adjust "destination"
 * to accommodate this difference.  For example, if we needed
 * to emit a branch (and its delay slot) right before the
 * final instruction emitted for a BPF opcode, we'd use
 * "destination + 4" instead of just plain "destination" above.
 *
 * This is why you see all of these funny emit_branch() and
 * emit_jump() calls with adjusted offsets.
 */
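/* A worked example: suppose the branch for instruction i is the last
 * thing emitted and sits at byte offset 40, so with its delay slot
 * addrs[i] = 48 and the branch itself is at addrs[i] - 8 = 40.
 * Jumping to a destination at offset 16 yields
 * WDISP22(16 - 40) = WDISP22(-24), a displacement of -6 words from
 * the branch.
 */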

void bpf_jit_compile(struct sk_filter *fp)
{
	unsigned int cleanup_addr, proglen, oldproglen = 0;
	u32 temp[8], *prog, *func, seen = 0, pass;
	const struct sock_filter *filter = fp->insns;
	int i, flen = fp->len, pc_ret0 = -1;
	unsigned int *addrs;
	void *image;

	if (!bpf_jit_enable)
		return;

	addrs = kmalloc(flen * sizeof(*addrs), GFP_KERNEL);
	if (addrs == NULL)
		return;

	/* Before the first pass, make a rough estimate of addrs[]:
	 * each BPF instruction is translated to fewer than 64 bytes.
	 */
	for (proglen = 0, i = 0; i < flen; i++) {
		proglen += 64;
		addrs[i] = proglen;
	}
	cleanup_addr = proglen; /* epilogue address */
	image = NULL;
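	/* The JIT converges over multiple passes: addrs[] starts out as
	 * a generous over-estimate, each pass re-emits the program with
	 * the addrs[] from the previous pass, and once proglen stops
	 * changing the code is emitted one last time into 'image'.  Any
	 * growth after the image is allocated is a fatal error below.
	 */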
	for (pass = 0; pass < 10; pass++) {
		u8 seen_or_pass0 = (pass == 0) ? (SEEN_XREG | SEEN_DATAREF | SEEN_MEM) : seen;

		/* no prologue/epilogue for trivial filters (RET something) */
		proglen = 0;
		prog = temp;

		/* Prologue */
		if (seen_or_pass0) {
			if (seen_or_pass0 & SEEN_MEM) {
				unsigned int sz = BASE_STACKFRAME;
				sz += BPF_MEMWORDS * sizeof(u32);
				emit_alloc_stack(sz);
			}

			/* Make sure we don't leak kernel memory. */
			if (seen_or_pass0 & SEEN_XREG)
				emit_clear(r_X);

			/* If this filter needs to access skb data,
			 * load %o4 and %o5 with:
			 *  %o4 = skb->len - skb->data_len
			 *  %o5 = skb->data
			 * And also back up %o7 into r_saved_O7 so we can
			 * invoke the stubs using 'call'.
			 */
			if (seen_or_pass0 & SEEN_DATAREF) {
				emit_load32(r_SKB, struct sk_buff, len, r_HEADLEN);
				emit_load32(r_SKB, struct sk_buff, data_len, r_TMP);
				emit_sub(r_HEADLEN, r_TMP, r_HEADLEN);
				emit_loadptr(r_SKB, struct sk_buff, data, r_SKB_DATA);
			}
		}
		emit_reg_move(O7, r_saved_O7);

		switch (filter[0].code) {
		case BPF_S_RET_K:
		case BPF_S_LD_W_LEN:
		case BPF_S_ANC_PROTOCOL:
		case BPF_S_ANC_PKTTYPE:
		case BPF_S_ANC_IFINDEX:
		case BPF_S_ANC_MARK:
		case BPF_S_ANC_RXHASH:
		case BPF_S_ANC_VLAN_TAG:
		case BPF_S_ANC_VLAN_TAG_PRESENT:
		case BPF_S_ANC_CPU:
		case BPF_S_ANC_QUEUE:
		case BPF_S_LD_W_ABS:
		case BPF_S_LD_H_ABS:
		case BPF_S_LD_B_ABS:
			/* The first instruction sets the A register (or is
			 * a "RET 'constant'")
			 */
			break;
		default:
			/* Make sure we don't leak kernel information to the
			 * user.
			 */
			emit_clear(r_A); /* A = 0 */
		}

		for (i = 0; i < flen; i++) {
			unsigned int K = filter[i].k;
			unsigned int t_offset;
			unsigned int f_offset;
			u32 t_op, f_op;
			int ilen;

			switch (filter[i].code) {
			case BPF_S_ALU_ADD_X:	/* A += X; */
				emit_alu_X(ADD);
				break;
			case BPF_S_ALU_ADD_K:	/* A += K; */
				emit_alu_K(ADD, K);
				break;
			case BPF_S_ALU_SUB_X:	/* A -= X; */
				emit_alu_X(SUB);
				break;
			case BPF_S_ALU_SUB_K:	/* A -= K */
				emit_alu_K(SUB, K);
				break;
			case BPF_S_ALU_AND_X:	/* A &= X */
				emit_alu_X(AND);
				break;
			case BPF_S_ALU_AND_K:	/* A &= K */
				emit_alu_K(AND, K);
				break;
			case BPF_S_ALU_OR_X:	/* A |= X */
				emit_alu_X(OR);
				break;
			case BPF_S_ALU_OR_K:	/* A |= K */
				emit_alu_K(OR, K);
				break;
			case BPF_S_ANC_ALU_XOR_X: /* A ^= X; */
			case BPF_S_ALU_XOR_X:
				emit_alu_X(XOR);
				break;
			case BPF_S_ALU_XOR_K:	/* A ^= K */
				emit_alu_K(XOR, K);
				break;
			case BPF_S_ALU_LSH_X:	/* A <<= X */
				emit_alu_X(SLL);
				break;
			case BPF_S_ALU_LSH_K:	/* A <<= K */
				emit_alu_K(SLL, K);
				break;
			case BPF_S_ALU_RSH_X:	/* A >>= X */
				emit_alu_X(SRL);
				break;
			case BPF_S_ALU_RSH_K:	/* A >>= K */
				emit_alu_K(SRL, K);
				break;
			case BPF_S_ALU_MUL_X:	/* A *= X; */
				emit_alu_X(MUL);
				break;
			case BPF_S_ALU_MUL_K:	/* A *= K */
				emit_alu_K(MUL, K);
				break;
			case BPF_S_ALU_DIV_K:	/* A /= K with K != 0 */
				if (K == 1)
					break;
				emit_write_y(G0);
#ifdef CONFIG_SPARC32
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
#endif
				emit_alu_K(DIV, K);
				break;
			case BPF_S_ALU_DIV_X:	/* A /= X; */
				emit_cmpi(r_X, 0);
				if (pc_ret0 > 0) {
					t_offset = addrs[pc_ret0 - 1];
#ifdef CONFIG_SPARC32
					emit_branch(BE, t_offset + 20);
#else
					emit_branch(BE, t_offset + 8);
#endif
					emit_nop(); /* delay slot */
				} else {
					emit_branch_off(BNE, 16);
					emit_nop();
#ifdef CONFIG_SPARC32
					emit_jump(cleanup_addr + 20);
#else
					emit_jump(cleanup_addr + 8);
#endif
					emit_clear(r_A);
				}
				emit_write_y(G0);
#ifdef CONFIG_SPARC32
				/* The Sparc v8 architecture requires
				 * three instructions between a %y
				 * register write and the first use.
				 */
				emit_nop();
				emit_nop();
				emit_nop();
#endif
				emit_alu_X(DIV);
				break;
			case BPF_S_ALU_NEG:
				emit_neg();
				break;
			case BPF_S_RET_K:
				if (!K) {
					if (pc_ret0 == -1)
						pc_ret0 = i;
					emit_clear(r_A);
				} else {
					emit_loadimm(K, r_A);
				}
				/* Fallthrough */
			case BPF_S_RET_A:
				if (seen_or_pass0) {
					if (i != flen - 1) {
						emit_jump(cleanup_addr);
						emit_nop();
						break;
					}
					if (seen_or_pass0 & SEEN_MEM) {
						unsigned int sz = BASE_STACKFRAME;
						sz += BPF_MEMWORDS * sizeof(u32);
						emit_release_stack(sz);
					}
				}
				/* jmpl %r_saved_O7 + 8, %g0 */
				emit_jmpl(r_saved_O7, 8, G0);
				emit_reg_move(r_A, O0); /* delay slot */
				break;
			case BPF_S_MISC_TAX:
				seen |= SEEN_XREG;
				emit_reg_move(r_A, r_X);
				break;
			case BPF_S_MISC_TXA:
				seen |= SEEN_XREG;
				emit_reg_move(r_X, r_A);
				break;
			case BPF_S_ANC_CPU:
				emit_load_cpu(r_A);
				break;
			case BPF_S_ANC_PROTOCOL:
				emit_skb_load16(protocol, r_A);
				break;
#if 0
				/* GCC won't let us take the address of
				 * a bit field even though we very much
				 * know what we are doing here.
				 */
			case BPF_S_ANC_PKTTYPE:
				__emit_skb_load8(pkt_type, r_A);
				emit_alu_K(SRL, 5);
				break;
#endif
			case BPF_S_ANC_IFINDEX:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BNE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load32(r_A, struct net_device, ifindex, r_A);
				break;
			case BPF_S_ANC_MARK:
				emit_skb_load32(mark, r_A);
				break;
			case BPF_S_ANC_QUEUE:
				emit_skb_load16(queue_mapping, r_A);
				break;
			case BPF_S_ANC_HATYPE:
				emit_skb_loadptr(dev, r_A);
				emit_cmpi(r_A, 0);
				emit_branch(BNE_PTR, cleanup_addr + 4);
				emit_nop();
				emit_load16(r_A, struct net_device, type, r_A);
				break;
			case BPF_S_ANC_RXHASH:
				emit_skb_load32(hash, r_A);
				break;
			case BPF_S_ANC_VLAN_TAG:
			case BPF_S_ANC_VLAN_TAG_PRESENT:
				emit_skb_load16(vlan_tci, r_A);
				if (filter[i].code == BPF_S_ANC_VLAN_TAG) {
					emit_andi(r_A, VLAN_VID_MASK, r_A);
				} else {
					emit_loadimm(VLAN_TAG_PRESENT, r_TMP);
					emit_and(r_A, r_TMP, r_A);
				}
				break;

			case BPF_S_LD_IMM:
				emit_loadimm(K, r_A);
				break;
			case BPF_S_LDX_IMM:
				emit_loadimm(K, r_X);
				break;
			case BPF_S_LD_MEM:
				emit_ldmem(K * 4, r_A);
				break;
			case BPF_S_LDX_MEM:
				emit_ldmem(K * 4, r_X);
				break;
			case BPF_S_ST:
				emit_stmem(K * 4, r_A);
				break;
			case BPF_S_STX:
				emit_stmem(K * 4, r_X);
				break;

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
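/* Negative constant offsets in [SKF_LL_OFF, -1] (the SKF_LL_OFF and
 * SKF_NET_OFF header-relative ranges) go to the _negative_offset
 * helper; anything more negative falls back to the generic helper,
 * and non-negative offsets take the _positive_offset fast path.
 */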

			case BPF_S_LD_W_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_word);
common_load:			seen |= SEEN_DATAREF;
				emit_loadimm(K, r_OFF);
				emit_call(func);
				break;
			case BPF_S_LD_H_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_half);
				goto common_load;
			case BPF_S_LD_B_ABS:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte);
				goto common_load;
			case BPF_S_LDX_B_MSH:
				func = CHOOSE_LOAD_FUNC(K, bpf_jit_load_byte_msh);
				goto common_load;
			case BPF_S_LD_W_IND:
				func = bpf_jit_load_word;
common_load_ind:		seen |= SEEN_DATAREF | SEEN_XREG;
				if (K) {
					if (is_simm13(K)) {
						emit_addi(r_X, K, r_OFF);
					} else {
						emit_loadimm(K, r_TMP);
						emit_add(r_X, r_TMP, r_OFF);
					}
				} else {
					emit_reg_move(r_X, r_OFF);
				}
				emit_call(func);
				break;
			case BPF_S_LD_H_IND:
				func = bpf_jit_load_half;
				goto common_load_ind;
			case BPF_S_LD_B_IND:
				func = bpf_jit_load_byte;
				goto common_load_ind;
			case BPF_S_JMP_JA:
				emit_jump(addrs[i + K]);
				emit_nop();
				break;

#define COND_SEL(CODE, TOP, FOP)	\
	case CODE:			\
		t_op = TOP;		\
		f_op = FOP;		\
		goto cond_branch
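/* Each comparison carries both the taken-branch opcode (TOP) and its
 * logical complement (FOP): cond_branch below branches on f_op when
 * the true edge simply falls through (filter[i].jt == 0).
 */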

			COND_SEL(BPF_S_JMP_JGT_K, BGU, BLEU);
			COND_SEL(BPF_S_JMP_JGE_K, BGEU, BLU);
			COND_SEL(BPF_S_JMP_JEQ_K, BE, BNE);
			COND_SEL(BPF_S_JMP_JSET_K, BNE, BE);
			COND_SEL(BPF_S_JMP_JGT_X, BGU, BLEU);
			COND_SEL(BPF_S_JMP_JGE_X, BGEU, BLU);
			COND_SEL(BPF_S_JMP_JEQ_X, BE, BNE);
			COND_SEL(BPF_S_JMP_JSET_X, BNE, BE);

cond_branch:			f_offset = addrs[i + filter[i].jf];
				t_offset = addrs[i + filter[i].jt];

				/* same targets, can avoid doing the test :) */
				if (filter[i].jt == filter[i].jf) {
					emit_jump(t_offset);
					emit_nop();
					break;
				}

				switch (filter[i].code) {
				case BPF_S_JMP_JGT_X:
				case BPF_S_JMP_JGE_X:
				case BPF_S_JMP_JEQ_X:
					seen |= SEEN_XREG;
					emit_cmp(r_A, r_X);
					break;
				case BPF_S_JMP_JSET_X:
					seen |= SEEN_XREG;
					emit_btst(r_A, r_X);
					break;
				case BPF_S_JMP_JEQ_K:
				case BPF_S_JMP_JGT_K:
				case BPF_S_JMP_JGE_K:
					if (is_simm13(K)) {
						emit_cmpi(r_A, K);
					} else {
						emit_loadimm(K, r_TMP);
						emit_cmp(r_A, r_TMP);
					}
					break;
				case BPF_S_JMP_JSET_K:
					if (is_simm13(K)) {
						emit_btsti(r_A, K);
					} else {
						emit_loadimm(K, r_TMP);
						emit_btst(r_A, r_TMP);
					}
					break;
				}
				if (filter[i].jt != 0) {
					if (filter[i].jf)
						t_offset += 8;
					emit_branch(t_op, t_offset);
					emit_nop(); /* delay slot */
					if (filter[i].jf) {
						emit_jump(f_offset);
						emit_nop();
					}
					break;
				}
				emit_branch(f_op, f_offset);
				emit_nop(); /* delay slot */
				break;

			default:
				/* hmm, filter too complex; give up on the
				 * JIT and fall back to the interpreter
				 */
				goto out;
			}
			ilen = (void *) prog - (void *) temp;
			if (image) {
				if (unlikely(proglen + ilen > oldproglen)) {
					pr_err("bpf_jit_compile fatal error\n");
					kfree(addrs);
					module_free(NULL, image);
					return;
				}
				memcpy(image + proglen, temp, ilen);
			}
			proglen += ilen;
			addrs[i] = proglen;
			prog = temp;
		}
		/* The last BPF instruction is always a RET, so use its
		 * address to locate the cleanup instruction(s).
		 */
		cleanup_addr = proglen - 8; /* jmpl; mov r_A, %o0; */
		if (seen_or_pass0 & SEEN_MEM)
			cleanup_addr -= 4; /* add %sp, X, %sp; */

		if (image) {
			if (proglen != oldproglen)
				pr_err("bpf_jit_compile proglen=%u != oldproglen=%u\n",
				       proglen, oldproglen);
			break;
		}
		if (proglen == oldproglen) {
			image = module_alloc(proglen);
			if (!image)
				goto out;
		}
		oldproglen = proglen;
	}

	if (bpf_jit_enable > 1)
		bpf_jit_dump(flen, proglen, pass, image);

	if (image) {
		bpf_flush_icache(image, image + proglen);
		fp->bpf_func = (void *)image;
		fp->jited = 1;
	}
out:
	kfree(addrs);
	return;
}

void bpf_jit_free(struct sk_filter *fp)
{
	if (fp->jited)
		module_free(NULL, fp->bpf_func);
	kfree(fp);
}