/*
 * Just-In-Time compiler for BPF filters on MIPS
 *
 * Copyright (c) 2014 Imagination Technologies Ltd.
 * Author: Markos Chandras <markos.chandras@imgtec.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License.
 */

#include <linux/bitops.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/filter.h>
#include <linux/if_vlan.h>
#include <linux/moduleloader.h>
#include <linux/netdevice.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <asm/asm.h>
#include <asm/bitops.h>
#include <asm/cacheflush.h>
#include <asm/cpu-features.h>
#include <asm/uasm.h>

#include "bpf_jit.h"

/* ABI
 * r_skb_hl	SKB header length
 * r_data	SKB data pointer
 * r_off	Offset
 * r_A		BPF register A
 * r_X		BPF register X
 * r_skb	*skb
 * r_M		*scratch memory
 * r_skb_len	SKB length
 *
 * On entry (*bpf_func)(*skb, *filter)
 * a0 = MIPS_R_A0 = skb;
 * a1 = MIPS_R_A1 = filter;
 *
 * Stack
 * ...
 * M[15]
 * M[14]
 * M[13]
 * ...
 * M[0] <-- r_M
 * saved reg k-1
 * saved reg k-2
 * ...
 * saved reg 0 <-- r_sp
 * <no argument area>
 *
 *                     Packet layout
 *
 * <--------------------- len ------------------------>
 * <--skb-len(r_skb_hl)-->< ----- skb->data_len ------>
 * ----------------------------------------------------
 * |                  skb->data                       |
 * ----------------------------------------------------
 */

#define ptr typeof(unsigned long)

#define SCRATCH_OFF(k)		(4 * (k))
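
/*
 * e.g. the classic BPF scratch slot M[3] lives at r_M + SCRATCH_OFF(3),
 * i.e. r_M + 12, since each of the BPF_MEMWORDS slots is a 32-bit word.
 */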

/* JIT flags */
#define SEEN_CALL		(1 << BPF_MEMWORDS)
#define SEEN_SREG_SFT		(BPF_MEMWORDS + 1)
#define SEEN_SREG_BASE		(1 << SEEN_SREG_SFT)
#define SEEN_SREG(x)		(SEEN_SREG_BASE << (x))
#define SEEN_OFF		SEEN_SREG(2)
#define SEEN_A			SEEN_SREG(3)
#define SEEN_X			SEEN_SREG(4)
#define SEEN_SKB		SEEN_SREG(5)
#define SEEN_MEM		SEEN_SREG(6)
/* SEEN_SKB_DATA also implies skb_hl and skb_len */
#define SEEN_SKB_DATA		(SEEN_SREG(7) | SEEN_SREG(1) | SEEN_SREG(0))
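
/*
 * With BPF_MEMWORDS == 16 this puts SEEN_CALL at bit 16, just above the
 * low 16 bits reserved for scratch-word tracking, and maps saved register
 * x to bit 17 + x: e.g. SEEN_A == 1 << 20 and SEEN_SKB == 1 << 22.
 */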

/* Arguments used by JIT */
#define ARGS_USED_BY_JIT	2 /* only applicable to 64-bit */

#define SBIT(x)			(1 << (x)) /* Signed version of BIT() */

/**
 * struct jit_ctx - JIT context
 * @skf:		The sk_filter
 * @prologue_bytes:	Number of bytes for prologue
 * @idx:		Instruction index
 * @flags:		JIT flags
 * @offsets:		Instruction offsets
 * @target:		Memory location for the compiled filter
 */
struct jit_ctx {
	const struct bpf_prog *skf;
	unsigned int prologue_bytes;
	u32 idx;
	u32 flags;
	u32 *offsets;
	u32 *target;
};

static inline int optimize_div(u32 *k)
{
	/* power of 2 divides can be implemented with right shift */
	if (!(*k & (*k-1))) {
		*k = ilog2(*k);
		return 1;
	}

	return 0;
}
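
/*
 * Worked example: k == 16 gives 16 & 15 == 0, so *k becomes ilog2(16) == 4
 * and the BPF_DIV handler below emits a single srl by 4 instead of a
 * divu/mflo pair. k == 0 would also pass this test; we rely on the BPF
 * checker having already rejected constant divisions by zero.
 */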

static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);

/* Simply emit the instruction if the JIT memory space has been allocated */
#define emit_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		uasm_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

/*
 * Similar to emit_instr but it must be used when we need to emit
 * 32-bit or 64-bit instructions
 */
#define emit_long_instr(ctx, func, ...)			\
do {							\
	if ((ctx)->target != NULL) {			\
		u32 *p = &(ctx)->target[ctx->idx];	\
		UASM_i_##func(&p, ##__VA_ARGS__);	\
	}						\
	(ctx)->idx++;					\
} while (0)

/* Determine if immediate is within the 16-bit signed range */
static inline bool is_range16(s32 imm)
{
	return !(imm >= SBIT(15) || imm < -SBIT(15));
}
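
/*
 * Boundary examples: is_range16(32767) and is_range16(-32768) are true,
 * while is_range16(32768) is false, so a 32768 immediate takes the
 * two-instruction lui/ori path in emit_load_imm() below.
 */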

static inline void emit_addu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, addu, dst, src1, src2);
}

static inline void emit_nop(struct jit_ctx *ctx)
{
	emit_instr(ctx, nop);
}

/* Load a u32 immediate to a register */
static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		/* addiu can only handle s16 */
		if (!is_range16(imm)) {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
			p = &ctx->target[ctx->idx + 1];
			uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
		} else {
			u32 *p = &ctx->target[ctx->idx];
			uasm_i_addiu(&p, dst, r_zero, imm);
		}
	}
	ctx->idx++;

	if (!is_range16(imm))
		ctx->idx++;
}
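
/*
 * e.g. emit_load_imm(r_A, 0x12345678, ctx) expands to
 *	lui	r_tmp_imm, 0x1234
 *	ori	r_A, r_tmp_imm, 0x5678
 * whereas a 16-bit signed immediate such as 0x1234 costs a single
 *	addiu	r_A, r_zero, 0x1234
 */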

static inline void emit_or(unsigned int dst, unsigned int src1,
			   unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, or, dst, src1, src2);
}

static inline void emit_ori(unsigned int dst, unsigned src, u32 imm,
			    struct jit_ctx *ctx)
{
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_or(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, ori, dst, src, imm);
	}
}

static inline void emit_daddiu(unsigned int dst, unsigned int src,
			       int imm, struct jit_ctx *ctx)
{
	/*
	 * Only used for stack, so the imm is relatively small
	 * and it fits in 15 bits
	 */
	emit_instr(ctx, daddiu, dst, src, imm);
}

static inline void emit_addiu(unsigned int dst, unsigned int src,
			      u32 imm, struct jit_ctx *ctx)
{
	if (!is_range16(imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_addu(dst, r_tmp, src, ctx);
	} else {
		emit_instr(ctx, addiu, dst, src, imm);
	}
}

static inline void emit_and(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, and, dst, src1, src2);
}

static inline void emit_andi(unsigned int dst, unsigned int src,
			     u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it into a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_and(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, andi, dst, src, imm);
	}
}

static inline void emit_xor(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, xor, dst, src1, src2);
}

static inline void emit_xori(ptr dst, ptr src, u32 imm, struct jit_ctx *ctx)
{
	/* If imm does not fit in u16 then load it into a register */
	if (imm >= BIT(16)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_xor(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, xori, dst, src, imm);
	}
}

static inline void emit_stack_offset(int offset, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDIU, r_sp, r_sp, offset);
}

static inline void emit_subu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, subu, dst, src1, src2);
}

static inline void emit_neg(unsigned int reg, struct jit_ctx *ctx)
{
	emit_subu(reg, r_zero, reg, ctx);
}

static inline void emit_sllv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, sllv, dst, src, sa);
}

static inline void emit_sll(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5 bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, sll, dst, src, sa);
}

static inline void emit_srlv(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, srlv, dst, src, sa);
}

static inline void emit_srl(unsigned int dst, unsigned int src,
			    unsigned int sa, struct jit_ctx *ctx)
{
	/* sa is 5 bits long */
	if (sa >= BIT(5))
		/* Shifting >= 32 results in zero */
		emit_jit_reg_move(dst, r_zero, ctx);
	else
		emit_instr(ctx, srl, dst, src, sa);
}

static inline void emit_slt(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, slt, dst, src1, src2);
}

static inline void emit_sltu(unsigned int dst, unsigned int src1,
			     unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, sltu, dst, src1, src2);
}

static inline void emit_sltiu(unsigned dst, unsigned int src,
			      unsigned int imm, struct jit_ctx *ctx)
{
	/* 16-bit immediate */
	if (!is_range16((s32)imm)) {
		emit_load_imm(r_tmp, imm, ctx);
		emit_sltu(dst, src, r_tmp, ctx);
	} else {
		emit_instr(ctx, sltiu, dst, src, imm);
	}
}

/* Store register on the stack */
static inline void emit_store_stack_reg(ptr reg, ptr base,
					unsigned int offset,
					struct jit_ctx *ctx)
{
	emit_long_instr(ctx, SW, reg, offset, base);
}

static inline void emit_store(ptr reg, ptr base, unsigned int offset,
			      struct jit_ctx *ctx)
{
	emit_instr(ctx, sw, reg, offset, base);
}

static inline void emit_load_stack_reg(ptr reg, ptr base,
				       unsigned int offset,
				       struct jit_ctx *ctx)
{
	emit_long_instr(ctx, LW, reg, offset, base);
}

static inline void emit_load(unsigned int reg, unsigned int base,
			     unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lw, reg, offset, base);
}

static inline void emit_load_byte(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lb, reg, offset, base);
}

static inline void emit_half_load(unsigned int reg, unsigned int base,
				  unsigned int offset, struct jit_ctx *ctx)
{
	emit_instr(ctx, lh, reg, offset, base);
}

static inline void emit_mul(unsigned int dst, unsigned int src1,
			    unsigned int src2, struct jit_ctx *ctx)
{
	emit_instr(ctx, mul, dst, src1, src2);
}

static inline void emit_div(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mflo(&p, dst);
	}
	ctx->idx += 2; /* 2 insts */
}

static inline void emit_mod(unsigned int dst, unsigned int src,
			    struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];
		uasm_i_divu(&p, dst, src);
		p = &ctx->target[ctx->idx + 1];
		uasm_i_mfhi(&p, dst);
	}
	ctx->idx += 2; /* 2 insts */
}
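
/*
 * divu leaves the quotient in the LO register and the remainder in HI, so
 * emit_div() retrieves LO via mflo while emit_mod() retrieves HI via mfhi;
 * either way the pair costs exactly two instructions.
 */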

static inline void emit_dsll(unsigned int dst, unsigned int src,
			     unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsll, dst, src, sa);
}

static inline void emit_dsrl32(unsigned int dst, unsigned int src,
			       unsigned int sa, struct jit_ctx *ctx)
{
	emit_instr(ctx, dsrl32, dst, src, sa);
}

static inline void emit_wsbh(unsigned int dst, unsigned int src,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, wsbh, dst, src);
}

/* load pointer to register */
static inline void emit_load_ptr(unsigned int dst, unsigned int src,
				     int imm, struct jit_ctx *ctx)
{
	/* src contains the base address of the 32/64-bit pointer */
	emit_long_instr(ctx, LW, dst, imm, src);
}

/* load a function pointer to register */
static inline void emit_load_func(unsigned int reg, ptr imm,
				  struct jit_ctx *ctx)
{
	if (IS_ENABLED(CONFIG_64BIT)) {
		/* At this point imm is always 64-bit */
		emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(r_tmp, r_tmp_imm, (imm >> 16) & 0xffff, ctx);
		emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
		emit_ori(reg, r_tmp_imm, imm & 0xffff, ctx);
	} else {
		emit_load_imm(reg, imm, ctx);
	}
}
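
/*
 * On 64-bit the helper address is built 16 bits at a time: the upper 32
 * bits land in r_tmp first, then two dsll-by-16/ori pairs append bits
 * 31:16 and 15:0. e.g. 0xffffffff80123456 costs five instructions, since
 * the sign-extending upper word 0xffffffff fits a single addiu.
 */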

/* Move to real MIPS register */
static inline void emit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_long_instr(ctx, ADDU, dst, src, r_zero);
}

/* Move to JIT (32-bit) register */
static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx)
{
	emit_addu(dst, src, r_zero, ctx);
}

/* Compute the immediate value for PC-relative branches. */
static inline u32 b_imm(unsigned int tgt, struct jit_ctx *ctx)
{
	if (ctx->target == NULL)
		return 0;

	/*
	 * We want a pc-relative branch. We only do forward branches
	 * so tgt is always after pc. tgt is the instruction offset
	 * we want to jump to.
	 *
	 * Branch on MIPS:
	 * I: target_offset <- sign_extend(offset)
	 * I+1: PC += target_offset (delay slot)
	 *
	 * ctx->idx currently points to the branch instruction
	 * but the offset is added to the delay slot so we need
	 * to subtract 4.
	 */
	return ctx->offsets[tgt] -
		(ctx->idx * 4 - ctx->prologue_bytes) - 4;
}
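
/*
 * Worked example: with the target 64 bytes into the body
 * (ctx->offsets[tgt] == 64), a 16-byte prologue and the branch sitting
 * at ctx->idx == 10, the emitted immediate is 64 - (40 - 16) - 4 == 36.
 */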

static inline void emit_bcond(int cond, unsigned int reg1, unsigned int reg2,
			      unsigned int imm, struct jit_ctx *ctx)
{
	if (ctx->target != NULL) {
		u32 *p = &ctx->target[ctx->idx];

		switch (cond) {
		case MIPS_COND_EQ:
			uasm_i_beq(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_NE:
			uasm_i_bne(&p, reg1, reg2, imm);
			break;
		case MIPS_COND_ALL:
			uasm_i_b(&p, imm);
			break;
		default:
			pr_warn("%s: Unhandled branch conditional: %d\n",
				__func__, cond);
		}
	}
	ctx->idx++;
}

static inline void emit_b(unsigned int imm, struct jit_ctx *ctx)
{
	emit_bcond(MIPS_COND_ALL, r_zero, r_zero, imm, ctx);
}

static inline void emit_jalr(unsigned int link, unsigned int reg,
			     struct jit_ctx *ctx)
{
	emit_instr(ctx, jalr, link, reg);
}

static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
{
	emit_instr(ctx, jr, reg);
}

static inline u16 align_sp(unsigned int num)
{
	/* Double word alignment for 32-bit, quadword for 64-bit */
	unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
	num = (num + (align - 1)) & -align;
	return num;
}
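
/*
 * e.g. align_sp(20) returns 24 on 32-bit (8-byte stack alignment) and 32
 * on 64-bit (16-byte alignment), matching the o32 and n64 ABI rules.
 */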

static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
{
	int i = 0, real_off = 0;
	u32 sflags, tmp_flags;

	/* Adjust the stack pointer */
	emit_stack_offset(-align_sp(offset), ctx);

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is essentially a bitmap */
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_store_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					     ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* save return address */
	if (ctx->flags & SEEN_CALL) {
		emit_store_stack_reg(r_ra, r_sp, real_off, ctx);
		real_off += SZREG;
	}

	/* Setup r_M leaving the alignment gap if necessary */
	if (ctx->flags & SEEN_MEM) {
		if (real_off % (SZREG * 2))
			real_off += SZREG;
		emit_long_instr(ctx, ADDIU, r_M, r_sp, real_off);
	}
}

static void restore_bpf_jit_regs(struct jit_ctx *ctx,
				 unsigned int offset)
{
	int i, real_off = 0;
	u32 sflags, tmp_flags;

	tmp_flags = sflags = ctx->flags >> SEEN_SREG_SFT;
	/* sflags is a bitmap */
	i = 0;
	while (tmp_flags) {
		if ((sflags >> i) & 0x1) {
			emit_load_stack_reg(MIPS_R_S0 + i, r_sp, real_off,
					    ctx);
			real_off += SZREG;
		}
		i++;
		tmp_flags >>= 1;
	}

	/* restore return address */
	if (ctx->flags & SEEN_CALL)
		emit_load_stack_reg(r_ra, r_sp, real_off, ctx);

	/* Restore the sp and discard the scratch memory */
	emit_stack_offset(align_sp(offset), ctx);
}

static unsigned int get_stack_depth(struct jit_ctx *ctx)
{
	int sp_off = 0;

	/* How many s* regs do we need to preserve? */
	sp_off += hweight32(ctx->flags >> SEEN_SREG_SFT) * SZREG;

	if (ctx->flags & SEEN_MEM)
		sp_off += 4 * BPF_MEMWORDS; /* BPF_MEMWORDS are 32-bit */

	if (ctx->flags & SEEN_CALL)
		sp_off += SZREG; /* Space for our ra register */

	return sp_off;
}
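
/*
 * Worked example (32-bit, SZREG == 4): a filter whose flags include
 * SEEN_A, SEEN_X and SEEN_MEM (three s* registers) plus SEEN_CALL needs
 * 3 * 4 + 4 * 16 + 4 == 80 bytes; align_sp(80) leaves that unchanged
 * since 80 is already a multiple of 8.
 */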

static void build_prologue(struct jit_ctx *ctx)
{
	int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	save_bpf_jit_regs(ctx, sp_off);

	if (ctx->flags & SEEN_SKB)
		emit_reg_move(r_skb, MIPS_R_A0, ctx);

	if (ctx->flags & SEEN_SKB_DATA) {
		/* Load packet length */
		emit_load(r_skb_len, r_skb, offsetof(struct sk_buff, len),
			  ctx);
		emit_load(r_tmp, r_skb, offsetof(struct sk_buff, data_len),
			  ctx);
		/* Load the data pointer */
		emit_load_ptr(r_skb_data, r_skb,
			      offsetof(struct sk_buff, data), ctx);
		/* Load the header length */
		emit_subu(r_skb_hl, r_skb_len, r_tmp, ctx);
	}

	if (ctx->flags & SEEN_X)
		emit_jit_reg_move(r_X, r_zero, ctx);

	/* Do not leak kernel data to userspace */
	if (bpf_needs_clear_a(&ctx->skf->insns[0]))
		emit_jit_reg_move(r_A, r_zero, ctx);
}

static void build_epilogue(struct jit_ctx *ctx)
{
	unsigned int sp_off;

	/* Calculate the total offset for the stack pointer */
	sp_off = get_stack_depth(ctx);
	restore_bpf_jit_regs(ctx, sp_off);

	/* Return */
	emit_jr(r_ra, ctx);
	emit_nop(ctx);
}

#define CHOOSE_LOAD_FUNC(K, func) \
	((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative : func) : \
	 func##_positive)
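
/*
 * e.g. CHOOSE_LOAD_FUNC(14, sk_load_word) resolves to
 * sk_load_word_positive, a negative offset at or above SKF_LL_OFF picks
 * sk_load_word_negative, and anything below SKF_LL_OFF falls back to the
 * generic sk_load_word.
 */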

static int build_body(struct jit_ctx *ctx)
{
	const struct bpf_prog *prog = ctx->skf;
	const struct sock_filter *inst;
	unsigned int i, off, condt;
	u32 k, b_off __maybe_unused;
	u8 (*sk_load_func)(unsigned long *skb, int offset);

	for (i = 0; i < prog->len; i++) {
		u16 code;

		inst = &(prog->insns[i]);
		pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
			 __func__, inst->code, inst->jt, inst->jf, inst->k);
		k = inst->k;
		code = bpf_anc_helper(inst);

		if (ctx->target == NULL)
			ctx->offsets[i] = ctx->idx * 4;

		switch (code) {
		case BPF_LD | BPF_IMM:
			/* A <- k ==> li r_A, k */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_A, k, ctx);
			break;
		case BPF_LD | BPF_W | BPF_LEN:
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
			/* A <- len ==> lw r_A, offset(skb) */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, len);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_LD | BPF_MEM:
			/* A <- M[k] ==> lw r_A, offset(M) */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LD | BPF_W | BPF_ABS:
			/* A <- P[k:4] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_word);
			goto load;
		case BPF_LD | BPF_H | BPF_ABS:
			/* A <- P[k:2] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_half);
			goto load;
		case BPF_LD | BPF_B | BPF_ABS:
			/* A <- P[k:1] */
			sk_load_func = CHOOSE_LOAD_FUNC(k, sk_load_byte);
load:
			emit_load_imm(r_off, k, ctx);
load_common:
			ctx->flags |= SEEN_CALL | SEEN_OFF |
				SEEN_SKB | SEEN_A | SEEN_SKB_DATA;

			emit_load_func(r_s0, (ptr)sk_load_func, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			/* Load second argument to delay slot */
			emit_reg_move(MIPS_R_A1, r_off, ctx);
			/* Check the error value */
			emit_bcond(MIPS_COND_EQ, r_ret, 0, b_imm(i + 1, ctx),
				   ctx);
			/* Load return register on DS for failures */
			emit_reg_move(r_ret, r_zero, ctx);
			/* Return with error */
			emit_b(b_imm(prog->len, ctx), ctx);
			emit_nop(ctx);
			break;
		case BPF_LD | BPF_W | BPF_IND:
			/* A <- P[X + k:4] */
			sk_load_func = sk_load_word;
			goto load_ind;
		case BPF_LD | BPF_H | BPF_IND:
			/* A <- P[X + k:2] */
			sk_load_func = sk_load_half;
			goto load_ind;
		case BPF_LD | BPF_B | BPF_IND:
			/* A <- P[X + k:1] */
			sk_load_func = sk_load_byte;
load_ind:
			ctx->flags |= SEEN_OFF | SEEN_X;
			emit_addiu(r_off, r_X, k, ctx);
			goto load_common;
		case BPF_LDX | BPF_IMM:
			/* X <- k */
			ctx->flags |= SEEN_X;
			emit_load_imm(r_X, k, ctx);
			break;
		case BPF_LDX | BPF_MEM:
			/* X <- M[k] */
			ctx->flags |= SEEN_X | SEEN_MEM;
			emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_LDX | BPF_W | BPF_LEN:
			/* X <- len */
			ctx->flags |= SEEN_X | SEEN_SKB;
			off = offsetof(struct sk_buff, len);
			emit_load(r_X, r_skb, off, ctx);
			break;
		case BPF_LDX | BPF_B | BPF_MSH:
			/* X <- 4 * (P[k:1] & 0xf) */
			ctx->flags |= SEEN_X | SEEN_CALL | SEEN_SKB;
			/* Load offset to a1 */
			emit_load_func(r_s0, (ptr)sk_load_byte, ctx);
			/*
			 * This may emit two instructions so it may not fit
			 * in the delay slot. So use a0 in the delay slot.
			 */
			emit_load_imm(MIPS_R_A1, k, ctx);
			emit_jalr(MIPS_R_RA, r_s0, ctx);
			emit_reg_move(MIPS_R_A0, r_skb, ctx); /* delay slot */
			/* Check the error value */
			emit_bcond(MIPS_COND_NE, r_ret, 0,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			/* We are good */
			/* X <- P[k:1] & 0xf */
			emit_andi(r_X, r_A, 0xf, ctx);
			/* X << 2 */
			emit_b(b_imm(i + 1, ctx), ctx);
			emit_sll(r_X, r_X, 2, ctx); /* delay slot */
			break;
		case BPF_ST:
			/* M[k] <- A */
			ctx->flags |= SEEN_MEM | SEEN_A;
			emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_STX:
			/* M[k] <- X */
			ctx->flags |= SEEN_MEM | SEEN_X;
			emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_K:
			/* A += K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_ADD | BPF_X:
			/* A += X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_addu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_K:
			/* A -= K */
			ctx->flags |= SEEN_A;
			emit_addiu(r_A, r_A, -k, ctx);
			break;
		case BPF_ALU | BPF_SUB | BPF_X:
			/* A -= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_subu(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_K:
			/* A *= K */
			/* Load K to scratch register before MUL */
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_mul(r_A, r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MUL | BPF_X:
			/* A *= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_mul(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_DIV | BPF_K:
			/* A /= k */
			if (k == 1)
				break;
			if (optimize_div(&k)) {
				ctx->flags |= SEEN_A;
				emit_srl(r_A, r_A, k, ctx);
				break;
			}
			ctx->flags |= SEEN_A;
			emit_load_imm(r_s0, k, ctx);
			emit_div(r_A, r_s0, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_K:
			/* A %= k */
			if (k == 1) {
				ctx->flags |= SEEN_A;
				emit_jit_reg_move(r_A, r_zero, ctx);
			} else {
				ctx->flags |= SEEN_A;
				emit_load_imm(r_s0, k, ctx);
				emit_mod(r_A, r_s0, ctx);
			}
			break;
		case BPF_ALU | BPF_DIV | BPF_X:
			/* A /= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			emit_div(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_MOD | BPF_X:
			/* A %= X */
			ctx->flags |= SEEN_X | SEEN_A;
			/* Check if r_X is zero */
			emit_bcond(MIPS_COND_EQ, r_X, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_load_imm(r_ret, 0, ctx); /* delay slot */
			emit_mod(r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_K:
			/* A |= K */
			ctx->flags |= SEEN_A;
			emit_ori(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_OR | BPF_X:
			/* A |= X */
			ctx->flags |= SEEN_A;
			emit_ori(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_XOR | BPF_K:
			/* A ^= k */
			ctx->flags |= SEEN_A;
			emit_xori(r_A, r_A, k, ctx);
			break;
		case BPF_ANC | SKF_AD_ALU_XOR_X:
		case BPF_ALU | BPF_XOR | BPF_X:
			/* A ^= X */
			ctx->flags |= SEEN_A;
			emit_xor(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_K:
			/* A &= K */
			ctx->flags |= SEEN_A;
			emit_andi(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_AND | BPF_X:
			/* A &= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_and(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_K:
			/* A <<= K */
			ctx->flags |= SEEN_A;
			emit_sll(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_LSH | BPF_X:
			/* A <<= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_sllv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_K:
			/* A >>= K */
			ctx->flags |= SEEN_A;
			emit_srl(r_A, r_A, k, ctx);
			break;
		case BPF_ALU | BPF_RSH | BPF_X:
			/* A >>= X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_srlv(r_A, r_A, r_X, ctx);
			break;
		case BPF_ALU | BPF_NEG:
			/* A = -A */
			ctx->flags |= SEEN_A;
			emit_neg(r_A, ctx);
			break;
		case BPF_JMP | BPF_JA:
			/* pc += K */
			emit_b(b_imm(i + k + 1, ctx), ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JEQ | BPF_K:
			/* pc += ( A == K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JEQ | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A == X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_EQ | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_K:
			/* pc += ( A >= K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGE | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A >= X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GE | MIPS_COND_X;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_K:
			/* pc += ( A > K ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_K;
			goto jmp_cmp;
		case BPF_JMP | BPF_JGT | BPF_X:
			ctx->flags |= SEEN_X;
			/* pc += ( A > X ) ? pc->jt : pc->jf */
			condt = MIPS_COND_GT | MIPS_COND_X;
jmp_cmp:
			/* Greater or Equal */
			if ((condt & MIPS_COND_GE) ||
			    (condt & MIPS_COND_GT)) {
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_sltiu(r_s0, r_A, k, ctx);
				} else { /* X */
					ctx->flags |= SEEN_A |
						SEEN_X;
					emit_sltu(r_s0, r_A, r_X, ctx);
				}
				/* A < (K|X) ? r_scratch = 1 */
				b_off = b_imm(i + inst->jf + 1, ctx);
				emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
					   ctx);
				emit_nop(ctx);
				/* A > (K|X) ? scratch = 0 */
				if (condt & MIPS_COND_GT) {
					/* Checking for equality */
					ctx->flags |= SEEN_A | SEEN_X;
					if (condt & MIPS_COND_K)
						emit_load_imm(r_s0, k, ctx);
					else
						emit_jit_reg_move(r_s0, r_X,
								  ctx);
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* Finally, A > K|X */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				} else {
					/* A >= (K|X) so jump */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_b(b_off, ctx);
					emit_nop(ctx);
				}
			} else {
				/* A == K|X */
				if (condt & MIPS_COND_K) { /* K */
					ctx->flags |= SEEN_A;
					emit_load_imm(r_s0, k, ctx);
					/* jump true */
					b_off = b_imm(i + inst->jt + 1, ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1,
						      ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_s0,
						   b_off, ctx);
					emit_nop(ctx);
				} else { /* X */
					/* jump true */
					ctx->flags |= SEEN_A | SEEN_X;
					b_off = b_imm(i + inst->jt + 1,
						      ctx);
					emit_bcond(MIPS_COND_EQ, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
					/* jump false */
					b_off = b_imm(i + inst->jf + 1, ctx);
					emit_bcond(MIPS_COND_NE, r_A, r_X,
						   b_off, ctx);
					emit_nop(ctx);
				}
			}
			break;
		case BPF_JMP | BPF_JSET | BPF_K:
			ctx->flags |= SEEN_A;
			/* pc += (A & K) ? pc->jt : pc->jf */
			emit_load_imm(r_s1, k, ctx);
			emit_and(r_s0, r_A, r_s1, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_JMP | BPF_JSET | BPF_X:
			ctx->flags |= SEEN_X | SEEN_A;
			/* pc += (A & X) ? pc->jt : pc->jf */
			emit_and(r_s0, r_A, r_X, ctx);
			/* jump true */
			b_off = b_imm(i + inst->jt + 1, ctx);
			emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off, ctx);
			emit_nop(ctx);
			/* jump false */
			b_off = b_imm(i + inst->jf + 1, ctx);
			emit_b(b_off, ctx);
			emit_nop(ctx);
			break;
		case BPF_RET | BPF_A:
			ctx->flags |= SEEN_A;
			if (i != prog->len - 1)
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_A, ctx); /* delay slot */
			break;
		case BPF_RET | BPF_K:
			/*
			 * It can emit two instructions so it does not fit on
			 * the delay slot.
			 */
			emit_load_imm(r_ret, k, ctx);
			if (i != prog->len - 1) {
				/*
				 * If this is not the last instruction
				 * then jump to the epilogue
				 */
				emit_b(b_imm(prog->len, ctx), ctx);
				emit_nop(ctx);
			}
			break;
		case BPF_MISC | BPF_TAX:
			/* X = A */
			ctx->flags |= SEEN_X | SEEN_A;
			emit_jit_reg_move(r_X, r_A, ctx);
			break;
		case BPF_MISC | BPF_TXA:
			/* A = X */
			ctx->flags |= SEEN_A | SEEN_X;
			emit_jit_reg_move(r_A, r_X, ctx);
			break;
		/* AUX */
		case BPF_ANC | SKF_AD_PROTOCOL:
			/* A = ntohs(skb->protocol) */
			ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  protocol) != 2);
			off = offsetof(struct sk_buff, protocol);
			emit_half_load(r_A, r_skb, off, ctx);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
			/* This needs little endian fixup */
			if (cpu_has_wsbh) {
				/* R2 and later have the wsbh instruction */
				emit_wsbh(r_A, r_A, ctx);
			} else {
				/* Get first byte */
				emit_andi(r_tmp_imm, r_A, 0xff, ctx);
				/* Shift it */
				emit_sll(r_tmp, r_tmp_imm, 8, ctx);
				/* Get second byte */
				emit_srl(r_tmp_imm, r_A, 8, ctx);
				emit_andi(r_tmp_imm, r_tmp_imm, 0xff, ctx);
				/* Put everything together in r_A */
				emit_or(r_A, r_tmp, r_tmp_imm, ctx);
			}
#endif
			break;
		case BPF_ANC | SKF_AD_CPU:
			ctx->flags |= SEEN_A | SEEN_OFF;
			/* A = current_thread_info()->cpu */
			BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
						  cpu) != 4);
			off = offsetof(struct thread_info, cpu);
			/* $28/gp points to the thread_info struct */
			emit_load(r_A, 28, off, ctx);
			break;
		case BPF_ANC | SKF_AD_IFINDEX:
			/* A = skb->dev->ifindex */
			ctx->flags |= SEEN_SKB | SEEN_A;
			off = offsetof(struct sk_buff, dev);
			/* Load *dev pointer */
			emit_load_ptr(r_s0, r_skb, off, ctx);
			/* error (0) in the delay slot */
			emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
				   b_imm(prog->len, ctx), ctx);
			emit_reg_move(r_ret, r_zero, ctx);
			BUILD_BUG_ON(FIELD_SIZEOF(struct net_device,
						  ifindex) != 4);
			off = offsetof(struct net_device, ifindex);
			emit_load(r_A, r_s0, off, ctx);
			break;
		case BPF_ANC | SKF_AD_MARK:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
			off = offsetof(struct sk_buff, mark);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_RXHASH:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
			off = offsetof(struct sk_buff, hash);
			emit_load(r_A, r_skb, off, ctx);
			break;
		case BPF_ANC | SKF_AD_VLAN_TAG:
		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  vlan_tci) != 2);
			off = offsetof(struct sk_buff, vlan_tci);
			emit_half_load(r_s0, r_skb, off, ctx);
			if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
				emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
			} else {
				emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
				/* return 1 if present */
				emit_sltu(r_A, r_zero, r_A, ctx);
			}
			break;
		case BPF_ANC | SKF_AD_PKTTYPE:
			ctx->flags |= SEEN_SKB;

			emit_load_byte(r_tmp, r_skb, PKT_TYPE_OFFSET(), ctx);
			/* Keep only the last 3 bits */
			emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
#ifdef __BIG_ENDIAN_BITFIELD
			/* Get the actual packet type to the lower 3 bits */
			emit_srl(r_A, r_A, 5, ctx);
#endif
			break;
		case BPF_ANC | SKF_AD_QUEUE:
			ctx->flags |= SEEN_SKB | SEEN_A;
			BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
						  queue_mapping) != 2);
			BUILD_BUG_ON(offsetof(struct sk_buff,
					      queue_mapping) > 0xff);
			off = offsetof(struct sk_buff, queue_mapping);
			emit_half_load(r_A, r_skb, off, ctx);
			break;
		default:
			pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
				 inst->code);
			return -1;
		}
	}

	/* compute offsets only during the first pass */
	if (ctx->target == NULL)
		ctx->offsets[i] = ctx->idx * 4;

	return 0;
}
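
/*
 * First-pass example: for the two-instruction program
 *	BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0), BPF_STMT(BPF_RET | BPF_A, 0)
 * the sizing pass (ctx->target == NULL) records offsets[0] == 0,
 * offsets[1] == 4 (the lw for LEN is one instruction) and offsets[2] == 8
 * for the epilogue, so b_imm(prog->len, ctx) can branch past the body.
 */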

int bpf_jit_enable __read_mostly;

void bpf_jit_compile(struct bpf_prog *fp)
{
	struct jit_ctx ctx;
	unsigned int alloc_size, tmp_idx;

	if (!bpf_jit_enable)
		return;

	memset(&ctx, 0, sizeof(ctx));

	ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
	if (ctx.offsets == NULL)
		return;

	ctx.skf = fp;

	if (build_body(&ctx))
		goto out;

	tmp_idx = ctx.idx;
	build_prologue(&ctx);
	ctx.prologue_bytes = (ctx.idx - tmp_idx) * 4;
	/* just to complete the ctx.idx count */
	build_epilogue(&ctx);

	alloc_size = 4 * ctx.idx;
	ctx.target = module_alloc(alloc_size);
	if (ctx.target == NULL)
		goto out;

	/* Clean it */
	memset(ctx.target, 0, alloc_size);

	ctx.idx = 0;

	/* Generate the actual JIT code */
	build_prologue(&ctx);
	build_body(&ctx);
	build_epilogue(&ctx);

	/* Update the icache */
	flush_icache_range((ptr)ctx.target, (ptr)(ctx.target + ctx.idx));

	if (bpf_jit_enable > 1)
		/* Dump JIT code */
		bpf_jit_dump(fp->len, alloc_size, 2, ctx.target);

	fp->bpf_func = (void *)ctx.target;
	fp->jited = 1;

out:
	kfree(ctx.offsets);
}
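
/*
 * Illustrative usage sketch (not part of the original file): roughly how a
 * kernel-side user would hand a classic filter to this JIT. This assumes
 * the standard bpf_prog_create() helper; with bpf_jit_enable set, filter
 * setup ends up calling bpf_jit_compile() above on the checked program.
 *
 *	struct sock_filter insns[] = {
 *		BPF_STMT(BPF_LD | BPF_W | BPF_LEN, 0),	// A <- skb->len
 *		BPF_STMT(BPF_RET | BPF_A, 0),		// accept A bytes
 *	};
 *	struct sock_fprog_kern fprog = {
 *		.len	= ARRAY_SIZE(insns),
 *		.filter	= insns,
 *	};
 *	struct bpf_prog *prog;
 *
 *	if (bpf_prog_create(&prog, &fprog))
 *		return -EINVAL;
 *	// ... run the filter, then bpf_prog_destroy(prog)
 */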

void bpf_jit_free(struct bpf_prog *fp)
{
	if (fp->jited)
		module_memfree(fp->bpf_func);

	bpf_prog_unlock_free(fp);
}