/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * BPF JIT compiler for ARM64
 *
 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
 */
#ifndef _BPF_JIT_H
#define _BPF_JIT_H

#include <asm/insn.h>

/* 5-bit Register Operand */
#define A64_R(x) AARCH64_INSN_REG_##x
#define A64_FP AARCH64_INSN_REG_FP
#define A64_LR AARCH64_INSN_REG_LR
#define A64_ZR AARCH64_INSN_REG_ZR
#define A64_SP AARCH64_INSN_REG_SP

#define A64_VARIANT(sf) \
	((sf) ? AARCH64_INSN_VARIANT_64BIT : AARCH64_INSN_VARIANT_32BIT)

/* Compare & branch (immediate) */
#define A64_COMP_BRANCH(sf, Rt, offset, type) \
	aarch64_insn_gen_comp_branch_imm(0, offset, Rt, A64_VARIANT(sf), \
		AARCH64_INSN_BRANCH_COMP_##type)
#define A64_CBZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, ZERO)
#define A64_CBNZ(sf, Rt, imm19) A64_COMP_BRANCH(sf, Rt, (imm19) << 2, NONZERO)

/* Conditional branch (immediate) */
#define A64_COND_BRANCH(cond, offset) \
	aarch64_insn_gen_cond_branch_imm(0, offset, cond)
#define A64_COND_EQ AARCH64_INSN_COND_EQ /* == */
#define A64_COND_NE AARCH64_INSN_COND_NE /* != */
#define A64_COND_CS AARCH64_INSN_COND_CS /* unsigned >= */
#define A64_COND_HI AARCH64_INSN_COND_HI /* unsigned > */
#define A64_COND_LS AARCH64_INSN_COND_LS /* unsigned <= */
#define A64_COND_CC AARCH64_INSN_COND_CC /* unsigned < */
#define A64_COND_GE AARCH64_INSN_COND_GE /* signed >= */
#define A64_COND_GT AARCH64_INSN_COND_GT /* signed > */
#define A64_COND_LE AARCH64_INSN_COND_LE /* signed <= */
#define A64_COND_LT AARCH64_INSN_COND_LT /* signed < */
#define A64_B_(cond, imm19) A64_COND_BRANCH(cond, (imm19) << 2)

/* Unconditional branch (immediate) */
#define A64_BRANCH(offset, type) aarch64_insn_gen_branch_imm(0, offset, \
	AARCH64_INSN_BRANCH_##type)
#define A64_B(imm26) A64_BRANCH((imm26) << 2, NOLINK)
#define A64_BL(imm26) A64_BRANCH((imm26) << 2, LINK)
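/*
 * In the branch macros above, imm19/imm26 count 32-bit instructions; the
 * aarch64_insn_gen_*branch*_imm() helpers take byte offsets (with pc passed
 * as 0), hence the << 2 scaling.
 */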

/* Unconditional branch (register) */
#define A64_BR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_NOLINK)
#define A64_BLR(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_LINK)
#define A64_RET(Rn) aarch64_insn_gen_branch_reg(Rn, AARCH64_INSN_BRANCH_RETURN)

/* Load/store register (register offset) */
#define A64_LS_REG(Rt, Rn, Rm, size, type) \
	aarch64_insn_gen_load_store_reg(Rt, Rn, Rm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_REG_OFFSET)
#define A64_STRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, STORE)
#define A64_LDRB(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 8, LOAD)
#define A64_STRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, STORE)
#define A64_LDRH(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 16, LOAD)
#define A64_STR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, STORE)
#define A64_LDR32(Wt, Xn, Xm) A64_LS_REG(Wt, Xn, Xm, 32, LOAD)
#define A64_STR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, STORE)
#define A64_LDR64(Xt, Xn, Xm) A64_LS_REG(Xt, Xn, Xm, 64, LOAD)

/* Load/store register (immediate offset) */
#define A64_LS_IMM(Rt, Rn, imm, size, type) \
	aarch64_insn_gen_load_store_imm(Rt, Rn, imm, \
		AARCH64_INSN_SIZE_##size, \
		AARCH64_INSN_LDST_##type##_IMM_OFFSET)
#define A64_STRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, STORE)
#define A64_LDRBI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 8, LOAD)
#define A64_STRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, STORE)
#define A64_LDRHI(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 16, LOAD)
#define A64_STR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, STORE)
#define A64_LDR32I(Wt, Xn, imm) A64_LS_IMM(Wt, Xn, imm, 32, LOAD)
#define A64_STR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, STORE)
#define A64_LDR64I(Xt, Xn, imm) A64_LS_IMM(Xt, Xn, imm, 64, LOAD)
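/*
 * These use the unsigned scaled-offset form of LDR/STR: imm is a byte offset
 * that must be non-negative, a multiple of the access size, and at most
 * 4095 * size. Offsets outside that range are typically handled by moving
 * the offset into a register and using the register-offset forms above.
 */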

/* LDR (literal) */
#define A64_LDR32LIT(Wt, offset) \
	aarch64_insn_gen_load_literal(0, offset, Wt, false)
#define A64_LDR64LIT(Xt, offset) \
	aarch64_insn_gen_load_literal(0, offset, Xt, true)
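/*
 * As with the branch macros, pc is passed as 0 so that offset is a plain
 * byte offset from the instruction; the literal form reaches +/-1 MiB and
 * the offset must be 4-byte aligned.
 */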

/* Load/store register pair */
#define A64_LS_PAIR(Rt, Rt2, Rn, offset, ls, type) \
	aarch64_insn_gen_load_store_pair(Rt, Rt2, Rn, offset, \
		AARCH64_INSN_VARIANT_64BIT, \
		AARCH64_INSN_LDST_##ls##_PAIR_##type)
/* Rn -= 16; Rn[0] = Rt; Rn[8] = Rt2; */
#define A64_PUSH(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, -16, STORE, PRE_INDEX)
/* Rt = Rn[0]; Rt2 = Rn[8]; Rn += 16; */
#define A64_POP(Rt, Rt2, Rn) A64_LS_PAIR(Rt, Rt2, Rn, 16, LOAD, POST_INDEX)

/* Load/store exclusive */
#define A64_SIZE(sf) \
	((sf) ? AARCH64_INSN_SIZE_64 : AARCH64_INSN_SIZE_32)
#define A64_LSX(sf, Rt, Rn, Rs, type) \
	aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
		AARCH64_INSN_LDST_##type)
/* Rt = [Rn]; (atomic) */
#define A64_LDXR(sf, Rt, Rn) \
	A64_LSX(sf, Rt, Rn, A64_ZR, LOAD_EX)
/* [Rn] = Rt; (atomic) Rs = [state] */
#define A64_STXR(sf, Rt, Rn, Rs) \
	A64_LSX(sf, Rt, Rn, Rs, STORE_EX)
/* [Rn] = Rt (store release); (atomic) Rs = [state] */
#define A64_STLXR(sf, Rt, Rn, Rs) \
	aarch64_insn_gen_load_store_ex(Rt, Rn, Rs, A64_SIZE(sf), \
		AARCH64_INSN_LDST_STORE_REL_EX)
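/*
 * For STXR/STLXR, the status register Rs reads back 0 if the exclusive store
 * succeeded and 1 if it lost the reservation, so the usual pattern is
 * LDXR; <op>; STXR; CBNZ status, retry.
 */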

/*
 * LSE atomics
 *
 * ST{ADD,CLR,SET,EOR} is simply encoded as an alias for
 * LD{ADD,CLR,SET,EOR} with XZR as the destination register.
 */
#define A64_ST_OP(sf, Rn, Rs, op) \
	aarch64_insn_gen_atomic_ld_op(A64_ZR, Rn, Rs, \
		A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
		AARCH64_INSN_MEM_ORDER_NONE)
/* [Rn] <op>= Rs */
#define A64_STADD(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, ADD)
#define A64_STCLR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, CLR)
#define A64_STEOR(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, EOR)
#define A64_STSET(sf, Rn, Rs) A64_ST_OP(sf, Rn, Rs, SET)

#define A64_LD_OP_AL(sf, Rt, Rn, Rs, op) \
	aarch64_insn_gen_atomic_ld_op(Rt, Rn, Rs, \
		A64_SIZE(sf), AARCH64_INSN_MEM_ATOMIC_##op, \
		AARCH64_INSN_MEM_ORDER_ACQREL)
/* Rt = [Rn] (load acquire); [Rn] <op>= Rs (store release) */
#define A64_LDADDAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, ADD)
#define A64_LDCLRAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, CLR)
#define A64_LDEORAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, EOR)
#define A64_LDSETAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SET)
/* Rt = [Rn] (load acquire); [Rn] = Rs (store release) */
#define A64_SWPAL(sf, Rt, Rn, Rs) A64_LD_OP_AL(sf, Rt, Rn, Rs, SWP)
/* Rs = CAS(Rn, Rs, Rt) (load acquire & store release) */
#define A64_CASAL(sf, Rt, Rn, Rs) \
	aarch64_insn_gen_cas(Rt, Rn, Rs, A64_SIZE(sf), \
		AARCH64_INSN_MEM_ORDER_ACQREL)

/* Add/subtract (immediate) */
#define A64_ADDSUB_IMM(sf, Rd, Rn, imm12, type) \
	aarch64_insn_gen_add_sub_imm(Rd, Rn, imm12, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP imm12 */
#define A64_ADD_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD)
#define A64_SUB_I(sf, Rd, Rn, imm12) A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB)
#define A64_ADDS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, ADD_SETFLAGS)
#define A64_SUBS_I(sf, Rd, Rn, imm12) \
	A64_ADDSUB_IMM(sf, Rd, Rn, imm12, SUB_SETFLAGS)
/* Rn + imm12; set condition flags */
#define A64_CMN_I(sf, Rn, imm12) A64_ADDS_I(sf, A64_ZR, Rn, imm12)
/* Rn - imm12; set condition flags */
#define A64_CMP_I(sf, Rn, imm12) A64_SUBS_I(sf, A64_ZR, Rn, imm12)
/* Rd = Rn */
#define A64_MOV(sf, Rd, Rn) A64_ADD_I(sf, Rd, Rn, 0)

/* Bitfield move */
#define A64_BITFIELD(sf, Rd, Rn, immr, imms, type) \
	aarch64_insn_gen_bitfield(Rd, Rn, immr, imms, \
		A64_VARIANT(sf), AARCH64_INSN_BITFIELD_MOVE_##type)
/* Signed, with sign replication to left and zeros to right */
#define A64_SBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, SIGNED)
/* Unsigned, with zeros to left and right */
#define A64_UBFM(sf, Rd, Rn, ir, is) A64_BITFIELD(sf, Rd, Rn, ir, is, UNSIGNED)

/* Rd = Rn << shift */
#define A64_LSL(sf, Rd, Rn, shift) ({ \
	int sz = (sf) ? 64 : 32; \
	A64_UBFM(sf, Rd, Rn, (unsigned)-(shift) % sz, sz - 1 - (shift)); \
})
/* Rd = Rn >> shift */
#define A64_LSR(sf, Rd, Rn, shift) A64_UBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
/* Rd = Rn >> shift; signed */
#define A64_ASR(sf, Rd, Rn, shift) A64_SBFM(sf, Rd, Rn, shift, (sf) ? 63 : 31)
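/*
 * The constant shifts are the standard bitfield-move aliases: LSL #n is
 * UBFM #(-n mod size), #(size - 1 - n); LSR #n is UBFM #n, #(size - 1);
 * ASR #n is SBFM #n, #(size - 1).
 */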

/* Zero extend */
#define A64_UXTH(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 15)
#define A64_UXTW(sf, Rd, Rn) A64_UBFM(sf, Rd, Rn, 0, 31)

/* Move wide (immediate) */
#define A64_MOVEW(sf, Rd, imm16, shift, type) \
	aarch64_insn_gen_movewide(Rd, imm16, shift, \
		A64_VARIANT(sf), AARCH64_INSN_MOVEWIDE_##type)
/* Rd = Zeros (for MOVZ);
 * Rd |= imm16 << shift (where shift is {0, 16, 32, 48});
 * Rd = ~Rd; (for MOVN); */
#define A64_MOVN(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, INVERSE)
#define A64_MOVZ(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, ZERO)
#define A64_MOVK(sf, Rd, imm16, shift) A64_MOVEW(sf, Rd, imm16, shift, KEEP)
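/*
 * An arbitrary 64-bit constant is typically materialized as one MOVZ (or
 * MOVN for mostly-ones values) followed by up to three MOVKs patching the
 * remaining 16-bit chunks at shifts 16, 32 and 48.
 */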

/* Add/subtract (shifted register) */
#define A64_ADDSUB_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_add_sub_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_ADSB_##type)
/* Rd = Rn OP Rm */
#define A64_ADD(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, ADD)
#define A64_SUB(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB)
#define A64_SUBS(sf, Rd, Rn, Rm) A64_ADDSUB_SREG(sf, Rd, Rn, Rm, SUB_SETFLAGS)
/* Rd = -Rm */
#define A64_NEG(sf, Rd, Rm) A64_SUB(sf, Rd, A64_ZR, Rm)
/* Rn - Rm; set condition flags */
#define A64_CMP(sf, Rn, Rm) A64_SUBS(sf, A64_ZR, Rn, Rm)

/* Data-processing (1 source) */
#define A64_DATA1(sf, Rd, Rn, type) aarch64_insn_gen_data1(Rd, Rn, \
	A64_VARIANT(sf), AARCH64_INSN_DATA1_##type)
/* Rd = BSWAPx(Rn) */
#define A64_REV16(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_16)
#define A64_REV32(sf, Rd, Rn) A64_DATA1(sf, Rd, Rn, REVERSE_32)
#define A64_REV64(Rd, Rn) A64_DATA1(1, Rd, Rn, REVERSE_64)

/* Data-processing (2 source) */
/* Rd = Rn OP Rm */
#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)

/* Data-processing (3 source) */
/* Rd = Ra + Rn * Rm */
#define A64_MADD(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MADD)
/* Rd = Ra - Rn * Rm */
#define A64_MSUB(sf, Rd, Ra, Rn, Rm) aarch64_insn_gen_data3(Rd, Ra, Rn, Rm, \
	A64_VARIANT(sf), AARCH64_INSN_DATA3_MSUB)
/* Rd = Rn * Rm */
#define A64_MUL(sf, Rd, Rn, Rm) A64_MADD(sf, Rd, A64_ZR, Rn, Rm)

/* Logical (shifted register) */
#define A64_LOGIC_SREG(sf, Rd, Rn, Rm, type) \
	aarch64_insn_gen_logical_shifted_reg(Rd, Rn, Rm, 0, \
		A64_VARIANT(sf), AARCH64_INSN_LOGIC_##type)
/* Rd = Rn OP Rm */
#define A64_AND(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND)
#define A64_ORR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, ORR)
#define A64_EOR(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, EOR)
#define A64_ANDS(sf, Rd, Rn, Rm) A64_LOGIC_SREG(sf, Rd, Rn, Rm, AND_SETFLAGS)
/* Rn & Rm; set condition flags */
#define A64_TST(sf, Rn, Rm) A64_ANDS(sf, A64_ZR, Rn, Rm)
/* Rd = ~Rm (alias of ORN with A64_ZR as Rn) */
#define A64_MVN(sf, Rd, Rm) \
	A64_LOGIC_SREG(sf, Rd, A64_ZR, Rm, ORN)

/* Logical (immediate) */
#define A64_LOGIC_IMM(sf, Rd, Rn, imm, type) ({ \
	u64 imm64 = (sf) ? (u64)imm : (u64)(u32)imm; \
	aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_##type, \
		A64_VARIANT(sf), Rn, Rd, imm64); \
})
/* Rd = Rn OP imm */
#define A64_AND_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND)
#define A64_ORR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, ORR)
#define A64_EOR_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, EOR)
#define A64_ANDS_I(sf, Rd, Rn, imm) A64_LOGIC_IMM(sf, Rd, Rn, imm, AND_SETFLAGS)
/* Rn & imm; set condition flags */
#define A64_TST_I(sf, Rn, imm) A64_ANDS_I(sf, A64_ZR, Rn, imm)
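/*
 * Only AArch64 "bitmask immediate" patterns can be encoded here; for other
 * constants the generator cannot produce an instruction (it yields the
 * failure encoding AARCH64_BREAK_FAULT), so callers fall back to loading
 * the immediate into a scratch register and using the register forms.
 */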

/* HINTs */
#define A64_HINT(x) aarch64_insn_gen_hint(x)

#define A64_PACIASP A64_HINT(AARCH64_INSN_HINT_PACIASP)
#define A64_AUTIASP A64_HINT(AARCH64_INSN_HINT_AUTIASP)
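/*
 * PACIASP/AUTIASP sign and authenticate the return address in LR against SP;
 * like the BTI hints below, they sit in the NOP-compatible HINT space, so
 * they are harmless on CPUs without the corresponding feature.
 */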

/* BTI */
#define A64_BTI_C A64_HINT(AARCH64_INSN_HINT_BTIC)
#define A64_BTI_J A64_HINT(AARCH64_INSN_HINT_BTIJ)
#define A64_BTI_JC A64_HINT(AARCH64_INSN_HINT_BTIJC)
#define A64_NOP A64_HINT(AARCH64_INSN_HINT_NOP)

/* DMB */
#define A64_DMB_ISH aarch64_insn_gen_dmb(AARCH64_INSN_MB_ISH)
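/* Full data memory barrier over the inner-shareable domain (SMP ordering). */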

#endif /* _BPF_JIT_H */