Linux Audio

Check our new training course

Loading...
Note: File does not exist in v5.14.15.
   1/*
   2 * Copyright (C) 2013 Huawei Ltd.
   3 * Author: Jiang Liu <liuj97@gmail.com>
   4 *
   5 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful,
  12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  14 * GNU General Public License for more details.
  15 *
  16 * You should have received a copy of the GNU General Public License
  17 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  18 */
  19#include <linux/bitops.h>
  20#include <linux/bug.h>
  21#include <linux/compiler.h>
  22#include <linux/kernel.h>
  23#include <linux/mm.h>
  24#include <linux/smp.h>
  25#include <linux/spinlock.h>
  26#include <linux/stop_machine.h>
  27#include <linux/types.h>
  28#include <linux/uaccess.h>
  29
  30#include <asm/cacheflush.h>
  31#include <asm/debug-monitors.h>
  32#include <asm/fixmap.h>
  33#include <asm/insn.h>
  34#include <asm/kprobes.h>
  35
/* sf bit (31): selects the 64-bit variant of a data-processing insn */
#define AARCH64_INSN_SF_BIT	BIT(31)
/* N bit (22) of bitfield insns; set together with sf for 64-bit here */
#define AARCH64_INSN_N_BIT	BIT(22)
/* sh bit (22) of add/sub (immediate): shift imm12 left by 12 */
#define AARCH64_INSN_LSL_12	BIT(22)
  39
/*
 * Top-level A64 instruction class, indexed by instruction word bits
 * [28:25] (see aarch64_get_insn_class()).
 */
static int aarch64_insn_encoding_class[] = {
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0000 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0001 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0010 */
	AARCH64_INSN_CLS_UNKNOWN,	/* 0b0011 */
	AARCH64_INSN_CLS_LDST,		/* 0b0100 */
	AARCH64_INSN_CLS_DP_REG,	/* 0b0101 */
	AARCH64_INSN_CLS_LDST,		/* 0b0110 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0b0111 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0b1000 */
	AARCH64_INSN_CLS_DP_IMM,	/* 0b1001 */
	AARCH64_INSN_CLS_BR_SYS,	/* 0b1010 */
	AARCH64_INSN_CLS_BR_SYS,	/* 0b1011 */
	AARCH64_INSN_CLS_LDST,		/* 0b1100 */
	AARCH64_INSN_CLS_DP_REG,	/* 0b1101 */
	AARCH64_INSN_CLS_LDST,		/* 0b1110 */
	AARCH64_INSN_CLS_DP_FPSIMD,	/* 0b1111 */
};
  58
  59enum aarch64_insn_encoding_class __kprobes aarch64_get_insn_class(u32 insn)
  60{
  61	return aarch64_insn_encoding_class[(insn >> 25) & 0xf];
  62}
  63
  64/* NOP is an alias of HINT */
  65bool __kprobes aarch64_insn_is_nop(u32 insn)
  66{
  67	if (!aarch64_insn_is_hint(insn))
  68		return false;
  69
  70	switch (insn & 0xFE0) {
  71	case AARCH64_INSN_HINT_YIELD:
  72	case AARCH64_INSN_HINT_WFE:
  73	case AARCH64_INSN_HINT_WFI:
  74	case AARCH64_INSN_HINT_SEV:
  75	case AARCH64_INSN_HINT_SEVL:
  76		return false;
  77	default:
  78		return true;
  79	}
  80}
  81
  82bool aarch64_insn_is_branch_imm(u32 insn)
  83{
  84	return (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn) ||
  85		aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn) ||
  86		aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
  87		aarch64_insn_is_bcond(insn));
  88}
  89
/* Serializes users of the text-poke fixmap slot (see patch_map()). */
static DEFINE_RAW_SPINLOCK(patch_lock);
  91
/*
 * Map @addr to a writable alias through the given text-poke fixmap
 * slot, so that read-only kernel/module text can be patched:
 *
 * - module text with STRICT_MODULE_RWX: resolve the backing vmalloc page
 * - core kernel text: resolve the page via its physical address
 * - module text without STRICT_MODULE_RWX: already writable, return @addr
 *
 * Called under patch_lock by __aarch64_insn_write(); the caller must
 * undo the mapping with patch_unmap().
 */
static void __kprobes *patch_map(void *addr, int fixmap)
{
	unsigned long uintaddr = (uintptr_t) addr;
	bool module = !core_kernel_text(uintaddr);
	struct page *page;

	if (module && IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
		page = vmalloc_to_page(addr);
	else if (!module)
		page = phys_to_page(__pa_symbol(addr));
	else
		return addr;

	BUG_ON(!page);
	/* Preserve the sub-page offset of @addr within the alias. */
	return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
			(uintaddr & ~PAGE_MASK));
}
 109
/* Tear down the fixmap alias installed by patch_map(). */
static void __kprobes patch_unmap(int fixmap)
{
	clear_fixmap(fixmap);
}
 114/*
 115 * In ARMv8-A, A64 instructions have a fixed length of 32 bits and are always
 116 * little-endian.
 117 */
 118int __kprobes aarch64_insn_read(void *addr, u32 *insnp)
 119{
 120	int ret;
 121	__le32 val;
 122
 123	ret = probe_kernel_read(&val, addr, AARCH64_INSN_SIZE);
 124	if (!ret)
 125		*insnp = le32_to_cpu(val);
 126
 127	return ret;
 128}
 129
/*
 * Write an already little-endian instruction word to kernel text.
 *
 * patch_lock serializes use of the FIX_TEXT_POKE0 fixmap slot; the
 * store itself goes through the writable alias set up by patch_map().
 */
static int __kprobes __aarch64_insn_write(void *addr, __le32 insn)
{
	void *waddr = addr;
	unsigned long flags = 0;
	int ret;

	raw_spin_lock_irqsave(&patch_lock, flags);
	waddr = patch_map(addr, FIX_TEXT_POKE0);

	ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);

	patch_unmap(FIX_TEXT_POKE0);
	raw_spin_unlock_irqrestore(&patch_lock, flags);

	return ret;
}
 146
/* Write one instruction, converting to the A64 little-endian format. */
int __kprobes aarch64_insn_write(void *addr, u32 insn)
{
	return __aarch64_insn_write(addr, cpu_to_le32(insn));
}
 151
 152static bool __kprobes __aarch64_insn_hotpatch_safe(u32 insn)
 153{
 154	if (aarch64_get_insn_class(insn) != AARCH64_INSN_CLS_BR_SYS)
 155		return false;
 156
 157	return	aarch64_insn_is_b(insn) ||
 158		aarch64_insn_is_bl(insn) ||
 159		aarch64_insn_is_svc(insn) ||
 160		aarch64_insn_is_hvc(insn) ||
 161		aarch64_insn_is_smc(insn) ||
 162		aarch64_insn_is_brk(insn) ||
 163		aarch64_insn_is_nop(insn);
 164}
 165
 166bool __kprobes aarch64_insn_uses_literal(u32 insn)
 167{
 168	/* ldr/ldrsw (literal), prfm */
 169
 170	return aarch64_insn_is_ldr_lit(insn) ||
 171		aarch64_insn_is_ldrsw_lit(insn) ||
 172		aarch64_insn_is_adr_adrp(insn) ||
 173		aarch64_insn_is_prfm_lit(insn);
 174}
 175
 176bool __kprobes aarch64_insn_is_branch(u32 insn)
 177{
 178	/* b, bl, cb*, tb*, b.cond, br, blr */
 179
 180	return aarch64_insn_is_b(insn) ||
 181		aarch64_insn_is_bl(insn) ||
 182		aarch64_insn_is_cbz(insn) ||
 183		aarch64_insn_is_cbnz(insn) ||
 184		aarch64_insn_is_tbz(insn) ||
 185		aarch64_insn_is_tbnz(insn) ||
 186		aarch64_insn_is_ret(insn) ||
 187		aarch64_insn_is_br(insn) ||
 188		aarch64_insn_is_blr(insn) ||
 189		aarch64_insn_is_bcond(insn);
 190}
 191
 192/*
 193 * ARM Architecture Reference Manual for ARMv8 Profile-A, Issue A.a
 194 * Section B2.6.5 "Concurrent modification and execution of instructions":
 195 * Concurrent modification and execution of instructions can lead to the
 196 * resulting instruction performing any behavior that can be achieved by
 197 * executing any sequence of instructions that can be executed from the
 198 * same Exception level, except where the instruction before modification
 199 * and the instruction after modification is a B, BL, NOP, BKPT, SVC, HVC,
 200 * or SMC instruction.
 201 */
 202bool __kprobes aarch64_insn_hotpatch_safe(u32 old_insn, u32 new_insn)
 203{
 204	return __aarch64_insn_hotpatch_safe(old_insn) &&
 205	       __aarch64_insn_hotpatch_safe(new_insn);
 206}
 207
 208int __kprobes aarch64_insn_patch_text_nosync(void *addr, u32 insn)
 209{
 210	u32 *tp = addr;
 211	int ret;
 212
 213	/* A64 instructions must be word aligned */
 214	if ((uintptr_t)tp & 0x3)
 215		return -EINVAL;
 216
 217	ret = aarch64_insn_write(tp, insn);
 218	if (ret == 0)
 219		flush_icache_range((uintptr_t)tp,
 220				   (uintptr_t)tp + AARCH64_INSN_SIZE);
 221
 222	return ret;
 223}
 224
/* Arguments for aarch64_insn_patch_text_cb(), run under stop_machine(). */
struct aarch64_insn_patch {
	void		**text_addrs;	/* instruction addresses to patch */
	u32		*new_insns;	/* replacement instruction words */
	int		insn_cnt;	/* entries in the two arrays above */
	atomic_t	cpu_count;	/* CPU arrival/completion counter */
};
 231
/*
 * stop_machine() callback: the first CPU to arrive does all the
 * patching while the others spin until it signals completion, then
 * issue an ISB to discard any stale prefetched instructions.
 */
static int __kprobes aarch64_insn_patch_text_cb(void *arg)
{
	int i, ret = 0;
	struct aarch64_insn_patch *pp = arg;

	/* The first CPU becomes master */
	if (atomic_inc_return(&pp->cpu_count) == 1) {
		for (i = 0; ret == 0 && i < pp->insn_cnt; i++)
			ret = aarch64_insn_patch_text_nosync(pp->text_addrs[i],
							     pp->new_insns[i]);
		/*
		 * aarch64_insn_patch_text_nosync() calls flush_icache_range(),
		 * which ends with "dsb; isb" pair guaranteeing global
		 * visibility.
		 */
		/* Notify other processors with an additional increment. */
		atomic_inc(&pp->cpu_count);
	} else {
		/*
		 * Wait for the master's extra increment to push the count
		 * past the number of online CPUs.
		 */
		while (atomic_read(&pp->cpu_count) <= num_online_cpus())
			cpu_relax();
		isb();
	}

	return ret;
}
 257
/*
 * Patch @cnt instructions under stop_machine() so that no CPU can be
 * concurrently executing the text being modified.
 */
static
int __kprobes aarch64_insn_patch_text_sync(void *addrs[], u32 insns[], int cnt)
{
	struct aarch64_insn_patch patch = {
		.text_addrs = addrs,
		.new_insns = insns,
		.insn_cnt = cnt,
		.cpu_count = ATOMIC_INIT(0),
	};

	if (cnt <= 0)
		return -EINVAL;

	return stop_machine_cpuslocked(aarch64_insn_patch_text_cb, &patch,
				       cpu_online_mask);
}
 274
/*
 * Patch kernel text.  A single hotpatch-safe replacement (see
 * aarch64_insn_hotpatch_safe()) is done in place without stopping the
 * machine; everything else falls back to the stop_machine() slow path.
 */
int __kprobes aarch64_insn_patch_text(void *addrs[], u32 insns[], int cnt)
{
	int ret;
	u32 insn;

	/* Unsafe to patch multiple instructions without synchronization */
	if (cnt == 1) {
		ret = aarch64_insn_read(addrs[0], &insn);
		if (ret)
			return ret;

		if (aarch64_insn_hotpatch_safe(insn, insns[0])) {
			/*
			 * ARMv8 architecture doesn't guarantee all CPUs see
			 * the new instruction after returning from function
			 * aarch64_insn_patch_text_nosync(). So send IPIs to
			 * all other CPUs to achieve instruction
			 * synchronization.
			 */
			ret = aarch64_insn_patch_text_nosync(addrs[0], insns[0]);
			kick_all_cpus_sync();
			return ret;
		}
	}

	return aarch64_insn_patch_text_sync(addrs, insns, cnt);
}
 302
/*
 * Look up the width (as a bit mask) and bit position (shift) of the
 * immediate field @type within an instruction word.  Returns -EINVAL
 * for types that need special handling (e.g. the split ADR immediate).
 */
static int __kprobes aarch64_get_imm_shift_mask(enum aarch64_insn_imm_type type,
						u32 *maskp, int *shiftp)
{
	u32 mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_26:
		mask = BIT(26) - 1;
		shift = 0;
		break;
	case AARCH64_INSN_IMM_19:
		mask = BIT(19) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_16:
		mask = BIT(16) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_14:
		mask = BIT(14) - 1;
		shift = 5;
		break;
	case AARCH64_INSN_IMM_12:
		mask = BIT(12) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_9:
		mask = BIT(9) - 1;
		shift = 12;
		break;
	case AARCH64_INSN_IMM_7:
		mask = BIT(7) - 1;
		shift = 15;
		break;
	case AARCH64_INSN_IMM_6:
	case AARCH64_INSN_IMM_S:
		mask = BIT(6) - 1;
		shift = 10;
		break;
	case AARCH64_INSN_IMM_R:
		mask = BIT(6) - 1;
		shift = 16;
		break;
	case AARCH64_INSN_IMM_N:
		mask = 1;
		shift = 22;
		break;
	default:
		return -EINVAL;
	}

	*maskp = mask;
	*shiftp = shift;

	return 0;
}
 360
/*
 * The 21-bit ADR/ADRP immediate is split across the instruction word:
 * immlo (the low ADR_IMM_HILOSPLIT bits) lives at bits [30:29], and
 * immhi (the remaining bits) at bits [23:5].
 */
#define ADR_IMM_HILOSPLIT	2
#define ADR_IMM_SIZE		SZ_2M
#define ADR_IMM_LOMASK		((1 << ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_HIMASK		((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
#define ADR_IMM_LOSHIFT		29
#define ADR_IMM_HISHIFT		5
 367
/*
 * Extract the immediate field @type from @insn.  ADR immediates are
 * reassembled from their split immhi:immlo fields; every other type
 * uses the mask/shift pair from aarch64_get_imm_shift_mask().
 * Returns 0 (and logs an error) for an unknown immediate type.
 */
u64 aarch64_insn_decode_immediate(enum aarch64_insn_imm_type type, u32 insn)
{
	u32 immlo, immhi, mask;
	int shift;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (insn >> ADR_IMM_LOSHIFT) & ADR_IMM_LOMASK;
		immhi = (insn >> ADR_IMM_HISHIFT) & ADR_IMM_HIMASK;
		insn = (immhi << ADR_IMM_HILOSPLIT) | immlo;
		mask = ADR_IMM_SIZE - 1;
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_decode_immediate: unknown immediate encoding %d\n",
			       type);
			return 0;
		}
	}

	return (insn >> shift) & mask;
}
 391
/*
 * Insert @imm into the immediate field @type of @insn.  ADR immediates
 * are split into immhi:immlo; other types use the mask/shift pair from
 * aarch64_get_imm_shift_mask().  Returns AARCH64_BREAK_FAULT on error,
 * or if @insn already is AARCH64_BREAK_FAULT — this lets encoder calls
 * be chained without checking each intermediate result.
 */
u32 __kprobes aarch64_insn_encode_immediate(enum aarch64_insn_imm_type type,
				  u32 insn, u64 imm)
{
	u32 immlo, immhi, mask;
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_IMM_ADR:
		shift = 0;
		immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
		imm >>= ADR_IMM_HILOSPLIT;
		immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
		imm = immlo | immhi;
		mask = ((ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
			(ADR_IMM_HIMASK << ADR_IMM_HISHIFT));
		break;
	default:
		if (aarch64_get_imm_shift_mask(type, &mask, &shift) < 0) {
			pr_err("aarch64_insn_encode_immediate: unknown immediate encoding %d\n",
			       type);
			return AARCH64_BREAK_FAULT;
		}
	}

	/* Update the immediate field. */
	insn &= ~(mask << shift);
	insn |= (imm & mask) << shift;

	return insn;
}
 425
 426u32 aarch64_insn_decode_register(enum aarch64_insn_register_type type,
 427					u32 insn)
 428{
 429	int shift;
 430
 431	switch (type) {
 432	case AARCH64_INSN_REGTYPE_RT:
 433	case AARCH64_INSN_REGTYPE_RD:
 434		shift = 0;
 435		break;
 436	case AARCH64_INSN_REGTYPE_RN:
 437		shift = 5;
 438		break;
 439	case AARCH64_INSN_REGTYPE_RT2:
 440	case AARCH64_INSN_REGTYPE_RA:
 441		shift = 10;
 442		break;
 443	case AARCH64_INSN_REGTYPE_RM:
 444		shift = 16;
 445		break;
 446	default:
 447		pr_err("%s: unknown register type encoding %d\n", __func__,
 448		       type);
 449		return 0;
 450	}
 451
 452	return (insn >> shift) & GENMASK(4, 0);
 453}
 454
/*
 * Insert register number @reg into operand field @type of @insn.
 * Returns AARCH64_BREAK_FAULT for an invalid register or field type,
 * or if @insn already is AARCH64_BREAK_FAULT (so calls can chain).
 */
static u32 aarch64_insn_encode_register(enum aarch64_insn_register_type type,
					u32 insn,
					enum aarch64_insn_register reg)
{
	int shift;

	if (insn == AARCH64_BREAK_FAULT)
		return AARCH64_BREAK_FAULT;

	if (reg < AARCH64_INSN_REG_0 || reg > AARCH64_INSN_REG_SP) {
		pr_err("%s: unknown register encoding %d\n", __func__, reg);
		return AARCH64_BREAK_FAULT;
	}

	switch (type) {
	case AARCH64_INSN_REGTYPE_RT:
	case AARCH64_INSN_REGTYPE_RD:
		shift = 0;
		break;
	case AARCH64_INSN_REGTYPE_RN:
		shift = 5;
		break;
	case AARCH64_INSN_REGTYPE_RT2:
	case AARCH64_INSN_REGTYPE_RA:
		shift = 10;
		break;
	case AARCH64_INSN_REGTYPE_RM:
	case AARCH64_INSN_REGTYPE_RS:
		shift = 16;
		break;
	default:
		pr_err("%s: unknown register type encoding %d\n", __func__,
		       type);
		return AARCH64_BREAK_FAULT;
	}

	insn &= ~(GENMASK(4, 0) << shift);
	insn |= reg << shift;

	return insn;
}
 496
 497static u32 aarch64_insn_encode_ldst_size(enum aarch64_insn_size_type type,
 498					 u32 insn)
 499{
 500	u32 size;
 501
 502	switch (type) {
 503	case AARCH64_INSN_SIZE_8:
 504		size = 0;
 505		break;
 506	case AARCH64_INSN_SIZE_16:
 507		size = 1;
 508		break;
 509	case AARCH64_INSN_SIZE_32:
 510		size = 2;
 511		break;
 512	case AARCH64_INSN_SIZE_64:
 513		size = 3;
 514		break;
 515	default:
 516		pr_err("%s: unknown size encoding %d\n", __func__, type);
 517		return AARCH64_BREAK_FAULT;
 518	}
 519
 520	insn &= ~GENMASK(31, 30);
 521	insn |= size << 30;
 522
 523	return insn;
 524}
 525
/*
 * Compute the byte offset from @pc to @addr for a PC-relative branch.
 *
 * Error convention: on misaligned addresses or an offset outside
 * [-range, range), @range itself is returned as a sentinel — any valid
 * offset is strictly less than @range, so callers must check for it.
 */
static inline long branch_imm_common(unsigned long pc, unsigned long addr,
				     long range)
{
	long offset;

	if ((pc & 0x3) || (addr & 0x3)) {
		pr_err("%s: A64 instructions must be word aligned\n", __func__);
		return range;
	}

	offset = ((long)addr - (long)pc);

	if (offset < -range || offset >= range) {
		pr_err("%s: offset out of range\n", __func__);
		return range;
	}

	return offset;
}
 545
/*
 * Generate a b or bl from @pc to @addr, with the word offset encoded
 * in the imm26 field.  Returns AARCH64_BREAK_FAULT on a bad offset or
 * branch type.
 */
u32 __kprobes aarch64_insn_gen_branch_imm(unsigned long pc, unsigned long addr,
					  enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	/*
	 * B/BL support [-128M, 128M) offset
	 * ARM64 virtual address arrangement guarantees all kernel and module
	 * texts are within +/-128M.
	 */
	offset = branch_imm_common(pc, addr, SZ_128M);
	/* SZ_128M is branch_imm_common()'s out-of-range sentinel. */
	if (offset >= SZ_128M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_LINK:
		insn = aarch64_insn_get_bl_value();
		break;
	case AARCH64_INSN_BRANCH_NOLINK:
		insn = aarch64_insn_get_b_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
					     offset >> 2);
}
 576
/*
 * Generate a cbz/cbnz comparing @reg and branching from @pc to @addr.
 * CBZ/CBNZ support a [-1M, 1M) offset, encoded as a word offset in the
 * imm19 field.
 */
u32 aarch64_insn_gen_comp_branch_imm(unsigned long pc, unsigned long addr,
				     enum aarch64_insn_register reg,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_branch_type type)
{
	u32 insn;
	long offset;

	offset = branch_imm_common(pc, addr, SZ_1M);
	/* SZ_1M is branch_imm_common()'s out-of-range sentinel. */
	if (offset >= SZ_1M)
		return AARCH64_BREAK_FAULT;

	switch (type) {
	case AARCH64_INSN_BRANCH_COMP_ZERO:
		insn = aarch64_insn_get_cbz_value();
		break;
	case AARCH64_INSN_BRANCH_COMP_NONZERO:
		insn = aarch64_insn_get_cbnz_value();
		break;
	default:
		pr_err("%s: unknown branch encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
					     offset >> 2);
}
 617
 618u32 aarch64_insn_gen_cond_branch_imm(unsigned long pc, unsigned long addr,
 619				     enum aarch64_insn_condition cond)
 620{
 621	u32 insn;
 622	long offset;
 623
 624	offset = branch_imm_common(pc, addr, SZ_1M);
 625
 626	insn = aarch64_insn_get_bcond_value();
 627
 628	if (cond < AARCH64_INSN_COND_EQ || cond > AARCH64_INSN_COND_AL) {
 629		pr_err("%s: unknown condition encoding %d\n", __func__, cond);
 630		return AARCH64_BREAK_FAULT;
 631	}
 632	insn |= cond;
 633
 634	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
 635					     offset >> 2);
 636}
 637
 638u32 __kprobes aarch64_insn_gen_hint(enum aarch64_insn_hint_op op)
 639{
 640	return aarch64_insn_get_hint_value() | op;
 641}
 642
/* NOP is encoded as HINT #0. */
u32 __kprobes aarch64_insn_gen_nop(void)
{
	return aarch64_insn_gen_hint(AARCH64_INSN_HINT_NOP);
}
 647
 648u32 aarch64_insn_gen_branch_reg(enum aarch64_insn_register reg,
 649				enum aarch64_insn_branch_type type)
 650{
 651	u32 insn;
 652
 653	switch (type) {
 654	case AARCH64_INSN_BRANCH_NOLINK:
 655		insn = aarch64_insn_get_br_value();
 656		break;
 657	case AARCH64_INSN_BRANCH_LINK:
 658		insn = aarch64_insn_get_blr_value();
 659		break;
 660	case AARCH64_INSN_BRANCH_RETURN:
 661		insn = aarch64_insn_get_ret_value();
 662		break;
 663	default:
 664		pr_err("%s: unknown branch encoding %d\n", __func__, type);
 665		return AARCH64_BREAK_FAULT;
 666	}
 667
 668	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, reg);
 669}
 670
/*
 * Generate a register-offset load/store: ldr/str @reg, [@base, @offset],
 * with the given access @size.
 */
u32 aarch64_insn_gen_load_store_reg(enum aarch64_insn_register reg,
				    enum aarch64_insn_register base,
				    enum aarch64_insn_register offset,
				    enum aarch64_insn_size_type size,
				    enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_REG_OFFSET:
		insn = aarch64_insn_get_ldr_reg_value();
		break;
	case AARCH64_INSN_LDST_STORE_REG_OFFSET:
		insn = aarch64_insn_get_str_reg_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn, reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
					    offset);
}
 701
/*
 * Generate an ldp/stp with writeback (pre- or post-index) of @reg1 and
 * @reg2 at @base, with the byte @offset scaled into the imm7 field.
 */
u32 aarch64_insn_gen_load_store_pair(enum aarch64_insn_register reg1,
				     enum aarch64_insn_register reg2,
				     enum aarch64_insn_register base,
				     int offset,
				     enum aarch64_insn_variant variant,
				     enum aarch64_insn_ldst_type type)
{
	u32 insn;
	int shift;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_ldp_pre_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_PRE_INDEX:
		insn = aarch64_insn_get_stp_pre_value();
		break;
	case AARCH64_INSN_LDST_LOAD_PAIR_POST_INDEX:
		insn = aarch64_insn_get_ldp_post_value();
		break;
	case AARCH64_INSN_LDST_STORE_PAIR_POST_INDEX:
		insn = aarch64_insn_get_stp_post_value();
		break;
	default:
		pr_err("%s: unknown load/store encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	/* imm7 is scaled by the access size: 4 bytes (32-bit) or 8 (64-bit) */
	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if ((offset & 0x3) || (offset < -256) || (offset > 252)) {
			pr_err("%s: offset must be multiples of 4 in the range of [-256, 252] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 2;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		if ((offset & 0x7) || (offset < -512) || (offset > 504)) {
			pr_err("%s: offset must be multiples of 8 in the range of [-512, 504] %d\n",
			       __func__, offset);
			return AARCH64_BREAK_FAULT;
		}
		shift = 3;
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg1);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    reg2);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_7, insn,
					     offset >> shift);
}
 765
/*
 * Generate a load/store exclusive of @reg at [@base], with @state as
 * the status register (Rs, meaningful for stores).  Rt2 is set to ZR
 * for these non-pair forms.
 */
u32 aarch64_insn_gen_load_store_ex(enum aarch64_insn_register reg,
				   enum aarch64_insn_register base,
				   enum aarch64_insn_register state,
				   enum aarch64_insn_size_type size,
				   enum aarch64_insn_ldst_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_LDST_LOAD_EX:
		insn = aarch64_insn_get_load_ex_value();
		break;
	case AARCH64_INSN_LDST_STORE_EX:
		insn = aarch64_insn_get_store_ex_value();
		break;
	default:
		pr_err("%s: unknown load/store exclusive encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_ldst_size(size, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT, insn,
					    reg);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RT2, insn,
					    AARCH64_INSN_REG_ZR);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RS, insn,
					    state);
}
 800
/*
 * Build the 5-bit prefetch operation immediate from @type (pld/pli/pst),
 * @target (L1-L3) and @policy (keep/strm), and place it in the Rt field
 * of @insn.
 */
static u32 aarch64_insn_encode_prfm_imm(enum aarch64_insn_prfm_type type,
					enum aarch64_insn_prfm_target target,
					enum aarch64_insn_prfm_policy policy,
					u32 insn)
{
	u32 imm_type = 0, imm_target = 0, imm_policy = 0;

	switch (type) {
	case AARCH64_INSN_PRFM_TYPE_PLD:
		break;
	case AARCH64_INSN_PRFM_TYPE_PLI:
		imm_type = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TYPE_PST:
		imm_type = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm type encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (target) {
	case AARCH64_INSN_PRFM_TARGET_L1:
		break;
	case AARCH64_INSN_PRFM_TARGET_L2:
		imm_target = BIT(0);
		break;
	case AARCH64_INSN_PRFM_TARGET_L3:
		imm_target = BIT(1);
		break;
	default:
		pr_err("%s: unknown prfm target encoding %d\n", __func__, target);
		return AARCH64_BREAK_FAULT;
	}

	switch (policy) {
	case AARCH64_INSN_PRFM_POLICY_KEEP:
		break;
	case AARCH64_INSN_PRFM_POLICY_STRM:
		imm_policy = BIT(0);
		break;
	default:
		pr_err("%s: unknown prfm policy encoding %d\n", __func__, policy);
		return AARCH64_BREAK_FAULT;
	}

	/* In this case, imm5 is encoded into Rt field. */
	insn &= ~GENMASK(4, 0);
	insn |= imm_policy | (imm_target << 1) | (imm_type << 3);

	return insn;
}
 853
/* Generate "prfm <op>, [<base>]" with a zero immediate offset. */
u32 aarch64_insn_gen_prefetch(enum aarch64_insn_register base,
			      enum aarch64_insn_prfm_type type,
			      enum aarch64_insn_prfm_target target,
			      enum aarch64_insn_prfm_policy policy)
{
	u32 insn = aarch64_insn_get_prfm_value();

	insn = aarch64_insn_encode_ldst_size(AARCH64_INSN_SIZE_64, insn);

	insn = aarch64_insn_encode_prfm_imm(type, target, policy, insn);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
					    base);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, 0);
}
 870
/*
 * Generate an add/sub (immediate): @dst = @src op @imm, optionally
 * setting flags.  Immediates of up to 24 bits are accepted provided
 * either the low or the high 12 bits are all zero, using the optional
 * LSL #12 of imm12.
 */
u32 aarch64_insn_gen_add_sub_imm(enum aarch64_insn_register dst,
				 enum aarch64_insn_register src,
				 int imm, enum aarch64_insn_variant variant,
				 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_imm_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_imm_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_imm_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* We can't encode more than a 24bit value (12bit + 12bit shift) */
	if (imm & ~(BIT(24) - 1))
		goto out;

	/* If we have something in the top 12 bits... */
	if (imm & ~(SZ_4K - 1)) {
		/* ... and in the low 12 bits -> error */
		if (imm & (SZ_4K - 1))
			goto out;

		imm >>= 12;
		insn |= AARCH64_INSN_LSL_12;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_12, insn, imm);

out:
	pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
	return AARCH64_BREAK_FAULT;
}
 931
/*
 * Generate a bitfield move (bfm/ubfm/sbfm): @dst = move(@src) with
 * rotate amount @immr and width/position @imms.  Both immediates must
 * fit the variant's register width (5 bits for 32-bit, 6 for 64-bit).
 */
u32 aarch64_insn_gen_bitfield(enum aarch64_insn_register dst,
			      enum aarch64_insn_register src,
			      int immr, int imms,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_bitfield_type type)
{
	u32 insn;
	u32 mask;

	switch (type) {
	case AARCH64_INSN_BITFIELD_MOVE:
		insn = aarch64_insn_get_bfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_UNSIGNED:
		insn = aarch64_insn_get_ubfm_value();
		break;
	case AARCH64_INSN_BITFIELD_MOVE_SIGNED:
		insn = aarch64_insn_get_sbfm_value();
		break;
	default:
		pr_err("%s: unknown bitfield encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		mask = GENMASK(4, 0);
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT | AARCH64_INSN_N_BIT;
		mask = GENMASK(5, 0);
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	if (immr & ~mask) {
		pr_err("%s: invalid immr encoding %d\n", __func__, immr);
		return AARCH64_BREAK_FAULT;
	}
	if (imms & ~mask) {
		pr_err("%s: invalid imms encoding %d\n", __func__, imms);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
 986
/*
 * Generate a movz/movk/movn writing the 16-bit @imm to @dst, shifted
 * left by @shift (a multiple of 16 valid for the register width).
 */
u32 aarch64_insn_gen_movewide(enum aarch64_insn_register dst,
			      int imm, int shift,
			      enum aarch64_insn_variant variant,
			      enum aarch64_insn_movewide_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_MOVEWIDE_ZERO:
		insn = aarch64_insn_get_movz_value();
		break;
	case AARCH64_INSN_MOVEWIDE_KEEP:
		insn = aarch64_insn_get_movk_value();
		break;
	case AARCH64_INSN_MOVEWIDE_INVERSE:
		insn = aarch64_insn_get_movn_value();
		break;
	default:
		pr_err("%s: unknown movewide encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	if (imm & ~(SZ_64K - 1)) {
		pr_err("%s: invalid immediate encoding %d\n", __func__, imm);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift != 0 && shift != 16) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift != 0 && shift != 16 && shift != 32 && shift != 48) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/* The hw field (bits [22:21]) holds shift / 16. */
	insn |= (shift >> 4) << 21;

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
}
1041
/*
 * Generate an add/sub (shifted register):
 * @dst = @src op (@reg << @shift), optionally setting flags.  The
 * shift amount must be smaller than the register width.
 */
u32 aarch64_insn_gen_add_sub_shifted_reg(enum aarch64_insn_register dst,
					 enum aarch64_insn_register src,
					 enum aarch64_insn_register reg,
					 int shift,
					 enum aarch64_insn_variant variant,
					 enum aarch64_insn_adsb_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_ADSB_ADD:
		insn = aarch64_insn_get_add_value();
		break;
	case AARCH64_INSN_ADSB_SUB:
		insn = aarch64_insn_get_sub_value();
		break;
	case AARCH64_INSN_ADSB_ADD_SETFLAGS:
		insn = aarch64_insn_get_adds_value();
		break;
	case AARCH64_INSN_ADSB_SUB_SETFLAGS:
		insn = aarch64_insn_get_subs_value();
		break;
	default:
		pr_err("%s: unknown add/sub encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (shift & ~(SZ_32 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		if (shift & ~(SZ_64 - 1)) {
			pr_err("%s: invalid shift encoding %d\n", __func__,
			       shift);
			return AARCH64_BREAK_FAULT;
		}
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}


	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);

	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
}
1099
/*
 * Generate a one-source data-processing instruction
 * (rev16/rev32/rev64): @dst = op(@src).  rev64 requires the 64-bit
 * variant.
 */
u32 aarch64_insn_gen_data1(enum aarch64_insn_register dst,
			   enum aarch64_insn_register src,
			   enum aarch64_insn_variant variant,
			   enum aarch64_insn_data1_type type)
{
	u32 insn;

	switch (type) {
	case AARCH64_INSN_DATA1_REVERSE_16:
		insn = aarch64_insn_get_rev16_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_32:
		insn = aarch64_insn_get_rev32_value();
		break;
	case AARCH64_INSN_DATA1_REVERSE_64:
		if (variant != AARCH64_INSN_VARIANT_64BIT) {
			pr_err("%s: invalid variant for reverse64 %d\n",
			       __func__, variant);
			return AARCH64_BREAK_FAULT;
		}
		insn = aarch64_insn_get_rev64_value();
		break;
	default:
		pr_err("%s: unknown data1 encoding %d\n", __func__, type);
		return AARCH64_BREAK_FAULT;
	}

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);

	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
}
1142
1143u32 aarch64_insn_gen_data2(enum aarch64_insn_register dst,
1144			   enum aarch64_insn_register src,
1145			   enum aarch64_insn_register reg,
1146			   enum aarch64_insn_variant variant,
1147			   enum aarch64_insn_data2_type type)
1148{
1149	u32 insn;
1150
1151	switch (type) {
1152	case AARCH64_INSN_DATA2_UDIV:
1153		insn = aarch64_insn_get_udiv_value();
1154		break;
1155	case AARCH64_INSN_DATA2_SDIV:
1156		insn = aarch64_insn_get_sdiv_value();
1157		break;
1158	case AARCH64_INSN_DATA2_LSLV:
1159		insn = aarch64_insn_get_lslv_value();
1160		break;
1161	case AARCH64_INSN_DATA2_LSRV:
1162		insn = aarch64_insn_get_lsrv_value();
1163		break;
1164	case AARCH64_INSN_DATA2_ASRV:
1165		insn = aarch64_insn_get_asrv_value();
1166		break;
1167	case AARCH64_INSN_DATA2_RORV:
1168		insn = aarch64_insn_get_rorv_value();
1169		break;
1170	default:
1171		pr_err("%s: unknown data2 encoding %d\n", __func__, type);
1172		return AARCH64_BREAK_FAULT;
1173	}
1174
1175	switch (variant) {
1176	case AARCH64_INSN_VARIANT_32BIT:
1177		break;
1178	case AARCH64_INSN_VARIANT_64BIT:
1179		insn |= AARCH64_INSN_SF_BIT;
1180		break;
1181	default:
1182		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1183		return AARCH64_BREAK_FAULT;
1184	}
1185
1186	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1187
1188	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1189
1190	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1191}
1192
1193u32 aarch64_insn_gen_data3(enum aarch64_insn_register dst,
1194			   enum aarch64_insn_register src,
1195			   enum aarch64_insn_register reg1,
1196			   enum aarch64_insn_register reg2,
1197			   enum aarch64_insn_variant variant,
1198			   enum aarch64_insn_data3_type type)
1199{
1200	u32 insn;
1201
1202	switch (type) {
1203	case AARCH64_INSN_DATA3_MADD:
1204		insn = aarch64_insn_get_madd_value();
1205		break;
1206	case AARCH64_INSN_DATA3_MSUB:
1207		insn = aarch64_insn_get_msub_value();
1208		break;
1209	default:
1210		pr_err("%s: unknown data3 encoding %d\n", __func__, type);
1211		return AARCH64_BREAK_FAULT;
1212	}
1213
1214	switch (variant) {
1215	case AARCH64_INSN_VARIANT_32BIT:
1216		break;
1217	case AARCH64_INSN_VARIANT_64BIT:
1218		insn |= AARCH64_INSN_SF_BIT;
1219		break;
1220	default:
1221		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1222		return AARCH64_BREAK_FAULT;
1223	}
1224
1225	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1226
1227	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RA, insn, src);
1228
1229	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn,
1230					    reg1);
1231
1232	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn,
1233					    reg2);
1234}
1235
1236u32 aarch64_insn_gen_logical_shifted_reg(enum aarch64_insn_register dst,
1237					 enum aarch64_insn_register src,
1238					 enum aarch64_insn_register reg,
1239					 int shift,
1240					 enum aarch64_insn_variant variant,
1241					 enum aarch64_insn_logic_type type)
1242{
1243	u32 insn;
1244
1245	switch (type) {
1246	case AARCH64_INSN_LOGIC_AND:
1247		insn = aarch64_insn_get_and_value();
1248		break;
1249	case AARCH64_INSN_LOGIC_BIC:
1250		insn = aarch64_insn_get_bic_value();
1251		break;
1252	case AARCH64_INSN_LOGIC_ORR:
1253		insn = aarch64_insn_get_orr_value();
1254		break;
1255	case AARCH64_INSN_LOGIC_ORN:
1256		insn = aarch64_insn_get_orn_value();
1257		break;
1258	case AARCH64_INSN_LOGIC_EOR:
1259		insn = aarch64_insn_get_eor_value();
1260		break;
1261	case AARCH64_INSN_LOGIC_EON:
1262		insn = aarch64_insn_get_eon_value();
1263		break;
1264	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1265		insn = aarch64_insn_get_ands_value();
1266		break;
1267	case AARCH64_INSN_LOGIC_BIC_SETFLAGS:
1268		insn = aarch64_insn_get_bics_value();
1269		break;
1270	default:
1271		pr_err("%s: unknown logical encoding %d\n", __func__, type);
1272		return AARCH64_BREAK_FAULT;
1273	}
1274
1275	switch (variant) {
1276	case AARCH64_INSN_VARIANT_32BIT:
1277		if (shift & ~(SZ_32 - 1)) {
1278			pr_err("%s: invalid shift encoding %d\n", __func__,
1279			       shift);
1280			return AARCH64_BREAK_FAULT;
1281		}
1282		break;
1283	case AARCH64_INSN_VARIANT_64BIT:
1284		insn |= AARCH64_INSN_SF_BIT;
1285		if (shift & ~(SZ_64 - 1)) {
1286			pr_err("%s: invalid shift encoding %d\n", __func__,
1287			       shift);
1288			return AARCH64_BREAK_FAULT;
1289		}
1290		break;
1291	default:
1292		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1293		return AARCH64_BREAK_FAULT;
1294	}
1295
1296
1297	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, dst);
1298
1299	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, src);
1300
1301	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, reg);
1302
1303	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_6, insn, shift);
1304}
1305
/*
 * Decode the imm field of a branch, and return the byte offset as a
 * signed value (so it can be used when computing a new branch
 * target).
 */
s32 aarch64_get_branch_offset(u32 insn)
{
	s32 imm;

	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
		/*
		 * B/BL carry a 26-bit word offset: shift left so bit 25
		 * lands in the sign bit, then arithmetic-shift right,
		 * leaving a net multiplication by 4 (sign-extend + scale).
		 */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
		return (imm << 6) >> 4;
	}

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn)) {
		/* CBZ/CBNZ/B.cond: 19-bit field, same sign-extend-and-scale */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_19, insn);
		return (imm << 13) >> 11;
	}

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn)) {
		/* TBZ/TBNZ: 14-bit field */
		imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_14, insn);
		return (imm << 18) >> 16;
	}

	/* Unhandled instruction */
	BUG();
}
1334
/*
 * Encode the displacement of a branch in the imm field and return the
 * updated instruction.
 */
u32 aarch64_set_branch_offset(u32 insn, s32 offset)
{
	/*
	 * All handled branch forms encode a word offset, hence the
	 * "offset >> 2" before stuffing the immediate field.
	 */
	if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_26, insn,
						     offset >> 2);

	if (aarch64_insn_is_cbz(insn) || aarch64_insn_is_cbnz(insn) ||
	    aarch64_insn_is_bcond(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_19, insn,
						     offset >> 2);

	if (aarch64_insn_is_tbz(insn) || aarch64_insn_is_tbnz(insn))
		return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_14, insn,
						     offset >> 2);

	/* Unhandled instruction */
	BUG();
}
1357
/* Return the page-granular (4KiB-scaled) offset encoded in an ADRP. */
s32 aarch64_insn_adrp_get_offset(u32 insn)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_decode_immediate(AARCH64_INSN_IMM_ADR, insn) << 12;
}
1363
/* Re-encode an ADRP with a new offset; the immediate is in 4KiB pages. */
u32 aarch64_insn_adrp_set_offset(u32 insn, s32 offset)
{
	BUG_ON(!aarch64_insn_is_adrp(insn));
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_ADR, insn,
						offset >> 12);
}
1370
1371/*
1372 * Extract the Op/CR data from a msr/mrs instruction.
1373 */
1374u32 aarch64_insn_extract_system_reg(u32 insn)
1375{
1376	return (insn & 0x1FFFE0) >> 5;
1377}
1378
/*
 * Report whether the first halfword of an AArch32 (Thumb) instruction
 * indicates a 32-bit ("wide") encoding.
 */
bool aarch32_insn_is_wide(u32 insn)
{
	return insn >= 0xe800;
}
1383
1384/*
1385 * Macros/defines for extracting register numbers from instruction.
1386 */
1387u32 aarch32_insn_extract_reg_num(u32 insn, int offset)
1388{
1389	return (insn & (0xf << offset)) >> offset;
1390}
1391
1392#define OPC2_MASK	0x7
1393#define OPC2_OFFSET	5
1394u32 aarch32_insn_mcr_extract_opc2(u32 insn)
1395{
1396	return (insn & (OPC2_MASK << OPC2_OFFSET)) >> OPC2_OFFSET;
1397}
1398
#define CRM_MASK	0xf
/* Extract the 4-bit CRm field (bits [3:0]) from an MCR instruction. */
u32 aarch32_insn_mcr_extract_crm(u32 insn)
{
	return insn & CRM_MASK;
}
1404
/*
 * Condition-code evaluators, one per ARM condition value, each taking
 * the saved PSTATE/CPSR flag bits. They are dispatched through the
 * aarch32_opcode_cond_checks[] table indexed by the 4-bit condition
 * field.
 */

/* EQ: Z set */
static bool __kprobes __check_eq(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) != 0;
}

/* NE: Z clear */
static bool __kprobes __check_ne(unsigned long pstate)
{
	return (pstate & PSR_Z_BIT) == 0;
}

/* CS/HS: C set */
static bool __kprobes __check_cs(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) != 0;
}

/* CC/LO: C clear */
static bool __kprobes __check_cc(unsigned long pstate)
{
	return (pstate & PSR_C_BIT) == 0;
}

/* MI: N set */
static bool __kprobes __check_mi(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) != 0;
}

/* PL: N clear */
static bool __kprobes __check_pl(unsigned long pstate)
{
	return (pstate & PSR_N_BIT) == 0;
}

/* VS: V set */
static bool __kprobes __check_vs(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) != 0;
}

/* VC: V clear */
static bool __kprobes __check_vc(unsigned long pstate)
{
	return (pstate & PSR_V_BIT) == 0;
}

/* HI: C set and Z clear; the shift aligns Z onto C so it can mask it */
static bool __kprobes __check_hi(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) != 0;
}

/* LS: C clear or Z set */
static bool __kprobes __check_ls(unsigned long pstate)
{
	pstate &= ~(pstate >> 1);	/* PSR_C_BIT &= ~PSR_Z_BIT */
	return (pstate & PSR_C_BIT) == 0;
}

/* GE: N == V; the shift aligns V onto N so XOR compares them */
static bool __kprobes __check_ge(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) == 0;
}

/* LT: N != V */
static bool __kprobes __check_lt(unsigned long pstate)
{
	pstate ^= (pstate << 3);	/* PSR_N_BIT ^= PSR_V_BIT */
	return (pstate & PSR_N_BIT) != 0;
}

/* GT: Z clear and N == V */
static bool __kprobes __check_gt(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) == 0;
}

/* LE: Z set or N != V */
static bool __kprobes __check_le(unsigned long pstate)
{
	/*PSR_N_BIT ^= PSR_V_BIT */
	unsigned long temp = pstate ^ (pstate << 3);

	temp |= (pstate << 1);	/*PSR_N_BIT |= PSR_Z_BIT */
	return (temp & PSR_N_BIT) != 0;
}

/* AL: always true */
static bool __kprobes __check_al(unsigned long pstate)
{
	return true;
}
1491
/*
 * Condition check table, indexed by the 4-bit condition code field.
 *
 * Note that the ARMv8 ARM calls condition code 0b1111 "nv", but states that
 * it behaves identically to 0b1110 ("al").
 */
pstate_check_t * const aarch32_opcode_cond_checks[16] = {
	__check_eq, __check_ne, __check_cs, __check_cc,
	__check_mi, __check_pl, __check_vs, __check_vc,
	__check_hi, __check_ls, __check_ge, __check_lt,
	__check_gt, __check_le, __check_al, __check_al
};
1502
1503static bool range_of_ones(u64 val)
1504{
1505	/* Doesn't handle full ones or full zeroes */
1506	u64 sval = val >> __ffs64(val);
1507
1508	/* One of Sean Eron Anderson's bithack tricks */
1509	return ((sval + 1) & (sval)) == 0;
1510}
1511
/*
 * Encode @imm as an AArch64 "bitmask immediate" by deriving the
 * N:immr:imms fields and merging them into @insn. Returns
 * AARCH64_BREAK_FAULT when @imm is not representable (bitmask
 * immediates are a replicated, rotated run of contiguous ones).
 */
static u32 aarch64_encode_immediate(u64 imm,
				    enum aarch64_insn_variant variant,
				    u32 insn)
{
	unsigned int immr, imms, n, ones, ror, esz, tmp;
	u64 mask = ~0UL;

	/* Can't encode full zeroes or full ones */
	if (!imm || !~imm)
		return AARCH64_BREAK_FAULT;

	switch (variant) {
	case AARCH64_INSN_VARIANT_32BIT:
		if (upper_32_bits(imm))
			return AARCH64_BREAK_FAULT;
		esz = 32;
		break;
	case AARCH64_INSN_VARIANT_64BIT:
		insn |= AARCH64_INSN_SF_BIT;
		esz = 64;
		break;
	default:
		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
		return AARCH64_BREAK_FAULT;
	}

	/*
	 * Inverse of Replicate(). Try to spot a repeating pattern
	 * with a pow2 stride: halve the element size while both halves
	 * of the value are identical.
	 */
	for (tmp = esz / 2; tmp >= 2; tmp /= 2) {
		u64 emask = BIT(tmp) - 1;

		if ((imm & emask) != ((imm >> tmp) & emask))
			break;

		esz = tmp;
		mask = emask;
	}

	/* N is only set if we're encoding a 64bit value */
	n = esz == 64;

	/* Trim imm to the element size */
	imm &= mask;

	/* That's how many ones we need to encode */
	ones = hweight64(imm);

	/*
	 * imms is set to (ones - 1), prefixed with a string of ones
	 * and a zero if they fit. Cap it to 6 bits.
	 */
	imms  = ones - 1;
	imms |= 0xf << ffs(esz);
	imms &= BIT(6) - 1;

	/* Compute the rotation */
	if (range_of_ones(imm)) {
		/*
		 * Pattern: 0..01..10..0
		 *
		 * Compute how many rotate we need to align it right
		 */
		ror = __ffs64(imm);
	} else {
		/*
		 * Pattern: 0..01..10..01..1
		 *
		 * Fill the unused top bits with ones, and check if
		 * the result is a valid immediate (all ones with a
		 * contiguous ranges of zeroes).
		 */
		imm |= ~mask;
		if (!range_of_ones(~imm))
			return AARCH64_BREAK_FAULT;

		/*
		 * Compute the rotation to get a continuous set of
		 * ones, with the first bit set at position 0
		 */
		ror = fls(~imm);
	}

	/*
	 * immr is the number of bits we need to rotate back to the
	 * original set of ones. Note that this is relative to the
	 * element size...
	 */
	immr = (esz - ror) % esz;

	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, n);
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_R, insn, immr);
	return aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, imms);
}
1607
1608u32 aarch64_insn_gen_logical_immediate(enum aarch64_insn_logic_type type,
1609				       enum aarch64_insn_variant variant,
1610				       enum aarch64_insn_register Rn,
1611				       enum aarch64_insn_register Rd,
1612				       u64 imm)
1613{
1614	u32 insn;
1615
1616	switch (type) {
1617	case AARCH64_INSN_LOGIC_AND:
1618		insn = aarch64_insn_get_and_imm_value();
1619		break;
1620	case AARCH64_INSN_LOGIC_ORR:
1621		insn = aarch64_insn_get_orr_imm_value();
1622		break;
1623	case AARCH64_INSN_LOGIC_EOR:
1624		insn = aarch64_insn_get_eor_imm_value();
1625		break;
1626	case AARCH64_INSN_LOGIC_AND_SETFLAGS:
1627		insn = aarch64_insn_get_ands_imm_value();
1628		break;
1629	default:
1630		pr_err("%s: unknown logical encoding %d\n", __func__, type);
1631		return AARCH64_BREAK_FAULT;
1632	}
1633
1634	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1635	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1636	return aarch64_encode_immediate(imm, variant, insn);
1637}
1638
1639u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
1640			  enum aarch64_insn_register Rm,
1641			  enum aarch64_insn_register Rn,
1642			  enum aarch64_insn_register Rd,
1643			  u8 lsb)
1644{
1645	u32 insn;
1646
1647	insn = aarch64_insn_get_extr_value();
1648
1649	switch (variant) {
1650	case AARCH64_INSN_VARIANT_32BIT:
1651		if (lsb > 31)
1652			return AARCH64_BREAK_FAULT;
1653		break;
1654	case AARCH64_INSN_VARIANT_64BIT:
1655		if (lsb > 63)
1656			return AARCH64_BREAK_FAULT;
1657		insn |= AARCH64_INSN_SF_BIT;
1658		insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_N, insn, 1);
1659		break;
1660	default:
1661		pr_err("%s: unknown variant encoding %d\n", __func__, variant);
1662		return AARCH64_BREAK_FAULT;
1663	}
1664
1665	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_S, insn, lsb);
1666	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RD, insn, Rd);
1667	insn = aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RN, insn, Rn);
1668	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
1669}