// SPDX-License-Identifier: GPL-2.0-only
/*
 * alternative runtime patching
 * inspired by the x86 version
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#define pr_fmt(fmt) "alternatives: " fmt

#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elf.h>
#include <asm/cacheflush.h>
#include <asm/alternative.h>
#include <asm/cpufeature.h>
#include <asm/insn.h>
#include <asm/module.h>
#include <asm/sections.h>
#include <asm/vdso.h>
#include <linux/stop_machine.h>

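/*
 * Each alt_instr entry records its original and replacement sequences as
 * offsets relative to the struct fields themselves, not as absolute
 * pointers, so the entries need no relocation processing.
 */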
#define __ALT_PTR(a, f)		((void *)&(a)->f + (a)->f)
#define ALT_ORIG_PTR(a)		__ALT_PTR(a, orig_offset)
#define ALT_REPL_PTR(a)		__ALT_PTR(a, alt_offset)

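/*
 * The cpucap field carries an ARM64 cpucap number; ARM64_CB_BIT marks
 * entries whose "replacement" is a patching callback rather than a
 * literal instruction sequence.
 */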
#define ALT_CAP(a)		((a)->cpucap & ~ARM64_CB_BIT)
#define ALT_HAS_CB(a)		((a)->cpucap & ARM64_CB_BIT)

/* Volatile, as we may be patching the guts of READ_ONCE() */
static volatile int all_alternatives_applied;

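/* Tracks which cpucaps have had their alternatives applied system-wide. */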
static DECLARE_BITMAP(applied_alternatives, ARM64_NCAPS);

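/*
 * An alt_region delimits one contiguous array of alt_instr entries: the
 * kernel image's, a module's, or the vDSO's.
 */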
struct alt_region {
	struct alt_instr *begin;
	struct alt_instr *end;
};

bool alternative_is_applied(u16 cpucap)
{
	if (WARN_ON(cpucap >= ARM64_NCAPS))
		return false;

	return test_bit(cpucap, applied_alternatives);
}

/*
 * Check whether the branch target PC falls outside the replacement
 * sequence; if it does, the branch offset must be rewritten when the
 * instruction is copied to its new location.
 */
static __always_inline bool branch_insn_requires_update(struct alt_instr *alt, unsigned long pc)
{
	unsigned long replptr = (unsigned long)ALT_REPL_PTR(alt);
	return !(pc >= replptr && pc <= (replptr + alt->alt_len));
}

#define align_down(x, a)	((unsigned long)(x) & ~(((unsigned long)(a)) - 1))

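/*
 * Read one instruction from the replacement sequence and, where it is
 * PC-relative (immediate branch or adrp), rewrite its offset so that it
 * is correct when executed from the original location.
 */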
static __always_inline u32 get_alt_insn(struct alt_instr *alt, __le32 *insnptr, __le32 *altinsnptr)
{
	u32 insn;

	insn = le32_to_cpu(*altinsnptr);

	if (aarch64_insn_is_branch_imm(insn)) {
		s32 offset = aarch64_get_branch_offset(insn);
		unsigned long target;

		target = (unsigned long)altinsnptr + offset;

		/*
		 * If we're branching inside the alternate sequence,
		 * do not rewrite the instruction, as it is already
		 * correct. Otherwise, generate the new instruction.
		 */
		if (branch_insn_requires_update(alt, target)) {
			offset = target - (unsigned long)insnptr;
			insn = aarch64_set_branch_offset(insn, offset);
		}
	} else if (aarch64_insn_is_adrp(insn)) {
		s32 orig_offset, new_offset;
		unsigned long target;

		/*
		 * If we're replacing an adrp instruction, which uses PC-relative
		 * immediate addressing, adjust the offset to reflect the new
		 * PC. adrp operates on 4K aligned addresses.
		 */
		orig_offset = aarch64_insn_adrp_get_offset(insn);
		target = align_down(altinsnptr, SZ_4K) + orig_offset;
		new_offset = target - align_down(insnptr, SZ_4K);
		insn = aarch64_insn_adrp_set_offset(insn, new_offset);
	} else if (aarch64_insn_uses_literal(insn)) {
		/*
		 * Disallow patching unhandled instructions using PC relative
		 * literal addresses
		 */
		BUG();
	}

	return insn;
}

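/*
 * Default patching callback: copy nr_inst replacement instructions over
 * the original site, fixing up PC-relative instructions on the way.
 */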
static noinstr void patch_alternative(struct alt_instr *alt,
			      __le32 *origptr, __le32 *updptr, int nr_inst)
{
	__le32 *replptr;
	int i;

	replptr = ALT_REPL_PTR(alt);
	for (i = 0; i < nr_inst; i++) {
		u32 insn;

		insn = get_alt_insn(alt, origptr + i, replptr + i);
		updptr[i] = cpu_to_le32(insn);
	}
}

/*
 * We provide our own, private D-cache cleaning function so that we don't
 * accidentally call into the cache.S code, which is patched by us at
 * runtime.
 */
static noinstr void clean_dcache_range_nopatch(u64 start, u64 end)
{
	u64 cur, d_size, ctr_el0;

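	/*
	 * CTR_EL0.DminLine is log2 of the smallest D-cache line size, in
	 * 4-byte words; use the system-wide sanitised value so the stride
	 * is safe on every CPU.
	 */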
	ctr_el0 = arm64_ftr_reg_ctrel0.sys_val;
	d_size = 4 << cpuid_feature_extract_unsigned_field(ctr_el0,
							   CTR_EL0_DminLine_SHIFT);
	cur = start & ~(d_size - 1);
	do {
		/*
		 * We must clean+invalidate to the PoC in order to avoid
		 * Cortex-A53 errata 826319, 827319, 824069 and 819472
		 * (this corresponds to ARM64_WORKAROUND_CLEAN_CACHE)
		 */
		asm volatile("dc civac, %0" : : "r" (cur) : "memory");
	} while (cur += d_size, cur < end);
}

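/*
 * Walk a region of alt_instr entries and, for each cpucap that is both
 * set in cpucap_mask and detected on this system, run the patching
 * callback and then perform the required cache maintenance.
 */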
static void __apply_alternatives(const struct alt_region *region,
				 bool is_module,
				 unsigned long *cpucap_mask)
{
	struct alt_instr *alt;
	__le32 *origptr, *updptr;
	alternative_cb_t alt_cb;

	for (alt = region->begin; alt < region->end; alt++) {
		int nr_inst;
		int cap = ALT_CAP(alt);

		if (!test_bit(cap, cpucap_mask))
			continue;

		if (!cpus_have_cap(cap))
			continue;

		if (ALT_HAS_CB(alt))
			BUG_ON(alt->alt_len != 0);
		else
			BUG_ON(alt->alt_len != alt->orig_len);

		origptr = ALT_ORIG_PTR(alt);
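		/*
		 * The kernel's text mapping is read-only, so core-kernel
		 * sites are written through the writable linear-map alias;
		 * module text is still writable at this point in the
		 * module load sequence.
		 */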
		updptr = is_module ? origptr : lm_alias(origptr);
		nr_inst = alt->orig_len / AARCH64_INSN_SIZE;

		if (ALT_HAS_CB(alt))
			alt_cb = ALT_REPL_PTR(alt);
		else
			alt_cb = patch_alternative;

		alt_cb(alt, origptr, updptr, nr_inst);

		if (!is_module) {
			clean_dcache_range_nopatch((u64)origptr,
						   (u64)(origptr + nr_inst));
		}
	}

	/*
	 * The core module code takes care of cache maintenance in
	 * flush_module_icache().
	 */
	if (!is_module) {
		dsb(ish);
		icache_inval_all_pou();
		isb();

		bitmap_or(applied_alternatives, applied_alternatives,
			  cpucap_mask, ARM64_NCAPS);
		bitmap_and(applied_alternatives, applied_alternatives,
			   system_cpucaps, ARM64_NCAPS);
	}
}

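/*
 * The vDSO carries its own .altinstructions section. It is patched once,
 * with a full cpucap mask; __apply_alternatives() itself filters out the
 * cpucaps this system does not have.
 */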
static void __init apply_alternatives_vdso(void)
{
	struct alt_region region;
	const struct elf64_hdr *hdr;
	const struct elf64_shdr *shdr;
	const struct elf64_shdr *alt;
	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);

	bitmap_fill(all_capabilities, ARM64_NCAPS);

	hdr = (struct elf64_hdr *)vdso_start;
	shdr = (void *)hdr + hdr->e_shoff;
	alt = find_section(hdr, shdr, ".altinstructions");
	if (!alt)
		return;

	region = (struct alt_region){
		.begin	= (void *)hdr + alt->sh_offset,
		.end	= (void *)hdr + alt->sh_offset + alt->sh_size,
	};

	__apply_alternatives(&region, false, &all_capabilities[0]);
}

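/* The kernel image's .altinstructions section, delimited by linker symbols. */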
static const struct alt_region kernel_alternatives __initconst = {
	.begin	= (struct alt_instr *)__alt_instructions,
	.end	= (struct alt_instr *)__alt_instructions_end,
};

/*
 * We might be patching the stop_machine state machine, so implement a
 * really simple polling protocol here.
 */
static int __init __apply_alternatives_multi_stop(void *unused)
{
	/* We always have a CPU 0 at this point (__init) */
	if (smp_processor_id()) {
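		/*
		 * Secondary CPUs spin here while CPU 0 patches; the
		 * final isb() discards any stale instructions this CPU
		 * may have prefetched.
		 */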
		while (!all_alternatives_applied)
			cpu_relax();
		isb();
	} else {
		DECLARE_BITMAP(remaining_capabilities, ARM64_NCAPS);

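		/*
		 * The boot-CPU cpucaps were already patched by
		 * apply_boot_alternatives(); only the complement
		 * remains to be applied here.
		 */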
		bitmap_complement(remaining_capabilities, boot_cpucaps,
				  ARM64_NCAPS);

		BUG_ON(all_alternatives_applied);
		__apply_alternatives(&kernel_alternatives, false,
				     remaining_capabilities);
		/* Barriers provided by the cache flushing */
		all_alternatives_applied = 1;
	}

	return 0;
}

void __init apply_alternatives_all(void)
{
	pr_info("applying system-wide alternatives\n");

	apply_alternatives_vdso();
	/* better not try code patching on a live SMP system */
	stop_machine(__apply_alternatives_multi_stop, NULL, cpu_online_mask);
}

/*
 * This is called very early in the boot process (directly after we run
 * a feature detect on the boot CPU). No need to worry about other CPUs
 * here.
 */
void __init apply_boot_alternatives(void)
{
	/* If called on a non-boot CPU, things could go wrong */
	WARN_ON(smp_processor_id() != 0);

	pr_info("applying boot alternatives\n");

	__apply_alternatives(&kernel_alternatives, false,
			     &boot_cpucaps[0]);
}

#ifdef CONFIG_MODULES
void apply_alternatives_module(void *start, size_t length)
{
	struct alt_region region = {
		.begin	= start,
		.end	= start + length,
	};
	DECLARE_BITMAP(all_capabilities, ARM64_NCAPS);

	bitmap_fill(all_capabilities, ARM64_NCAPS);

	__apply_alternatives(&region, true, &all_capabilities[0]);
}
#endif

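/*
 * Patching callback that overwrites the original instructions with NOPs;
 * exported so module code can use it too.
 */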
noinstr void alt_cb_patch_nops(struct alt_instr *alt, __le32 *origptr,
			       __le32 *updptr, int nr_inst)
{
	for (int i = 0; i < nr_inst; i++)
		updptr[i] = cpu_to_le32(aarch64_insn_gen_nop());
}
EXPORT_SYMBOL(alt_cb_patch_nops);