// SPDX-License-Identifier: GPL-2.0
#include <linux/arm-smccc.h>
#include <linux/kernel.h>
#include <linux/smp.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/proc-fns.h>
#include <asm/spectre.h>
#include <asm/system_misc.h>

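/*
 * Ask the firmware (via the SMCCC 1.1 "arch features" query) whether
 * ARCH_WORKAROUND_1 is implemented for this CPU, and translate the
 * answer into the generic SPECTRE_* state values.
 */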
#ifdef CONFIG_ARM_PSCI
static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	switch ((int)res.a0) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;

	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;

	default:
		return SPECTRE_VULNERABLE;
	}
}
#else
static int __maybe_unused spectre_v2_get_cpu_fw_mitigation_state(void)
{
	return SPECTRE_VULNERABLE;
}
#endif

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
DEFINE_PER_CPU(harden_branch_predictor_fn_t, harden_branch_predictor_fn);

extern void cpu_v7_iciallu_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_bpiall_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_smc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);
extern void cpu_v7_hvc_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm);

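/*
 * Per-CPU branch predictor hardening callbacks: BPIALL invalidates the
 * branch predictor directly, ICIALLU invalidates the I-cache (which
 * also flushes predictor state on the affected parts), and the SMC/HVC
 * variants call into firmware or the hypervisor via ARCH_WORKAROUND_1.
 */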
static void harden_branch_predictor_bpiall(void)
{
	write_sysreg(0, BPIALL);
}

static void harden_branch_predictor_iciallu(void)
{
	write_sysreg(0, ICIALLU);
}

static void __maybe_unused call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void __maybe_unused call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

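/*
 * Install the chosen hardening callback for this CPU. For the firmware
 * and hypervisor methods, also switch cpu_do_switch_mm to the variant
 * that issues the SMC/HVC call on context switch.
 */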
static unsigned int spectre_v2_install_workaround(unsigned int method)
{
	const char *spectre_v2_method = NULL;
	int cpu = smp_processor_id();

	if (per_cpu(harden_branch_predictor_fn, cpu))
		return SPECTRE_MITIGATED;

	switch (method) {
	case SPECTRE_V2_METHOD_BPIALL:
		per_cpu(harden_branch_predictor_fn, cpu) =
			harden_branch_predictor_bpiall;
		spectre_v2_method = "BPIALL";
		break;

	case SPECTRE_V2_METHOD_ICIALLU:
		per_cpu(harden_branch_predictor_fn, cpu) =
			harden_branch_predictor_iciallu;
		spectre_v2_method = "ICIALLU";
		break;

	case SPECTRE_V2_METHOD_HVC:
		per_cpu(harden_branch_predictor_fn, cpu) =
			call_hvc_arch_workaround_1;
		cpu_do_switch_mm = cpu_v7_hvc_switch_mm;
		spectre_v2_method = "hypervisor";
		break;

	case SPECTRE_V2_METHOD_SMC:
		per_cpu(harden_branch_predictor_fn, cpu) =
			call_smc_arch_workaround_1;
		cpu_do_switch_mm = cpu_v7_smc_switch_mm;
		spectre_v2_method = "firmware";
		break;
	}

	if (spectre_v2_method)
		pr_info("CPU%u: Spectre v2: using %s workaround\n",
			smp_processor_id(), spectre_v2_method);

	return SPECTRE_MITIGATED;
}
#else
static unsigned int spectre_v2_install_workaround(unsigned int method)
{
	pr_info_once("Spectre V2: workarounds disabled by configuration\n");

	return SPECTRE_VULNERABLE;
}
#endif

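/*
 * Pick the Spectre v2 mitigation for the current CPU based on its part
 * number: BPIALL or ICIALLU where the CP15 operation is sufficient, or
 * the firmware/hypervisor ARCH_WORKAROUND_1 call for Cortex-A57/A72,
 * then install it and record the resulting state.
 */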
static void cpu_v7_spectre_v2_init(void)
{
	unsigned int state, method = 0;

	switch (read_cpuid_part()) {
	case ARM_CPU_PART_CORTEX_A8:
	case ARM_CPU_PART_CORTEX_A9:
	case ARM_CPU_PART_CORTEX_A12:
	case ARM_CPU_PART_CORTEX_A17:
	case ARM_CPU_PART_CORTEX_A73:
	case ARM_CPU_PART_CORTEX_A75:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_BPIALL;
		break;

	case ARM_CPU_PART_CORTEX_A15:
	case ARM_CPU_PART_BRAHMA_B15:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_ICIALLU;
		break;

	case ARM_CPU_PART_BRAHMA_B53:
		/* Requires no workaround */
		state = SPECTRE_UNAFFECTED;
		break;

	default:
		/* Other ARM CPUs require no workaround */
		if (read_cpuid_implementor() == ARM_CPU_IMP_ARM) {
			state = SPECTRE_UNAFFECTED;
			break;
		}

		fallthrough;

	/* Cortex A57/A72 require firmware workaround */
	case ARM_CPU_PART_CORTEX_A57:
	case ARM_CPU_PART_CORTEX_A72:
		state = spectre_v2_get_cpu_fw_mitigation_state();
		if (state != SPECTRE_MITIGATED)
			break;

		switch (arm_smccc_1_1_get_conduit()) {
		case SMCCC_CONDUIT_HVC:
			method = SPECTRE_V2_METHOD_HVC;
			break;

		case SMCCC_CONDUIT_SMC:
			method = SPECTRE_V2_METHOD_SMC;
			break;

		default:
			state = SPECTRE_VULNERABLE;
			break;
		}
	}

	if (state == SPECTRE_MITIGATED)
		state = spectre_v2_install_workaround(method);

	spectre_v2_update_state(state, method);
}

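/*
 * Spectre BHB: all CPUs must agree on a single workaround method, as
 * the mitigation is applied to the shared exception vectors.
 */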
#ifdef CONFIG_HARDEN_BRANCH_HISTORY
static int spectre_bhb_method;

static const char *spectre_bhb_method_name(int method)
{
	switch (method) {
	case SPECTRE_V2_METHOD_LOOP8:
		return "loop";

	case SPECTRE_V2_METHOD_BPIALL:
		return "BPIALL";

	default:
		return "unknown";
	}
}

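/*
 * Install the chosen BHB workaround system-wide: refuse a CPU that
 * requests a different method than the one already in use, otherwise
 * update the vectors via spectre_bhb_update_vectors() and record the
 * chosen method.
 */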
static int spectre_bhb_install_workaround(int method)
{
	if (spectre_bhb_method != method) {
		if (spectre_bhb_method) {
			pr_err("CPU%u: Spectre BHB: method disagreement, system vulnerable\n",
			       smp_processor_id());

			return SPECTRE_VULNERABLE;
		}

		if (spectre_bhb_update_vectors(method) == SPECTRE_VULNERABLE)
			return SPECTRE_VULNERABLE;

		spectre_bhb_method = method;

		pr_info("CPU%u: Spectre BHB: enabling %s workaround for all CPUs\n",
			smp_processor_id(), spectre_bhb_method_name(method));
	}

	return SPECTRE_MITIGATED;
}
#else
static int spectre_bhb_install_workaround(int method)
{
	return SPECTRE_VULNERABLE;
}
#endif

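/*
 * Select the Spectre BHB mitigation for the current CPU: the loop
 * workaround for Cortex-A15/A57/A72 and Brahma-B15, BPIALL for
 * Cortex-A73/A75, and nothing for everything else.
 */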
static void cpu_v7_spectre_bhb_init(void)
{
	unsigned int state, method = 0;

	switch (read_cpuid_part()) {
	case ARM_CPU_PART_CORTEX_A15:
	case ARM_CPU_PART_BRAHMA_B15:
	case ARM_CPU_PART_CORTEX_A57:
	case ARM_CPU_PART_CORTEX_A72:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_LOOP8;
		break;

	case ARM_CPU_PART_CORTEX_A73:
	case ARM_CPU_PART_CORTEX_A75:
		state = SPECTRE_MITIGATED;
		method = SPECTRE_V2_METHOD_BPIALL;
		break;

	default:
		state = SPECTRE_UNAFFECTED;
		break;
	}

	if (state == SPECTRE_MITIGATED)
		state = spectre_bhb_install_workaround(method);

	spectre_v2_update_state(state, method);
}

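/*
 * Check that firmware has set the required bits in the auxiliary
 * control register (ACTLR). The warning is emitted at most once per
 * CPU via the *warned flag.
 */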
static __maybe_unused bool cpu_v7_check_auxcr_set(bool *warned,
						  u32 mask, const char *msg)
{
	u32 aux_cr;

	asm("mrc p15, 0, %0, c1, c0, 1" : "=r" (aux_cr));

	if ((aux_cr & mask) != mask) {
		if (!*warned)
			pr_err("CPU%u: %s", smp_processor_id(), msg);
		*warned = true;
		return false;
	}
	return true;
}

static DEFINE_PER_CPU(bool, spectre_warned);

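/*
 * Returns true only when CONFIG_HARDEN_BRANCH_PREDICTOR is enabled and
 * the given ACTLR (IBE) bit has been set by firmware.
 */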
static bool check_spectre_auxcr(bool *warned, u32 bit)
{
	return IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR) &&
		cpu_v7_check_auxcr_set(warned, bit,
				       "Spectre v2: firmware did not set auxiliary control register IBE bit, system vulnerable\n");
}

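/* Cortex-A8: the IBE bit is ACTLR[6]; only mitigate if firmware set it. */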
void cpu_v7_ca8_ibe(void)
{
	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(6)))
		cpu_v7_spectre_v2_init();
}

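/*
 * Cortex-A15: the IBE bit is ACTLR[0]. The BHB mitigation does not
 * depend on it, so it is initialised unconditionally.
 */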
void cpu_v7_ca15_ibe(void)
{
	if (check_spectre_auxcr(this_cpu_ptr(&spectre_warned), BIT(0)))
		cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}

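/* Default Spectre v2/BHB initialisation, where no ACTLR (IBE) check is needed. */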
void cpu_v7_bugs_init(void)
{
	cpu_v7_spectre_v2_init();
	cpu_v7_spectre_bhb_init();
}