v4.6 (arch/arm64/kernel/cpu_errata.c)

/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry)
{
	return MIDR_IS_CPU_MODEL_RANGE(read_cpuid_id(), entry->midr_model,
				       entry->midr_range_min,
				       entry->midr_range_max);
}

#define MIDR_RANGE(model, min, max) \
	.matches = is_affected_midr_range, \
	.midr_model = model, \
	.midr_range_min = min, \
	.midr_range_max = max
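
/*
 * Editorial sketch, not part of the original file: MIDR_EL1 encodes the
 * part's variant in bits [23:20] and its revision in bits [3:0], so the
 * min/max bounds passed to MIDR_RANGE() are values of the form
 * (variant << MIDR_VARIANT_SHIFT) | revision. midr_range_demo() below is
 * a hypothetical illustration of the comparison MIDR_IS_CPU_MODEL_RANGE()
 * performs (the model-number check is omitted for brevity); it is not a
 * kernel function.
 */
static bool __maybe_unused midr_range_demo(u32 midr, u32 min, u32 max)
{
	/* Keep only variant[23:20] and revision[3:0] */
	u32 var_rev = midr & (MIDR_VARIANT_MASK | MIDR_REVISION_MASK);

	/* e.g. min = 0x00 (r0p0), max = (1 << MIDR_VARIANT_SHIFT) | 2 (r1p2) */
	return var_rev >= min && var_rev <= max;
}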

const struct arm64_cpu_capabilities arm64_errata[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	{
	/* Cortex-A53 r0p[012] */
		.desc = "ARM errata 826319, 827319, 824069",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x02),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_819472
	{
	/* Cortex-A53 r0p[01] */
		.desc = "ARM errata 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x01),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		MIDR_RANGE(MIDR_CORTEX_A57, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
	/* Cortex-A53 r0p[01234] */
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		MIDR_RANGE(MIDR_CORTEX_A53, 0x00, 0x04),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		MIDR_RANGE(MIDR_THUNDERX, 0x00, 0x01),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		MIDR_RANGE(MIDR_THUNDERX, 0x00,
			   (1 << MIDR_VARIANT_SHIFT) | 1),
	},
#endif
	{
	}
};

void check_local_cpu_errata(void)
{
	update_cpu_capabilities(arm64_errata, "enabling workaround for");
}
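
/*
 * Editorial note: update_cpu_capabilities() walks arm64_errata[], invokes
 * each entry's .matches() hook on the booting CPU, and logs the entry's
 * .desc prefixed by the string passed above for every hit. On an affected
 * part one would therefore expect a dmesg line roughly like (the exact
 * prefix depends on the kernel's pr_fmt):
 *
 *	CPU features: enabling workaround for ARM erratum 845719
 */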
v5.4 (arch/arm64/kernel/cpu_errata.c)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU specific errata definitions
 *
 * Copyright (C) 2014 ARM Ltd.
 */

#include <linux/arm-smccc.h>
#include <linux/psci.h>
#include <linux/types.h>
#include <linux/cpu.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>
#include <asm/smp_plat.h>

static bool __maybe_unused
is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
{
	const struct arm64_midr_revidr *fix;
	u32 midr = read_cpuid_id(), revidr;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	if (!is_midr_in_range(midr, &entry->midr_range))
		return false;

	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
	revidr = read_cpuid(REVIDR_EL1);
	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
			return false;

	return true;
}
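
/*
 * Editorial note: compared with v4.6, this matcher also consults
 * REVIDR_EL1. An entry may carry ->fixed_revs pairs of (variant/revision,
 * REVIDR mask); if the running CPU is one of those revisions and the
 * corresponding REVIDR bit is set, the erratum was fixed in that
 * particular part and the workaround is skipped. The erratum 843419 table
 * later in this file uses MIDR_FIXED(0x4, BIT(8)) this way: a Cortex-A53
 * r0p4 whose REVIDR_EL1 has bit 8 set does not need the workaround.
 */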

static bool __maybe_unused
is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
			    int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
}

static bool __maybe_unused
is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
{
	u32 model;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	model = read_cpuid_id();
	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
		 MIDR_ARCHITECTURE_MASK;

	return model == entry->midr_range.model;
}

static bool
has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
			  int scope)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
	u64 ctr_raw, ctr_real;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/*
	 * We want to make sure that all the CPUs in the system expose
	 * a consistent CTR_EL0 so that applications behave correctly
	 * with migration.
	 *
	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0:
	 *
	 * 1) It is safe if the system doesn't support IDC, as the CPU
	 *    anyway reports IDC = 0, consistent with the rest.
	 *
	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
	 *
	 * So, we need to make sure either the raw CTR_EL0 or the effective
	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
	 */
	ctr_raw = read_cpuid_cachetype() & mask;
	ctr_real = read_cpuid_effective_cachetype() & mask;

	return (ctr_real != sys) && (ctr_raw != sys);
}

static void
cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
{
	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;

	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
	if ((read_cpuid_cachetype() & mask) !=
	    (arm64_ftr_reg_ctrel0.sys_val & mask))
		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
}
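
/*
 * Editorial note: clearing SCTLR_EL1.UCT makes EL0 reads of CTR_EL0 trap
 * to EL1, where the kernel can emulate the sanitised system-wide value,
 * so userspace never observes this CPU's mismatched cache geometry.
 */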

atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

#ifdef CONFIG_KVM_INDIRECT_VECTORS
extern char __smccc_workaround_1_smc_start[];
extern char __smccc_workaround_1_smc_end[];

static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
				const char *hyp_vecs_end)
{
	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
	int i;

	for (i = 0; i < SZ_2K; i += 0x80)
		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);

	__flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
}
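
/*
 * Editorial note: an EL2 vector table is 2KB, i.e. 16 entries of 0x80
 * (128) bytes each. The loop above stamps one copy of the hardening stub
 * into every entry of the chosen slot, then invalidates the I-cache for
 * the whole slot.
 */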

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				    const char *hyp_vecs_start,
				    const char *hyp_vecs_end)
{
	static DEFINE_RAW_SPINLOCK(bp_lock);
	int cpu, slot = -1;

	/*
	 * detect_harden_bp_fw() passes NULL for the hyp_vecs start/end if
	 * we're a guest. Skip the hyp-vectors work.
	 */
	if (!hyp_vecs_start) {
		__this_cpu_write(bp_hardening_data.fn, fn);
		return;
	}

	raw_spin_lock(&bp_lock);
	for_each_possible_cpu(cpu) {
		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
			break;
		}
	}

	if (slot == -1) {
		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
	}

	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
	__this_cpu_write(bp_hardening_data.fn, fn);
	raw_spin_unlock(&bp_lock);
}
#else
#define __smccc_workaround_1_smc_start		NULL
#define __smccc_workaround_1_smc_end		NULL

static void install_bp_hardening_cb(bp_hardening_cb_t fn,
				      const char *hyp_vecs_start,
				      const char *hyp_vecs_end)
{
	__this_cpu_write(bp_hardening_data.fn, fn);
}
#endif	/* CONFIG_KVM_INDIRECT_VECTORS */
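
/*
 * Editorial note: slots are handed out by bumping
 * arm64_el2_vector_last_slot, and CPUs that share a firmware callback
 * reuse the slot of the first CPU that registered it, so a homogeneous
 * system typically consumes a single slot out of BP_HARDEN_EL2_SLOTS.
 */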

#include <uapi/linux/psci.h>
#include <linux/arm-smccc.h>
#include <linux/psci.h>

static void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

static void qcom_link_stack_sanitization(void)
{
	u64 tmp;

	/*
	 * Stuff the return-address predictor with benign entries: save the
	 * link register, execute sixteen bl instructions that each "call"
	 * the very next instruction, then restore the link register.
	 */
	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}

static bool __nospectre_v2;
static int __init parse_nospectre_v2(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_nospectre_v2);

/*
 * -1: No workaround
 *  0: No workaround required
 *  1: Workaround installed
 */
static int detect_harden_bp_fw(void)
{
	bp_hardening_cb_t cb;
	void *smccc_start, *smccc_end;
	struct arm_smccc_res res;
	u32 midr = read_cpuid_id();

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
		return -1;

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_hvc_arch_workaround_1;
			/* This is a guest, no need to patch KVM vectors */
			smccc_start = NULL;
			smccc_end = NULL;
			break;
		default:
			return -1;
		}
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
		switch ((int)res.a0) {
		case 1:
			/* Firmware says we're just fine */
			return 0;
		case 0:
			cb = call_smc_arch_workaround_1;
			smccc_start = __smccc_workaround_1_smc_start;
			smccc_end = __smccc_workaround_1_smc_end;
			break;
		default:
			return -1;
		}
		break;

	default:
		return -1;
	}

	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
		cb = qcom_link_stack_sanitization;

	if (IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR))
		install_bp_hardening_cb(cb, smccc_start, smccc_end);

	return 1;
}
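
/*
 * Editorial note: the probe above follows SMCCC 1.1 discovery:
 * ARM_SMCCC_ARCH_FEATURES_FUNC_ID is invoked with
 * ARM_SMCCC_ARCH_WORKAROUND_1 as its argument, and the returned a0 is 0
 * when the workaround call is implemented and needed, 1 when this CPU
 * does not require it, and negative when it is unavailable.
 */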

DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

int ssbd_state __read_mostly = ARM64_SSBD_KERNEL;
static bool __ssb_safe = true;

static const struct ssbd_options {
	const char	*str;
	int		state;
} ssbd_options[] = {
	{ "force-on",	ARM64_SSBD_FORCE_ENABLE, },
	{ "force-off",	ARM64_SSBD_FORCE_DISABLE, },
	{ "kernel",	ARM64_SSBD_KERNEL, },
};

static int __init ssbd_cfg(char *buf)
{
	int i;

	if (!buf || !buf[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(ssbd_options); i++) {
		int len = strlen(ssbd_options[i].str);

		if (strncmp(buf, ssbd_options[i].str, len))
			continue;

		ssbd_state = ssbd_options[i].state;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", ssbd_cfg);
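
/*
 * Usage note (editorial): "ssbd" is consumed from the kernel command
 * line, so booting with e.g.
 *
 *	ssbd=force-on
 *
 * selects ARM64_SSBD_FORCE_ENABLE before SMP bring-up; "force-off" and
 * "kernel" map to the other two states in ssbd_options[] above.
 */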

void __init arm64_update_smccc_conduit(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr,
				       int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1);

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case PSCI_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}

void __init arm64_enable_wa2_handling(struct alt_instr *alt,
				      __le32 *origptr, __le32 *updptr,
				      int nr_inst)
{
	BUG_ON(nr_inst != 1);
	/*
	 * Only allow mitigation on EL1 entry/exit and guest
	 * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
	 * be flipped.
	 */
	if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}
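
/*
 * Editorial note: both helpers above are alternatives callbacks run at
 * patch time. Each alternative site reserves exactly one instruction
 * (hence the BUG_ON(nr_inst != 1)), which is rewritten to HVC or SMC to
 * match the PSCI conduit, or to a NOP to enable ARCH_WORKAROUND_2
 * handling when the SSBD state permits it.
 */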

void arm64_set_ssbd_mitigation(bool state)
{
	if (!IS_ENABLED(CONFIG_ARM64_SSBD)) {
		pr_info_once("SSBD disabled by kernel configuration\n");
		return;
	}

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (state)
			asm volatile(SET_PSTATE_SSBS(0));
		else
			asm volatile(SET_PSTATE_SSBS(1));
		return;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, state, NULL);
		break;

	default:
		WARN_ON_ONCE(1);
		break;
	}
}

static bool has_ssbd_mitigation(const struct arm64_cpu_capabilities *entry,
				    int scope)
{
	struct arm_smccc_res res;
	bool required = true;
	s32 val;
	bool this_cpu_safe = false;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (cpu_mitigations_off())
		ssbd_state = ARM64_SSBD_FORCE_DISABLE;

	/* delay setting __ssb_safe until we get a firmware response */
	if (is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list))
		this_cpu_safe = true;

	if (this_cpu_has_cap(ARM64_SSBS)) {
		if (!this_cpu_safe)
			__ssb_safe = false;
		required = false;
		goto out_printmsg;
	}

	if (psci_ops.smccc_version == SMCCC_VERSION_1_0) {
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (psci_ops.conduit) {
	case PSCI_CONDUIT_HVC:
		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	case PSCI_CONDUIT_SMC:
		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
				  ARM_SMCCC_ARCH_WORKAROUND_2, &res);
		break;

	default:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	val = (s32)res.a0;

	switch (val) {
	case SMCCC_RET_NOT_SUPPORTED:
		ssbd_state = ARM64_SSBD_UNKNOWN;
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;

	/* machines with mixed mitigation requirements must not return this */
	case SMCCC_RET_NOT_REQUIRED:
		pr_info_once("%s mitigation not required\n", entry->desc);
		ssbd_state = ARM64_SSBD_MITIGATED;
		return false;

	case SMCCC_RET_SUCCESS:
		__ssb_safe = false;
		required = true;
		break;

	case 1:	/* Mitigation not required on this CPU */
		required = false;
		break;

	default:
		WARN_ON(1);
		if (!this_cpu_safe)
			__ssb_safe = false;
		return false;
	}

	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		arm64_set_ssbd_mitigation(false);
		required = false;
		break;

	case ARM64_SSBD_KERNEL:
		if (required) {
			__this_cpu_write(arm64_ssbd_callback_required, 1);
			arm64_set_ssbd_mitigation(true);
		}
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		arm64_set_ssbd_mitigation(true);
		required = true;
		break;

	default:
		WARN_ON(1);
		break;
	}

out_printmsg:
	switch (ssbd_state) {
	case ARM64_SSBD_FORCE_DISABLE:
		pr_info_once("%s disabled from command-line\n", entry->desc);
		break;

	case ARM64_SSBD_FORCE_ENABLE:
		pr_info_once("%s forced from command-line\n", entry->desc);
		break;
	}

	return required;
}

/* known invulnerable cores */
static const struct midr_range arm64_ssb_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	{},
};

#ifdef CONFIG_ARM64_ERRATUM_1463225
DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

static bool
has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
			       int scope)
{
	u32 midr = read_cpuid_id();
	/* Cortex-A76 r0p0 - r3p1 */
	struct midr_range range = MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1);

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range(midr, &range) && is_kernel_in_hyp_mode();
}
#endif

static void __maybe_unused
cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
{
	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
}

#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of midr ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
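
/*
 * Editorial example: the helper macros above are plain initializer
 * fragments. For instance,
 *
 *	ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4)
 *
 * expands to ERRATA_MIDR_RANGE(MIDR_CORTEX_A53, 0, 0, 0, 4), i.e. it sets
 * .type to ARM64_CPUCAP_LOCAL_CPU_ERRATUM, .matches to
 * is_affected_midr_range, and .midr_range to Cortex-A53 r0p0..r0p4.
 */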

/* Track overall mitigation state. We are only mitigated if all cores are ok */
static bool __hardenbp_enab = true;
static bool __spectrev2_safe = true;

int get_spectre_v2_workaround_state(void)
{
	if (__spectrev2_safe)
		return ARM64_BP_HARDEN_NOT_REQUIRED;

	if (!__hardenbp_enab)
		return ARM64_BP_HARDEN_UNKNOWN;

	return ARM64_BP_HARDEN_WA_NEEDED;
}

/*
 * List of CPUs that do not need any Spectre-v2 mitigation at all.
 */
static const struct midr_range spectre_v2_safe_list[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
	MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
	{ /* sentinel */ }
};

/*
 * Track overall bp hardening for all heterogeneous cores in the machine.
 * We are only considered "safe" if all booted cores are known safe.
 */
static bool __maybe_unused
check_branch_predictor(const struct arm64_cpu_capabilities *entry, int scope)
{
	int need_wa;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	/* If the CPU has CSV2 set, we're safe */
	if (cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64PFR0_EL1),
						 ID_AA64PFR0_CSV2_SHIFT))
		return false;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return false;

	/* Fallback to firmware detection */
	need_wa = detect_harden_bp_fw();
	if (!need_wa)
		return false;

	__spectrev2_safe = false;

	if (!IS_ENABLED(CONFIG_HARDEN_BRANCH_PREDICTOR)) {
		pr_warn_once("spectrev2 mitigation disabled by kernel configuration\n");
		__hardenbp_enab = false;
		return false;
	}

	/* forced off */
	if (__nospectre_v2 || cpu_mitigations_off()) {
		pr_info_once("spectrev2 mitigation disabled by command line option\n");
		__hardenbp_enab = false;
		return false;
	}

	if (need_wa < 0) {
		pr_warn_once("ARM_SMCCC_ARCH_WORKAROUND_1 missing from firmware\n");
		__hardenbp_enab = false;
	}

	return (need_wa > 0);
}

static const __maybe_unused struct midr_range tx2_family_cpus[] = {
	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
	{},
};

static bool __maybe_unused
needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
			 int scope)
{
	int i;

	if (!is_affected_midr_range_list(entry, scope) ||
	    !is_hyp_mode_available())
		return false;

	for_each_possible_cpu(i) {
		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
			return true;
	}

	return false;
}

#ifdef CONFIG_HARDEN_EL2_VECTORS

static const struct midr_range arm64_harden_el2_vectors[] = {
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};

#endif

#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1286807
	{
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
	},
#endif
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_27456
const struct midr_range cavium_erratum_27456_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
	/* Cavium ThunderX, T81 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_CAVIUM_ERRATUM_30115
static const struct midr_range cavium_erratum_30115_cpus[] = {
	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
	/* Cavium ThunderX, T83 pass 1.0 */
	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
	{},
};
#endif

#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
	{
		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
	},
	{
		.midr_range.model = MIDR_QCOM_KRYO,
		.matches = is_kryo_midr,
	},
	{},
};
#endif

#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
static const struct midr_range workaround_clean_cache[] = {
#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
	defined(CONFIG_ARM64_ERRATUM_827319) || \
	defined(CONFIG_ARM64_ERRATUM_824069)
	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
#endif
#ifdef	CONFIG_ARM64_ERRATUM_819472
	/* Cortex-A53 r0p[01]: ARM errata 819472 */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
#endif
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_1418040
/*
 * - 1188873 affects r0p0 to r2p0
 * - 1418040 affects r0p0 to r3p1
 */
static const struct midr_range erratum_1418040_list[] = {
	/* Cortex-A76 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
	/* Neoverse-N1 r0p0 to r3p1 */
	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_845719
static const struct midr_range erratum_845719_list[] = {
	/* Cortex-A53 r0p[01234] */
	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
	/* Brahma-B53 r0p[0] */
	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	{},
};
#endif

#ifdef CONFIG_ARM64_ERRATUM_843419
static const struct arm64_cpu_capabilities erratum_843419_list[] = {
	{
		/* Cortex-A53 r0p[01234] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
		MIDR_FIXED(0x4, BIT(8)),
	},
	{
		/* Brahma-B53 r0p[0] */
		.matches = is_affected_midr_range,
		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
	},
	{},
};
#endif

const struct arm64_cpu_capabilities arm64_errata[] = {
#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
	{
		.desc = "ARM errata 826319, 827319, 824069, 819472",
		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
		.cpu_enable = cpu_enable_cache_maint_trap,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_832075
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 832075",
		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_834220
	{
	/* Cortex-A57 r0p0 - r1p2 */
		.desc = "ARM erratum 834220",
		.capability = ARM64_WORKAROUND_834220,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
				  0, 0,
				  1, 2),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_843419
	{
		.desc = "ARM erratum 843419",
		.capability = ARM64_WORKAROUND_843419,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = erratum_843419_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_845719
	{
		.desc = "ARM erratum 845719",
		.capability = ARM64_WORKAROUND_845719,
		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_23154
	{
	/* Cavium ThunderX, pass 1.x */
		.desc = "Cavium erratum 23154",
		.capability = ARM64_WORKAROUND_CAVIUM_23154,
		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_27456
	{
		.desc = "Cavium erratum 27456",
		.capability = ARM64_WORKAROUND_CAVIUM_27456,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
	},
#endif
#ifdef CONFIG_CAVIUM_ERRATUM_30115
	{
		.desc = "Cavium erratum 30115",
		.capability = ARM64_WORKAROUND_CAVIUM_30115,
		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
	},
#endif
	{
		.desc = "Mismatched cache type (CTR_EL0)",
		.capability = ARM64_MISMATCHED_CACHE_TYPE,
		.matches = has_mismatched_cache_type,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.cpu_enable = cpu_enable_trap_ctr_access,
	},
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
	{
		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = qcom_erratum_1003_list,
	},
#endif
#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
	{
		.desc = "Qualcomm erratum 1009, ARM erratum 1286807",
		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = cpucap_multi_entry_cap_matches,
		.match_list = arm64_repeat_tlbi_list,
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_858921
	{
	/* Cortex-A73 all versions */
		.desc = "ARM erratum 858921",
		.capability = ARM64_WORKAROUND_858921,
		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
	},
#endif
	{
		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = check_branch_predictor,
	},
#ifdef CONFIG_HARDEN_EL2_VECTORS
	{
		.desc = "EL2 vector hardening",
		.capability = ARM64_HARDEN_EL2_VECTORS,
		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
	},
#endif
	{
		.desc = "Speculative Store Bypass Disable",
		.capability = ARM64_SSBD,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_ssbd_mitigation,
		.midr_range_list = arm64_ssb_cpus,
	},
#ifdef CONFIG_ARM64_ERRATUM_1418040
	{
		.desc = "ARM erratum 1418040",
		.capability = ARM64_WORKAROUND_1418040,
		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1165522
	{
		/* Cortex-A76 r0p0 to r2p0 */
		.desc = "ARM erratum 1165522",
		.capability = ARM64_WORKAROUND_1165522,
		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
	},
#endif
#ifdef CONFIG_ARM64_ERRATUM_1463225
	{
		.desc = "ARM erratum 1463225",
		.capability = ARM64_WORKAROUND_1463225,
		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
		.matches = has_cortex_a76_erratum_1463225,
	},
#endif
#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
	{
		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
		.matches = needs_tx2_tvm_workaround,
	},
	{
		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
	},
#endif
	{
	}
};

ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	switch (get_spectre_v2_workaround_state()) {
	case ARM64_BP_HARDEN_NOT_REQUIRED:
		return sprintf(buf, "Not affected\n");
	case ARM64_BP_HARDEN_WA_NEEDED:
		return sprintf(buf, "Mitigation: Branch predictor hardening\n");
	case ARM64_BP_HARDEN_UNKNOWN:
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

ssize_t cpu_show_spec_store_bypass(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (__ssb_safe)
		return sprintf(buf, "Not affected\n");

	switch (ssbd_state) {
	case ARM64_SSBD_KERNEL:
	case ARM64_SSBD_FORCE_ENABLE:
		if (IS_ENABLED(CONFIG_ARM64_SSBD))
			return sprintf(buf,
			    "Mitigation: Speculative Store Bypass disabled via prctl\n");
	}

	return sprintf(buf, "Vulnerable\n");
}
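
/*
 * Usage note (editorial): the three handlers above back the generic
 * vulnerability files under /sys/devices/system/cpu/vulnerabilities/,
 * e.g.:
 *
 *	$ cat /sys/devices/system/cpu/vulnerabilities/spectre_v2
 *	Mitigation: Branch predictor hardening
 *
 * with "Not affected" or "Vulnerable" reported on other configurations.
 */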