/* arch/arm64/kernel/cpu_errata.c — snapshot as of Linux v4.17 */
  1/*
  2 * Contains CPU specific errata definitions
  3 *
  4 * Copyright (C) 2014 ARM Ltd.
  5 *
  6 * This program is free software; you can redistribute it and/or modify
  7 * it under the terms of the GNU General Public License version 2 as
  8 * published by the Free Software Foundation.
  9 *
 10 * This program is distributed in the hope that it will be useful,
 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 13 * GNU General Public License for more details.
 14 *
 15 * You should have received a copy of the GNU General Public License
 16 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 17 */
 18
 
 19#include <linux/types.h>
 
 20#include <asm/cpu.h>
 21#include <asm/cputype.h>
 22#include <asm/cpufeature.h>
 
 
 23
 24static bool __maybe_unused
 25is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
 26{
 27	const struct arm64_midr_revidr *fix;
 28	u32 midr = read_cpuid_id(), revidr;
 29
 30	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 31	if (!is_midr_in_range(midr, &entry->midr_range))
 32		return false;
 33
 34	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
 35	revidr = read_cpuid(REVIDR_EL1);
 36	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
 37		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
 38			return false;
 39
 40	return true;
 41}
 42
 43static bool __maybe_unused
 44is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
 45			    int scope)
 46{
 47	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 48	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
 49}
 50
 51static bool __maybe_unused
 52is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
 53{
 54	u32 model;
 55
 56	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 57
 58	model = read_cpuid_id();
 59	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
 60		 MIDR_ARCHITECTURE_MASK;
 61
 62	return model == entry->midr_range.model;
 63}
 64
 65static bool
 66has_mismatched_cache_line_size(const struct arm64_cpu_capabilities *entry,
 67				int scope)
 68{
 69	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 70	return (read_cpuid_cachetype() & arm64_ftr_reg_ctrel0.strict_mask) !=
 71		(arm64_ftr_reg_ctrel0.sys_val & arm64_ftr_reg_ctrel0.strict_mask);
 72}
 73
 74static void
 75cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *__unused)
 76{
 77	/* Clear SCTLR_EL1.UCT */
 78	config_sctlr_el1(SCTLR_EL1_UCT, 0);
 79}
 80
 81atomic_t arm64_el2_vector_last_slot = ATOMIC_INIT(-1);
 82
 83#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 84#include <asm/mmu_context.h>
 85#include <asm/cacheflush.h>
 86
 87DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 88
 89#ifdef CONFIG_KVM_INDIRECT_VECTORS
 90extern char __smccc_workaround_1_smc_start[];
 91extern char __smccc_workaround_1_smc_end[];
 92
 93static void __copy_hyp_vect_bpi(int slot, const char *hyp_vecs_start,
 94				const char *hyp_vecs_end)
 95{
 96	void *dst = lm_alias(__bp_harden_hyp_vecs_start + slot * SZ_2K);
 97	int i;
 98
 99	for (i = 0; i < SZ_2K; i += 0x80)
100		memcpy(dst + i, hyp_vecs_start, hyp_vecs_end - hyp_vecs_start);
101
102	flush_icache_range((uintptr_t)dst, (uintptr_t)dst + SZ_2K);
103}
104
105static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
106				      const char *hyp_vecs_start,
107				      const char *hyp_vecs_end)
108{
109	static DEFINE_SPINLOCK(bp_lock);
110	int cpu, slot = -1;
111
112	spin_lock(&bp_lock);
113	for_each_possible_cpu(cpu) {
114		if (per_cpu(bp_hardening_data.fn, cpu) == fn) {
115			slot = per_cpu(bp_hardening_data.hyp_vectors_slot, cpu);
116			break;
117		}
118	}
119
120	if (slot == -1) {
121		slot = atomic_inc_return(&arm64_el2_vector_last_slot);
122		BUG_ON(slot >= BP_HARDEN_EL2_SLOTS);
123		__copy_hyp_vect_bpi(slot, hyp_vecs_start, hyp_vecs_end);
124	}
 
 
 
 
 
 
 
 
 
 
 
 
 
125
126	__this_cpu_write(bp_hardening_data.hyp_vectors_slot, slot);
127	__this_cpu_write(bp_hardening_data.fn, fn);
128	spin_unlock(&bp_lock);
129}
130#else
131#define __smccc_workaround_1_smc_start		NULL
132#define __smccc_workaround_1_smc_end		NULL
133
134static void __install_bp_hardening_cb(bp_hardening_cb_t fn,
135				      const char *hyp_vecs_start,
136				      const char *hyp_vecs_end)
137{
138	__this_cpu_write(bp_hardening_data.fn, fn);
139}
140#endif	/* CONFIG_KVM_INDIRECT_VECTORS */
141
142static void  install_bp_hardening_cb(const struct arm64_cpu_capabilities *entry,
143				     bp_hardening_cb_t fn,
144				     const char *hyp_vecs_start,
145				     const char *hyp_vecs_end)
146{
147	u64 pfr0;
148
149	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
150		return;
151
152	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
153	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_CSV2_SHIFT))
154		return;
 
155
156	__install_bp_hardening_cb(fn, hyp_vecs_start, hyp_vecs_end);
157}
158
159#include <uapi/linux/psci.h>
160#include <linux/arm-smccc.h>
161#include <linux/psci.h>
162
163static void call_smc_arch_workaround_1(void)
164{
165	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
166}
167
168static void call_hvc_arch_workaround_1(void)
169{
170	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
171}
172
173static void qcom_link_stack_sanitization(void)
 
 
 
174{
175	u64 tmp;
176
177	asm volatile("mov	%0, x30		\n"
178		     ".rept	16		\n"
179		     "bl	. + 4		\n"
180		     ".endr			\n"
181		     "mov	x30, %0		\n"
182		     : "=&r" (tmp));
183}
 
184
185static void
186enable_smccc_arch_workaround_1(const struct arm64_cpu_capabilities *entry)
187{
188	bp_hardening_cb_t cb;
189	void *smccc_start, *smccc_end;
190	struct arm_smccc_res res;
191	u32 midr = read_cpuid_id();
192
193	if (!entry->matches(entry, SCOPE_LOCAL_CPU))
194		return;
195
196	if (psci_ops.smccc_version == SMCCC_VERSION_1_0)
197		return;
198
199	switch (psci_ops.conduit) {
200	case PSCI_CONDUIT_HVC:
201		arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
202				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
203		if ((int)res.a0 < 0)
204			return;
205		cb = call_hvc_arch_workaround_1;
206		/* This is a guest, no need to patch KVM vectors */
207		smccc_start = NULL;
208		smccc_end = NULL;
209		break;
210
211	case PSCI_CONDUIT_SMC:
212		arm_smccc_1_1_smc(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
213				  ARM_SMCCC_ARCH_WORKAROUND_1, &res);
214		if ((int)res.a0 < 0)
215			return;
216		cb = call_smc_arch_workaround_1;
217		smccc_start = __smccc_workaround_1_smc_start;
218		smccc_end = __smccc_workaround_1_smc_end;
219		break;
220
221	default:
222		return;
223	}
224
225	if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
226	    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1))
227		cb = qcom_link_stack_sanitization;
228
229	install_bp_hardening_cb(entry, cb, smccc_start, smccc_end);
230
231	return;
232}
233#endif	/* CONFIG_HARDEN_BRANCH_PREDICTOR */
234
/* Capability matching a (variant, revision) window of one CPU model. */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

/* Capability matching every variant/revision of one CPU model. */
#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

/* Single fixed (revision, REVIDR mask) entry, NULL-terminated. */
#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of MIDR ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
271
272/*
273 * Generic helper for handling capabilties with multiple (match,enable) pairs
274 * of call backs, sharing the same capability bit.
275 * Iterate over each entry to see if at least one matches.
276 */
 
277static bool __maybe_unused
278multi_entry_cap_matches(const struct arm64_cpu_capabilities *entry, int scope)
 
279{
280	const struct arm64_cpu_capabilities *caps;
 
 
 
 
281
282	for (caps = entry->match_list; caps->matches; caps++)
283		if (caps->matches(caps, scope))
284			return true;
 
285
286	return false;
287}
288
289/*
290 * Take appropriate action for all matching entries in the shared capability
291 * entry.
292 */
293static void __maybe_unused
294multi_entry_cap_cpu_enable(const struct arm64_cpu_capabilities *entry)
295{
296	const struct arm64_cpu_capabilities *caps;
 
 
297
298	for (caps = entry->match_list; caps->matches; caps++)
299		if (caps->matches(caps, SCOPE_LOCAL_CPU) &&
300		    caps->cpu_enable)
301			caps->cpu_enable(caps);
302}
303
304#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
305
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
306/*
307 * List of CPUs where we need to issue a psci call to
308 * harden the branch predictor.
309 */
310static const struct midr_range arm64_bp_harden_smccc_cpus[] = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
311	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
312	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
313	MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
314	MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
315	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
316	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
317	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR_V1),
318	MIDR_ALL_VERSIONS(MIDR_QCOM_FALKOR),
319	MIDR_ALL_VERSIONS(MIDR_NVIDIA_DENVER),
320	{},
321};
 
322
 
 
 
 
 
 
 
 
323#endif
324
325#ifdef CONFIG_HARDEN_EL2_VECTORS
 
 
 
 
 
 
 
 
 
 
 
 
326
327static const struct midr_range arm64_harden_el2_vectors[] = {
328	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
329	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
330	{},
331};
 
332
 
 
 
 
 
 
 
 
 
 
 
333#endif
334
335const struct arm64_cpu_capabilities arm64_errata[] = {
336#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
337	defined(CONFIG_ARM64_ERRATUM_827319) || \
338	defined(CONFIG_ARM64_ERRATUM_824069)
339	{
340	/* Cortex-A53 r0p[012] */
341		.desc = "ARM errata 826319, 827319, 824069",
342		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
343		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
344		.cpu_enable = cpu_enable_cache_maint_trap,
345	},
346#endif
347#ifdef CONFIG_ARM64_ERRATUM_819472
348	{
349	/* Cortex-A53 r0p[01] */
350		.desc = "ARM errata 819472",
351		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
352		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
353		.cpu_enable = cpu_enable_cache_maint_trap,
354	},
355#endif
356#ifdef CONFIG_ARM64_ERRATUM_832075
357	{
358	/* Cortex-A57 r0p0 - r1p2 */
359		.desc = "ARM erratum 832075",
360		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
361		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
362				  0, 0,
363				  1, 2),
364	},
365#endif
366#ifdef CONFIG_ARM64_ERRATUM_834220
367	{
368	/* Cortex-A57 r0p0 - r1p2 */
369		.desc = "ARM erratum 834220",
370		.capability = ARM64_WORKAROUND_834220,
371		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
372				  0, 0,
373				  1, 2),
374	},
375#endif
376#ifdef CONFIG_ARM64_ERRATUM_843419
377	{
378	/* Cortex-A53 r0p[01234] */
379		.desc = "ARM erratum 843419",
380		.capability = ARM64_WORKAROUND_843419,
381		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
382		MIDR_FIXED(0x4, BIT(8)),
 
383	},
384#endif
385#ifdef CONFIG_ARM64_ERRATUM_845719
386	{
387	/* Cortex-A53 r0p[01234] */
388		.desc = "ARM erratum 845719",
389		.capability = ARM64_WORKAROUND_845719,
390		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
391	},
392#endif
393#ifdef CONFIG_CAVIUM_ERRATUM_23154
394	{
395	/* Cavium ThunderX, pass 1.x */
396		.desc = "Cavium erratum 23154",
397		.capability = ARM64_WORKAROUND_CAVIUM_23154,
398		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX, 0, 0, 1),
 
399	},
400#endif
401#ifdef CONFIG_CAVIUM_ERRATUM_27456
402	{
403	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
404		.desc = "Cavium erratum 27456",
405		.capability = ARM64_WORKAROUND_CAVIUM_27456,
406		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
407				  0, 0,
408				  1, 1),
409	},
410	{
411	/* Cavium ThunderX, T81 pass 1.0 */
412		.desc = "Cavium erratum 27456",
413		.capability = ARM64_WORKAROUND_CAVIUM_27456,
414		ERRATA_MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
415	},
416#endif
417#ifdef CONFIG_CAVIUM_ERRATUM_30115
418	{
419	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
420		.desc = "Cavium erratum 30115",
421		.capability = ARM64_WORKAROUND_CAVIUM_30115,
422		ERRATA_MIDR_RANGE(MIDR_THUNDERX,
423				      0, 0,
424				      1, 2),
425	},
426	{
427	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
428		.desc = "Cavium erratum 30115",
429		.capability = ARM64_WORKAROUND_CAVIUM_30115,
430		ERRATA_MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
431	},
432	{
433	/* Cavium ThunderX, T83 pass 1.0 */
434		.desc = "Cavium erratum 30115",
435		.capability = ARM64_WORKAROUND_CAVIUM_30115,
436		ERRATA_MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
437	},
438#endif
439	{
440		.desc = "Mismatched cache line size",
441		.capability = ARM64_MISMATCHED_CACHE_LINE_SIZE,
442		.matches = has_mismatched_cache_line_size,
443		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
444		.cpu_enable = cpu_enable_trap_ctr_access,
445	},
446#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
447	{
448		.desc = "Qualcomm Technologies Falkor erratum 1003",
449		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
450		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
451	},
452	{
453		.desc = "Qualcomm Technologies Kryo erratum 1003",
454		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
455		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
456		.midr_range.model = MIDR_QCOM_KRYO,
457		.matches = is_kryo_midr,
458	},
459#endif
460#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
461	{
462		.desc = "Qualcomm Technologies Falkor erratum 1009",
463		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
464		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
 
 
465	},
466#endif
467#ifdef CONFIG_ARM64_ERRATUM_858921
468	{
469	/* Cortex-A73 all versions */
470		.desc = "ARM erratum 858921",
471		.capability = ARM64_WORKAROUND_858921,
472		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
473	},
474#endif
475#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
476	{
477		.capability = ARM64_HARDEN_BRANCH_PREDICTOR,
 
 
 
 
 
 
 
 
 
 
478		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
479		.cpu_enable = enable_smccc_arch_workaround_1,
480		ERRATA_MIDR_RANGE_LIST(arm64_bp_harden_smccc_cpus),
481	},
482#endif
483#ifdef CONFIG_HARDEN_EL2_VECTORS
484	{
485		.desc = "EL2 vector hardening",
486		.capability = ARM64_HARDEN_EL2_VECTORS,
487		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
488		ERRATA_MIDR_RANGE_LIST(arm64_harden_el2_vectors),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
489	},
490#endif
491	{
492	}
493};
/* ===== Same file as of Linux v6.8 follows (second scraped snapshot) ===== */
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Contains CPU specific errata definitions
  4 *
  5 * Copyright (C) 2014 ARM Ltd.
 
 
 
 
 
 
 
 
 
 
 
 
  6 */
  7
  8#include <linux/arm-smccc.h>
  9#include <linux/types.h>
 10#include <linux/cpu.h>
 11#include <asm/cpu.h>
 12#include <asm/cputype.h>
 13#include <asm/cpufeature.h>
 14#include <asm/kvm_asm.h>
 15#include <asm/smp_plat.h>
 16
 17static bool __maybe_unused
 18is_affected_midr_range(const struct arm64_cpu_capabilities *entry, int scope)
 19{
 20	const struct arm64_midr_revidr *fix;
 21	u32 midr = read_cpuid_id(), revidr;
 22
 23	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 24	if (!is_midr_in_range(midr, &entry->midr_range))
 25		return false;
 26
 27	midr &= MIDR_REVISION_MASK | MIDR_VARIANT_MASK;
 28	revidr = read_cpuid(REVIDR_EL1);
 29	for (fix = entry->fixed_revs; fix && fix->revidr_mask; fix++)
 30		if (midr == fix->midr_rv && (revidr & fix->revidr_mask))
 31			return false;
 32
 33	return true;
 34}
 35
 36static bool __maybe_unused
 37is_affected_midr_range_list(const struct arm64_cpu_capabilities *entry,
 38			    int scope)
 39{
 40	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 41	return is_midr_in_range_list(read_cpuid_id(), entry->midr_range_list);
 42}
 43
 44static bool __maybe_unused
 45is_kryo_midr(const struct arm64_cpu_capabilities *entry, int scope)
 46{
 47	u32 model;
 48
 49	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 50
 51	model = read_cpuid_id();
 52	model &= MIDR_IMPLEMENTOR_MASK | (0xf00 << MIDR_PARTNUM_SHIFT) |
 53		 MIDR_ARCHITECTURE_MASK;
 54
 55	return model == entry->midr_range.model;
 56}
 57
 58static bool
 59has_mismatched_cache_type(const struct arm64_cpu_capabilities *entry,
 60			  int scope)
 
 
 
 
 
 
 
 
 61{
 62	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
 63	u64 sys = arm64_ftr_reg_ctrel0.sys_val & mask;
 64	u64 ctr_raw, ctr_real;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 65
 66	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 67
 68	/*
 69	 * We want to make sure that all the CPUs in the system expose
 70	 * a consistent CTR_EL0 to make sure that applications behaves
 71	 * correctly with migration.
 72	 *
 73	 * If a CPU has CTR_EL0.IDC but does not advertise it via CTR_EL0 :
 74	 *
 75	 * 1) It is safe if the system doesn't support IDC, as CPU anyway
 76	 *    reports IDC = 0, consistent with the rest.
 77	 *
 78	 * 2) If the system has IDC, it is still safe as we trap CTR_EL0
 79	 *    access on this CPU via the ARM64_HAS_CACHE_IDC capability.
 80	 *
 81	 * So, we need to make sure either the raw CTR_EL0 or the effective
 82	 * CTR_EL0 matches the system's copy to allow a secondary CPU to boot.
 83	 */
 84	ctr_raw = read_cpuid_cachetype() & mask;
 85	ctr_real = read_cpuid_effective_cachetype() & mask;
 86
 87	return (ctr_real != sys) && (ctr_raw != sys);
 
 
 
 
 
 
 
 
 
 
 
 
 88}
 
 89
 90static void
 91cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
 
 
 92{
 93	u64 mask = arm64_ftr_reg_ctrel0.strict_mask;
 94	bool enable_uct_trap = false;
 
 
 95
 96	/* Trap CTR_EL0 access on this CPU, only if it has a mismatch */
 97	if ((read_cpuid_cachetype() & mask) !=
 98	    (arm64_ftr_reg_ctrel0.sys_val & mask))
 99		enable_uct_trap = true;
100
101	/* ... or if the system is affected by an erratum */
102	if (cap->capability == ARM64_WORKAROUND_1542419)
103		enable_uct_trap = true;
 
 
 
104
105	if (enable_uct_trap)
106		sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCT, 0);
 
 
 
 
 
 
107}
108
109#ifdef CONFIG_ARM64_ERRATUM_1463225
110static bool
111has_cortex_a76_erratum_1463225(const struct arm64_cpu_capabilities *entry,
112			       int scope)
113{
114	return is_affected_midr_range_list(entry, scope) && is_kernel_in_hyp_mode();
 
 
 
 
 
 
 
115}
116#endif
117
118static void __maybe_unused
119cpu_enable_cache_maint_trap(const struct arm64_cpu_capabilities *__unused)
120{
121	sysreg_clear_set(sctlr_el1, SCTLR_EL1_UCI, 0);
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122}
 
123
/* Capability matching a (variant, revision) window of one CPU model. */
#define CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)	\
	.matches = is_affected_midr_range,			\
	.midr_range = MIDR_RANGE(model, v_min, r_min, v_max, r_max)

/* Capability matching every variant/revision of one CPU model. */
#define CAP_MIDR_ALL_VERSIONS(model)					\
	.matches = is_affected_midr_range,				\
	.midr_range = MIDR_ALL_VERSIONS(model)

/* Single fixed (revision, REVIDR mask) entry, NULL-terminated. */
#define MIDR_FIXED(rev, revidr_mask) \
	.fixed_revs = (struct arm64_midr_revidr[]){{ (rev), (revidr_mask) }, {}}

#define ERRATA_MIDR_RANGE(model, v_min, r_min, v_max, r_max)		\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,				\
	CAP_MIDR_RANGE(model, v_min, r_min, v_max, r_max)

#define CAP_MIDR_RANGE_LIST(list)				\
	.matches = is_affected_midr_range_list,			\
	.midr_range_list = list

/* Errata affecting a range of revisions of a given model variant */
#define ERRATA_MIDR_REV_RANGE(m, var, r_min, r_max)	 \
	ERRATA_MIDR_RANGE(m, var, r_min, var, r_max)

/* Errata affecting a single variant/revision of a model */
#define ERRATA_MIDR_REV(model, var, rev)	\
	ERRATA_MIDR_RANGE(model, var, rev, var, rev)

/* Errata affecting all variants/revisions of a given model */
#define ERRATA_MIDR_ALL_VERSIONS(model)				\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_ALL_VERSIONS(model)

/* Errata affecting a list of MIDR ranges, with the same workaround */
#define ERRATA_MIDR_RANGE_LIST(midr_list)			\
	.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,			\
	CAP_MIDR_RANGE_LIST(midr_list)
160
161static const __maybe_unused struct midr_range tx2_family_cpus[] = {
162	MIDR_ALL_VERSIONS(MIDR_BRCM_VULCAN),
163	MIDR_ALL_VERSIONS(MIDR_CAVIUM_THUNDERX2),
164	{},
165};
166
167static bool __maybe_unused
168needs_tx2_tvm_workaround(const struct arm64_cpu_capabilities *entry,
169			 int scope)
170{
171	int i;
172
173	if (!is_affected_midr_range_list(entry, scope) ||
174	    !is_hyp_mode_available())
175		return false;
176
177	for_each_possible_cpu(i) {
178		if (MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0) != 0)
179			return true;
180	}
181
182	return false;
183}
184
185static bool __maybe_unused
186has_neoverse_n1_erratum_1542419(const struct arm64_cpu_capabilities *entry,
187				int scope)
 
 
 
188{
189	u32 midr = read_cpuid_id();
190	bool has_dic = read_cpuid_cachetype() & BIT(CTR_EL0_DIC_SHIFT);
191	const struct midr_range range = MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1);
192
193	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
194	return is_midr_in_range(midr, &range) && has_dic;
 
 
195}
196
197#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
198static const struct arm64_cpu_capabilities arm64_repeat_tlbi_list[] = {
199#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1009
200	{
201		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0)
202	},
203	{
204		.midr_range.model = MIDR_QCOM_KRYO,
205		.matches = is_kryo_midr,
206	},
207#endif
208#ifdef CONFIG_ARM64_ERRATUM_1286807
209	{
210		ERRATA_MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 0),
211	},
212	{
213		/* Kryo4xx Gold (rcpe to rfpe) => (r0p0 to r3p0) */
214		ERRATA_MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xe),
215	},
216#endif
217#ifdef CONFIG_ARM64_ERRATUM_2441007
218	{
219		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
220	},
221#endif
222#ifdef CONFIG_ARM64_ERRATUM_2441009
223	{
224		/* Cortex-A510 r0p0 -> r1p1. Fixed in r1p2 */
225		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
226	},
227#endif
228	{},
229};
230#endif
231
232#ifdef CONFIG_CAVIUM_ERRATUM_23154
233static const struct midr_range cavium_erratum_23154_cpus[] = {
234	MIDR_ALL_VERSIONS(MIDR_THUNDERX),
235	MIDR_ALL_VERSIONS(MIDR_THUNDERX_81XX),
236	MIDR_ALL_VERSIONS(MIDR_THUNDERX_83XX),
237	MIDR_ALL_VERSIONS(MIDR_OCTX2_98XX),
238	MIDR_ALL_VERSIONS(MIDR_OCTX2_96XX),
239	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XX),
240	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXN),
241	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXMM),
242	MIDR_ALL_VERSIONS(MIDR_OCTX2_95XXO),
243	{},
244};
245#endif
246
247#ifdef CONFIG_CAVIUM_ERRATUM_27456
248const struct midr_range cavium_erratum_27456_cpus[] = {
249	/* Cavium ThunderX, T88 pass 1.x - 2.1 */
250	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 1),
251	/* Cavium ThunderX, T81 pass 1.0 */
252	MIDR_REV(MIDR_THUNDERX_81XX, 0, 0),
253	{},
254};
255#endif
256
257#ifdef CONFIG_CAVIUM_ERRATUM_30115
258static const struct midr_range cavium_erratum_30115_cpus[] = {
259	/* Cavium ThunderX, T88 pass 1.x - 2.2 */
260	MIDR_RANGE(MIDR_THUNDERX, 0, 0, 1, 2),
261	/* Cavium ThunderX, T81 pass 1.0 - 1.2 */
262	MIDR_REV_RANGE(MIDR_THUNDERX_81XX, 0, 0, 2),
263	/* Cavium ThunderX, T83 pass 1.0 */
264	MIDR_REV(MIDR_THUNDERX_83XX, 0, 0),
265	{},
266};
267#endif
268
269#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
270static const struct arm64_cpu_capabilities qcom_erratum_1003_list[] = {
271	{
272		ERRATA_MIDR_REV(MIDR_QCOM_FALKOR_V1, 0, 0),
273	},
274	{
275		.midr_range.model = MIDR_QCOM_KRYO,
276		.matches = is_kryo_midr,
277	},
278	{},
279};
280#endif
281
282#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
283static const struct midr_range workaround_clean_cache[] = {
284#if	defined(CONFIG_ARM64_ERRATUM_826319) || \
285	defined(CONFIG_ARM64_ERRATUM_827319) || \
286	defined(CONFIG_ARM64_ERRATUM_824069)
287	/* Cortex-A53 r0p[012]: ARM errata 826319, 827319, 824069 */
288	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 2),
289#endif
290#ifdef	CONFIG_ARM64_ERRATUM_819472
291	/* Cortex-A53 r0p[01] : ARM errata 819472 */
292	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 1),
293#endif
294	{},
295};
296#endif
297
298#ifdef CONFIG_ARM64_ERRATUM_1418040
299/*
300 * - 1188873 affects r0p0 to r2p0
301 * - 1418040 affects r0p0 to r3p1
302 */
303static const struct midr_range erratum_1418040_list[] = {
304	/* Cortex-A76 r0p0 to r3p1 */
305	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
306	/* Neoverse-N1 r0p0 to r3p1 */
307	MIDR_RANGE(MIDR_NEOVERSE_N1, 0, 0, 3, 1),
308	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
309	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
310	{},
311};
312#endif
313
314#ifdef CONFIG_ARM64_ERRATUM_845719
315static const struct midr_range erratum_845719_list[] = {
316	/* Cortex-A53 r0p[01234] */
317	MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
318	/* Brahma-B53 r0p[0] */
319	MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
320	/* Kryo2XX Silver rAp4 */
321	MIDR_REV(MIDR_QCOM_KRYO_2XX_SILVER, 0xa, 0x4),
322	{},
323};
324#endif
325
326#ifdef CONFIG_ARM64_ERRATUM_843419
327static const struct arm64_cpu_capabilities erratum_843419_list[] = {
328	{
329		/* Cortex-A53 r0p[01234] */
330		.matches = is_affected_midr_range,
331		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A53, 0, 0, 4),
332		MIDR_FIXED(0x4, BIT(8)),
333	},
334	{
335		/* Brahma-B53 r0p[0] */
336		.matches = is_affected_midr_range,
337		ERRATA_MIDR_REV(MIDR_BRAHMA_B53, 0, 0),
338	},
339	{},
340};
341#endif
342
343#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
344static const struct midr_range erratum_speculative_at_list[] = {
345#ifdef CONFIG_ARM64_ERRATUM_1165522
346	/* Cortex A76 r0p0 to r2p0 */
347	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 2, 0),
348#endif
349#ifdef CONFIG_ARM64_ERRATUM_1319367
350	MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
351	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
352#endif
353#ifdef CONFIG_ARM64_ERRATUM_1530923
354	/* Cortex A55 r0p0 to r2p0 */
355	MIDR_RANGE(MIDR_CORTEX_A55, 0, 0, 2, 0),
356	/* Kryo4xx Silver (rdpe => r1p0) */
357	MIDR_REV(MIDR_QCOM_KRYO_4XX_SILVER, 0xd, 0xe),
358#endif
359	{},
360};
361#endif
362
363#ifdef CONFIG_ARM64_ERRATUM_1463225
364static const struct midr_range erratum_1463225[] = {
365	/* Cortex-A76 r0p0 - r3p1 */
366	MIDR_RANGE(MIDR_CORTEX_A76, 0, 0, 3, 1),
367	/* Kryo4xx Gold (rcpe to rfpf) => (r0p0 to r3p1) */
368	MIDR_RANGE(MIDR_QCOM_KRYO_4XX_GOLD, 0xc, 0xe, 0xf, 0xf),
369	{},
370};
371#endif
372
373#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
374static const struct midr_range trbe_overwrite_fill_mode_cpus[] = {
375#ifdef CONFIG_ARM64_ERRATUM_2139208
376	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
377	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
378#endif
379#ifdef CONFIG_ARM64_ERRATUM_2119858
380	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
381	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
382#endif
383	{},
384};
385#endif	/* CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE */
386
387#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
388static const struct midr_range tsb_flush_fail_cpus[] = {
389#ifdef CONFIG_ARM64_ERRATUM_2067961
390	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
391	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
392#endif
393#ifdef CONFIG_ARM64_ERRATUM_2054223
394	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
395#endif
396	{},
397};
398#endif	/* CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE */
399
400#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
401static struct midr_range trbe_write_out_of_range_cpus[] = {
402#ifdef CONFIG_ARM64_ERRATUM_2253138
403	MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
404	MIDR_ALL_VERSIONS(MIDR_MICROSOFT_AZURE_COBALT_100),
405#endif
406#ifdef CONFIG_ARM64_ERRATUM_2224489
407	MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
408	MIDR_RANGE(MIDR_CORTEX_X2, 0, 0, 2, 0),
409#endif
410	{},
411};
412#endif /* CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE */
413
#ifdef CONFIG_ARM64_ERRATUM_1742098
/* CPUs with broken AArch32 AES instructions (erratum 1742098). */
static struct midr_range broken_aarch32_aes[] = {
	MIDR_RANGE(MIDR_CORTEX_A57, 0, 1, 0xf, 0xf),
	MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
	{},
};
/*
 * Fix: the closing comment previously named
 * CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE, copy-pasted from the
 * unrelated TRBE block above; it must name the guard actually opened here.
 */
#endif /* CONFIG_ARM64_ERRATUM_1742098 */
421
422#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
423static const struct midr_range erratum_spec_unpriv_load_list[] = {
424#ifdef CONFIG_ARM64_ERRATUM_3117295
425	MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
426#endif
427#ifdef CONFIG_ARM64_ERRATUM_2966298
428	/* Cortex-A520 r0p0 to r0p1 */
429	MIDR_REV_RANGE(MIDR_CORTEX_A520, 0, 0, 1),
430#endif
431	{},
432};
433#endif
434
435const struct arm64_cpu_capabilities arm64_errata[] = {
436#ifdef CONFIG_ARM64_WORKAROUND_CLEAN_CACHE
 
 
 
 
 
 
 
 
 
 
 
437	{
438		.desc = "ARM errata 826319, 827319, 824069, or 819472",
 
439		.capability = ARM64_WORKAROUND_CLEAN_CACHE,
440		ERRATA_MIDR_RANGE_LIST(workaround_clean_cache),
441		.cpu_enable = cpu_enable_cache_maint_trap,
442	},
443#endif
444#ifdef CONFIG_ARM64_ERRATUM_832075
445	{
446	/* Cortex-A57 r0p0 - r1p2 */
447		.desc = "ARM erratum 832075",
448		.capability = ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE,
449		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
450				  0, 0,
451				  1, 2),
452	},
453#endif
454#ifdef CONFIG_ARM64_ERRATUM_834220
455	{
456	/* Cortex-A57 r0p0 - r1p2 */
457		.desc = "ARM erratum 834220",
458		.capability = ARM64_WORKAROUND_834220,
459		ERRATA_MIDR_RANGE(MIDR_CORTEX_A57,
460				  0, 0,
461				  1, 2),
462	},
463#endif
464#ifdef CONFIG_ARM64_ERRATUM_843419
465	{
 
466		.desc = "ARM erratum 843419",
467		.capability = ARM64_WORKAROUND_843419,
468		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
469		.matches = cpucap_multi_entry_cap_matches,
470		.match_list = erratum_843419_list,
471	},
472#endif
473#ifdef CONFIG_ARM64_ERRATUM_845719
474	{
 
475		.desc = "ARM erratum 845719",
476		.capability = ARM64_WORKAROUND_845719,
477		ERRATA_MIDR_RANGE_LIST(erratum_845719_list),
478	},
479#endif
480#ifdef CONFIG_CAVIUM_ERRATUM_23154
481	{
482		.desc = "Cavium errata 23154 and 38545",
 
483		.capability = ARM64_WORKAROUND_CAVIUM_23154,
484		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
485		ERRATA_MIDR_RANGE_LIST(cavium_erratum_23154_cpus),
486	},
487#endif
488#ifdef CONFIG_CAVIUM_ERRATUM_27456
489	{
 
490		.desc = "Cavium erratum 27456",
491		.capability = ARM64_WORKAROUND_CAVIUM_27456,
492		ERRATA_MIDR_RANGE_LIST(cavium_erratum_27456_cpus),
 
 
 
 
 
 
 
 
493	},
494#endif
495#ifdef CONFIG_CAVIUM_ERRATUM_30115
496	{
 
497		.desc = "Cavium erratum 30115",
498		.capability = ARM64_WORKAROUND_CAVIUM_30115,
499		ERRATA_MIDR_RANGE_LIST(cavium_erratum_30115_cpus),
 
 
 
 
 
 
 
 
 
 
 
 
 
 
500	},
501#endif
502	{
503		.desc = "Mismatched cache type (CTR_EL0)",
504		.capability = ARM64_MISMATCHED_CACHE_TYPE,
505		.matches = has_mismatched_cache_type,
506		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
507		.cpu_enable = cpu_enable_trap_ctr_access,
508	},
509#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
510	{
511		.desc = "Qualcomm Technologies Falkor/Kryo erratum 1003",
 
 
 
 
 
512		.capability = ARM64_WORKAROUND_QCOM_FALKOR_E1003,
513		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
514		.matches = cpucap_multi_entry_cap_matches,
515		.match_list = qcom_erratum_1003_list,
516	},
517#endif
518#ifdef CONFIG_ARM64_WORKAROUND_REPEAT_TLBI
519	{
520		.desc = "Qualcomm erratum 1009, or ARM erratum 1286807, 2441009",
521		.capability = ARM64_WORKAROUND_REPEAT_TLBI,
522		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
523		.matches = cpucap_multi_entry_cap_matches,
524		.match_list = arm64_repeat_tlbi_list,
525	},
526#endif
527#ifdef CONFIG_ARM64_ERRATUM_858921
528	{
529	/* Cortex-A73 all versions */
530		.desc = "ARM erratum 858921",
531		.capability = ARM64_WORKAROUND_858921,
532		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
533	},
534#endif
 
535	{
536		.desc = "Spectre-v2",
537		.capability = ARM64_SPECTRE_V2,
538		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
539		.matches = has_spectre_v2,
540		.cpu_enable = spectre_v2_enable_mitigation,
541	},
542#ifdef CONFIG_RANDOMIZE_BASE
543	{
544	/* Must come after the Spectre-v2 entry */
545		.desc = "Spectre-v3a",
546		.capability = ARM64_SPECTRE_V3A,
547		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
548		.matches = has_spectre_v3a,
549		.cpu_enable = spectre_v3a_enable_mitigation,
550	},
551#endif
 
552	{
553		.desc = "Spectre-v4",
554		.capability = ARM64_SPECTRE_V4,
555		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
556		.matches = has_spectre_v4,
557		.cpu_enable = spectre_v4_enable_mitigation,
558	},
559	{
560		.desc = "Spectre-BHB",
561		.capability = ARM64_SPECTRE_BHB,
562		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
563		.matches = is_spectre_bhb_affected,
564		.cpu_enable = spectre_bhb_enable_mitigation,
565	},
566#ifdef CONFIG_ARM64_ERRATUM_1418040
567	{
568		.desc = "ARM erratum 1418040",
569		.capability = ARM64_WORKAROUND_1418040,
570		ERRATA_MIDR_RANGE_LIST(erratum_1418040_list),
571		/*
572		 * We need to allow affected CPUs to come in late, but
573		 * also need the non-affected CPUs to be able to come
574		 * in at any point in time. Wonderful.
575		 */
576		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
577	},
578#endif
579#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_AT
580	{
581		.desc = "ARM errata 1165522, 1319367, or 1530923",
582		.capability = ARM64_WORKAROUND_SPECULATIVE_AT,
583		ERRATA_MIDR_RANGE_LIST(erratum_speculative_at_list),
584	},
585#endif
586#ifdef CONFIG_ARM64_ERRATUM_1463225
587	{
588		.desc = "ARM erratum 1463225",
589		.capability = ARM64_WORKAROUND_1463225,
590		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
591		.matches = has_cortex_a76_erratum_1463225,
592		.midr_range_list = erratum_1463225,
593	},
594#endif
595#ifdef CONFIG_CAVIUM_TX2_ERRATUM_219
596	{
597		.desc = "Cavium ThunderX2 erratum 219 (KVM guest sysreg trapping)",
598		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_TVM,
599		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
600		.matches = needs_tx2_tvm_workaround,
601	},
602	{
603		.desc = "Cavium ThunderX2 erratum 219 (PRFM removal)",
604		.capability = ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM,
605		ERRATA_MIDR_RANGE_LIST(tx2_family_cpus),
606	},
607#endif
608#ifdef CONFIG_ARM64_ERRATUM_1542419
609	{
610		/* we depend on the firmware portion for correctness */
611		.desc = "ARM erratum 1542419 (kernel portion)",
612		.capability = ARM64_WORKAROUND_1542419,
613		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
614		.matches = has_neoverse_n1_erratum_1542419,
615		.cpu_enable = cpu_enable_trap_ctr_access,
616	},
617#endif
618#ifdef CONFIG_ARM64_ERRATUM_1508412
619	{
620		/* we depend on the firmware portion for correctness */
621		.desc = "ARM erratum 1508412 (kernel portion)",
622		.capability = ARM64_WORKAROUND_1508412,
623		ERRATA_MIDR_RANGE(MIDR_CORTEX_A77,
624				  0, 0,
625				  1, 0),
626	},
627#endif
628#ifdef CONFIG_NVIDIA_CARMEL_CNP_ERRATUM
629	{
630		/* NVIDIA Carmel */
631		.desc = "NVIDIA Carmel CNP erratum",
632		.capability = ARM64_WORKAROUND_NVIDIA_CARMEL_CNP,
633		ERRATA_MIDR_ALL_VERSIONS(MIDR_NVIDIA_CARMEL),
634	},
635#endif
636#ifdef CONFIG_ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE
637	{
638		/*
639		 * The erratum work around is handled within the TRBE
640		 * driver and can be applied per-cpu. So, we can allow
641		 * a late CPU to come online with this erratum.
642		 */
643		.desc = "ARM erratum 2119858 or 2139208",
644		.capability = ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE,
645		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
646		CAP_MIDR_RANGE_LIST(trbe_overwrite_fill_mode_cpus),
647	},
648#endif
649#ifdef CONFIG_ARM64_WORKAROUND_TSB_FLUSH_FAILURE
650	{
651		.desc = "ARM erratum 2067961 or 2054223",
652		.capability = ARM64_WORKAROUND_TSB_FLUSH_FAILURE,
653		ERRATA_MIDR_RANGE_LIST(tsb_flush_fail_cpus),
654	},
655#endif
656#ifdef CONFIG_ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE
657	{
658		.desc = "ARM erratum 2253138 or 2224489",
659		.capability = ARM64_WORKAROUND_TRBE_WRITE_OUT_OF_RANGE,
660		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
661		CAP_MIDR_RANGE_LIST(trbe_write_out_of_range_cpus),
662	},
663#endif
664#ifdef CONFIG_ARM64_ERRATUM_2645198
665	{
666		.desc = "ARM erratum 2645198",
667		.capability = ARM64_WORKAROUND_2645198,
668		ERRATA_MIDR_ALL_VERSIONS(MIDR_CORTEX_A715)
669	},
670#endif
671#ifdef CONFIG_ARM64_ERRATUM_2077057
672	{
673		.desc = "ARM erratum 2077057",
674		.capability = ARM64_WORKAROUND_2077057,
675		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2),
676	},
677#endif
678#ifdef CONFIG_ARM64_ERRATUM_2064142
679	{
680		.desc = "ARM erratum 2064142",
681		.capability = ARM64_WORKAROUND_2064142,
682
683		/* Cortex-A510 r0p0 - r0p2 */
684		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
685	},
686#endif
687#ifdef CONFIG_ARM64_ERRATUM_2457168
688	{
689		.desc = "ARM erratum 2457168",
690		.capability = ARM64_WORKAROUND_2457168,
691		.type = ARM64_CPUCAP_WEAK_LOCAL_CPU_FEATURE,
692
693		/* Cortex-A510 r0p0-r1p1 */
694		CAP_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1)
695	},
696#endif
697#ifdef CONFIG_ARM64_ERRATUM_2038923
698	{
699		.desc = "ARM erratum 2038923",
700		.capability = ARM64_WORKAROUND_2038923,
701
702		/* Cortex-A510 r0p0 - r0p2 */
703		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 2)
704	},
705#endif
706#ifdef CONFIG_ARM64_ERRATUM_1902691
707	{
708		.desc = "ARM erratum 1902691",
709		.capability = ARM64_WORKAROUND_1902691,
710
711		/* Cortex-A510 r0p0 - r0p1 */
712		ERRATA_MIDR_REV_RANGE(MIDR_CORTEX_A510, 0, 0, 1)
713	},
714#endif
715#ifdef CONFIG_ARM64_ERRATUM_1742098
716	{
717		.desc = "ARM erratum 1742098",
718		.capability = ARM64_WORKAROUND_1742098,
719		CAP_MIDR_RANGE_LIST(broken_aarch32_aes),
720		.type = ARM64_CPUCAP_LOCAL_CPU_ERRATUM,
721	},
722#endif
723#ifdef CONFIG_ARM64_ERRATUM_2658417
724	{
725		.desc = "ARM erratum 2658417",
726		.capability = ARM64_WORKAROUND_2658417,
727		/* Cortex-A510 r0p0 - r1p1 */
728		ERRATA_MIDR_RANGE(MIDR_CORTEX_A510, 0, 0, 1, 1),
729		MIDR_FIXED(MIDR_CPU_VAR_REV(1,1), BIT(25)),
730	},
731#endif
732#ifdef CONFIG_ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD
733	{
734		.desc = "ARM errata 2966298, 3117295",
735		.capability = ARM64_WORKAROUND_SPECULATIVE_UNPRIV_LOAD,
736		/* Cortex-A520 r0p0 - r0p1 */
737		ERRATA_MIDR_RANGE_LIST(erratum_spec_unpriv_load_list),
738	},
739#endif
740#ifdef CONFIG_AMPERE_ERRATUM_AC03_CPU_38
741	{
742		.desc = "AmpereOne erratum AC03_CPU_38",
743		.capability = ARM64_WORKAROUND_AMPERE_AC03_CPU_38,
744		ERRATA_MIDR_ALL_VERSIONS(MIDR_AMPERE1),
745	},
746#endif
747	{
748	}
749};