// SPDX-License-Identifier: GPL-2.0-only
/*
 * Handle detection, reporting and mitigation of Spectre v1, v2, v3a and v4, as
 * detailed at:
 *
 *   https://developer.arm.com/support/arm-security-updates/speculative-processor-vulnerability
 *
 * This code was originally written hastily under an awful lot of stress and so
 * aspects of it are somewhat hacky. Unfortunately, changing anything in here
 * instantly makes me feel ill. Thanks, Jann. Thann.
 *
 * Copyright (C) 2018 ARM Ltd, All Rights Reserved.
 * Copyright (C) 2020 Google LLC
 *
 * "If there's something strange in your neighbourhood, who you gonna call?"
 *
 * Authors: Will Deacon <will@kernel.org> and Marc Zyngier <maz@kernel.org>
 */

#include <linux/arm-smccc.h>
#include <linux/bpf.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/nospec.h>
#include <linux/prctl.h>
#include <linux/sched/task_stack.h>

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/spectre.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/*
 * We try to ensure that the mitigation state can never change as the result of
 * onlining a late CPU.
 */
static void update_mitigation_state(enum mitigation_state *oldp,
				    enum mitigation_state new)
{
	enum mitigation_state state;

	do {
		state = READ_ONCE(*oldp);
		if (new <= state)
			break;

		/* Userspace almost certainly can't deal with this. */
		if (WARN_ON(system_capabilities_finalized()))
			break;
	} while (cmpxchg_relaxed(oldp, state, new) != state);
}
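
/*
 * Editor's note, a minimal sketch of the intended monotonicity, assuming the
 * enum ordering UNAFFECTED < MITIGATED < VULNERABLE from <asm/spectre.h>:
 * the state only ever ratchets towards "worse", and refuses (with a WARN)
 * to get worse once the system capabilities have been finalized.
 *
 *	enum mitigation_state s = SPECTRE_UNAFFECTED;
 *
 *	update_mitigation_state(&s, SPECTRE_MITIGATED);	 // s == SPECTRE_MITIGATED
 *	update_mitigation_state(&s, SPECTRE_UNAFFECTED); // no-op: new <= state
 *	update_mitigation_state(&s, SPECTRE_VULNERABLE); // s == SPECTRE_VULNERABLE
 */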

/*
 * Spectre v1.
 *
 * The kernel can't protect userspace for this one: it's each person for
 * themselves. Advertise what we're doing and be done with it.
 */
ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	return sprintf(buf, "Mitigation: __user pointer sanitization\n");
}
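
/*
 * Editor's note, a hedged usage sketch: the string above is what shows up in
 * sysfs. A hypothetical standalone userspace program (not part of this file)
 * could read it like so:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		char line[128];
 *		FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v1", "r");
 *
 *		if (f && fgets(line, sizeof(line), f))
 *			printf("%s", line);	// "Mitigation: __user pointer sanitization"
 *		if (f)
 *			fclose(f);
 *		return 0;
 *	}
 */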

/*
 * Spectre v2.
 *
 * This one sucks. A CPU is either:
 *
 * - Mitigated in hardware and advertised by ID_AA64PFR0_EL1.CSV2.
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in software by firmware.
 * - Mitigated in software by a CPU-specific dance in the kernel and a
 *   firmware call at EL2.
 * - Vulnerable.
 *
 * It's not unlikely for different CPUs in a big.LITTLE system to fall into
 * different camps.
 */
static enum mitigation_state spectre_v2_state;

static bool __read_mostly __nospectre_v2;
static int __init parse_spectre_v2_param(char *str)
{
	__nospectre_v2 = true;
	return 0;
}
early_param("nospectre_v2", parse_spectre_v2_param);

static bool spectre_v2_mitigations_off(void)
{
	bool ret = __nospectre_v2 || cpu_mitigations_off();

	if (ret)
		pr_info_once("spectre-v2 mitigation disabled by command line option\n");

	return ret;
}

static const char *get_bhb_affected_string(enum mitigation_state bhb_state)
{
	switch (bhb_state) {
	case SPECTRE_UNAFFECTED:
		return "";
	default:
	case SPECTRE_VULNERABLE:
		return ", but not BHB";
	case SPECTRE_MITIGATED:
		return ", BHB";
	}
}

static bool _unprivileged_ebpf_enabled(void)
{
#ifdef CONFIG_BPF_SYSCALL
	return !sysctl_unprivileged_bpf_disabled;
#else
	return false;
#endif
}

ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	enum mitigation_state bhb_state = arm64_get_spectre_bhb_state();
	const char *bhb_str = get_bhb_affected_string(bhb_state);
	const char *v2_str = "Branch predictor hardening";

	switch (spectre_v2_state) {
	case SPECTRE_UNAFFECTED:
		if (bhb_state == SPECTRE_UNAFFECTED)
			return sprintf(buf, "Not affected\n");

		/*
		 * Platforms affected by Spectre-BHB can't report
		 * "Not affected" for Spectre-v2.
		 */
		v2_str = "CSV2";
		fallthrough;
	case SPECTRE_MITIGATED:
		if (bhb_state == SPECTRE_MITIGATED && _unprivileged_ebpf_enabled())
			return sprintf(buf, "Vulnerable: Unprivileged eBPF enabled\n");

		return sprintf(buf, "Mitigation: %s%s\n", v2_str, bhb_str);
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}
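
/*
 * Editor's note, worked examples of the strings the function above can emit,
 * derived from its cases:
 *
 *	"Not affected"                                - CSV2 CPU, BHB unaffected
 *	"Mitigation: CSV2, BHB"                       - CSV2 CPU, BHB mitigated
 *	"Mitigation: CSV2, but not BHB"               - CSV2 CPU, BHB vulnerable
 *	"Mitigation: Branch predictor hardening, BHB" - both mitigated
 *	"Vulnerable: Unprivileged eBPF enabled"       - BHB mitigation defeated
 *	"Vulnerable"                                  - no Spectre-v2 mitigation
 */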

static enum mitigation_state spectre_v2_get_cpu_hw_mitigation_state(void)
{
	u64 pfr0;
	static const struct midr_range spectre_v2_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_HISI_TSV110),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_2XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ }
	};

	/* If the CPU has CSV2 set, we're safe */
	pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	if (cpuid_feature_extract_unsigned_field(pfr0, ID_AA64PFR0_EL1_CSV2_SHIFT))
		return SPECTRE_UNAFFECTED;

	/* Alternatively, we have a list of unaffected CPUs */
	if (is_midr_in_range_list(read_cpuid_id(), spectre_v2_safe_list))
		return SPECTRE_UNAFFECTED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v2_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_1, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v2(const struct arm64_cpu_capabilities *entry, int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (spectre_v2_get_cpu_hw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	if (spectre_v2_get_cpu_fw_mitigation_state() == SPECTRE_UNAFFECTED)
		return false;

	return true;
}

enum mitigation_state arm64_get_spectre_v2_state(void)
{
	return spectre_v2_state;
}

DEFINE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);

static void install_bp_hardening_cb(bp_hardening_cb_t fn)
{
	__this_cpu_write(bp_hardening_data.fn, fn);

	/*
	 * Vinz Clortho takes the hyp_vecs start/end "keys" at
	 * the door when we're a guest. Skip the hyp-vectors work.
	 */
	if (!is_hyp_mode_available())
		return;

	__this_cpu_write(bp_hardening_data.slot, HYP_VECTOR_SPECTRE_DIRECT);
}

/* Called during entry so must be noinstr */
static noinstr void call_smc_arch_workaround_1(void)
{
	arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Called during entry so must be noinstr */
static noinstr void call_hvc_arch_workaround_1(void)
{
	arm_smccc_1_1_hvc(ARM_SMCCC_ARCH_WORKAROUND_1, NULL);
}

/* Called during entry so must be noinstr */
static noinstr void qcom_link_stack_sanitisation(void)
{
	u64 tmp;

	asm volatile("mov	%0, x30		\n"
		     ".rept	16		\n"
		     "bl	. + 4		\n"
		     ".endr			\n"
		     "mov	x30, %0		\n"
		     : "=&r" (tmp));
}
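
/*
 * Editor's note on the sequence above: each "bl . + 4" is a branch-and-link
 * to the very next instruction, so its only architectural effect is to push
 * a new entry onto the return-address stack. Repeating it sixteen times
 * stuffs the speculative return stack on the affected Falkor parts, and the
 * real return address is preserved by saving x30 into %0 beforehand and
 * restoring it afterwards.
 */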

static bp_hardening_cb_t spectre_v2_get_sw_mitigation_cb(void)
{
	u32 midr = read_cpuid_id();

	if (((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR) &&
	    ((midr & MIDR_CPU_MODEL_MASK) != MIDR_QCOM_FALKOR_V1))
		return NULL;

	return qcom_link_stack_sanitisation;
}

static enum mitigation_state spectre_v2_enable_fw_mitigation(void)
{
	bp_hardening_cb_t cb;
	enum mitigation_state state;

	state = spectre_v2_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v2_mitigations_off())
		return SPECTRE_VULNERABLE;

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		cb = call_hvc_arch_workaround_1;
		break;

	case SMCCC_CONDUIT_SMC:
		cb = call_smc_arch_workaround_1;
		break;

	default:
		return SPECTRE_VULNERABLE;
	}

	/*
	 * Prefer a CPU-specific workaround if it exists. Note that we
	 * still rely on firmware for the mitigation at EL2.
	 */
	cb = spectre_v2_get_sw_mitigation_cb() ?: cb;
	install_bp_hardening_cb(cb);
	return SPECTRE_MITIGATED;
}

void spectre_v2_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v2_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v2_enable_fw_mitigation();

	update_mitigation_state(&spectre_v2_state, state);
}

/*
 * Spectre-v3a.
 *
 * Phew, there's not an awful lot to do here! We just instruct EL2 to use
 * an indirect trampoline for the hyp vectors so that guests can't read
 * VBAR_EL2 to defeat randomisation of the hypervisor VA layout.
 */
bool has_spectre_v3a(const struct arm64_cpu_capabilities *entry, int scope)
{
	static const struct midr_range spectre_v3a_unsafe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		{},
	};

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());
	return is_midr_in_range_list(read_cpuid_id(), spectre_v3a_unsafe_list);
}

void spectre_v3a_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (this_cpu_has_cap(ARM64_SPECTRE_V3A))
		data->slot += HYP_VECTOR_INDIRECT;
}

/*
 * Spectre v4.
 *
 * If you thought Spectre v2 was nasty, wait until you see this mess. A CPU is
 * either:
 *
 * - Mitigated in hardware and listed in our "safe list".
 * - Mitigated in hardware via PSTATE.SSBS.
 * - Mitigated in software by firmware (sometimes referred to as SSBD).
 *
 * Wait, that doesn't sound so bad, does it? Keep reading...
 *
 * A major source of headaches is that the software mitigation is enabled on a
 * per-task basis, but can also be forced on for the kernel, necessitating
 * both context-switch *and* entry/exit hooks. To make it even worse, some CPUs
 * allow EL0 to toggle SSBS directly, which can end up with the prctl() state
 * being stale when re-entering the kernel. The usual big.LITTLE caveats apply,
 * so you can have systems that have both firmware and SSBS mitigations. This
 * means we actually have to reject late onlining of CPUs with mitigations if
 * all of the currently onlined CPUs are safelisted, as the mitigation tends to
 * be opt-in for userspace. Yes, really, the cure is worse than the disease.
 *
 * The only good part is that if the firmware mitigation is present, then it is
 * present for all CPUs, meaning we don't have to worry about late onlining of a
 * vulnerable CPU if one of the boot CPUs is using the firmware mitigation.
 *
 * Give me a VAX-11/780 any day of the week...
 */
static enum mitigation_state spectre_v4_state;

/* This is the per-cpu state tracking whether we need to talk to firmware */
DEFINE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);

enum spectre_v4_policy {
	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC,
	SPECTRE_V4_POLICY_MITIGATION_ENABLED,
	SPECTRE_V4_POLICY_MITIGATION_DISABLED,
};

static enum spectre_v4_policy __read_mostly __spectre_v4_policy;

static const struct spectre_v4_param {
	const char		*str;
	enum spectre_v4_policy	policy;
} spectre_v4_params[] = {
	{ "force-on",	SPECTRE_V4_POLICY_MITIGATION_ENABLED, },
	{ "force-off",	SPECTRE_V4_POLICY_MITIGATION_DISABLED, },
	{ "kernel",	SPECTRE_V4_POLICY_MITIGATION_DYNAMIC, },
};
static int __init parse_spectre_v4_param(char *str)
{
	int i;

	if (!str || !str[0])
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(spectre_v4_params); i++) {
		const struct spectre_v4_param *param = &spectre_v4_params[i];

		if (strncmp(str, param->str, strlen(param->str)))
			continue;

		__spectre_v4_policy = param->policy;
		return 0;
	}

	return -EINVAL;
}
early_param("ssbd", parse_spectre_v4_param);
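
/*
 * Editor's note, usage examples for the parameter parsed above (matching is
 * by prefix, via strncmp()):
 *
 *	ssbd=force-on	- mitigation forced on for kernel and userspace
 *	ssbd=force-off	- mitigation forced off; prctl() requests are refused
 *	ssbd=kernel	- mitigation toggled dynamically on kernel entry/exit
 */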

/*
 * Because this was all written in a rush by people working in different silos,
 * we've ended up with multiple command line options to control the same thing.
 * Wrap these up in some helpers, which prefer disabling the mitigation if faced
 * with contradictory parameters. The mitigation is always either "off",
 * "dynamic" or "on".
 */
static bool spectre_v4_mitigations_off(void)
{
	bool ret = cpu_mitigations_off() ||
		   __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DISABLED;

	if (ret)
		pr_info_once("spectre-v4 mitigation disabled by command-line option\n");

	return ret;
}

/* Do we need to toggle the mitigation state on entry to/exit from the kernel? */
static bool spectre_v4_mitigations_dynamic(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_DYNAMIC;
}

static bool spectre_v4_mitigations_on(void)
{
	return !spectre_v4_mitigations_off() &&
	       __spectre_v4_policy == SPECTRE_V4_POLICY_MITIGATION_ENABLED;
}
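
/*
 * Editor's note, a worked truth table for the three helpers above, assuming
 * "mitigations=off" is not also set (which would force the "off" column):
 *
 *	ssbd=		off()	dynamic()	on()
 *	------------------------------------------------
 *	force-off	true	false		false
 *	kernel		false	true		false
 *	force-on	false	false		true
 *	(unset)		false	true		false
 *
 * The "(unset)" row follows from SPECTRE_V4_POLICY_MITIGATION_DYNAMIC being
 * the first enumerator, and hence the zero-initialised default.
 */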

ssize_t cpu_show_spec_store_bypass(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return sprintf(buf, "Not affected\n");
	case SPECTRE_MITIGATED:
		return sprintf(buf, "Mitigation: Speculative Store Bypass disabled via prctl\n");
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return sprintf(buf, "Vulnerable\n");
	}
}

enum mitigation_state arm64_get_spectre_v4_state(void)
{
	return spectre_v4_state;
}

static enum mitigation_state spectre_v4_get_cpu_hw_mitigation_state(void)
{
	static const struct midr_range spectre_v4_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_3XX_SILVER),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_SILVER),
		{ /* sentinel */ },
	};

	if (is_midr_in_range_list(read_cpuid_id(), spectre_v4_safe_list))
		return SPECTRE_UNAFFECTED;

	/* CPU features are detected first */
	if (this_cpu_has_cap(ARM64_SSBS))
		return SPECTRE_MITIGATED;

	return SPECTRE_VULNERABLE;
}

static enum mitigation_state spectre_v4_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_2, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		fallthrough;
	case SMCCC_RET_NOT_REQUIRED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

bool has_spectre_v4(const struct arm64_cpu_capabilities *cap, int scope)
{
	enum mitigation_state state;

	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_get_cpu_fw_mitigation_state();

	return state != SPECTRE_UNAFFECTED;
}

bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr)
{
	const u32 instr_mask = ~(1U << PSTATE_Imm_shift);
	const u32 instr_val = 0xd500401f | PSTATE_SSBS;

	if ((instr & instr_mask) != instr_val)
		return false;

	if (instr & BIT(PSTATE_Imm_shift))
		regs->pstate |= PSR_SSBS_BIT;
	else
		regs->pstate &= ~PSR_SSBS_BIT;

	arm64_skip_faulting_instruction(regs, 4);
	return true;
}
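
/*
 * Editor's note on the match above: 0xd500401f is the MSR (immediate)
 * encoding with Rt == 0b11111 and a zero immediate, and PSTATE_SSBS
 * contributes the op1/op2 bits that select the SSBS pstate field. Masking
 * out the single immediate bit (PSTATE_Imm_shift) makes the comparison
 * match both "msr ssbs, #0" and "msr ssbs, #1"; the immediate bit is then
 * inspected to decide which way to update the saved PSTATE.
 */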

static enum mitigation_state spectre_v4_enable_hw_mitigation(void)
{
	enum mitigation_state state;

	/*
	 * If the system is mitigated but this CPU doesn't have SSBS, then
	 * we must be on the safelist and there's nothing more to do.
	 */
	state = spectre_v4_get_cpu_hw_mitigation_state();
	if (state != SPECTRE_MITIGATED || !this_cpu_has_cap(ARM64_SSBS))
		return state;

	if (spectre_v4_mitigations_off()) {
		sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_DSSBS);
		set_pstate_ssbs(1);
		return SPECTRE_VULNERABLE;
	}

	/* SCTLR_EL1.DSSBS was initialised to 0 during boot */
	set_pstate_ssbs(0);
	return SPECTRE_MITIGATED;
}

/*
 * Patch a branch over the Spectre-v4 mitigation code with a NOP so that
 * we fall through and check whether firmware needs to be called on this CPU.
 */
void __init spectre_v4_patch_fw_mitigation_enable(struct alt_instr *alt,
						  __le32 *origptr,
						  __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1); /* Branch -> NOP */

	if (spectre_v4_mitigations_off())
		return;

	if (cpus_have_cap(ARM64_SSBS))
		return;

	if (spectre_v4_mitigations_dynamic())
		*updptr = cpu_to_le32(aarch64_insn_gen_nop());
}

/*
 * Patch a NOP in the Spectre-v4 mitigation code with an SMC/HVC instruction
 * to call into firmware to adjust the mitigation state.
 */
void __init smccc_patch_fw_mitigation_conduit(struct alt_instr *alt,
					      __le32 *origptr,
					      __le32 *updptr, int nr_inst)
{
	u32 insn;

	BUG_ON(nr_inst != 1); /* NOP -> HVC/SMC */

	switch (arm_smccc_1_1_get_conduit()) {
	case SMCCC_CONDUIT_HVC:
		insn = aarch64_insn_get_hvc_value();
		break;
	case SMCCC_CONDUIT_SMC:
		insn = aarch64_insn_get_smc_value();
		break;
	default:
		return;
	}

	*updptr = cpu_to_le32(insn);
}
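
/*
 * Editor's note, a rough sketch (not a quote of the actual entry assembly) of
 * the kind of call site the two patch callbacks above cooperate on; the macro
 * names here are illustrative assumptions:
 *
 *	alternative_cb spectre_v4_patch_fw_mitigation_enable
 *	b	skip				// patched to NOP when dynamic
 *	alternative_cb_end
 *	// per-cpu arm64_ssbd_callback_required and TIF_SSBD checks here
 *	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
 *	alternative_cb smccc_patch_fw_mitigation_conduit
 *	nop					// patched to SMC/HVC #0
 *	alternative_cb_end
 * skip:
 */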

static enum mitigation_state spectre_v4_enable_fw_mitigation(void)
{
	enum mitigation_state state;

	state = spectre_v4_get_cpu_fw_mitigation_state();
	if (state != SPECTRE_MITIGATED)
		return state;

	if (spectre_v4_mitigations_off()) {
		arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, false, NULL);
		return SPECTRE_VULNERABLE;
	}

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_WORKAROUND_2, true, NULL);

	if (spectre_v4_mitigations_dynamic())
		__this_cpu_write(arm64_ssbd_callback_required, 1);

	return SPECTRE_MITIGATED;
}

void spectre_v4_enable_mitigation(const struct arm64_cpu_capabilities *__unused)
{
	enum mitigation_state state;

	WARN_ON(preemptible());

	state = spectre_v4_enable_hw_mitigation();
	if (state == SPECTRE_VULNERABLE)
		state = spectre_v4_enable_fw_mitigation();

	update_mitigation_state(&spectre_v4_state, state);
}

static void __update_pstate_ssbs(struct pt_regs *regs, bool state)
{
	u64 bit = compat_user_mode(regs) ? PSR_AA32_SSBS_BIT : PSR_SSBS_BIT;

	if (state)
		regs->pstate |= bit;
	else
		regs->pstate &= ~bit;
}

void spectre_v4_enable_task_mitigation(struct task_struct *tsk)
{
	struct pt_regs *regs = task_pt_regs(tsk);
	bool ssbs = false, kthread = tsk->flags & PF_KTHREAD;

	if (spectre_v4_mitigations_off())
		ssbs = true;
	else if (spectre_v4_mitigations_dynamic() && !kthread)
		ssbs = !test_tsk_thread_flag(tsk, TIF_SSBD);

	__update_pstate_ssbs(regs, ssbs);
}

/*
 * The Spectre-v4 mitigation can be controlled via a prctl() from userspace.
 * This is interesting because the "speculation disabled" behaviour can be
 * configured so that it is preserved across exec(), which means that the
 * prctl() may be necessary even when PSTATE.SSBS can be toggled directly
 * from userspace.
 */
static void ssbd_prctl_enable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_set_spec_ssb_disable(task);
	set_tsk_thread_flag(task, TIF_SSBD);
}

static void ssbd_prctl_disable_mitigation(struct task_struct *task)
{
	task_clear_spec_ssb_noexec(task);
	task_clear_spec_ssb_disable(task);
	clear_tsk_thread_flag(task, TIF_SSBD);
}

static int ssbd_prctl_set(struct task_struct *task, unsigned long ctrl)
{
	switch (ctrl) {
	case PR_SPEC_ENABLE:
		/* Enable speculation: disable mitigation */
		/*
		 * Force disabled speculation prevents it from being
		 * re-enabled.
		 */
		if (task_spec_ssb_force_disable(task))
			return -EPERM;

		/*
		 * If the mitigation is forced on, then speculation is forced
		 * off and we again prevent it from being re-enabled.
		 */
		if (spectre_v4_mitigations_on())
			return -EPERM;

		ssbd_prctl_disable_mitigation(task);
		break;
	case PR_SPEC_FORCE_DISABLE:
		/* Force disable speculation: force enable mitigation */
		/*
		 * If the mitigation is forced off, then speculation is forced
		 * on and we prevent it from being disabled.
		 */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		task_set_spec_ssb_force_disable(task);
		fallthrough;
	case PR_SPEC_DISABLE:
		/* Disable speculation: enable mitigation */
		/* Same as PR_SPEC_FORCE_DISABLE */
		if (spectre_v4_mitigations_off())
			return -EPERM;

		ssbd_prctl_enable_mitigation(task);
		break;
	case PR_SPEC_DISABLE_NOEXEC:
		/* Disable speculation until execve(): enable mitigation */
		/*
		 * If the mitigation state is forced one way or the other, then
		 * we must fail now before we try to toggle it on execve().
		 */
		if (task_spec_ssb_force_disable(task) ||
		    spectre_v4_mitigations_off() ||
		    spectre_v4_mitigations_on()) {
			return -EPERM;
		}

		ssbd_prctl_enable_mitigation(task);
		task_set_spec_ssb_noexec(task);
		break;
	default:
		return -ERANGE;
	}

	spectre_v4_enable_task_mitigation(task);
	return 0;
}

int arch_prctl_spec_ctrl_set(struct task_struct *task, unsigned long which,
			     unsigned long ctrl)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_set(task, ctrl);
	default:
		return -ENODEV;
	}
}
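
/*
 * Editor's note, a hedged userspace sketch (hypothetical standalone program,
 * not part of this file) exercising the interface handled above:
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *	#include <linux/prctl.h>
 *
 *	int main(void)
 *	{
 *		// Disable speculative store bypass, i.e. enable the mitigation
 *		if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			  PR_SPEC_DISABLE, 0, 0))
 *			perror("PR_SET_SPECULATION_CTRL");
 *
 *		// Read back the per-task state, cf. ssbd_prctl_get()
 *		printf("state: 0x%x\n",
 *		       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
 *			     0, 0, 0));
 *		return 0;
 *	}
 */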

static int ssbd_prctl_get(struct task_struct *task)
{
	switch (spectre_v4_state) {
	case SPECTRE_UNAFFECTED:
		return PR_SPEC_NOT_AFFECTED;
	case SPECTRE_MITIGATED:
		if (spectre_v4_mitigations_on())
			return PR_SPEC_NOT_AFFECTED;

		if (spectre_v4_mitigations_dynamic())
			break;

		/* Mitigations are disabled, so we're vulnerable. */
		fallthrough;
	case SPECTRE_VULNERABLE:
		fallthrough;
	default:
		return PR_SPEC_ENABLE;
	}

	/* Check the mitigation state for this task */
	if (task_spec_ssb_force_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;

	if (task_spec_ssb_noexec(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE_NOEXEC;

	if (task_spec_ssb_disable(task))
		return PR_SPEC_PRCTL | PR_SPEC_DISABLE;

	return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
}

int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
{
	switch (which) {
	case PR_SPEC_STORE_BYPASS:
		return ssbd_prctl_get(task);
	default:
		return -ENODEV;
	}
}

/*
 * Spectre BHB.
 *
 * A CPU is either:
 * - Mitigated by running a branchy loop a CPU-specific number of times, and
 *   listed in our "loop mitigated list".
 * - Mitigated in software by the firmware Spectre v2 call.
 * - Has the ClearBHB instruction to perform the mitigation.
 * - Has the 'Exception Clears Branch History Buffer' (ECBHB) feature, so no
 *   software mitigation in the vectors is needed.
 * - Has CSV2.3, so is unaffected.
 */
static enum mitigation_state spectre_bhb_state;

enum mitigation_state arm64_get_spectre_bhb_state(void)
{
	return spectre_bhb_state;
}

enum bhb_mitigation_bits {
	BHB_LOOP,
	BHB_FW,
	BHB_HW,
	BHB_INSN,
};
static unsigned long system_bhb_mitigations;

/*
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k11_list[] = {
			MIDR_ALL_VERSIONS(MIDR_AMPERE1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
			k = 11;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k =  8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}

	return k;
}
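
/*
 * Editor's note, a rough sketch (not a quote of the actual vector macro) of
 * the loop that consumes the k value returned above; each taken branch
 * inserts a benign entry into the branch history. x18 here stands in for a
 * scratch register:
 *
 *	mov	x18, #k		// immediate fixed up by spectre_bhb_patch_loop_iter()
 * 1:	b	. + 4
 *	subs	x18, x18, #1
 *	b.ne	1b
 *	dsb	nsh
 *	isb
 */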

static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
{
	int ret;
	struct arm_smccc_res res;

	arm_smccc_1_1_invoke(ARM_SMCCC_ARCH_FEATURES_FUNC_ID,
			     ARM_SMCCC_ARCH_WORKAROUND_3, &res);

	ret = res.a0;
	switch (ret) {
	case SMCCC_RET_SUCCESS:
		return SPECTRE_MITIGATED;
	case SMCCC_ARCH_WORKAROUND_RET_UNAFFECTED:
		return SPECTRE_UNAFFECTED;
	default:
		fallthrough;
	case SMCCC_RET_NOT_SUPPORTED:
		return SPECTRE_VULNERABLE;
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					 spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
}

static bool supports_ecbhb(int scope)
{
	u64 mmfr1;

	if (scope == SCOPE_LOCAL_CPU)
		mmfr1 = read_sysreg_s(SYS_ID_AA64MMFR1_EL1);
	else
		mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return cpuid_feature_extract_unsigned_field(mmfr1,
						    ID_AA64MMFR1_EL1_ECBHB_SHIFT);
}

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
	WARN_ON(scope != SCOPE_LOCAL_CPU || preemptible());

	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;

	if (spectre_bhb_loop_affected(scope))
		return true;

	if (is_spectre_bhb_fw_affected(scope))
		return true;

	return false;
}

static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
{
	const char *v;

	/* Validate the slot before using it to look up a vector */
	if (slot < 0)
		return;

	v = arm64_get_bp_hardening_vector(slot);
	__this_cpu_write(this_cpu_vector, v);

	/*
	 * When KPTI is in use, the vectors are switched when exiting to
	 * user-space.
	 */
	if (arm64_kernel_unmapped_at_el0())
		return;

	write_sysreg(v, vbar_el1);
	isb();
}

static bool __read_mostly __nospectre_bhb;
static int __init parse_spectre_bhb_param(char *str)
{
	__nospectre_bhb = true;
	return 0;
}
early_param("nospectre_bhb", parse_spectre_bhb_param);

void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
		return;

	if (arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE) {
		/* No point mitigating Spectre-BHB alone. */
	} else if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY)) {
		pr_info_once("spectre-bhb mitigation disabled by compile time option\n");
	} else if (cpu_mitigations_off() || __nospectre_bhb) {
		pr_info_once("spectre-bhb mitigation disabled by command line option\n");
	} else if (supports_ecbhb(SCOPE_LOCAL_CPU)) {
		state = SPECTRE_MITIGATED;
		set_bit(BHB_HW, &system_bhb_mitigations);
	} else if (supports_clearbhb(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have ClearBHB
		 * added.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
		 * the spectre-indirect vector, which is sufficient for BHB
		 * too.
		 */
		if (!data->slot)
			data->slot = HYP_VECTOR_INDIRECT;

		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						      __le32 *origptr,
						      __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_LOOP, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_fw_mitigation_enabled(struct alt_instr *alt,
						     __le32 *origptr,
						     __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 1);

	if (test_bit(BHB_FW, &system_bhb_mitigations))
		*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

/* Patched to correct the immediate */
void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
					 __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
}
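
/*
 * Editor's note, a worked example of the patch above: if the template
 * instruction in the vectors is "mov x18, #32" and the worst affected CPU in
 * the system needs k = 24 (e.g. Neoverse N1 from the k24 list), the
 * instruction is regenerated as "movz x18, #24": same destination register,
 * corrected immediate.
 */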

/* Patched to mov WA3 when supported */
void noinstr spectre_bhb_patch_wa3(struct alt_instr *alt,
				   __le32 *origptr, __le32 *updptr, int nr_inst)
{
	u8 rd;
	u32 insn;

	BUG_ON(nr_inst != 1); /* MOV -> MOV */

	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
	    !test_bit(BHB_FW, &system_bhb_mitigations))
		return;

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);

	insn = aarch64_insn_gen_logical_immediate(AARCH64_INSN_LOGIC_ORR,
						  AARCH64_INSN_VARIANT_32BIT,
						  AARCH64_INSN_REG_ZR, rd,
						  ARM_SMCCC_ARCH_WORKAROUND_3);
	if (WARN_ON_ONCE(insn == AARCH64_BREAK_FAULT))
		return;

	*updptr++ = cpu_to_le32(insn);
}

/* Patched to NOP when not supported */
void __init spectre_bhb_patch_clearbhb(struct alt_instr *alt,
				       __le32 *origptr, __le32 *updptr, int nr_inst)
{
	BUG_ON(nr_inst != 2);

	if (test_bit(BHB_INSN, &system_bhb_mitigations))
		return;

	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
	*updptr++ = cpu_to_le32(aarch64_insn_gen_nop());
}

#ifdef CONFIG_BPF_SYSCALL
#define EBPF_WARN "Unprivileged eBPF is enabled, data leaks possible via Spectre v2 BHB attacks!\n"
void unpriv_ebpf_notify(int new_state)
{
	if (spectre_v2_state == SPECTRE_VULNERABLE ||
	    spectre_bhb_state != SPECTRE_MITIGATED)
		return;

	if (!new_state)
		pr_err("WARNING: %s", EBPF_WARN);
}
#endif