// SPDX-License-Identifier: GPL-2.0

#define pr_fmt(fmt) "x86/split lock detection: " fmt

#include <linux/semaphore.h>
#include <linux/workqueue.h>
#include <linux/delay.h>
#include <linux/cpuhotplug.h>
#include <asm/cpu_device_id.h>
#include <asm/cmdline.h>
#include <asm/traps.h>
#include <asm/cpu.h>

enum split_lock_detect_state {
	sld_off = 0,
	sld_warn,
	sld_fatal,
	sld_ratelimit,
};

/*
 * Default to sld_off because most systems do not support split lock detection.
 * sld_state_setup() will switch this to sld_warn on systems that support
 * split lock/bus lock detect, unless there is a command line override.
 */
static enum split_lock_detect_state sld_state __ro_after_init = sld_off;
static u64 msr_test_ctrl_cache __ro_after_init;

/*
 * With a name like MSR_TEST_CTL it should go without saying, but don't touch
 * MSR_TEST_CTL unless the CPU is one of the whitelisted models.  Writing it
 * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
 */
static bool cpu_model_supports_sld __ro_after_init;

static const struct {
	const char			*option;
	enum split_lock_detect_state	state;
} sld_options[] __initconst = {
	{ "off",	sld_off   },
	{ "warn",	sld_warn  },
	{ "fatal",	sld_fatal },
	{ "ratelimit:", sld_ratelimit },
};

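/*
 * Rate-limit state used in sld_ratelimit mode. It is initialized from the
 * "split_lock_detect=ratelimit:N" command line option in match_option().
 */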
static struct ratelimit_state bld_ratelimit;

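/*
 * kernel.split_lock_mitigate sysctl: when set (the default), a task that
 * triggers a split lock is slowed down and only one core at a time is
 * allowed to run with split lock detection disabled, serialized by
 * buslock_sem.
 */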
static unsigned int sysctl_sld_mitigate = 1;
static DEFINE_SEMAPHORE(buslock_sem, 1);

#ifdef CONFIG_PROC_SYSCTL
static struct ctl_table sld_sysctls[] = {
	{
		.procname       = "split_lock_mitigate",
		.data           = &sysctl_sld_mitigate,
		.maxlen         = sizeof(unsigned int),
		.mode           = 0644,
		.proc_handler	= proc_douintvec_minmax,
		.extra1         = SYSCTL_ZERO,
		.extra2         = SYSCTL_ONE,
	},
};

static int __init sld_mitigate_sysctl_init(void)
{
	register_sysctl_init("kernel", sld_sysctls);
	return 0;
}

late_initcall(sld_mitigate_sysctl_init);
#endif

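/*
 * Compare a "split_lock_detect=" command line argument against one option
 * name. "ratelimit:N" additionally parses N and, if it is within 1-1000,
 * initializes bld_ratelimit accordingly.
 */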
static inline bool match_option(const char *arg, int arglen, const char *opt)
{
	int len = strlen(opt), ratelimit;

	if (strncmp(arg, opt, len))
		return false;

	/*
	 * Min ratelimit is 1 bus lock/sec.
	 * Max ratelimit is 1000 bus locks/sec.
	 */
	if (sscanf(arg, "ratelimit:%d", &ratelimit) == 1 &&
	    ratelimit > 0 && ratelimit <= 1000) {
		ratelimit_state_init(&bld_ratelimit, HZ, ratelimit);
		ratelimit_set_flags(&bld_ratelimit, RATELIMIT_MSG_ON_RELEASE);
		return true;
	}

	return len == arglen;
}

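/*
 * Try to write the split lock detect bit in MSR_TEST_CTRL and read it back.
 * Returns false if the MSR cannot be accessed or the write did not stick.
 */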
static bool split_lock_verify_msr(bool on)
{
	u64 ctrl, tmp;

	if (rdmsrl_safe(MSR_TEST_CTRL, &ctrl))
		return false;
	if (on)
		ctrl |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	else
		ctrl &= ~MSR_TEST_CTRL_SPLIT_LOCK_DETECT;
	if (wrmsrl_safe(MSR_TEST_CTRL, ctrl))
		return false;
	rdmsrl(MSR_TEST_CTRL, tmp);
	return ctrl == tmp;
}

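/*
 * Parse the "split_lock_detect=" command line option and set sld_state.
 * On hardware with split lock or bus lock detection the default is sld_warn.
 */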
static void __init sld_state_setup(void)
{
	enum split_lock_detect_state state = sld_warn;
	char arg[20];
	int i, ret;

	if (!boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	ret = cmdline_find_option(boot_command_line, "split_lock_detect",
				  arg, sizeof(arg));
	if (ret >= 0) {
		for (i = 0; i < ARRAY_SIZE(sld_options); i++) {
			if (match_option(arg, ret, sld_options[i].option)) {
				state = sld_options[i].state;
				break;
			}
		}
	}
	sld_state = state;
}

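/*
 * Probe MSR_TEST_CTRL by clearing and then setting the split lock detect
 * bit. If both writes verify, the MSR value (with the bit clear) is cached
 * for later use by sld_update_msr(), the MSR is restored to that value and
 * X86_FEATURE_SPLIT_LOCK_DETECT is force-set.
 */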
static void __init __split_lock_setup(void)
{
	if (!split_lock_verify_msr(false)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	rdmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	if (!split_lock_verify_msr(true)) {
		pr_info("MSR access failed: Disabled\n");
		return;
	}

	/* Restore the MSR to its cached value. */
	wrmsrl(MSR_TEST_CTRL, msr_test_ctrl_cache);

	setup_force_cpu_cap(X86_FEATURE_SPLIT_LOCK_DETECT);
}

/*
 * MSR_TEST_CTRL is per core, but we treat it like a per CPU MSR. Locking
 * is not implemented as one thread could undo the setting of the other
 * thread immediately after dropping the lock anyway.
 */
static void sld_update_msr(bool on)
{
	u64 test_ctrl_val = msr_test_ctrl_cache;

	if (on)
		test_ctrl_val |= MSR_TEST_CTRL_SPLIT_LOCK_DETECT;

	wrmsrl(MSR_TEST_CTRL, test_ctrl_val);
}

void split_lock_init(void)
{
	/*
	 * In ratelimit mode, bus lock rate limiting is handled by #DB and
	 * #AC for split lock is left disabled.
	 */
	if (sld_state == sld_ratelimit) {
		split_lock_verify_msr(false);
		return;
	}

	if (cpu_model_supports_sld)
		split_lock_verify_msr(sld_state != sld_off);
}

static void __split_lock_reenable_unlock(struct work_struct *work)
{
	sld_update_msr(true);
	up(&buslock_sem);
}

static DECLARE_DELAYED_WORK(sl_reenable_unlock, __split_lock_reenable_unlock);

static void __split_lock_reenable(struct work_struct *work)
{
	sld_update_msr(true);
}
static DECLARE_DELAYED_WORK(sl_reenable, __split_lock_reenable);

/*
 * If a CPU goes offline with pending delayed work to re-enable split lock
 * detection then the delayed work will be executed on some other CPU. That
 * handles releasing the buslock_sem, but because it executes on a
 * different CPU probably won't re-enable split lock detection. This is a
 * problem on HT systems since the sibling CPU on the same core may then be
 * left running with split lock detection disabled.
 *
 * Unconditionally re-enable detection here.
 */
static int splitlock_cpu_offline(unsigned int cpu)
{
	sld_update_msr(true);

	return 0;
}

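/*
 * Warn (once per task) about a split lock, then temporarily disable split
 * lock detection on this CPU so the offending instruction can complete.
 * Delayed work re-enables detection two jiffies later. If the
 * split_lock_mitigate sysctl is set, the task is additionally slowed down
 * and buslock_sem ensures that only one core at a time runs with detection
 * disabled.
 */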
static void split_lock_warn(unsigned long ip)
{
	struct delayed_work *work;
	int cpu;

	if (!current->reported_split_lock)
		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, ip);
	current->reported_split_lock = 1;

	if (sysctl_sld_mitigate) {
		/*
		 * Misery factor #1:
		 * sleep 10ms before retrying the split-locked instruction.
		 */
		if (msleep_interruptible(10) > 0)
			return;
		/*
		 * Misery factor #2:
		 * only allow one core at a time to run with split lock
		 * detection disabled.
		 */
		if (down_interruptible(&buslock_sem) == -EINTR)
			return;
		work = &sl_reenable_unlock;
	} else {
		work = &sl_reenable;
	}

	cpu = get_cpu();
	schedule_delayed_work_on(cpu, work, 2);

	/* Disable split lock detection on this CPU to make progress */
	sld_update_msr(false);
	put_cpu();
}

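/*
 * Handle a split lock #AC that originated in guest mode: in sld_warn mode
 * warn and temporarily disable detection, otherwise report the event, queue
 * SIGBUS for the current task and return false.
 */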
bool handle_guest_split_lock(unsigned long ip)
{
	if (sld_state == sld_warn) {
		split_lock_warn(ip);
		return true;
	}

	pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n",
		     current->comm, current->pid,
		     sld_state == sld_fatal ? "fatal" : "bogus", ip);

	current->thread.error_code = 0;
	current->thread.trap_nr = X86_TRAP_AC;
	force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
	return false;
}
EXPORT_SYMBOL_GPL(handle_guest_split_lock);

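/*
 * Enable #DB for bus lock detection in IA32_DEBUGCTL unless the warn/fatal
 * cases are already covered by #AC split lock detection, or detection is
 * switched off entirely.
 */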
void bus_lock_init(void)
{
	u64 val;

	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
		return;

	rdmsrl(MSR_IA32_DEBUGCTLMSR, val);

	if ((boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) &&
	    (sld_state == sld_warn || sld_state == sld_fatal)) ||
	    sld_state == sld_off) {
		/*
		 * Warn and fatal are handled by #AC for split lock if #AC for
		 * split lock is supported.
		 */
		val &= ~DEBUGCTLMSR_BUS_LOCK_DETECT;
	} else {
		val |= DEBUGCTLMSR_BUS_LOCK_DETECT;
	}

	wrmsrl(MSR_IA32_DEBUGCTLMSR, val);
}

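/*
 * #AC handler for a user-space split lock. Returns false (deliver SIGBUS)
 * in fatal mode or when the task had enabled alignment checking itself;
 * otherwise warn and let the task continue.
 */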
bool handle_user_split_lock(struct pt_regs *regs, long error_code)
{
	if ((regs->flags & X86_EFLAGS_AC) || sld_state == sld_fatal)
		return false;
	split_lock_warn(regs->ip);
	return true;
}

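/*
 * #DB handler for a user-space bus lock: throttle and warn in ratelimit
 * mode, warn in warn mode, or send SIGBUS in fatal mode.
 */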
void handle_bus_lock(struct pt_regs *regs)
{
	switch (sld_state) {
	case sld_off:
		break;
	case sld_ratelimit:
		/* Enforce no more than bld_ratelimit bus locks/sec. */
		while (!__ratelimit(&bld_ratelimit))
			msleep(20);
		/* Warn on the bus lock. */
		fallthrough;
	case sld_warn:
		pr_warn_ratelimited("#DB: %s/%d took a bus_lock trap at address: 0x%lx\n",
				    current->comm, current->pid, regs->ip);
		break;
	case sld_fatal:
		force_sig_fault(SIGBUS, BUS_ADRALN, NULL);
		break;
	}
}

/*
 * CPU models that are known to have the per-core split-lock detection
 * feature even though they do not enumerate IA32_CORE_CAPABILITIES.
 */
static const struct x86_cpu_id split_lock_cpu_ids[] __initconst = {
	X86_MATCH_VFM(INTEL_ICELAKE_X,	0),
	X86_MATCH_VFM(INTEL_ICELAKE_L,	0),
	X86_MATCH_VFM(INTEL_ICELAKE_D,	0),
	{}
};

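/*
 * Detect split lock detection support, either from the model list above or
 * from the architectural MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT bit, and set
 * up the feature. Skipped when running under a hypervisor.
 */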
static void __init split_lock_setup(struct cpuinfo_x86 *c)
{
	const struct x86_cpu_id *m;
	u64 ia32_core_caps;

	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;

	/* Check for CPUs that have support but do not enumerate it: */
	m = x86_match_cpu(split_lock_cpu_ids);
	if (m)
		goto supported;

	if (!cpu_has(c, X86_FEATURE_CORE_CAPABILITIES))
		return;

	/*
	 * Not all bits in MSR_IA32_CORE_CAPS are architectural, but
	 * MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT is.  All CPUs that set
	 * it have split lock detection.
	 */
	rdmsrl(MSR_IA32_CORE_CAPS, ia32_core_caps);
	if (ia32_core_caps & MSR_IA32_CORE_CAPS_SPLIT_LOCK_DETECT)
		goto supported;

	/* CPU is not in the model list and does not have the MSR bit: */
	return;

supported:
	cpu_model_supports_sld = true;
	__split_lock_setup();
}

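/*
 * Report the selected mode. In warn mode this also registers the CPU
 * hotplug callback that re-enables split lock detection when a CPU goes
 * offline.
 */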
static void sld_state_show(void)
{
	if (!boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT) &&
	    !boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT))
		return;

	switch (sld_state) {
	case sld_off:
		pr_info("disabled\n");
		break;
	case sld_warn:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and warning on user-space split_locks\n");
			if (cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					      "x86/splitlock", NULL, splitlock_cpu_offline) < 0)
				pr_warn("No splitlock CPU offline handler\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: warning on user-space bus_locks\n");
		}
		break;
	case sld_fatal:
		if (boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT)) {
			pr_info("#AC: crashing the kernel on kernel split_locks and sending SIGBUS on user-space split_locks\n");
		} else if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT)) {
			pr_info("#DB: sending SIGBUS on user-space bus_locks%s\n",
				boot_cpu_has(X86_FEATURE_SPLIT_LOCK_DETECT) ?
				" from non-WB" : "");
		}
		break;
	case sld_ratelimit:
		if (boot_cpu_has(X86_FEATURE_BUS_LOCK_DETECT))
			pr_info("#DB: setting system wide bus lock rate limit to %u/sec\n", bld_ratelimit.burst);
		break;
	}
}

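/*
 * Top-level entry point: detect hardware support, parse the command line
 * and report the resulting split lock detection mode.
 */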
void __init sld_setup(struct cpuinfo_x86 *c)
{
	split_lock_setup(c);
	sld_state_setup();
	sld_state_show();
}