Linux Audio

Check our new training course

Loading...
Note: File does not exist in v4.17.
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * KCSAN debugfs interface.
  4 *
  5 * Copyright (C) 2019, Google LLC.
  6 */
  7
  8#define pr_fmt(fmt) "kcsan: " fmt
  9
 10#include <linux/atomic.h>
 11#include <linux/bsearch.h>
 12#include <linux/bug.h>
 13#include <linux/debugfs.h>
 14#include <linux/init.h>
 15#include <linux/kallsyms.h>
 16#include <linux/sched.h>
 17#include <linux/seq_file.h>
 18#include <linux/slab.h>
 19#include <linux/sort.h>
 20#include <linux/string.h>
 21#include <linux/uaccess.h>
 22
 23#include "kcsan.h"
 24
/*
 * Global event counters, indexed by the KCSAN_COUNTER_* ids; non-static so
 * other KCSAN compilation units (via kcsan.h) can update them.
 */
atomic_long_t kcsan_counters[KCSAN_COUNTER_COUNT];

/* Human-readable name for each counter, printed by the debugfs read handler. */
static const char *const counter_names[] = {
	[KCSAN_COUNTER_USED_WATCHPOINTS]		= "used_watchpoints",
	[KCSAN_COUNTER_SETUP_WATCHPOINTS]		= "setup_watchpoints",
	[KCSAN_COUNTER_DATA_RACES]			= "data_races",
	[KCSAN_COUNTER_ASSERT_FAILURES]			= "assert_failures",
	[KCSAN_COUNTER_NO_CAPACITY]			= "no_capacity",
	[KCSAN_COUNTER_REPORT_RACES]			= "report_races",
	[KCSAN_COUNTER_RACES_UNKNOWN_ORIGIN]		= "races_unknown_origin",
	[KCSAN_COUNTER_UNENCODABLE_ACCESSES]		= "unencodable_accesses",
	[KCSAN_COUNTER_ENCODING_FALSE_POSITIVES]	= "encoding_false_positives",
};
/* Every counter id must have a matching name entry. */
static_assert(ARRAY_SIZE(counter_names) == KCSAN_COUNTER_COUNT);
 38
/*
 * Addresses for filtering functions from reporting. This list can be used as a
 * whitelist or blacklist: kcsan_skip_report_debugfs() consults it to decide
 * whether a report for a given function should be suppressed.
 */
static struct {
	unsigned long	*addrs;		/* array of addresses */
	size_t		size;		/* current size (allocated capacity) */
	int		used;		/* number of elements used */
	bool		sorted;		/* if elements are sorted */
	bool		whitelist;	/* if list is a blacklist or whitelist */
} report_filterlist;
/* Protects all fields of report_filterlist; raw lock, so no allocation under it. */
static DEFINE_RAW_SPINLOCK(report_filterlist_lock);
 51
 52/*
 53 * The microbenchmark allows benchmarking KCSAN core runtime only. To run
 54 * multiple threads, pipe 'microbench=<iters>' from multiple tasks into the
 55 * debugfs file. This will not generate any conflicts, and tests fast-path only.
 56 */
 57static noinline void microbenchmark(unsigned long iters)
 58{
 59	const struct kcsan_ctx ctx_save = current->kcsan_ctx;
 60	const bool was_enabled = READ_ONCE(kcsan_enabled);
 61	u64 cycles;
 62
 63	/* We may have been called from an atomic region; reset context. */
 64	memset(&current->kcsan_ctx, 0, sizeof(current->kcsan_ctx));
 65	/*
 66	 * Disable to benchmark fast-path for all accesses, and (expected
 67	 * negligible) call into slow-path, but never set up watchpoints.
 68	 */
 69	WRITE_ONCE(kcsan_enabled, false);
 70
 71	pr_info("%s begin | iters: %lu\n", __func__, iters);
 72
 73	cycles = get_cycles();
 74	while (iters--) {
 75		unsigned long addr = iters & ((PAGE_SIZE << 8) - 1);
 76		int type = !(iters & 0x7f) ? KCSAN_ACCESS_ATOMIC :
 77				(!(iters & 0xf) ? KCSAN_ACCESS_WRITE : 0);
 78		__kcsan_check_access((void *)addr, sizeof(long), type);
 79	}
 80	cycles = get_cycles() - cycles;
 81
 82	pr_info("%s end   | cycles: %llu\n", __func__, cycles);
 83
 84	WRITE_ONCE(kcsan_enabled, was_enabled);
 85	/* restore context */
 86	current->kcsan_ctx = ctx_save;
 87}
 88
 89static int cmp_filterlist_addrs(const void *rhs, const void *lhs)
 90{
 91	const unsigned long a = *(const unsigned long *)rhs;
 92	const unsigned long b = *(const unsigned long *)lhs;
 93
 94	return a < b ? -1 : a == b ? 0 : 1;
 95}
 96
 97bool kcsan_skip_report_debugfs(unsigned long func_addr)
 98{
 99	unsigned long symbolsize, offset;
100	unsigned long flags;
101	bool ret = false;
102
103	if (!kallsyms_lookup_size_offset(func_addr, &symbolsize, &offset))
104		return false;
105	func_addr -= offset; /* Get function start */
106
107	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
108	if (report_filterlist.used == 0)
109		goto out;
110
111	/* Sort array if it is unsorted, and then do a binary search. */
112	if (!report_filterlist.sorted) {
113		sort(report_filterlist.addrs, report_filterlist.used,
114		     sizeof(unsigned long), cmp_filterlist_addrs, NULL);
115		report_filterlist.sorted = true;
116	}
117	ret = !!bsearch(&func_addr, report_filterlist.addrs,
118			report_filterlist.used, sizeof(unsigned long),
119			cmp_filterlist_addrs);
120	if (report_filterlist.whitelist)
121		ret = !ret;
122
123out:
124	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
125	return ret;
126}
127
128static void set_report_filterlist_whitelist(bool whitelist)
129{
130	unsigned long flags;
131
132	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
133	report_filterlist.whitelist = whitelist;
134	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
135}
136
137/* Returns 0 on success, error-code otherwise. */
/*
 * Resolve @func via kallsyms and append its address to the filter list,
 * growing the backing array if needed. Duplicates are not filtered here.
 * Returns 0 on success, error-code otherwise.
 */
static ssize_t insert_report_filterlist(const char *func)
{
	unsigned long flags;
	unsigned long addr = kallsyms_lookup_name(func);
	unsigned long *delay_free = NULL;	/* freed only after dropping the lock */
	unsigned long *new_addrs = NULL;	/* pre-allocated replacement array */
	size_t new_size = 0;
	ssize_t ret = 0;

	if (!addr) {
		pr_err("could not find function: '%s'\n", func);
		return -ENOENT;
	}

retry_alloc:
	/*
	 * Check if we need an allocation, and re-validate under the lock. Since
	 * the report_filterlist_lock is a raw, cannot allocate under the lock.
	 */
	if (data_race(report_filterlist.used == report_filterlist.size)) {
		/* Double the capacity; start at 4 for an empty list. */
		new_size = (report_filterlist.size ?: 4) * 2;
		delay_free = new_addrs = kmalloc_array(new_size, sizeof(unsigned long), GFP_KERNEL);
		if (!new_addrs)
			return -ENOMEM;
	}

	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
	/* Re-check under the lock: a concurrent insert may have grown the list. */
	if (report_filterlist.used == report_filterlist.size) {
		/* Check we pre-allocated enough, and retry if not. */
		if (report_filterlist.used >= new_size) {
			raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
			kfree(new_addrs); /* kfree(NULL) is safe */
			delay_free = new_addrs = NULL;
			goto retry_alloc;
		}

		/* Migrate existing entries, then install the larger array. */
		if (report_filterlist.used)
			memcpy(new_addrs, report_filterlist.addrs, report_filterlist.used * sizeof(unsigned long));
		delay_free = report_filterlist.addrs; /* free the old list */
		report_filterlist.addrs = new_addrs;  /* switch to the new list */
		report_filterlist.size = new_size;
	}

	/* Note: deduplicating should be done in userspace. */
	report_filterlist.addrs[report_filterlist.used++] = addr;
	/* Appending invalidates sorted order; lookup re-sorts lazily. */
	report_filterlist.sorted = false;

	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);

	/* Either the replaced old array, or an unused pre-allocation (or NULL). */
	kfree(delay_free);
	return ret;
}
190
191static int show_info(struct seq_file *file, void *v)
192{
193	int i;
194	unsigned long flags;
195
196	/* show stats */
197	seq_printf(file, "enabled: %i\n", READ_ONCE(kcsan_enabled));
198	for (i = 0; i < KCSAN_COUNTER_COUNT; ++i) {
199		seq_printf(file, "%s: %ld\n", counter_names[i],
200			   atomic_long_read(&kcsan_counters[i]));
201	}
202
203	/* show filter functions, and filter type */
204	raw_spin_lock_irqsave(&report_filterlist_lock, flags);
205	seq_printf(file, "\n%s functions: %s\n",
206		   report_filterlist.whitelist ? "whitelisted" : "blacklisted",
207		   report_filterlist.used == 0 ? "none" : "");
208	for (i = 0; i < report_filterlist.used; ++i)
209		seq_printf(file, " %ps\n", (void *)report_filterlist.addrs[i]);
210	raw_spin_unlock_irqrestore(&report_filterlist_lock, flags);
211
212	return 0;
213}
214
/* debugfs open handler: route reads through seq_file using show_info(). */
static int debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_info, NULL);
}
219
220static ssize_t
221debugfs_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
222{
223	char kbuf[KSYM_NAME_LEN];
224	char *arg;
225	const size_t read_len = min(count, sizeof(kbuf) - 1);
226
227	if (copy_from_user(kbuf, buf, read_len))
228		return -EFAULT;
229	kbuf[read_len] = '\0';
230	arg = strstrip(kbuf);
231
232	if (!strcmp(arg, "on")) {
233		WRITE_ONCE(kcsan_enabled, true);
234	} else if (!strcmp(arg, "off")) {
235		WRITE_ONCE(kcsan_enabled, false);
236	} else if (str_has_prefix(arg, "microbench=")) {
237		unsigned long iters;
238
239		if (kstrtoul(&arg[strlen("microbench=")], 0, &iters))
240			return -EINVAL;
241		microbenchmark(iters);
242	} else if (!strcmp(arg, "whitelist")) {
243		set_report_filterlist_whitelist(true);
244	} else if (!strcmp(arg, "blacklist")) {
245		set_report_filterlist_whitelist(false);
246	} else if (arg[0] == '!') {
247		ssize_t ret = insert_report_filterlist(&arg[1]);
248
249		if (ret < 0)
250			return ret;
251	} else {
252		return -EINVAL;
253	}
254
255	return count;
256}
257
/* File operations for the "kcsan" debugfs entry. */
static const struct file_operations debugfs_ops =
{
	.read	 = seq_read,
	.open	 = debugfs_open,
	.write	 = debugfs_write,
	.release = single_release
};
265
/* Create the "kcsan" control file at the debugfs root. */
static int __init kcsan_debugfs_init(void)
{
	debugfs_create_file("kcsan", 0644, NULL, NULL, &debugfs_ops);
	return 0;
}

/* Late initcall — presumably so debugfs itself is set up first; standard for debugfs users. */
late_initcall(kcsan_debugfs_init);