kernel/kcov.c (v5.4)
  1// SPDX-License-Identifier: GPL-2.0
  2#define pr_fmt(fmt) "kcov: " fmt
  3
  4#define DISABLE_BRANCH_PROFILING
  5#include <linux/atomic.h>
  6#include <linux/compiler.h>
  7#include <linux/errno.h>
  8#include <linux/export.h>
  9#include <linux/types.h>
 10#include <linux/file.h>
 11#include <linux/fs.h>
 12#include <linux/init.h>
 13#include <linux/mm.h>
 14#include <linux/preempt.h>
 15#include <linux/printk.h>
 16#include <linux/sched.h>
 17#include <linux/slab.h>
 18#include <linux/spinlock.h>
 19#include <linux/vmalloc.h>
 20#include <linux/debugfs.h>
 21#include <linux/uaccess.h>
 22#include <linux/kcov.h>
 23#include <linux/refcount.h>
 24#include <asm/setup.h>
 25
 26/* Number of 64-bit words written per one comparison: */
 27#define KCOV_WORDS_PER_CMP 4
 28
 29/*
 30 * kcov descriptor (one per opened debugfs file).
 31 * State transitions of the descriptor:
 32 *  - initial state after open()
 33 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 34 *  - then, mmap() call (several calls are allowed but not useful)
 35 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 36 *	KCOV_TRACE_PC - to trace only the PCs
 37 *	or
 38 *	KCOV_TRACE_CMP - to trace only the comparison operands
 39 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 40 * Enabling/disabling ioctls can be repeated (only one task at a time is allowed).
 41 */
 42struct kcov {
 43	/*
 44	 * Reference counter. We keep one for:
 45	 *  - opened file descriptor
 46	 *  - task with enabled coverage (we can't unwire it from another task)
 47	 */
 48	refcount_t		refcount;
 49	/* The lock protects mode, size, area and t. */
 50	spinlock_t		lock;
 51	enum kcov_mode		mode;
 52	/* Size of arena (in long's for KCOV_MODE_TRACE). */
 53	unsigned		size;
 54	/* Coverage buffer shared with user space. */
 55	void			*area;
 56	/* Task for which we collect coverage, or NULL. */
 57	struct task_struct	*t;
 58};
 59
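/*
 * Editor's sketch (not part of kcov.c): the ioctl state machine described
 * above struct kcov is driven entirely from user space. A minimal client,
 * modelled on the example in Documentation/dev-tools/kcov.rst; error
 * handling is omitted and COVER_SIZE is an arbitrary illustrative value:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *	#include <linux/kcov.h>
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int main(void)
 *	{
 *		int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *		unsigned long *cover, n, i;
 *
 *		ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *		cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *			     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *		ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *		__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *		read(-1, NULL, 0);	// the syscall being traced
 *		n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *		for (i = 0; i < n; i++)
 *			printf("0x%lx\n", cover[i + 1]);
 *		ioctl(fd, KCOV_DISABLE, 0);
 *		return 0;
 *	}
 */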
 60static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 61{
 62	unsigned int mode;
 63
 64	/*
 65	 * We are interested in code coverage as a function of syscall inputs,
 66	 * so we ignore code executed in interrupts.
 67	 */
 68	if (!in_task())
 69		return false;
 70	mode = READ_ONCE(t->kcov_mode);
 71	/*
 72	 * There is some code that runs in interrupts but for which
 73	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
 74	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
 75	 * interrupts, there are paired barrier()/WRITE_ONCE() in
 76	 * kcov_ioctl_locked().
 77	 */
 78	barrier();
 79	return mode == needed_mode;
 80}
 81
 82static notrace unsigned long canonicalize_ip(unsigned long ip)
 83{
 84#ifdef CONFIG_RANDOMIZE_BASE
 85	ip -= kaslr_offset();
 86#endif
 87	return ip;
 88}
 89
 90/*
 91 * Entry point from instrumented code.
 92 * This is called once per basic-block/edge.
 93 */
 94void notrace __sanitizer_cov_trace_pc(void)
 95{
 96	struct task_struct *t;
 97	unsigned long *area;
 98	unsigned long ip = canonicalize_ip(_RET_IP_);
 99	unsigned long pos;
100
101	t = current;
102	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
103		return;
104
105	area = t->kcov_area;
106	/* The first 64-bit word is the number of subsequent PCs. */
107	pos = READ_ONCE(area[0]) + 1;
108	if (likely(pos < t->kcov_size)) {
109		area[pos] = ip;
110		WRITE_ONCE(area[0], pos);
111	}
112}
113EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
114
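/*
 * Editor's note (not part of kcov.c): the buffer layout implied above is a
 * count followed by the PCs, so an arena of t->kcov_size words stores at
 * most t->kcov_size - 1 PCs (hence the "pos < t->kcov_size" check). For
 * example, after three hits in an arena of size 8:
 *
 *	area[0] = 3;	// number of PCs recorded so far
 *	area[1..3]	// canonicalized return addresses (KASLR offset removed)
 *	area[4..7]	// still unused
 */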
115#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
116static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
117{
118	struct task_struct *t;
119	u64 *area;
120	u64 count, start_index, end_pos, max_pos;
121
122	t = current;
123	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
124		return;
125
126	ip = canonicalize_ip(ip);
127
128	/*
129	 * We write all comparison arguments and types as u64.
130	 * The buffer was allocated for t->kcov_size unsigned longs.
131	 */
132	area = (u64 *)t->kcov_area;
133	max_pos = t->kcov_size * sizeof(unsigned long);
134
135	count = READ_ONCE(area[0]);
136
137	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
138	start_index = 1 + count * KCOV_WORDS_PER_CMP;
139	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
140	if (likely(end_pos <= max_pos)) {
141		area[start_index] = type;
142		area[start_index + 1] = arg1;
143		area[start_index + 2] = arg2;
144		area[start_index + 3] = ip;
145		WRITE_ONCE(area[0], count + 1);
146	}
147}
148
149void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
150{
151	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
152}
153EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);
154
155void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
156{
157	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
158}
159EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
160
161void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
162{
163	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
164}
165EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);
166
167void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
168{
169	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
170}
171EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);
172
173void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
174{
175	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
176			_RET_IP_);
177}
178EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);
179
180void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
181{
182	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
183			_RET_IP_);
184}
185EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
186
187void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
188{
189	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
190			_RET_IP_);
191}
192EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);
193
194void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
195{
196	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
197			_RET_IP_);
198}
199EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);
200
201void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
202{
203	u64 i;
204	u64 count = cases[0];
205	u64 size = cases[1];
206	u64 type = KCOV_CMP_CONST;
207
208	switch (size) {
209	case 8:
210		type |= KCOV_CMP_SIZE(0);
211		break;
212	case 16:
213		type |= KCOV_CMP_SIZE(1);
214		break;
215	case 32:
216		type |= KCOV_CMP_SIZE(2);
217		break;
218	case 64:
219		type |= KCOV_CMP_SIZE(3);
220		break;
221	default:
222		return;
223	}
224	for (i = 0; i < count; i++)
225		write_comp_data(type, cases[i + 2], val, _RET_IP_);
226}
227EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
228#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
229
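/*
 * Editor's sketch (not part of kcov.c): in KCOV_TRACE_CMP mode, area[0]
 * holds the record count and each record occupies KCOV_WORDS_PER_CMP (4)
 * u64 words: type, arg1, arg2, ip, as written by write_comp_data() above.
 * User space could decode the mmapped buffer roughly like this (type bits
 * per include/uapi/linux/kcov.h; "cover" is a hypothetical __u64 * view of
 * the mapping):
 *
 *	__u64 n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (__u64 i = 0; i < n; i++) {
 *		__u64 *rec = &cover[1 + i * 4];
 *		__u64 type = rec[0], arg1 = rec[1], arg2 = rec[2], ip = rec[3];
 *		int bytes = 1 << ((type & KCOV_CMP_MASK) >> 1);
 *
 *		printf("ip 0x%llx: cmp%d(0x%llx, 0x%llx)%s\n", ip, bytes,
 *		       arg1, arg2, (type & KCOV_CMP_CONST) ? " const" : "");
 *	}
 */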
230static void kcov_get(struct kcov *kcov)
231{
232	refcount_inc(&kcov->refcount);
233}
234
235static void kcov_put(struct kcov *kcov)
236{
237	if (refcount_dec_and_test(&kcov->refcount)) {
238		vfree(kcov->area);
239		kfree(kcov);
240	}
241}
242
243void kcov_task_init(struct task_struct *t)
244{
245	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
246	barrier();
247	t->kcov_size = 0;
248	t->kcov_area = NULL;
249	t->kcov = NULL;
250}
251
252void kcov_task_exit(struct task_struct *t)
253{
254	struct kcov *kcov;
255
256	kcov = t->kcov;
257	if (kcov == NULL)
258		return;
259	spin_lock(&kcov->lock);
260	if (WARN_ON(kcov->t != t)) {
261		spin_unlock(&kcov->lock);
262		return;
263	}
264	/* Just to not leave dangling references behind. */
265	kcov_task_init(t);
266	kcov->t = NULL;
267	kcov->mode = KCOV_MODE_INIT;
268	spin_unlock(&kcov->lock);
269	kcov_put(kcov);
270}
271
272static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
273{
274	int res = 0;
275	void *area;
276	struct kcov *kcov = vma->vm_file->private_data;
277	unsigned long size, off;
278	struct page *page;
279
280	area = vmalloc_user(vma->vm_end - vma->vm_start);
281	if (!area)
282		return -ENOMEM;
283
284	spin_lock(&kcov->lock);
285	size = kcov->size * sizeof(unsigned long);
286	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
287	    vma->vm_end - vma->vm_start != size) {
288		res = -EINVAL;
289		goto exit;
290	}
291	if (!kcov->area) {
292		kcov->area = area;
293		vma->vm_flags |= VM_DONTEXPAND;
294		spin_unlock(&kcov->lock);
295		for (off = 0; off < size; off += PAGE_SIZE) {
296			page = vmalloc_to_page(kcov->area + off);
297			if (vm_insert_page(vma, vma->vm_start + off, page))
298				WARN_ONCE(1, "vm_insert_page() failed");
299		}
300		return 0;
301	}
302exit:
303	spin_unlock(&kcov->lock);
304	vfree(area);
305	return res;
306}
307
308static int kcov_open(struct inode *inode, struct file *filep)
309{
310	struct kcov *kcov;
311
312	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
313	if (!kcov)
314		return -ENOMEM;
315	kcov->mode = KCOV_MODE_DISABLED;
316	refcount_set(&kcov->refcount, 1);
317	spin_lock_init(&kcov->lock);
318	filep->private_data = kcov;
319	return nonseekable_open(inode, filep);
320}
321
322static int kcov_close(struct inode *inode, struct file *filep)
323{
324	kcov_put(filep->private_data);
325	return 0;
326}
327
328/*
329 * Fault in a lazily-faulted vmalloc area before it can be used by
330 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
331 * vmalloc fault handling path is instrumented.
332 */
333static void kcov_fault_in_area(struct kcov *kcov)
334{
335	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
336	unsigned long *area = kcov->area;
337	unsigned long offset;
338
339	for (offset = 0; offset < kcov->size; offset += stride)
340		READ_ONCE(area[offset]);
341}
342
343static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
344			     unsigned long arg)
345{
346	struct task_struct *t;
347	unsigned long size, unused;
348
349	switch (cmd) {
350	case KCOV_INIT_TRACE:
351		/*
352		 * Enable kcov in trace mode and set up the buffer size.
353		 * Must happen before anything else.
354		 */
355		if (kcov->mode != KCOV_MODE_DISABLED)
356			return -EBUSY;
357		/*
358		 * Size must be at least 2 to hold current position and one PC.
359		 * Later we allocate size * sizeof(unsigned long) memory,
360		 * that must not overflow.
361		 */
362		size = arg;
363		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
364			return -EINVAL;
365		kcov->size = size;
366		kcov->mode = KCOV_MODE_INIT;
367		return 0;
368	case KCOV_ENABLE:
369		/*
370		 * Enable coverage for the current task.
371		 * At this point the user must have enabled trace mode,
372		 * and mmapped the file. Coverage collection is disabled only
373		 * at task exit or voluntarily via KCOV_DISABLE. After that it can
374		 * be enabled for another task.
375		 */
376		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
377			return -EINVAL;
378		t = current;
379		if (kcov->t != NULL || t->kcov != NULL)
380			return -EBUSY;
381		if (arg == KCOV_TRACE_PC)
382			kcov->mode = KCOV_MODE_TRACE_PC;
383		else if (arg == KCOV_TRACE_CMP)
384#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
385			kcov->mode = KCOV_MODE_TRACE_CMP;
386#else
387		return -ENOTSUPP;
388#endif
389		else
390			return -EINVAL;
391		kcov_fault_in_area(kcov);
392		/* Cache in task struct for performance. */
393		t->kcov_size = kcov->size;
394		t->kcov_area = kcov->area;
395		/* See comment in check_kcov_mode(). */
396		barrier();
397		WRITE_ONCE(t->kcov_mode, kcov->mode);
398		t->kcov = kcov;
399		kcov->t = t;
400		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
401		kcov_get(kcov);
402		return 0;
403	case KCOV_DISABLE:
404		/* Disable coverage for the current task. */
405		unused = arg;
406		if (unused != 0 || current->kcov != kcov)
407			return -EINVAL;
408		t = current;
409		if (WARN_ON(kcov->t != t))
410			return -EINVAL;
411		kcov_task_init(t);
412		kcov->t = NULL;
413		kcov->mode = KCOV_MODE_INIT;
414		kcov_put(kcov);
415		return 0;
416	default:
417		return -ENOTTY;
418	}
419}
420
421static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
422{
423	struct kcov *kcov;
424	int res;
425
426	kcov = filep->private_data;
427	spin_lock(&kcov->lock);
428	res = kcov_ioctl_locked(kcov, cmd, arg);
429	spin_unlock(&kcov->lock);
430	return res;
431}
432
433static const struct file_operations kcov_fops = {
434	.open		= kcov_open,
435	.unlocked_ioctl	= kcov_ioctl,
436	.compat_ioctl	= kcov_ioctl,
437	.mmap		= kcov_mmap,
438	.release        = kcov_close,
439};
440
441static int __init kcov_init(void)
442{
443	/*
444	 * The kcov debugfs file won't ever get removed and thus,
445	 * there is no need to protect it against removal races. The
446	 * use of debugfs_create_file_unsafe() is actually safe here.
447	 */
448	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);
449
450	return 0;
451}
452
453device_initcall(kcov_init);
kernel/kcov.c (v6.8)
   1// SPDX-License-Identifier: GPL-2.0
   2#define pr_fmt(fmt) "kcov: " fmt
   3
   4#define DISABLE_BRANCH_PROFILING
   5#include <linux/atomic.h>
   6#include <linux/compiler.h>
   7#include <linux/errno.h>
   8#include <linux/export.h>
   9#include <linux/types.h>
  10#include <linux/file.h>
  11#include <linux/fs.h>
  12#include <linux/hashtable.h>
  13#include <linux/init.h>
  14#include <linux/kmsan-checks.h>
  15#include <linux/mm.h>
  16#include <linux/preempt.h>
  17#include <linux/printk.h>
  18#include <linux/sched.h>
  19#include <linux/slab.h>
  20#include <linux/spinlock.h>
  21#include <linux/vmalloc.h>
  22#include <linux/debugfs.h>
  23#include <linux/uaccess.h>
  24#include <linux/kcov.h>
  25#include <linux/refcount.h>
  26#include <linux/log2.h>
  27#include <asm/setup.h>
  28
  29#define kcov_debug(fmt, ...) pr_debug("%s: " fmt, __func__, ##__VA_ARGS__)
  30
  31/* Number of 64-bit words written per one comparison: */
  32#define KCOV_WORDS_PER_CMP 4
  33
  34/*
  35 * kcov descriptor (one per opened debugfs file).
  36 * State transitions of the descriptor:
  37 *  - initial state after open()
  38 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
  39 *  - then, mmap() call (several calls are allowed but not useful)
  40 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
  41 *	KCOV_TRACE_PC - to trace only the PCs
  42 *	or
  43 *	KCOV_TRACE_CMP - to trace only the comparison operands
  44 *  - then, ioctl(KCOV_DISABLE) to disable the task.
  45 * Enabling/disabling ioctls can be repeated (only one task at a time is allowed).
  46 */
  47struct kcov {
  48	/*
  49	 * Reference counter. We keep one for:
  50	 *  - opened file descriptor
  51	 *  - task with enabled coverage (we can't unwire it from another task)
  52	 *  - each code section for remote coverage collection
  53	 */
  54	refcount_t		refcount;
  55	/* The lock protects mode, size, area and t. */
  56	spinlock_t		lock;
  57	enum kcov_mode		mode;
  58	/* Size of arena (in long's). */
  59	unsigned int		size;
  60	/* Coverage buffer shared with user space. */
  61	void			*area;
  62	/* Task for which we collect coverage, or NULL. */
  63	struct task_struct	*t;
  64	/* Collecting coverage from remote (background) threads. */
  65	bool			remote;
  66	/* Size of remote area (in long's). */
  67	unsigned int		remote_size;
  68	/*
  69	 * Sequence is incremented each time kcov is reenabled, used by
  70	 * kcov_remote_stop(), see the comment there.
  71	 */
  72	int			sequence;
  73};
  74
  75struct kcov_remote_area {
  76	struct list_head	list;
  77	unsigned int		size;
  78};
  79
  80struct kcov_remote {
  81	u64			handle;
  82	struct kcov		*kcov;
  83	struct hlist_node	hnode;
  84};
  85
  86static DEFINE_SPINLOCK(kcov_remote_lock);
  87static DEFINE_HASHTABLE(kcov_remote_map, 4);
  88static struct list_head kcov_remote_areas = LIST_HEAD_INIT(kcov_remote_areas);
  89
  90struct kcov_percpu_data {
  91	void			*irq_area;
  92	local_lock_t		lock;
  93
  94	unsigned int		saved_mode;
  95	unsigned int		saved_size;
  96	void			*saved_area;
  97	struct kcov		*saved_kcov;
  98	int			saved_sequence;
  99};
 100
 101static DEFINE_PER_CPU(struct kcov_percpu_data, kcov_percpu_data) = {
 102	.lock = INIT_LOCAL_LOCK(lock),
 103};
 104
 105/* Must be called with kcov_remote_lock locked. */
 106static struct kcov_remote *kcov_remote_find(u64 handle)
 107{
 108	struct kcov_remote *remote;
 109
 110	hash_for_each_possible(kcov_remote_map, remote, hnode, handle) {
 111		if (remote->handle == handle)
 112			return remote;
 113	}
 114	return NULL;
 115}
 116
 117/* Must be called with kcov_remote_lock locked. */
 118static struct kcov_remote *kcov_remote_add(struct kcov *kcov, u64 handle)
 119{
 120	struct kcov_remote *remote;
 121
 122	if (kcov_remote_find(handle))
 123		return ERR_PTR(-EEXIST);
 124	remote = kmalloc(sizeof(*remote), GFP_ATOMIC);
 125	if (!remote)
 126		return ERR_PTR(-ENOMEM);
 127	remote->handle = handle;
 128	remote->kcov = kcov;
 129	hash_add(kcov_remote_map, &remote->hnode, handle);
 130	return remote;
 131}
 132
 133/* Must be called with kcov_remote_lock locked. */
 134static struct kcov_remote_area *kcov_remote_area_get(unsigned int size)
 135{
 136	struct kcov_remote_area *area;
 137	struct list_head *pos;
 138
 139	list_for_each(pos, &kcov_remote_areas) {
 140		area = list_entry(pos, struct kcov_remote_area, list);
 141		if (area->size == size) {
 142			list_del(&area->list);
 143			return area;
 144		}
 145	}
 146	return NULL;
 147}
 148
 149/* Must be called with kcov_remote_lock locked. */
 150static void kcov_remote_area_put(struct kcov_remote_area *area,
 151					unsigned int size)
 152{
 153	INIT_LIST_HEAD(&area->list);
 154	area->size = size;
 155	list_add(&area->list, &kcov_remote_areas);
 156	/*
 157	 * KMSAN doesn't instrument this file, so it may not know area->list
 158	 * is initialized. Unpoison it explicitly to avoid reports in
 159	 * kcov_remote_area_get().
 160	 */
 161	kmsan_unpoison_memory(&area->list, sizeof(area->list));
 162}
 163
 164static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
 165{
 166	unsigned int mode;
 167
 168	/*
 169	 * We are interested in code coverage as a function of syscall inputs,
 170	 * so we ignore code executed in interrupts, unless we are in a remote
 171	 * coverage collection section in a softirq.
 172	 */
 173	if (!in_task() && !(in_serving_softirq() && t->kcov_softirq))
 174		return false;
 175	mode = READ_ONCE(t->kcov_mode);
 176	/*
 177	 * There is some code that runs in interrupts but for which
 178	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
 179	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
 180	 * interrupts, there are paired barrier()/WRITE_ONCE() in
 181	 * kcov_start().
 182	 */
 183	barrier();
 184	return mode == needed_mode;
 185}
 186
 187static notrace unsigned long canonicalize_ip(unsigned long ip)
 188{
 189#ifdef CONFIG_RANDOMIZE_BASE
 190	ip -= kaslr_offset();
 191#endif
 192	return ip;
 193}
 194
 195/*
 196 * Entry point from instrumented code.
 197 * This is called once per basic-block/edge.
 198 */
 199void notrace __sanitizer_cov_trace_pc(void)
 200{
 201	struct task_struct *t;
 202	unsigned long *area;
 203	unsigned long ip = canonicalize_ip(_RET_IP_);
 204	unsigned long pos;
 205
 206	t = current;
 207	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
 208		return;
 209
 210	area = t->kcov_area;
 211	/* The first 64-bit word is the number of subsequent PCs. */
 212	pos = READ_ONCE(area[0]) + 1;
 213	if (likely(pos < t->kcov_size)) {
 214		/* Previously we wrote the pc before updating pos. However, some
 215		 * early interrupt code could bypass the check_kcov_mode() check
 216		 * and invoke __sanitizer_cov_trace_pc(). If such an interrupt is
 217		 * raised between writing the pc and updating pos, the pc could be
 218		 * overwritten by the recursive __sanitizer_cov_trace_pc().
 219		 * Update pos before writing the pc to avoid such interleaving.
 220		 */
 221		WRITE_ONCE(area[0], pos);
 222		barrier();
 223		area[pos] = ip;
 224	}
 225}
 226EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
 227
 228#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
 229static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
 230{
 231	struct task_struct *t;
 232	u64 *area;
 233	u64 count, start_index, end_pos, max_pos;
 234
 235	t = current;
 236	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
 237		return;
 238
 239	ip = canonicalize_ip(ip);
 240
 241	/*
 242	 * We write all comparison arguments and types as u64.
 243	 * The buffer was allocated for t->kcov_size unsigned longs.
 244	 */
 245	area = (u64 *)t->kcov_area;
 246	max_pos = t->kcov_size * sizeof(unsigned long);
 247
 248	count = READ_ONCE(area[0]);
 249
 250	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
 251	start_index = 1 + count * KCOV_WORDS_PER_CMP;
 252	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
 253	if (likely(end_pos <= max_pos)) {
 254		/* See comment in __sanitizer_cov_trace_pc(). */
 255		WRITE_ONCE(area[0], count + 1);
 256		barrier();
 257		area[start_index] = type;
 258		area[start_index + 1] = arg1;
 259		area[start_index + 2] = arg2;
 260		area[start_index + 3] = ip;
 261	}
 262}
 263
 264void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
 265{
 266	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
 267}
 268EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);
 269
 270void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
 271{
 272	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
 273}
 274EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);
 275
 276void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
 277{
 278	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
 279}
 280EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);
 281
 282void notrace __sanitizer_cov_trace_cmp8(kcov_u64 arg1, kcov_u64 arg2)
 283{
 284	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
 285}
 286EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);
 287
 288void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
 289{
 290	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
 291			_RET_IP_);
 292}
 293EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);
 294
 295void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
 296{
 297	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
 298			_RET_IP_);
 299}
 300EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);
 301
 302void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
 303{
 304	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
 305			_RET_IP_);
 306}
 307EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);
 308
 309void notrace __sanitizer_cov_trace_const_cmp8(kcov_u64 arg1, kcov_u64 arg2)
 310{
 311	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
 312			_RET_IP_);
 313}
 314EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);
 315
 316void notrace __sanitizer_cov_trace_switch(kcov_u64 val, void *arg)
 317{
 318	u64 i;
 319	u64 *cases = arg;
 320	u64 count = cases[0];
 321	u64 size = cases[1];
 322	u64 type = KCOV_CMP_CONST;
 323
 324	switch (size) {
 325	case 8:
 326		type |= KCOV_CMP_SIZE(0);
 327		break;
 328	case 16:
 329		type |= KCOV_CMP_SIZE(1);
 330		break;
 331	case 32:
 332		type |= KCOV_CMP_SIZE(2);
 333		break;
 334	case 64:
 335		type |= KCOV_CMP_SIZE(3);
 336		break;
 337	default:
 338		return;
 339	}
 340	for (i = 0; i < count; i++)
 341		write_comp_data(type, cases[i + 2], val, _RET_IP_);
 342}
 343EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
 344#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
 345
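/*
 * Editor's note (not part of kcov.c): for __sanitizer_cov_trace_switch()
 * the instrumentation passes a constant table: cases[0] is the number of
 * case values, cases[1] is the operand width in bits, and the values
 * themselves follow. A hypothetical "switch (x)" over a u32 with cases 1
 * and 5 would reach this hook roughly as:
 *
 *	static const u64 cases[] = { 2, 32, 1, 5 };
 *	__sanitizer_cov_trace_switch(x, (void *)cases);
 *
 * and be recorded as two KCOV_CMP_CONST comparisons of x against 1 and 5.
 */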
 346static void kcov_start(struct task_struct *t, struct kcov *kcov,
 347			unsigned int size, void *area, enum kcov_mode mode,
 348			int sequence)
 349{
 350	kcov_debug("t = %px, size = %u, area = %px\n", t, size, area);
 351	t->kcov = kcov;
 352	/* Cache in task struct for performance. */
 353	t->kcov_size = size;
 354	t->kcov_area = area;
 355	t->kcov_sequence = sequence;
 356	/* See comment in check_kcov_mode(). */
 357	barrier();
 358	WRITE_ONCE(t->kcov_mode, mode);
 359}
 360
 361static void kcov_stop(struct task_struct *t)
 362{
 363	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
 364	barrier();
 365	t->kcov = NULL;
 366	t->kcov_size = 0;
 367	t->kcov_area = NULL;
 368}
 369
 370static void kcov_task_reset(struct task_struct *t)
 371{
 372	kcov_stop(t);
 373	t->kcov_sequence = 0;
 374	t->kcov_handle = 0;
 375}
 376
 377void kcov_task_init(struct task_struct *t)
 378{
 379	kcov_task_reset(t);
 380	t->kcov_handle = current->kcov_handle;
 381}
 382
 383static void kcov_reset(struct kcov *kcov)
 384{
 385	kcov->t = NULL;
 386	kcov->mode = KCOV_MODE_INIT;
 387	kcov->remote = false;
 388	kcov->remote_size = 0;
 389	kcov->sequence++;
 390}
 391
 392static void kcov_remote_reset(struct kcov *kcov)
 393{
 394	int bkt;
 395	struct kcov_remote *remote;
 396	struct hlist_node *tmp;
 397	unsigned long flags;
 398
 399	spin_lock_irqsave(&kcov_remote_lock, flags);
 400	hash_for_each_safe(kcov_remote_map, bkt, tmp, remote, hnode) {
 401		if (remote->kcov != kcov)
 402			continue;
 403		hash_del(&remote->hnode);
 404		kfree(remote);
 405	}
 406	/* Do reset before unlock to prevent races with kcov_remote_start(). */
 407	kcov_reset(kcov);
 408	spin_unlock_irqrestore(&kcov_remote_lock, flags);
 409}
 410
 411static void kcov_disable(struct task_struct *t, struct kcov *kcov)
 412{
 413	kcov_task_reset(t);
 414	if (kcov->remote)
 415		kcov_remote_reset(kcov);
 416	else
 417		kcov_reset(kcov);
 418}
 419
 420static void kcov_get(struct kcov *kcov)
 421{
 422	refcount_inc(&kcov->refcount);
 423}
 424
 425static void kcov_put(struct kcov *kcov)
 426{
 427	if (refcount_dec_and_test(&kcov->refcount)) {
 428		kcov_remote_reset(kcov);
 429		vfree(kcov->area);
 430		kfree(kcov);
 431	}
 432}
 433
 434void kcov_task_exit(struct task_struct *t)
 435{
 436	struct kcov *kcov;
 437	unsigned long flags;
 438
 439	kcov = t->kcov;
 440	if (kcov == NULL)
 441		return;
 442
 443	spin_lock_irqsave(&kcov->lock, flags);
 444	kcov_debug("t = %px, kcov->t = %px\n", t, kcov->t);
 445	/*
 446	 * For KCOV_ENABLE devices we want to make sure that t->kcov->t == t,
 447	 * which comes down to:
 448	 *        WARN_ON(!kcov->remote && kcov->t != t);
 449	 *
 450	 * For KCOV_REMOTE_ENABLE devices, the exiting task is either:
 451	 *
 452	 * 1. A remote task between kcov_remote_start() and kcov_remote_stop().
 453	 *    In this case we should print a warning right away, since a task
 454	 *    shouldn't be exiting when it's in a kcov coverage collection
 455	 *    section. Here t points to the task that is collecting remote
 456	 *    coverage, and t->kcov->t points to the thread that created the
 457	 *    kcov device. Which means that to detect this case we need to
 458	 *    check that t != t->kcov->t, and this gives us the following:
 459	 *        WARN_ON(kcov->remote && kcov->t != t);
 460	 *
 461	 * 2. The task that created kcov exiting without calling KCOV_DISABLE,
 462	 *    and then again we make sure that t->kcov->t == t:
 463	 *        WARN_ON(kcov->remote && kcov->t != t);
 464	 *
 465	 * By combining all three checks into one we get:
 466	 */
 467	if (WARN_ON(kcov->t != t)) {
 468		spin_unlock_irqrestore(&kcov->lock, flags);
 469		return;
 470	}
 471	/* Just to not leave dangling references behind. */
 472	kcov_disable(t, kcov);
 473	spin_unlock_irqrestore(&kcov->lock, flags);
 474	kcov_put(kcov);
 475}
 476
 477static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
 478{
 479	int res = 0;
 480	struct kcov *kcov = vma->vm_file->private_data;
 481	unsigned long size, off;
 482	struct page *page;
 483	unsigned long flags;
 484
 485	spin_lock_irqsave(&kcov->lock, flags);
 486	size = kcov->size * sizeof(unsigned long);
 487	if (kcov->area == NULL || vma->vm_pgoff != 0 ||
 488	    vma->vm_end - vma->vm_start != size) {
 489		res = -EINVAL;
 490		goto exit;
 491	}
 492	spin_unlock_irqrestore(&kcov->lock, flags);
 493	vm_flags_set(vma, VM_DONTEXPAND);
 494	for (off = 0; off < size; off += PAGE_SIZE) {
 495		page = vmalloc_to_page(kcov->area + off);
 496		res = vm_insert_page(vma, vma->vm_start + off, page);
 497		if (res) {
 498			pr_warn_once("kcov: vm_insert_page() failed\n");
 499			return res;
 500		}
 501	}
 502	return 0;
 503exit:
 504	spin_unlock_irqrestore(&kcov->lock, flags);
 505	return res;
 506}
 507
 508static int kcov_open(struct inode *inode, struct file *filep)
 509{
 510	struct kcov *kcov;
 511
 512	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
 513	if (!kcov)
 514		return -ENOMEM;
 515	kcov->mode = KCOV_MODE_DISABLED;
 516	kcov->sequence = 1;
 517	refcount_set(&kcov->refcount, 1);
 518	spin_lock_init(&kcov->lock);
 519	filep->private_data = kcov;
 520	return nonseekable_open(inode, filep);
 521}
 522
 523static int kcov_close(struct inode *inode, struct file *filep)
 524{
 525	kcov_put(filep->private_data);
 526	return 0;
 527}
 528
 529static int kcov_get_mode(unsigned long arg)
 530{
 531	if (arg == KCOV_TRACE_PC)
 532		return KCOV_MODE_TRACE_PC;
 533	else if (arg == KCOV_TRACE_CMP)
 534#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
 535		return KCOV_MODE_TRACE_CMP;
 536#else
 537		return -ENOTSUPP;
 538#endif
 539	else
 540		return -EINVAL;
 541}
 542
 543/*
 544 * Fault in a lazily-faulted vmalloc area before it can be used by
 545 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 546 * vmalloc fault handling path is instrumented.
 547 */
 548static void kcov_fault_in_area(struct kcov *kcov)
 549{
 550	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
 551	unsigned long *area = kcov->area;
 552	unsigned long offset;
 553
 554	for (offset = 0; offset < kcov->size; offset += stride)
 555		READ_ONCE(area[offset]);
 556}
 557
 558static inline bool kcov_check_handle(u64 handle, bool common_valid,
 559				bool uncommon_valid, bool zero_valid)
 560{
 561	if (handle & ~(KCOV_SUBSYSTEM_MASK | KCOV_INSTANCE_MASK))
 562		return false;
 563	switch (handle & KCOV_SUBSYSTEM_MASK) {
 564	case KCOV_SUBSYSTEM_COMMON:
 565		return (handle & KCOV_INSTANCE_MASK) ?
 566			common_valid : zero_valid;
 567	case KCOV_SUBSYSTEM_USB:
 568		return uncommon_valid;
 569	default:
 570		return false;
 571	}
 572	return false;
 573}
 574
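/*
 * Editor's note (not part of kcov.c): per include/uapi/linux/kcov.h, a
 * handle packs a one-byte subsystem id into the top byte and a four-byte
 * instance id into the low bytes; the first check above rejects handles
 * with any of the intermediate bits set. For instance, the USB subsystem
 * uses the bus number as the instance id:
 *
 *	u64 handle = kcov_remote_handle(KCOV_SUBSYSTEM_USB, busnum);
 */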
 575static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
 576			     unsigned long arg)
 577{
 578	struct task_struct *t;
 579	unsigned long flags, unused;
 580	int mode, i;
 581	struct kcov_remote_arg *remote_arg;
 582	struct kcov_remote *remote;
 583
 584	switch (cmd) {
 585	case KCOV_ENABLE:
 586		/*
 587		 * Enable coverage for the current task.
 588		 * At this point the user must have enabled trace mode,
 589		 * and mmapped the file. Coverage collection is disabled only
 590		 * at task exit or voluntarily via KCOV_DISABLE. After that it can
 591		 * be enabled for another task.
 592		 */
 593		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
 594			return -EINVAL;
 595		t = current;
 596		if (kcov->t != NULL || t->kcov != NULL)
 597			return -EBUSY;
 598		mode = kcov_get_mode(arg);
 599		if (mode < 0)
 600			return mode;
 601		kcov_fault_in_area(kcov);
 602		kcov->mode = mode;
 603		kcov_start(t, kcov, kcov->size, kcov->area, kcov->mode,
 604				kcov->sequence);
 605		kcov->t = t;
 606		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
 607		kcov_get(kcov);
 608		return 0;
 609	case KCOV_DISABLE:
 610		/* Disable coverage for the current task. */
 611		unused = arg;
 612		if (unused != 0 || current->kcov != kcov)
 613			return -EINVAL;
 614		t = current;
 615		if (WARN_ON(kcov->t != t))
 616			return -EINVAL;
 617		kcov_disable(t, kcov);
 618		kcov_put(kcov);
 619		return 0;
 620	case KCOV_REMOTE_ENABLE:
 621		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
 622			return -EINVAL;
 623		t = current;
 624		if (kcov->t != NULL || t->kcov != NULL)
 625			return -EBUSY;
 626		remote_arg = (struct kcov_remote_arg *)arg;
 627		mode = kcov_get_mode(remote_arg->trace_mode);
 628		if (mode < 0)
 629			return mode;
 630		if (remote_arg->area_size > LONG_MAX / sizeof(unsigned long))
 631			return -EINVAL;
 632		kcov->mode = mode;
 633		t->kcov = kcov;
 634		kcov->t = t;
 635		kcov->remote = true;
 636		kcov->remote_size = remote_arg->area_size;
 637		spin_lock_irqsave(&kcov_remote_lock, flags);
 638		for (i = 0; i < remote_arg->num_handles; i++) {
 639			if (!kcov_check_handle(remote_arg->handles[i],
 640						false, true, false)) {
 641				spin_unlock_irqrestore(&kcov_remote_lock,
 642							flags);
 643				kcov_disable(t, kcov);
 644				return -EINVAL;
 645			}
 646			remote = kcov_remote_add(kcov, remote_arg->handles[i]);
 647			if (IS_ERR(remote)) {
 648				spin_unlock_irqrestore(&kcov_remote_lock,
 649							flags);
 650				kcov_disable(t, kcov);
 651				return PTR_ERR(remote);
 652			}
 653		}
 654		if (remote_arg->common_handle) {
 655			if (!kcov_check_handle(remote_arg->common_handle,
 656						true, false, false)) {
 657				spin_unlock_irqrestore(&kcov_remote_lock,
 658							flags);
 659				kcov_disable(t, kcov);
 660				return -EINVAL;
 661			}
 662			remote = kcov_remote_add(kcov,
 663					remote_arg->common_handle);
 664			if (IS_ERR(remote)) {
 665				spin_unlock_irqrestore(&kcov_remote_lock,
 666							flags);
 667				kcov_disable(t, kcov);
 668				return PTR_ERR(remote);
 669			}
 670			t->kcov_handle = remote_arg->common_handle;
 671		}
 672		spin_unlock_irqrestore(&kcov_remote_lock, flags);
 673		/* Put either in kcov_task_exit() or in KCOV_DISABLE. */
 674		kcov_get(kcov);
 675		return 0;
 676	default:
 677		return -ENOTTY;
 678	}
 679}
 680
 681static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
 682{
 683	struct kcov *kcov;
 684	int res;
 685	struct kcov_remote_arg *remote_arg = NULL;
 686	unsigned int remote_num_handles;
 687	unsigned long remote_arg_size;
 688	unsigned long size, flags;
 689	void *area;
 690
 691	kcov = filep->private_data;
 692	switch (cmd) {
 693	case KCOV_INIT_TRACE:
 694		/*
 695		 * Enable kcov in trace mode and set up the buffer size.
 696		 * Must happen before anything else.
 697		 *
 698		 * First check the size argument - it must be at least 2
 699		 * to hold the current position and one PC.
 700		 */
 701		size = arg;
 702		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
 703			return -EINVAL;
 704		area = vmalloc_user(size * sizeof(unsigned long));
 705		if (area == NULL)
 706			return -ENOMEM;
 707		spin_lock_irqsave(&kcov->lock, flags);
 708		if (kcov->mode != KCOV_MODE_DISABLED) {
 709			spin_unlock_irqrestore(&kcov->lock, flags);
 710			vfree(area);
 711			return -EBUSY;
 712		}
 713		kcov->area = area;
 714		kcov->size = size;
 715		kcov->mode = KCOV_MODE_INIT;
 716		spin_unlock_irqrestore(&kcov->lock, flags);
 717		return 0;
 718	case KCOV_REMOTE_ENABLE:
 719		if (get_user(remote_num_handles, (unsigned __user *)(arg +
 720				offsetof(struct kcov_remote_arg, num_handles))))
 721			return -EFAULT;
 722		if (remote_num_handles > KCOV_REMOTE_MAX_HANDLES)
 723			return -EINVAL;
 724		remote_arg_size = struct_size(remote_arg, handles,
 725					remote_num_handles);
 726		remote_arg = memdup_user((void __user *)arg, remote_arg_size);
 727		if (IS_ERR(remote_arg))
 728			return PTR_ERR(remote_arg);
 729		if (remote_arg->num_handles != remote_num_handles) {
 730			kfree(remote_arg);
 731			return -EINVAL;
 732		}
 733		arg = (unsigned long)remote_arg;
 734		fallthrough;
 735	default:
 736		/*
 737		 * All other commands can be normally executed under a spin lock, so we
 738		 * obtain and release it here in order to simplify kcov_ioctl_locked().
 739		 */
 740		spin_lock_irqsave(&kcov->lock, flags);
 741		res = kcov_ioctl_locked(kcov, cmd, arg);
 742		spin_unlock_irqrestore(&kcov->lock, flags);
 743		kfree(remote_arg);
 744		return res;
 745	}
 746}
 747
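/*
 * Editor's sketch (not part of kcov.c): the KCOV_REMOTE_ENABLE marshalling
 * above (get_user() of num_handles, then memdup_user() of the whole
 * variable-length struct) corresponds to a user-space call along the lines
 * of the kcov.rst example; the instance id 0x42 and COVER_SIZE are
 * arbitrary illustrative values:
 *
 *	struct kcov_remote_arg *arg;
 *
 *	arg = calloc(1, sizeof(*arg) + sizeof(__u64));
 *	arg->trace_mode = KCOV_TRACE_PC;
 *	arg->area_size = COVER_SIZE;
 *	arg->num_handles = 1;
 *	arg->handles[0] = kcov_remote_handle(KCOV_SUBSYSTEM_USB, 0x42);
 *	ioctl(fd, KCOV_REMOTE_ENABLE, arg);
 */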
 748static const struct file_operations kcov_fops = {
 749	.open		= kcov_open,
 750	.unlocked_ioctl	= kcov_ioctl,
 751	.compat_ioctl	= kcov_ioctl,
 752	.mmap		= kcov_mmap,
 753	.release        = kcov_close,
 754};
 755
 756/*
 757 * kcov_remote_start() and kcov_remote_stop() can be used to annotate a section
 758 * of code in a kernel background thread or in a softirq to allow kcov to be
 759 * used to collect coverage from that part of code.
 760 *
 761 * The handle argument of kcov_remote_start() identifies a code section that is
 762 * used for coverage collection. A userspace process passes this handle to
 763 * the KCOV_REMOTE_ENABLE ioctl to make the opened kcov device start collecting
 764 * coverage for the code section identified by this handle.
 765 *
 766 * The usage of these annotations in the kernel code is different depending on
 767 * the type of the kernel thread whose code is being annotated.
 768 *
 769 * For global kernel threads that are spawned in a limited number of instances
 770 * (e.g. one USB hub_event() worker thread is spawned per USB HCD) and for
 771 * softirqs, each instance must be assigned a unique 4-byte instance id. The
 772 * instance id is then combined with a 1-byte subsystem id to get a handle via
 773 * kcov_remote_handle(subsystem_id, instance_id).
 774 *
 775 * For local kernel threads that are spawned from system calls handler when a
 776 * user interacts with some kernel interface (e.g. vhost workers), a handle is
 777 * passed from a userspace process as the common_handle field of the
 778 * kcov_remote_arg struct (note, that the user must generate a handle by using
 779 * kcov_remote_handle() with KCOV_SUBSYSTEM_COMMON as the subsystem id and an
 780 * arbitrary 4-byte non-zero number as the instance id). This common handle
 781 * then gets saved into the task_struct of the process that issued the
 782 * KCOV_REMOTE_ENABLE ioctl. When this process issues system calls that spawn
 783 * kernel threads, the common handle must be retrieved via kcov_common_handle()
 784 * and passed to the spawned threads via custom annotations. Those kernel
 785 * threads must in turn be annotated with kcov_remote_start(common_handle) and
 786 * kcov_remote_stop(). All of the threads that are spawned by the same process
 787 * obtain the same handle, hence the name "common".
 788 *
 789 * See Documentation/dev-tools/kcov.rst for more details.
 790 *
 791 * Internally, kcov_remote_start() looks up the kcov device associated with the
 792 * provided handle, allocates an area for coverage collection, and saves the
 793 * pointers to kcov and area into the current task_struct to allow coverage to
 794 * be collected via __sanitizer_cov_trace_pc().
 795 * In turn, kcov_remote_stop() clears those pointers from task_struct to stop
 796 * collecting coverage and copies all collected coverage into the kcov area.
 797 */
 798
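/*
 * Editor's sketch (not part of kcov.c): a kernel-side annotation following
 * the scheme above looks roughly like this; my_worker(), my_work and
 * work->instance_id are hypothetical (see e.g. hub_event() in
 * drivers/usb/core/hub.c for a real user):
 *
 *	static void my_worker(struct my_work *work)
 *	{
 *		kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB,
 *						     work->instance_id));
 *		process_the_work(work);		// coverage is collected here
 *		kcov_remote_stop();
 *	}
 *
 * For "local" threads, the spawning syscall saves kcov_common_handle()
 * into the work item and the worker passes that saved handle to
 * kcov_remote_start() instead.
 */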
 799static inline bool kcov_mode_enabled(unsigned int mode)
 800{
 801	return (mode & ~KCOV_IN_CTXSW) != KCOV_MODE_DISABLED;
 802}
 803
 804static void kcov_remote_softirq_start(struct task_struct *t)
 805{
 806	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
 807	unsigned int mode;
 808
 809	mode = READ_ONCE(t->kcov_mode);
 810	barrier();
 811	if (kcov_mode_enabled(mode)) {
 812		data->saved_mode = mode;
 813		data->saved_size = t->kcov_size;
 814		data->saved_area = t->kcov_area;
 815		data->saved_sequence = t->kcov_sequence;
 816		data->saved_kcov = t->kcov;
 817		kcov_stop(t);
 818	}
 819}
 820
 821static void kcov_remote_softirq_stop(struct task_struct *t)
 822{
 823	struct kcov_percpu_data *data = this_cpu_ptr(&kcov_percpu_data);
 824
 825	if (data->saved_kcov) {
 826		kcov_start(t, data->saved_kcov, data->saved_size,
 827				data->saved_area, data->saved_mode,
 828				data->saved_sequence);
 829		data->saved_mode = 0;
 830		data->saved_size = 0;
 831		data->saved_area = NULL;
 832		data->saved_sequence = 0;
 833		data->saved_kcov = NULL;
 834	}
 835}
 836
 837void kcov_remote_start(u64 handle)
 838{
 839	struct task_struct *t = current;
 840	struct kcov_remote *remote;
 841	struct kcov *kcov;
 842	unsigned int mode;
 843	void *area;
 844	unsigned int size;
 845	int sequence;
 846	unsigned long flags;
 847
 848	if (WARN_ON(!kcov_check_handle(handle, true, true, true)))
 849		return;
 850	if (!in_task() && !in_serving_softirq())
 851		return;
 852
 853	local_lock_irqsave(&kcov_percpu_data.lock, flags);
 854
 855	/*
 856	 * Check that kcov_remote_start() is not called twice in background
 857	 * threads nor called by user tasks (with enabled kcov).
 858	 */
 859	mode = READ_ONCE(t->kcov_mode);
 860	if (WARN_ON(in_task() && kcov_mode_enabled(mode))) {
 861		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
 862		return;
 863	}
 864	/*
 865	 * Check that kcov_remote_start() is not called twice in softirqs.
 866	 * Note that kcov_remote_start() can be called from a softirq that
 867	 * happened while collecting coverage from a background thread.
 868	 */
 869	if (WARN_ON(in_serving_softirq() && t->kcov_softirq)) {
 870		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
 871		return;
 872	}
 873
 874	spin_lock(&kcov_remote_lock);
 875	remote = kcov_remote_find(handle);
 876	if (!remote) {
 877		spin_unlock(&kcov_remote_lock);
 878		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
 879		return;
 880	}
 881	kcov_debug("handle = %llx, context: %s\n", handle,
 882			in_task() ? "task" : "softirq");
 883	kcov = remote->kcov;
 884	/* Put in kcov_remote_stop(). */
 885	kcov_get(kcov);
 886	/*
 887	 * Read kcov fields before unlock to prevent races with
 888	 * KCOV_DISABLE / kcov_remote_reset().
 889	 */
 890	mode = kcov->mode;
 891	sequence = kcov->sequence;
 892	if (in_task()) {
 893		size = kcov->remote_size;
 894		area = kcov_remote_area_get(size);
 895	} else {
 896		size = CONFIG_KCOV_IRQ_AREA_SIZE;
 897		area = this_cpu_ptr(&kcov_percpu_data)->irq_area;
 898	}
 899	spin_unlock(&kcov_remote_lock);
 900
 901	/* Can only happen when in_task(). */
 902	if (!area) {
 903		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
 904		area = vmalloc(size * sizeof(unsigned long));
 905		if (!area) {
 906			kcov_put(kcov);
 907			return;
 908		}
 909		local_lock_irqsave(&kcov_percpu_data.lock, flags);
 910	}
 911
 912	/* Reset coverage size. */
 913	*(u64 *)area = 0;
 914
 915	if (in_serving_softirq()) {
 916		kcov_remote_softirq_start(t);
 917		t->kcov_softirq = 1;
 918	}
 919	kcov_start(t, kcov, size, area, mode, sequence);
 920
 921	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
 922
 923}
 924EXPORT_SYMBOL(kcov_remote_start);
 925
 926static void kcov_move_area(enum kcov_mode mode, void *dst_area,
 927				unsigned int dst_area_size, void *src_area)
 928{
 929	u64 word_size = sizeof(unsigned long);
 930	u64 count_size, entry_size_log;
 931	u64 dst_len, src_len;
 932	void *dst_entries, *src_entries;
 933	u64 dst_occupied, dst_free, bytes_to_move, entries_moved;
 934
 935	kcov_debug("%px %u <= %px %lu\n",
 936		dst_area, dst_area_size, src_area, *(unsigned long *)src_area);
 937
 938	switch (mode) {
 939	case KCOV_MODE_TRACE_PC:
 940		dst_len = READ_ONCE(*(unsigned long *)dst_area);
 941		src_len = *(unsigned long *)src_area;
 942		count_size = sizeof(unsigned long);
 943		entry_size_log = __ilog2_u64(sizeof(unsigned long));
 944		break;
 945	case KCOV_MODE_TRACE_CMP:
 946		dst_len = READ_ONCE(*(u64 *)dst_area);
 947		src_len = *(u64 *)src_area;
 948		count_size = sizeof(u64);
 949		BUILD_BUG_ON(!is_power_of_2(KCOV_WORDS_PER_CMP));
 950		entry_size_log = __ilog2_u64(sizeof(u64) * KCOV_WORDS_PER_CMP);
 951		break;
 952	default:
 953		WARN_ON(1);
 954		return;
 955	}
 956
 957	/* As arm can't divide u64 integers, use the log of the entry size. */
 958	if (dst_len > ((dst_area_size * word_size - count_size) >>
 959				entry_size_log))
 960		return;
 961	dst_occupied = count_size + (dst_len << entry_size_log);
 962	dst_free = dst_area_size * word_size - dst_occupied;
 963	bytes_to_move = min(dst_free, src_len << entry_size_log);
 964	dst_entries = dst_area + dst_occupied;
 965	src_entries = src_area + count_size;
 966	memcpy(dst_entries, src_entries, bytes_to_move);
 967	entries_moved = bytes_to_move >> entry_size_log;
 968
 969	switch (mode) {
 970	case KCOV_MODE_TRACE_PC:
 971		WRITE_ONCE(*(unsigned long *)dst_area, dst_len + entries_moved);
 972		break;
 973	case KCOV_MODE_TRACE_CMP:
 974		WRITE_ONCE(*(u64 *)dst_area, dst_len + entries_moved);
 975		break;
 976	default:
 977		break;
 978	}
 979}
 980
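/*
 * Editor's note (not part of kcov.c): the shifts above stand in for a
 * 64-bit division, which 32-bit arm cannot do natively. In
 * KCOV_MODE_TRACE_CMP, for example, entry_size_log =
 * ilog2(sizeof(u64) * KCOV_WORDS_PER_CMP) = ilog2(32) = 5, so
 * "dst_len << 5" converts a record count to bytes and
 * "bytes_to_move >> 5" converts bytes back to whole records.
 */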
 981/* See the comment before kcov_remote_start() for usage details. */
 982void kcov_remote_stop(void)
 983{
 984	struct task_struct *t = current;
 985	struct kcov *kcov;
 986	unsigned int mode;
 987	void *area;
 988	unsigned int size;
 989	int sequence;
 990	unsigned long flags;
 991
 992	if (!in_task() && !in_serving_softirq())
 993		return;
 994
 995	local_lock_irqsave(&kcov_percpu_data.lock, flags);
 996
 997	mode = READ_ONCE(t->kcov_mode);
 998	barrier();
 999	if (!kcov_mode_enabled(mode)) {
1000		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
1001		return;
1002	}
1003	/*
1004	 * When in softirq, check if the corresponding kcov_remote_start()
1005	 * actually found the remote handle and started collecting coverage.
1006	 */
1007	if (in_serving_softirq() && !t->kcov_softirq) {
1008		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
1009		return;
1010	}
1011	/* Make sure that kcov_softirq is only set when in softirq. */
1012	if (WARN_ON(!in_serving_softirq() && t->kcov_softirq)) {
1013		local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
1014		return;
1015	}
1016
1017	kcov = t->kcov;
1018	area = t->kcov_area;
1019	size = t->kcov_size;
1020	sequence = t->kcov_sequence;
1021
1022	kcov_stop(t);
1023	if (in_serving_softirq()) {
1024		t->kcov_softirq = 0;
1025		kcov_remote_softirq_stop(t);
1026	}
1027
1028	spin_lock(&kcov->lock);
1029	/*
1030	 * KCOV_DISABLE could have been called between kcov_remote_start()
1031	 * and kcov_remote_stop(), hence the sequence check.
1032	 */
1033	if (sequence == kcov->sequence && kcov->remote)
1034		kcov_move_area(kcov->mode, kcov->area, kcov->size, area);
1035	spin_unlock(&kcov->lock);
1036
1037	if (in_task()) {
1038		spin_lock(&kcov_remote_lock);
1039		kcov_remote_area_put(area, size);
1040		spin_unlock(&kcov_remote_lock);
1041	}
1042
1043	local_unlock_irqrestore(&kcov_percpu_data.lock, flags);
1044
1045	/* Get in kcov_remote_start(). */
1046	kcov_put(kcov);
1047}
1048EXPORT_SYMBOL(kcov_remote_stop);
1049
1050/* See the comment before kcov_remote_start() for usage details. */
1051u64 kcov_common_handle(void)
1052{
1053	if (!in_task())
1054		return 0;
1055	return current->kcov_handle;
1056}
1057EXPORT_SYMBOL(kcov_common_handle);
1058
1059static int __init kcov_init(void)
1060{
1061	int cpu;
1062
1063	for_each_possible_cpu(cpu) {
1064		void *area = vmalloc_node(CONFIG_KCOV_IRQ_AREA_SIZE *
1065				sizeof(unsigned long), cpu_to_node(cpu));
1066		if (!area)
1067			return -ENOMEM;
1068		per_cpu_ptr(&kcov_percpu_data, cpu)->irq_area = area;
1069	}
1070
1071	/*
1072	 * The kcov debugfs file won't ever get removed and thus,
1073	 * there is no need to protect it against removal races. The
1074	 * use of debugfs_create_file_unsafe() is actually safe here.
1075	 */
1076	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);
1077
1078	return 0;
1079}
1080
1081device_initcall(kcov_init);