v4.10.11
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time allowed)
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
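
/*
 * Illustrative user-space sketch of the state machine above; a hedged
 * example, not part of kcov.c. It assumes the uapi constants from
 * <linux/kcov.h> (KCOV_INIT_TRACE, KCOV_ENABLE, KCOV_DISABLE) and a
 * debugfs mount at /sys/kernel/debug; error handling is kept terse.
 */
#include <fcntl.h>
#include <linux/kcov.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>

#define COVER_SIZE (64 << 10)	/* arbitrary example size, in words */

int main(void)
{
	unsigned long *cover, n;
	int fd;

	fd = open("/sys/kernel/debug/kcov", O_RDWR);
	if (fd == -1)
		return 1;
	/* One KCOV_INIT_TRACE call: buffer size in unsigned longs. */
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		return 1;
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED)
		return 1;
	/* Enable for this task; at this version the argument must be 0. */
	if (ioctl(fd, KCOV_ENABLE, 0))
		return 1;
	cover[0] = 0;		/* reset the collected-PC counter */
	read(-1, NULL, 0);	/* the syscall whose coverage we want */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	printf("collected %lu PCs\n", n);
	ioctl(fd, KCOV_DISABLE, 0);
	return 0;
}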

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 * The checks for whether we are in an interrupt are open-coded, because
	 * 1. We can't use in_interrupt() here, since it also returns true
	 *    when we are inside a local_bh_disable() section.
	 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
	 *    since that leads to slower generated code (three separate tests,
	 *    one for each of the flags).
	 */
	if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
							| NMI_MASK)))
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;
		unsigned long ip = _RET_IP_;

#ifdef CONFIG_RANDOMIZE_BASE
		ip -= kaslr_offset();
#endif

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts, there are paired barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = ip;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);
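
/*
 * Hedged consumer-side sketch, not part of this file: word 0 of the
 * shared buffer is the PC count and words 1..count are the PCs, so a
 * user-space reader for a buffer mapped as in the example above could be:
 */
#include <stdio.h>

static void dump_cover(const unsigned long *cover)
{
	/* The load pairs with the kernel's WRITE_ONCE(area[0], pos). */
	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	unsigned long i;

	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);
}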

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * memory, which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that
		 * it can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release        = kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);

v4.17
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time allowed).
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
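
/*
 * Hedged sketch, not part of this file: relative to the v4.10.11 flow
 * shown earlier, KCOV_ENABLE now takes a trace-mode argument instead of
 * a must-be-zero value. The helper names below are ours; they assume a
 * descriptor already initialized and mmapped as in the earlier example.
 */
#include <linux/kcov.h>
#include <sys/ioctl.h>

static int kcov_enable_pc(int fd)
{
	return ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);	/* PCs, as before */
}

static int kcov_enable_cmp(int fd)
{
	/* Requires CONFIG_KCOV_ENABLE_COMPARISONS; else ENOTSUPP below. */
	return ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP);
}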

static bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	enum kcov_mode mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_ioctl_locked().
	 */
	barrier();
	return mode == needed_mode;
}

static unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	/* cases[0] is the number of case values, cases[1] their bit width. */
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	/* The case constants start at cases[2]. */
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
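
/*
 * Hedged decoder sketch, not part of this file: per write_comp_data()
 * above, each record is KCOV_WORDS_PER_CMP u64 words holding type, arg1,
 * arg2 and the canonicalized ip, with word 0 of the buffer again holding
 * the record count. A user-space walk over a KCOV_TRACE_CMP buffer:
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

static void dump_cmps(const uint64_t *area)
{
	uint64_t n = __atomic_load_n(&area[0], __ATOMIC_RELAXED);
	uint64_t i;

	for (i = 0; i < n; i++) {
		/* 4 == KCOV_WORDS_PER_CMP */
		const uint64_t *rec = &area[1 + i * 4];

		printf("type=%" PRIx64 " arg1=%" PRIx64 " arg2=%" PRIx64
		       " ip=%" PRIx64 "\n", rec[0], rec[1], rec[2], rec[3]);
	}
}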

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up the buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold the current position and
		 * one PC. Later we allocate size * sizeof(unsigned long)
		 * memory, which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode and
		 * mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that
		 * it can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		if (arg == KCOV_TRACE_PC)
			kcov->mode = KCOV_MODE_TRACE_PC;
		else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
			kcov->mode = KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
		else
			return -EINVAL;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in check_kcov_mode(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov->mode = KCOV_MODE_INIT;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release        = kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);