kernel/kcov.c, v4.10.11
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <asm/setup.h>

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, repeated enable/disable for a task (only one task at a time allowed)
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	atomic_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
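
To make the state machine above concrete: a minimal userspace consumer, sketched after the documented kcov usage (the ioctl numbers mirror the uapi <linux/kcov.h> definitions; COVER_SIZE is an arbitrary buffer size and error handling is kept terse):

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#define KCOV_INIT_TRACE	_IOR('c', 1, unsigned long)
#define KCOV_ENABLE	_IO('c', 100)
#define KCOV_DISABLE	_IO('c', 101)
#define COVER_SIZE	(64 << 10)	/* in unsigned longs */

int main(void)
{
	unsigned long *cover, n, i;
	int fd;

	/* One descriptor collects coverage for one task at a time. */
	fd = open("/sys/kernel/debug/kcov", O_RDWR);
	if (fd == -1)
		perror("open"), exit(1);
	/* The single KCOV_INIT_TRACE call: set trace mode and buffer size. */
	if (ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
		perror("ioctl"), exit(1);
	/* Map the coverage buffer shared with the kernel. */
	cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
		     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (cover == MAP_FAILED)
		perror("mmap"), exit(1);
	/* Enable collection on the current task (arg must be 0 here). */
	if (ioctl(fd, KCOV_ENABLE, 0))
		perror("ioctl"), exit(1);
	/* Reset the PC count in word 0, then run the syscall of interest. */
	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
	read(-1, NULL, 0);
	/* Word 0 holds the number of PCs; they follow in words 1..n. */
	n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
	for (i = 0; i < n; i++)
		printf("0x%lx\n", cover[i + 1]);
	if (ioctl(fd, KCOV_DISABLE, 0))
		perror("ioctl"), exit(1);
	munmap(cover, COVER_SIZE * sizeof(unsigned long));
	close(fd);
	return 0;
}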

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	enum kcov_mode mode;

	t = current;
	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 * The checks for whether we are in an interrupt are open-coded, because
	 * 1. We can't use in_interrupt() here, since it also returns true
	 *    when we are inside a local_bh_disable() section.
	 * 2. We don't want to use (in_irq() | in_serving_softirq() | in_nmi()),
	 *    since that leads to slower generated code (three separate tests,
	 *    one for each of the flags).
	 */
	if (!t || (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_OFFSET
							| NMI_MASK)))
		return;
	mode = READ_ONCE(t->kcov_mode);
	if (mode == KCOV_MODE_TRACE) {
		unsigned long *area;
		unsigned long pos;
		unsigned long ip = _RET_IP_;

#ifdef CONFIG_RANDOMIZE_BASE
		ip -= kaslr_offset();
#endif

		/*
		 * There is some code that runs in interrupts but for which
		 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
		 * READ_ONCE()/barrier() effectively provides load-acquire wrt
		 * interrupts; there are paired barrier()/WRITE_ONCE() in
		 * kcov_ioctl_locked().
		 */
		barrier();
		area = t->kcov_area;
		/* The first word is the number of subsequent PCs. */
		pos = READ_ONCE(area[0]) + 1;
		if (likely(pos < t->kcov_size)) {
			area[pos] = ip;
			WRITE_ONCE(area[0], pos);
		}
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

static void kcov_get(struct kcov *kcov)
{
	atomic_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (atomic_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	t->kcov_mode = KCOV_MODE_DISABLED;
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode == KCOV_MODE_DISABLED || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	atomic_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_TRACE;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode,
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		unused = arg;
		if (unused != 0 || kcov->mode == KCOV_MODE_DISABLED ||
		    kcov->area == NULL)
			return -EINVAL;
		if (kcov->t != NULL)
			return -EBUSY;
		t = current;
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in __sanitizer_cov_trace_pc(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release        = kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	if (!debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops)) {
		pr_err("failed to create kcov in debugfs\n");
		return -ENOMEM;
	}
	return 0;
}

device_initcall(kcov_init);
kernel/kcov.c, v5.4
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <asm/setup.h>

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time allowed).
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in long's for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
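
Relative to v4.10.11 above, KCOV_ENABLE now takes a mode argument instead of requiring zero. A sketch of the changed enable step, with KCOV_TRACE_PC/KCOV_TRACE_CMP as defined in the uapi header (KCOV_TRACE_PC is 0, so callers written against the old "arg must be 0" rule keep working):

	/* PC tracing, as in the v4.10.11 example (KCOV_TRACE_PC == 0): */
	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
	/* ...or comparison-operand tracing instead: */
	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_CMP);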

static notrace bool check_kcov_mode(enum kcov_mode needed_mode, struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts; there are paired barrier()/WRITE_ONCE() in
	 * kcov_ioctl_locked().
	 */
	barrier();
	return mode == needed_mode;
}

static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */
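
The comparison records produced by write_comp_data() land in the same mmapped buffer: word 0 is a 64-bit record count, and each record is KCOV_WORDS_PER_CMP u64 words (type, arg1, arg2, ip). A userspace decode sketch, with the KCOV_CMP_* encoding mirrored from the uapi header (bit 0 flags a constant operand, bits 1-2 hold log2 of the operand size):

#include <stdint.h>
#include <stdio.h>

#define KCOV_WORDS_PER_CMP	4
#define KCOV_CMP_CONST		(1 << 0)
#define KCOV_CMP_SIZE(n)	((n) << 1)
#define KCOV_CMP_MASK		KCOV_CMP_SIZE(3)

static void dump_cmps(const uint64_t *cover)
{
	uint64_t i, n = cover[0];

	for (i = 0; i < n; i++) {
		const uint64_t *rec = &cover[1 + i * KCOV_WORDS_PER_CMP];
		uint64_t type = rec[0];
		unsigned int size = 1u << ((type & KCOV_CMP_MASK) >> 1);

		printf("ip=0x%llx: %u-byte cmp 0x%llx vs 0x%llx%s\n",
		       (unsigned long long)rec[3], size,
		       (unsigned long long)rec[1],
		       (unsigned long long)rec[2],
		       (type & KCOV_CMP_CONST) ? " (one operand const)" : "");
	}
}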

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and set up buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode,
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		if (arg == KCOV_TRACE_PC)
			kcov->mode = KCOV_MODE_TRACE_PC;
		else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
			kcov->mode = KCOV_MODE_TRACE_CMP;
#else
		return -ENOTSUPP;
#endif
		else
			return -EINVAL;
		kcov_fault_in_area(kcov);
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in check_kcov_mode(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov->mode = KCOV_MODE_INIT;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release        = kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus,
	 * there is no need to protect it against removal races. The
	 * use of debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);