// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) "kcov: " fmt

#define DISABLE_BRANCH_PROFILING
#include <linux/atomic.h>
#include <linux/compiler.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/kcov.h>
#include <linux/refcount.h>
#include <asm/setup.h>

/* Number of 64-bit words written per one comparison: */
#define KCOV_WORDS_PER_CMP 4

/*
 * kcov descriptor (one per opened debugfs file).
 * State transitions of the descriptor:
 *  - initial state after open()
 *  - then there must be a single ioctl(KCOV_INIT_TRACE) call
 *  - then, mmap() call (several calls are allowed but not useful)
 *  - then, ioctl(KCOV_ENABLE, arg), where arg is
 *	KCOV_TRACE_PC - to trace only the PCs
 *	or
 *	KCOV_TRACE_CMP - to trace only the comparison operands
 *  - then, ioctl(KCOV_DISABLE) to disable the task.
 * Enabling/disabling ioctls can be repeated (only one task at a time allowed).
 */
struct kcov {
	/*
	 * Reference counter. We keep one for:
	 *  - opened file descriptor
	 *  - task with enabled coverage (we can't unwire it from another task)
	 */
	refcount_t		refcount;
	/* The lock protects mode, size, area and t. */
	spinlock_t		lock;
	enum kcov_mode		mode;
	/* Size of arena (in longs for KCOV_MODE_TRACE). */
	unsigned		size;
	/* Coverage buffer shared with user space. */
	void			*area;
	/* Task for which we collect coverage, or NULL. */
	struct task_struct	*t;
};
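
/*
 * A minimal sketch of the intended userspace protocol, mirroring the state
 * transitions described above. The path assumes the default debugfs mount
 * point, and COVER_SIZE is an arbitrary example value, not part of the API:
 *
 *	#define COVER_SIZE (64 << 10)
 *
 *	int fd = open("/sys/kernel/debug/kcov", O_RDWR);
 *	ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE);
 *	unsigned long *cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
 *				    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	ioctl(fd, KCOV_ENABLE, KCOV_TRACE_PC);
 *	__atomic_store_n(&cover[0], 0, __ATOMIC_RELAXED);
 *	// ... issue the syscall under test ...
 *	unsigned long n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	// cover[1..n] now hold the PCs executed by this task.
 *	ioctl(fd, KCOV_DISABLE, 0);
 */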

static notrace bool check_kcov_mode(enum kcov_mode needed_mode,
				    struct task_struct *t)
{
	unsigned int mode;

	/*
	 * We are interested in code coverage as a function of syscall inputs,
	 * so we ignore code executed in interrupts.
	 */
	if (!in_task())
		return false;
	mode = READ_ONCE(t->kcov_mode);
	/*
	 * There is some code that runs in interrupts but for which
	 * in_interrupt() returns false (e.g. preempt_schedule_irq()).
	 * READ_ONCE()/barrier() effectively provides load-acquire wrt
	 * interrupts, there are paired barrier()/WRITE_ONCE() in
	 * kcov_ioctl_locked().
	 */
	barrier();
	return mode == needed_mode;
}

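/*
 * With CONFIG_RANDOMIZE_BASE, subtracting the KASLR offset keeps reported
 * PCs stable across boots, so coverage from runs with different randomized
 * kernel base addresses remains comparable.
 */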
static notrace unsigned long canonicalize_ip(unsigned long ip)
{
#ifdef CONFIG_RANDOMIZE_BASE
	ip -= kaslr_offset();
#endif
	return ip;
}

/*
 * Entry point from instrumented code.
 * This is called once per basic-block/edge.
 */
void notrace __sanitizer_cov_trace_pc(void)
{
	struct task_struct *t;
	unsigned long *area;
	unsigned long ip = canonicalize_ip(_RET_IP_);
	unsigned long pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_PC, t))
		return;

	area = t->kcov_area;
	/* The first 64-bit word is the number of subsequent PCs. */
	pos = READ_ONCE(area[0]) + 1;
	if (likely(pos < t->kcov_size)) {
		area[pos] = ip;
		WRITE_ONCE(area[0], pos);
	}
}
EXPORT_SYMBOL(__sanitizer_cov_trace_pc);

#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
static void notrace write_comp_data(u64 type, u64 arg1, u64 arg2, u64 ip)
{
	struct task_struct *t;
	u64 *area;
	u64 count, start_index, end_pos, max_pos;

	t = current;
	if (!check_kcov_mode(KCOV_MODE_TRACE_CMP, t))
		return;

	ip = canonicalize_ip(ip);

	/*
	 * We write all comparison arguments and types as u64.
	 * The buffer was allocated for t->kcov_size unsigned longs.
	 */
	area = (u64 *)t->kcov_area;
	max_pos = t->kcov_size * sizeof(unsigned long);

	count = READ_ONCE(area[0]);

	/* Every record is KCOV_WORDS_PER_CMP 64-bit words. */
	start_index = 1 + count * KCOV_WORDS_PER_CMP;
	end_pos = (start_index + KCOV_WORDS_PER_CMP) * sizeof(u64);
	if (likely(end_pos <= max_pos)) {
		area[start_index] = type;
		area[start_index + 1] = arg1;
		area[start_index + 2] = arg2;
		area[start_index + 3] = ip;
		WRITE_ONCE(area[0], count + 1);
	}
}
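
/*
 * A hedged sketch of how a userspace consumer might decode the buffer in
 * KCOV_TRACE_CMP mode (the decode loop is illustrative, not kernel API;
 * "cover" is the mmap()ed buffer viewed as u64, 4 == KCOV_WORDS_PER_CMP):
 *
 *	u64 n = __atomic_load_n(&cover[0], __ATOMIC_RELAXED);
 *	for (u64 i = 0; i < n; i++) {
 *		u64 *rec = &cover[1 + i * 4];
 *		u64 type = rec[0];	// KCOV_CMP_SIZE(...), maybe | KCOV_CMP_CONST
 *		u64 arg1 = rec[1];
 *		u64 arg2 = rec[2];
 *		u64 pc   = rec[3];
 *	}
 */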

void notrace __sanitizer_cov_trace_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp1);

void notrace __sanitizer_cov_trace_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp2);

void notrace __sanitizer_cov_trace_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp4);

void notrace __sanitizer_cov_trace_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3), arg1, arg2, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_cmp8);

void notrace __sanitizer_cov_trace_const_cmp1(u8 arg1, u8 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(0) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp1);

void notrace __sanitizer_cov_trace_const_cmp2(u16 arg1, u16 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(1) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp2);

void notrace __sanitizer_cov_trace_const_cmp4(u32 arg1, u32 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(2) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp4);

void notrace __sanitizer_cov_trace_const_cmp8(u64 arg1, u64 arg2)
{
	write_comp_data(KCOV_CMP_SIZE(3) | KCOV_CMP_CONST, arg1, arg2,
			_RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_const_cmp8);

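/*
 * The compiler emits the switch cases as a flat u64 array: cases[0] is the
 * number of case values, cases[1] is the operand size in bits, and the case
 * values themselves start at cases[2].
 */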
void notrace __sanitizer_cov_trace_switch(u64 val, u64 *cases)
{
	u64 i;
	u64 count = cases[0];
	u64 size = cases[1];
	u64 type = KCOV_CMP_CONST;

	switch (size) {
	case 8:
		type |= KCOV_CMP_SIZE(0);
		break;
	case 16:
		type |= KCOV_CMP_SIZE(1);
		break;
	case 32:
		type |= KCOV_CMP_SIZE(2);
		break;
	case 64:
		type |= KCOV_CMP_SIZE(3);
		break;
	default:
		return;
	}
	for (i = 0; i < count; i++)
		write_comp_data(type, cases[i + 2], val, _RET_IP_);
}
EXPORT_SYMBOL(__sanitizer_cov_trace_switch);
#endif /* ifdef CONFIG_KCOV_ENABLE_COMPARISONS */

static void kcov_get(struct kcov *kcov)
{
	refcount_inc(&kcov->refcount);
}

static void kcov_put(struct kcov *kcov)
{
	if (refcount_dec_and_test(&kcov->refcount)) {
		vfree(kcov->area);
		kfree(kcov);
	}
}

void kcov_task_init(struct task_struct *t)
{
	/* See comment in check_kcov_mode(). */
	WRITE_ONCE(t->kcov_mode, KCOV_MODE_DISABLED);
	barrier();
	t->kcov_size = 0;
	t->kcov_area = NULL;
	t->kcov = NULL;
}

void kcov_task_exit(struct task_struct *t)
{
	struct kcov *kcov;

	kcov = t->kcov;
	if (kcov == NULL)
		return;
	spin_lock(&kcov->lock);
	if (WARN_ON(kcov->t != t)) {
		spin_unlock(&kcov->lock);
		return;
	}
	/* Just to not leave dangling references behind. */
	kcov_task_init(t);
	kcov->t = NULL;
	kcov->mode = KCOV_MODE_INIT;
	spin_unlock(&kcov->lock);
	kcov_put(kcov);
}

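/*
 * On the first mmap() this allocates the coverage buffer with vmalloc_user()
 * and maps the same pages into the calling task, so kernel and userspace
 * share the area without copying.
 */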
static int kcov_mmap(struct file *filep, struct vm_area_struct *vma)
{
	int res = 0;
	void *area;
	struct kcov *kcov = vma->vm_file->private_data;
	unsigned long size, off;
	struct page *page;

	area = vmalloc_user(vma->vm_end - vma->vm_start);
	if (!area)
		return -ENOMEM;

	spin_lock(&kcov->lock);
	size = kcov->size * sizeof(unsigned long);
	if (kcov->mode != KCOV_MODE_INIT || vma->vm_pgoff != 0 ||
	    vma->vm_end - vma->vm_start != size) {
		res = -EINVAL;
		goto exit;
	}
	if (!kcov->area) {
		kcov->area = area;
		vma->vm_flags |= VM_DONTEXPAND;
		spin_unlock(&kcov->lock);
		for (off = 0; off < size; off += PAGE_SIZE) {
			page = vmalloc_to_page(kcov->area + off);
			if (vm_insert_page(vma, vma->vm_start + off, page))
				WARN_ONCE(1, "vm_insert_page() failed");
		}
		return 0;
	}
exit:
	spin_unlock(&kcov->lock);
	vfree(area);
	return res;
}

static int kcov_open(struct inode *inode, struct file *filep)
{
	struct kcov *kcov;

	kcov = kzalloc(sizeof(*kcov), GFP_KERNEL);
	if (!kcov)
		return -ENOMEM;
	kcov->mode = KCOV_MODE_DISABLED;
	refcount_set(&kcov->refcount, 1);
	spin_lock_init(&kcov->lock);
	filep->private_data = kcov;
	return nonseekable_open(inode, filep);
}

static int kcov_close(struct inode *inode, struct file *filep)
{
	kcov_put(filep->private_data);
	return 0;
}

/*
 * Fault in a lazily-faulted vmalloc area before it can be used by
 * __sanitizer_cov_trace_pc(), to avoid recursion issues if any code on the
 * vmalloc fault handling path is instrumented.
 */
static void kcov_fault_in_area(struct kcov *kcov)
{
	unsigned long stride = PAGE_SIZE / sizeof(unsigned long);
	unsigned long *area = kcov->area;
	unsigned long offset;

	for (offset = 0; offset < kcov->size; offset += stride)
		READ_ONCE(area[offset]);
}

static int kcov_ioctl_locked(struct kcov *kcov, unsigned int cmd,
			     unsigned long arg)
{
	struct task_struct *t;
	unsigned long size, unused;

	switch (cmd) {
	case KCOV_INIT_TRACE:
		/*
		 * Enable kcov in trace mode and setup buffer size.
		 * Must happen before anything else.
		 */
		if (kcov->mode != KCOV_MODE_DISABLED)
			return -EBUSY;
		/*
		 * Size must be at least 2 to hold current position and one PC.
		 * Later we allocate size * sizeof(unsigned long) memory,
		 * which must not overflow.
		 */
		size = arg;
		if (size < 2 || size > INT_MAX / sizeof(unsigned long))
			return -EINVAL;
		kcov->size = size;
		kcov->mode = KCOV_MODE_INIT;
		return 0;
	case KCOV_ENABLE:
		/*
		 * Enable coverage for the current task.
		 * At this point the user must have enabled trace mode,
		 * and mmapped the file. Coverage collection is disabled only
		 * at task exit or voluntarily by KCOV_DISABLE. After that it
		 * can be enabled for another task.
		 */
		if (kcov->mode != KCOV_MODE_INIT || !kcov->area)
			return -EINVAL;
		t = current;
		if (kcov->t != NULL || t->kcov != NULL)
			return -EBUSY;
		if (arg == KCOV_TRACE_PC)
			kcov->mode = KCOV_MODE_TRACE_PC;
		else if (arg == KCOV_TRACE_CMP)
#ifdef CONFIG_KCOV_ENABLE_COMPARISONS
			kcov->mode = KCOV_MODE_TRACE_CMP;
#else
			return -ENOTSUPP;
#endif
		else
			return -EINVAL;
		kcov_fault_in_area(kcov);
		/* Cache in task struct for performance. */
		t->kcov_size = kcov->size;
		t->kcov_area = kcov->area;
		/* See comment in check_kcov_mode(). */
		barrier();
		WRITE_ONCE(t->kcov_mode, kcov->mode);
		t->kcov = kcov;
		kcov->t = t;
		/* This is put either in kcov_task_exit() or in KCOV_DISABLE. */
		kcov_get(kcov);
		return 0;
	case KCOV_DISABLE:
		/* Disable coverage for the current task. */
		unused = arg;
		if (unused != 0 || current->kcov != kcov)
			return -EINVAL;
		t = current;
		if (WARN_ON(kcov->t != t))
			return -EINVAL;
		kcov_task_init(t);
		kcov->t = NULL;
		kcov->mode = KCOV_MODE_INIT;
		kcov_put(kcov);
		return 0;
	default:
		return -ENOTTY;
	}
}

static long kcov_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct kcov *kcov;
	int res;

	kcov = filep->private_data;
	spin_lock(&kcov->lock);
	res = kcov_ioctl_locked(kcov, cmd, arg);
	spin_unlock(&kcov->lock);
	return res;
}

static const struct file_operations kcov_fops = {
	.open		= kcov_open,
	.unlocked_ioctl	= kcov_ioctl,
	.compat_ioctl	= kcov_ioctl,
	.mmap		= kcov_mmap,
	.release	= kcov_close,
};

static int __init kcov_init(void)
{
	/*
	 * The kcov debugfs file won't ever get removed and thus there is
	 * no need to protect it against removal races. The use of
	 * debugfs_create_file_unsafe() is actually safe here.
	 */
	debugfs_create_file_unsafe("kcov", 0600, NULL, NULL, &kcov_fops);

	return 0;
}

device_initcall(kcov_init);