// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/debugfs.h>
#include <linux/init.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/hvcall.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return nr_wp_slots();
	return 0;	/* no instruction breakpoints available */
}
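
/*
 * Illustrative sketch (not part of this file): a minimal kernel-module
 * client of this facility, modelled on samples/hw_breakpoint. It places
 * a write watchpoint on a kernel symbol via the generic
 * register_wide_hw_breakpoint() API, which lands in the arch hooks
 * below. The watched symbol and the handler body are assumptions for
 * the example only (kallsyms_lookup_name() is not exported on recent
 * kernels); compiled out with #if 0 so the listing stays intact.
 */
#if 0
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/kallsyms.h>

static struct perf_event * __percpu *sample_hbp;

static void sample_hbp_handler(struct perf_event *bp,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	pr_info("watched variable was written to\n");
}

static int __init sample_init(void)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);
	attr.bp_addr = kallsyms_lookup_name("jiffies");	/* assumed target */
	attr.bp_len = HW_BREAKPOINT_LEN_8;
	attr.bp_type = HW_BREAKPOINT_W;

	sample_hbp = register_wide_hw_breakpoint(&attr, sample_hbp_handler, NULL);
	if (IS_ERR((void __force *)sample_hbp))
		return PTR_ERR((void __force *)sample_hbp);
	return 0;
}

static void __exit sample_exit(void)
{
	unregister_wide_hw_breakpoint(sample_hbp);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");
#endif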

static bool single_step_pending(void)
{
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (current->thread.last_hit_ubp[i])
			return true;
	}
	return false;
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return -EBUSY;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (!single_step_pending())
		__set_breakpoint(i, info);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return;

	__set_breakpoint(i, &null_brk);
}

static bool is_ptrace_bp(struct perf_event *bp)
{
	return bp->overflow_handler == ptrace_triggered;
}

struct breakpoint {
	struct list_head list;
	struct perf_event *bp;
	bool ptrace_bp;
};

static DEFINE_PER_CPU(struct breakpoint *, cpu_bps[HBP_NUM_MAX]);
static LIST_HEAD(task_bps);

static struct breakpoint *alloc_breakpoint(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
	if (!tmp)
		return ERR_PTR(-ENOMEM);
	tmp->bp = bp;
	tmp->ptrace_bp = is_ptrace_bp(bp);
	return tmp;
}

static bool bp_addr_range_overlap(struct perf_event *bp1, struct perf_event *bp2)
{
	__u64 bp1_saddr, bp1_eaddr, bp2_saddr, bp2_eaddr;

	bp1_saddr = ALIGN_DOWN(bp1->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp1_eaddr = ALIGN(bp1->attr.bp_addr + bp1->attr.bp_len, HW_BREAKPOINT_SIZE);
	bp2_saddr = ALIGN_DOWN(bp2->attr.bp_addr, HW_BREAKPOINT_SIZE);
	bp2_eaddr = ALIGN(bp2->attr.bp_addr + bp2->attr.bp_len, HW_BREAKPOINT_SIZE);

	return (bp1_saddr < bp2_eaddr && bp1_eaddr > bp2_saddr);
}

static bool alternate_infra_bp(struct breakpoint *b, struct perf_event *bp)
{
	return is_ptrace_bp(bp) ? !b->ptrace_bp : b->ptrace_bp;
}

static bool can_co_exist(struct breakpoint *b, struct perf_event *bp)
{
	return !(alternate_infra_bp(b, bp) && bp_addr_range_overlap(b->bp, bp));
}
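
/*
 * Illustrative sketch (not part of this file): the coexistence rule
 * above, restated as a standalone user-space program with simplified
 * types so it can be compiled and tested in isolation. The names and
 * the 8-byte grain mirror the logic above; nothing here is kernel API.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define GRAIN 8UL	/* stands in for HW_BREAKPOINT_SIZE */

struct simple_bp { unsigned long addr, len; bool ptrace_bp; };

static bool ranges_overlap(struct simple_bp *a, struct simple_bp *b)
{
	unsigned long as = a->addr & ~(GRAIN - 1);
	unsigned long ae = (a->addr + a->len + GRAIN - 1) & ~(GRAIN - 1);
	unsigned long bs = b->addr & ~(GRAIN - 1);
	unsigned long be = (b->addr + b->len + GRAIN - 1) & ~(GRAIN - 1);

	return as < be && ae > bs;
}

static bool can_co_exist(struct simple_bp *a, struct simple_bp *b)
{
	/* Only a ptrace/perf pair on overlapping ranges conflicts. */
	return !(a->ptrace_bp != b->ptrace_bp && ranges_overlap(a, b));
}

int main(void)
{
	struct simple_bp ptrace_wp = { 0x1000, 4, true };
	struct simple_bp perf_wp   = { 0x1004, 4, false };

	/* Same doubleword, alternate infrastructures: prints 0. */
	printf("co-exist: %d\n", can_co_exist(&ptrace_wp, &perf_wp));
	return 0;
}
#endif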

static int task_bps_add(struct perf_event *bp)
{
	struct breakpoint *tmp;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	list_add(&tmp->list, &task_bps);
	return 0;
}

static void task_bps_remove(struct perf_event *bp)
{
	struct list_head *pos, *q;

	list_for_each_safe(pos, q, &task_bps) {
		struct breakpoint *tmp = list_entry(pos, struct breakpoint, list);

		if (tmp->bp == bp) {
			list_del(&tmp->list);
			kfree(tmp);
			break;
		}
	}
}
/*
 * If any task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool all_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;

	list_for_each_entry(tmp, &task_bps, list) {
		if (!can_co_exist(tmp, bp))
			return true;
	}
	return false;
}

/*
 * If the same task has a breakpoint from the alternate infrastructure,
 * return true. Otherwise return false.
 */
static bool same_task_bps_check(struct perf_event *bp)
{
	struct breakpoint *tmp;

	list_for_each_entry(tmp, &task_bps, list) {
		if (tmp->bp->hw.target == bp->hw.target &&
		    !can_co_exist(tmp, bp))
			return true;
	}
	return false;
}

static int cpu_bps_add(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	struct breakpoint *tmp;
	int i = 0;

	tmp = alloc_breakpoint(bp);
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i]) {
			cpu_bp[i] = tmp;
			break;
		}
	}
	return 0;
}

static void cpu_bps_remove(struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i = 0;

	cpu_bp = per_cpu_ptr(cpu_bps, bp->cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!cpu_bp[i])
			continue;

		if (cpu_bp[i]->bp == bp) {
			kfree(cpu_bp[i]);
			cpu_bp[i] = NULL;
			break;
		}
	}
}

static bool cpu_bps_check(int cpu, struct perf_event *bp)
{
	struct breakpoint **cpu_bp;
	int i;

	cpu_bp = per_cpu_ptr(cpu_bps, cpu);
	for (i = 0; i < nr_wp_slots(); i++) {
		if (cpu_bp[i] && !can_co_exist(cpu_bp[i], bp))
			return true;
	}
	return false;
}

static bool all_cpu_bps_check(struct perf_event *bp)
{
	int cpu;

	for_each_online_cpu(cpu) {
		if (cpu_bps_check(cpu, bp))
			return true;
	}
	return false;
}

/*
 * We don't use any locks to serialize accesses to cpu_bps or task_bps
 * because we are already inside nr_bp_mutex.
 */
295{
296 int ret;
297
298 /* ptrace breakpoint */
299 if (is_ptrace_bp(bp)) {
300 if (all_cpu_bps_check(bp))
301 return -ENOSPC;
302
303 if (same_task_bps_check(bp))
304 return -ENOSPC;
305
306 return task_bps_add(bp);
307 }
308
309 /* perf breakpoint */
310 if (is_kernel_addr(bp->attr.bp_addr))
311 return 0;
312
313 if (bp->hw.target && bp->cpu == -1) {
314 if (same_task_bps_check(bp))
315 return -ENOSPC;
316
317 return task_bps_add(bp);
318 } else if (!bp->hw.target && bp->cpu != -1) {
319 if (all_task_bps_check(bp))
320 return -ENOSPC;
321
322 return cpu_bps_add(bp);
323 }
324
325 if (same_task_bps_check(bp))
326 return -ENOSPC;
327
328 ret = cpu_bps_add(bp);
329 if (ret)
330 return ret;
331 ret = task_bps_add(bp);
332 if (ret)
333 cpu_bps_remove(bp);
334
335 return ret;
336}
337
338void arch_release_bp_slot(struct perf_event *bp)
339{
340 if (!is_kernel_addr(bp->attr.bp_addr)) {
341 if (bp->hw.target)
342 task_bps_remove(bp);
343 if (bp->cpu != -1)
344 cpu_bps_remove(bp);
345 }
346}
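
/*
 * Illustrative sketch (not part of this file): the perf side of the
 * arbitration above, driven from user space. A per-task data watchpoint
 * is requested through perf_event_open(2) with PERF_TYPE_BREAKPOINT;
 * the kernel routes it through arch_reserve_bp_slot() before it can be
 * installed. The watched variable is an assumption for the example.
 */
#if 0
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <sys/syscall.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>

static long watched;	/* assumed watch target */

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.bp_type = HW_BREAKPOINT_W;
	attr.bp_addr = (unsigned long)&watched;
	attr.bp_len = HW_BREAKPOINT_LEN_8;
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;

	/* pid = 0, cpu = -1: watch this task on any CPU. */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	watched = 42;	/* triggers the watchpoint */
	close(fd);
	return 0;
}
#endif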

/*
 * Perform cleanup of arch-specific counters during unregistration
 * of the perf-event
 */
void arch_unregister_hw_breakpoint(struct perf_event *bp)
{
	/*
	 * If the breakpoint is unregistered between a hw_breakpoint_handler()
	 * and the single_step_dabr_instruction(), then clean up the breakpoint
	 * restoration variables to prevent dangling pointers.
	 * FIXME, this should not be using bp->ctx at all! Sayeth peterz.
	 */
	if (bp->ctx && bp->ctx->task && bp->ctx->task != ((void *)-1L)) {
		int i;

		for (i = 0; i < nr_wp_slots(); i++) {
			if (bp->ctx->task->thread.last_hit_ubp[i] == bp)
				bp->ctx->task->thread.last_hit_ubp[i] = NULL;
		}
	}
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	return is_kernel_addr(hw->address);
}

int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}

/*
 * Watchpoint match range is always doubleword (8 bytes) aligned on
 * powerpc. If the given range crosses a doubleword boundary, we
 * need to increase the length such that the next doubleword also gets
 * covered. Ex,
 *
 *          address   len = 6 bytes
 *                |=========.
 *   |------------v--|------v--------|
 *   | | | | | | | | | | | | | | | | |
 *   |---------------|---------------|
 *    <---8 bytes--->
 *
 * In this case, we should configure hw as:
 *   start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
 *   len = 16 bytes
 *
 * @start_addr is inclusive but @end_addr is exclusive.
 */
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
	u16 max_len = DABR_MAX_LEN;
	u16 hw_len;
	unsigned long start_addr, end_addr;

	start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
	end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
	hw_len = end_addr - start_addr;

	if (dawr_enabled()) {
		max_len = DAWR_MAX_LEN;
		/* DAWR region can't cross a 512-byte boundary on P10 predecessors */
		if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
		    (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
			return -EINVAL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx can set up a range without limitation */
		max_len = U16_MAX;
	}

	if (hw_len > max_len)
		return -EINVAL;

	hw->hw_len = hw_len;
	return 0;
}
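
/*
 * Illustrative sketch (not part of this file): the doubleword rounding
 * above, reproduced as a standalone program so the arithmetic in the
 * comment can be checked. For address 0x100e and len = 6 the range
 * crosses a doubleword boundary, so the programmed range grows to 16
 * bytes starting at 0x1008. Values are examples only.
 */
#if 0
#include <stdio.h>

#define BP_SIZE 8UL	/* stands in for HW_BREAKPOINT_SIZE */

int main(void)
{
	unsigned long address = 0x100e, len = 6;
	unsigned long start_addr = address & ~(BP_SIZE - 1);
	unsigned long end_addr = (address + len + BP_SIZE - 1) & ~(BP_SIZE - 1);

	/* Prints: start 0x1008, end 0x1018, hw_len 16 */
	printf("start 0x%lx, end 0x%lx, hw_len %lu\n",
	       start_addr, end_addr, end_addr - start_addr);
	return 0;
}
#endif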

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL;

	if (!bp || !attr->bp_len)
		return ret;

	hw->type = HW_BRK_TYPE_TRANSLATE;
	if (attr->bp_type & HW_BREAKPOINT_R)
		hw->type |= HW_BRK_TYPE_READ;
	if (attr->bp_type & HW_BREAKPOINT_W)
		hw->type |= HW_BRK_TYPE_WRITE;
	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!attr->exclude_user)
		hw->type |= HW_BRK_TYPE_USER;
	if (!attr->exclude_kernel)
		hw->type |= HW_BRK_TYPE_KERNEL;
	if (!attr->exclude_hv)
		hw->type |= HW_BRK_TYPE_HYP;
	hw->address = attr->bp_addr;
	hw->len = attr->bp_len;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	return hw_breakpoint_validate_len(hw);
}

/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (unlikely(tsk->thread.last_hit_ubp[i]))
			goto reset;
	}
	return;

reset:
	regs->msr &= ~MSR_SE;
	for (i = 0; i < nr_wp_slots(); i++) {
		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
		__set_breakpoint(i, info);
		tsk->thread.last_hit_ubp[i] = NULL;
	}
}

static bool dar_in_user_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	return ((info->address <= dar) && (dar - info->address < info->len));
}

static bool ea_user_range_overlaps(unsigned long ea, int size,
				   struct arch_hw_breakpoint *info)
{
	return ((ea < info->address + info->len) &&
		(ea + size > info->address));
}

static bool dar_in_hw_range(unsigned long dar, struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;

	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

	return ((hw_start_addr <= dar) && (hw_end_addr > dar));
}

static bool ea_hw_range_overlaps(unsigned long ea, int size,
				 struct arch_hw_breakpoint *info)
{
	unsigned long hw_start_addr, hw_end_addr;

	hw_start_addr = ALIGN_DOWN(info->address, HW_BREAKPOINT_SIZE);
	hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

	return ((ea < hw_end_addr) && (ea + size > hw_start_addr));
}
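
/*
 * Illustrative sketch (not part of this file): why the user range and
 * the hardware range can disagree. The hardware matches on the rounded
 * doubleword range, so an access can hit in hardware while missing the
 * user-requested bytes; such hits are flagged HW_BRK_TYPE_EXTRANEOUS_IRQ
 * rather than reported. Standalone restatement; the values are examples.
 */
#if 0
#include <stdbool.h>
#include <stdio.h>

#define BP_SIZE 8UL

int main(void)
{
	unsigned long address = 0x100c, len = 2;	/* user asked for 0x100c-0x100d */
	unsigned long dar = 0x1009;			/* faulting access */
	unsigned long hw_start = address & ~(BP_SIZE - 1);
	unsigned long hw_end = (address + len + BP_SIZE - 1) & ~(BP_SIZE - 1);

	bool in_user_range = address <= dar && dar - address < len;
	bool in_hw_range = hw_start <= dar && hw_end > dar;

	/* Prints: user 0, hw 1 -> an extraneous exception */
	printf("user %d, hw %d\n", in_user_range, in_hw_range);
	return 0;
}
#endif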

/*
 * If hw has multiple DAWR registers, we also need to check all
 * dawrx constraint bits to confirm this is _really_ a valid event.
 * If type is UNKNOWN, but privilege level matches, consider it as
 * a positive match.
 */
static bool check_dawrx_constraints(struct pt_regs *regs, int type,
				    struct arch_hw_breakpoint *info)
{
	if (OP_IS_LOAD(type) && !(info->type & HW_BRK_TYPE_READ))
		return false;

	/*
	 * The Cache Management instructions other than dcbz never
	 * cause a match, i.e. if type is CACHEOP, the instruction
	 * is dcbz, and dcbz is treated as a store.
	 */
	if ((OP_IS_STORE(type) || type == CACHEOP) && !(info->type & HW_BRK_TYPE_WRITE))
		return false;

	if (is_kernel_addr(regs->nip) && !(info->type & HW_BRK_TYPE_KERNEL))
		return false;

	if (user_mode(regs) && !(info->type & HW_BRK_TYPE_USER))
		return false;

	return true;
}

/*
 * Return true if the event is valid wrt dawr configuration,
 * including extraneous exception. Otherwise return false.
 */
static bool check_constraints(struct pt_regs *regs, struct ppc_inst instr,
			      unsigned long ea, int type, int size,
			      struct arch_hw_breakpoint *info)
{
	bool in_user_range = dar_in_user_range(regs->dar, info);
	bool dawrx_constraints;

	/*
	 * 8xx supports only one breakpoint and thus we can
	 * unconditionally return true.
	 */
	if (IS_ENABLED(CONFIG_PPC_8xx)) {
		if (!in_user_range)
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		return true;
	}

	if (unlikely(ppc_inst_equal(instr, ppc_inst(0)))) {
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    !dar_in_hw_range(regs->dar, info))
			return false;

		return true;
	}

	dawrx_constraints = check_dawrx_constraints(regs, type, info);

	if (type == UNKNOWN) {
		if (cpu_has_feature(CPU_FTR_ARCH_31) &&
		    !dar_in_hw_range(regs->dar, info))
			return false;

		return dawrx_constraints;
	}

	if (ea_user_range_overlaps(ea, size, info))
		return dawrx_constraints;

	if (ea_hw_range_overlaps(ea, size, info)) {
		if (dawrx_constraints) {
			info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
			return true;
		}
	}
	return false;
}

static int cache_op_size(void)
{
#ifdef __powerpc64__
	return ppc64_caches.l1d.block_size;
#else
	return L1_CACHE_BYTES;
#endif
}

static void get_instr_detail(struct pt_regs *regs, struct ppc_inst *instr,
			     int *type, int *size, unsigned long *ea)
{
	struct instruction_op op;

	if (__get_user_instr_inatomic(*instr, (void __user *)regs->nip))
		return;

	analyse_instr(&op, regs, *instr);
	*type = GETTYPE(op.type);
	*ea = op.ea;
#ifdef __powerpc64__
	if (!(regs->msr & MSR_64BIT))
		*ea &= 0xffffffffUL;
#endif

	*size = GETSIZE(op.type);
	if (*type == CACHEOP) {
		*size = cache_op_size();
		*ea &= ~(*size - 1);
	}
}

static bool is_larx_stcx_instr(int type)
{
	return type == LARX || type == STCX;
}

/*
 * We've failed in reliably handling the hw-breakpoint. Unregister
 * it and throw a warning message to let the user know about it.
 */
static void handler_error(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
	     info->address);
	perf_event_disable_inatomic(bp);
}

static void larx_stcx_err(struct perf_event *bp, struct arch_hw_breakpoint *info)
{
	printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
			   info->address);
	perf_event_disable_inatomic(bp);
}

static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
			     struct arch_hw_breakpoint **info, int *hit,
			     struct ppc_inst instr)
{
	int i;
	int stepped;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			current->thread.last_hit_ubp[i] = bp[i];
			info[i] = NULL;
		}
		regs->msr |= MSR_SE;
		return false;
	}

	stepped = emulate_step(regs, instr);
	if (!stepped) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			handler_error(bp[i], info[i]);
			info[i] = NULL;
		}
		return false;
	}
	return true;
}

int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;
	int rc = NOTIFY_STOP;
	struct perf_event *bp[HBP_NUM_MAX] = { NULL };
	struct pt_regs *regs = args->regs;
	struct arch_hw_breakpoint *info[HBP_NUM_MAX] = { NULL };
	int i;
	int hit[HBP_NUM_MAX] = {0};
	int nr_hit = 0;
	bool ptrace_bp = false;
	struct ppc_inst instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	if (!IS_ENABLED(CONFIG_PPC_8xx))
		get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		bp[i] = __this_cpu_read(bp_per_reg[i]);
		if (!bp[i])
			continue;

		info[i] = counter_arch_bp(bp[i]);
		info[i]->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

		if (check_constraints(regs, instr, ea, type, size, info[i])) {
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i], info[i]);
				info[i] = NULL;
				err = 1;
				continue;
			}

			if (is_ptrace_bp(bp[i]))
				ptrace_bp = true;
			hit[i] = 1;
			nr_hit++;
		}
	}

	if (err)
		goto reset;

	if (!nr_hit) {
		rc = NOTIFY_DONE;
		goto out;
	}

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (ptrace_bp) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			perf_bp_event(bp[i], regs);
			info[i] = NULL;
		}
		rc = NOTIFY_DONE;
		goto reset;
	}

	if (!IS_ENABLED(CONFIG_PPC_8xx)) {
		if (is_larx_stcx_instr(type)) {
			for (i = 0; i < nr_wp_slots(); i++) {
				if (!hit[i])
					continue;
				larx_stcx_err(bp[i], info[i]);
				info[i] = NULL;
			}
			goto reset;
		}

		if (!stepping_handler(regs, bp, info, hit, instr))
			goto reset;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!hit[i])
			continue;
		if (!(info[i]->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp[i], regs);
	}

reset:
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!info[i])
			continue;
		__set_breakpoint(i, info[i]);
	}

out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);

/*
 * Handle single-step exceptions following a DABR hit.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	struct perf_event *bp = NULL;
	struct arch_hw_breakpoint *info;
	int i;
	bool found = false;

	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		bp = current->thread.last_hit_ubp[i];

		if (!bp)
			continue;

		found = true;
		info = counter_arch_bp(bp);

		/*
		 * We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics
		 */
		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp, regs);
		current->thread.last_hit_ubp[i] = NULL;
	}

	if (!found)
		return NOTIFY_DONE;

	for (i = 0; i < nr_wp_slots(); i++) {
		bp = __this_cpu_read(bp_per_reg[i]);
		if (!bp)
			continue;

		info = counter_arch_bp(bp);
		__set_breakpoint(i, info);
	}

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < nr_wp_slots(); i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * HW_breakpoint: a unified kernel/user-space hardware breakpoint facility,
 * using the CPU's debug registers. Derived from
 * "arch/x86/kernel/hw_breakpoint.c"
 *
 * Copyright 2010 IBM Corporation
 * Author: K.Prasad <prasad@linux.vnet.ibm.com>
 */

#include <linux/hw_breakpoint.h>
#include <linux/notifier.h>
#include <linux/kprobes.h>
#include <linux/percpu.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/init.h>

#include <asm/hw_breakpoint.h>
#include <asm/processor.h>
#include <asm/sstep.h>
#include <asm/debug.h>
#include <asm/hvcall.h>
#include <asm/inst.h>
#include <linux/uaccess.h>

/*
 * Stores the breakpoints currently in use on each breakpoint address
 * register for every cpu
 */
static DEFINE_PER_CPU(struct perf_event *, bp_per_reg[HBP_NUM_MAX]);

/*
 * Returns total number of data or instruction breakpoints available.
 */
int hw_breakpoint_slots(int type)
{
	if (type == TYPE_DATA)
		return nr_wp_slots();
	return 0;	/* no instruction breakpoints available */
}

/*
 * Install a perf counter breakpoint.
 *
 * We seek a free debug address register and use it for this
 * breakpoint.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
int arch_install_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (!*slot) {
			*slot = bp;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return -EBUSY;

	/*
	 * Do not install DABR values if the instruction must be single-stepped.
	 * If so, DABR will be populated in single_step_dabr_instruction().
	 */
	if (!info->perf_single_step)
		__set_breakpoint(i, info);

	return 0;
}

/*
 * Uninstall the breakpoint contained in the given counter.
 *
 * First we search the debug address register it uses and then we disable
 * it.
 *
 * Atomic: we hold the counter->ctx->lock and we only handle variables
 * and registers local to this cpu.
 */
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
	struct arch_hw_breakpoint null_brk = {0};
	struct perf_event **slot;
	int i;

	for (i = 0; i < nr_wp_slots(); i++) {
		slot = this_cpu_ptr(&bp_per_reg[i]);
		if (*slot == bp) {
			*slot = NULL;
			break;
		}
	}

	if (WARN_ONCE(i == nr_wp_slots(), "Can't find any breakpoint slot"))
		return;

	__set_breakpoint(i, &null_brk);
}

static bool is_ptrace_bp(struct perf_event *bp)
{
	return bp->overflow_handler == ptrace_triggered;
}

/*
 * Check for virtual address in kernel space.
 */
int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
{
	return is_kernel_addr(hw->address);
}

int arch_bp_generic_fields(int type, int *gen_bp_type)
{
	*gen_bp_type = 0;
	if (type & HW_BRK_TYPE_READ)
		*gen_bp_type |= HW_BREAKPOINT_R;
	if (type & HW_BRK_TYPE_WRITE)
		*gen_bp_type |= HW_BREAKPOINT_W;
	if (*gen_bp_type == 0)
		return -EINVAL;
	return 0;
}

/*
 * Watchpoint match range is always doubleword (8 bytes) aligned on
 * powerpc. If the given range crosses a doubleword boundary, we
 * need to increase the length such that the next doubleword also gets
 * covered. Ex,
 *
 *          address   len = 6 bytes
 *                |=========.
 *   |------------v--|------v--------|
 *   | | | | | | | | | | | | | | | | |
 *   |---------------|---------------|
 *    <---8 bytes--->
 *
 * In this case, we should configure hw as:
 *   start_addr = address & ~(HW_BREAKPOINT_SIZE - 1)
 *   len = 16 bytes
 *
 * @start_addr is inclusive but @end_addr is exclusive.
 */
static int hw_breakpoint_validate_len(struct arch_hw_breakpoint *hw)
{
	u16 max_len = DABR_MAX_LEN;
	u16 hw_len;
	unsigned long start_addr, end_addr;

	start_addr = ALIGN_DOWN(hw->address, HW_BREAKPOINT_SIZE);
	end_addr = ALIGN(hw->address + hw->len, HW_BREAKPOINT_SIZE);
	hw_len = end_addr - start_addr;

	if (dawr_enabled()) {
		max_len = DAWR_MAX_LEN;
		/* DAWR region can't cross a 512-byte boundary on P10 predecessors */
		if (!cpu_has_feature(CPU_FTR_ARCH_31) &&
		    (ALIGN_DOWN(start_addr, SZ_512) != ALIGN_DOWN(end_addr - 1, SZ_512)))
			return -EINVAL;
	} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
		/* 8xx can set up a range without limitation */
		max_len = U16_MAX;
	}

	if (hw_len > max_len)
		return -EINVAL;

	hw->hw_len = hw_len;
	return 0;
}

/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int hw_breakpoint_arch_parse(struct perf_event *bp,
			     const struct perf_event_attr *attr,
			     struct arch_hw_breakpoint *hw)
{
	int ret = -EINVAL;

	if (!bp || !attr->bp_len)
		return ret;

	hw->type = HW_BRK_TYPE_TRANSLATE;
	if (attr->bp_type & HW_BREAKPOINT_R)
		hw->type |= HW_BRK_TYPE_READ;
	if (attr->bp_type & HW_BREAKPOINT_W)
		hw->type |= HW_BRK_TYPE_WRITE;
	if (hw->type == HW_BRK_TYPE_TRANSLATE)
		/* must set at least read or write */
		return ret;
	if (!attr->exclude_user)
		hw->type |= HW_BRK_TYPE_USER;
	if (!attr->exclude_kernel)
		hw->type |= HW_BRK_TYPE_KERNEL;
	if (!attr->exclude_hv)
		hw->type |= HW_BRK_TYPE_HYP;
	hw->address = attr->bp_addr;
	hw->len = attr->bp_len;

	if (!ppc_breakpoint_available())
		return -ENODEV;

	return hw_breakpoint_validate_len(hw);
}

/*
 * Restores the breakpoint on the debug registers.
 * Invoke this function if it is known that the execution context is
 * about to change to cause loss of MSR_SE settings.
 *
 * The perf watchpoint will simply re-trigger once the thread is started again,
 * and the watchpoint handler will set up MSR_SE and perf_single_step as
 * needed.
 */
void thread_change_pc(struct task_struct *tsk, struct pt_regs *regs)
{
	struct arch_hw_breakpoint *info;
	int i;

	preempt_disable();

	for (i = 0; i < nr_wp_slots(); i++) {
		struct perf_event *bp = __this_cpu_read(bp_per_reg[i]);

		if (unlikely(bp && counter_arch_bp(bp)->perf_single_step))
			goto reset;
	}
	goto out;

reset:
	regs_set_return_msr(regs, regs->msr & ~MSR_SE);
	for (i = 0; i < nr_wp_slots(); i++) {
		info = counter_arch_bp(__this_cpu_read(bp_per_reg[i]));
		__set_breakpoint(i, info);
		info->perf_single_step = false;
	}

out:
	preempt_enable();
}

static bool is_larx_stcx_instr(int type)
{
	return type == LARX || type == STCX;
}

static bool is_octword_vsx_instr(int type, int size)
{
	return ((type == LOAD_VSX || type == STORE_VSX) && size == 32);
}

/*
 * We've failed in reliably handling the hw-breakpoint. Unregister
 * it and throw a warning message to let the user know about it.
 */
static void handler_error(struct perf_event *bp)
{
	WARN(1, "Unable to handle hardware breakpoint. Breakpoint at 0x%lx will be disabled.",
	     counter_arch_bp(bp)->address);
	perf_event_disable_inatomic(bp);
}

static void larx_stcx_err(struct perf_event *bp)
{
	printk_ratelimited("Breakpoint hit on instruction that can't be emulated. Breakpoint at 0x%lx will be disabled.\n",
			   counter_arch_bp(bp)->address);
	perf_event_disable_inatomic(bp);
}

static bool stepping_handler(struct pt_regs *regs, struct perf_event **bp,
			     int *hit, ppc_inst_t instr)
{
	int i;
	int stepped;

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;

			counter_arch_bp(bp[i])->perf_single_step = true;
			bp[i] = NULL;
		}
		regs_set_return_msr(regs, regs->msr | MSR_SE);
		return false;
	}

	stepped = emulate_step(regs, instr);
	if (!stepped) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i])
				continue;
			handler_error(bp[i]);
			bp[i] = NULL;
		}
		return false;
	}
	return true;
}

static void handle_p10dd1_spurious_exception(struct perf_event **bp,
					     int *hit, unsigned long ea)
{
	int i;
	unsigned long hw_end_addr;

	/*
	 * Handle spurious exception only when any bp_per_reg is set.
	 * Otherwise this might be created by xmon and not actually a
	 * spurious exception.
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		struct arch_hw_breakpoint *info;

		if (!bp[i])
			continue;

		info = counter_arch_bp(bp[i]);

		hw_end_addr = ALIGN(info->address + info->len, HW_BREAKPOINT_SIZE);

		/*
		 * Ending address of DAWR range is less than starting
		 * address of op.
		 */
		if ((hw_end_addr - 1) >= ea)
			continue;

		/*
		 * Those addresses need to be in the same or in two
		 * consecutive 512B blocks.
		 */
		if (((hw_end_addr - 1) >> 10) != (ea >> 10))
			continue;

		/*
		 * 'op address + 64B' generates an address that has a
		 * carry into bit 52 (crosses 2K boundary).
		 */
		if ((ea & 0x800) == ((ea + 64) & 0x800))
			continue;

		break;
	}

	if (i == nr_wp_slots())
		return;

	for (i = 0; i < nr_wp_slots(); i++) {
		if (bp[i]) {
			hit[i] = 1;
			counter_arch_bp(bp[i])->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
		}
	}
}
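
/*
 * Illustrative sketch (not part of this file): the 2K-boundary carry
 * test used above, checked in isolation. Bit 52 in IBM (big-endian)
 * bit numbering of a 64-bit address is bit 11 counting from the LSB,
 * i.e. the 0x800 bit, so the test fires when 'ea + 64' crosses a 2K
 * boundary. Standalone restatement; the address is an example.
 */
#if 0
#include <stdio.h>

int main(void)
{
	unsigned long ea = 0x7c0;	/* ea + 64 = 0x800: carry into bit 0x800 */

	/* Prints: crosses 2K boundary: 1 */
	printf("crosses 2K boundary: %d\n",
	       (ea & 0x800) != ((ea + 64) & 0x800));
	return 0;
}
#endif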

/*
 * Handle a DABR or DAWR exception.
 *
 * Called in atomic context.
 */
int hw_breakpoint_handler(struct die_args *args)
{
	bool err = false;
	int rc = NOTIFY_STOP;
	struct perf_event *bp[HBP_NUM_MAX] = { NULL };
	struct pt_regs *regs = args->regs;
	int i;
	int hit[HBP_NUM_MAX] = {0};
	int nr_hit = 0;
	bool ptrace_bp = false;
	ppc_inst_t instr = ppc_inst(0);
	int type = 0;
	int size = 0;
	unsigned long ea = 0;

	/* Disable breakpoints during exception handling */
	hw_breakpoint_disable();

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	if (!IS_ENABLED(CONFIG_PPC_8xx))
		wp_get_instr_detail(regs, &instr, &type, &size, &ea);

	for (i = 0; i < nr_wp_slots(); i++) {
		struct arch_hw_breakpoint *info;

		bp[i] = __this_cpu_read(bp_per_reg[i]);
		if (!bp[i])
			continue;

		info = counter_arch_bp(bp[i]);
		info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;

		if (wp_check_constraints(regs, instr, ea, type, size, info)) {
			if (!IS_ENABLED(CONFIG_PPC_8xx) &&
			    ppc_inst_equal(instr, ppc_inst(0))) {
				handler_error(bp[i]);
				bp[i] = NULL;
				err = 1;
				continue;
			}

			if (is_ptrace_bp(bp[i]))
				ptrace_bp = true;
			hit[i] = 1;
			nr_hit++;
		}
	}

	if (err)
		goto reset;

	if (!nr_hit) {
		/* Workaround for Power10 DD1 */
		if (!IS_ENABLED(CONFIG_PPC_8xx) && mfspr(SPRN_PVR) == 0x800100 &&
		    is_octword_vsx_instr(type, size)) {
			handle_p10dd1_spurious_exception(bp, hit, ea);
		} else {
			rc = NOTIFY_DONE;
			goto out;
		}
	}

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (ptrace_bp) {
		for (i = 0; i < nr_wp_slots(); i++) {
			if (!hit[i] || !is_ptrace_bp(bp[i]))
				continue;
			perf_bp_event(bp[i], regs);
			bp[i] = NULL;
		}
		rc = NOTIFY_DONE;
		goto reset;
	}

	if (!IS_ENABLED(CONFIG_PPC_8xx)) {
		if (is_larx_stcx_instr(type)) {
			for (i = 0; i < nr_wp_slots(); i++) {
				if (!hit[i])
					continue;
				larx_stcx_err(bp[i]);
				bp[i] = NULL;
			}
			goto reset;
		}

		if (!stepping_handler(regs, bp, hit, instr))
			goto reset;
	}

	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!hit[i])
			continue;
		if (!(counter_arch_bp(bp[i])->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp[i], regs);
	}

reset:
	for (i = 0; i < nr_wp_slots(); i++) {
		if (!bp[i])
			continue;
		__set_breakpoint(i, counter_arch_bp(bp[i]));
	}

out:
	rcu_read_unlock();
	return rc;
}
NOKPROBE_SYMBOL(hw_breakpoint_handler);

/*
 * Handle single-step exceptions following a DABR hit.
 *
 * Called in atomic context.
 */
static int single_step_dabr_instruction(struct die_args *args)
{
	struct pt_regs *regs = args->regs;
	bool found = false;

	/*
	 * Check if we are single-stepping as a result of a
	 * previous HW Breakpoint exception
	 */
	for (int i = 0; i < nr_wp_slots(); i++) {
		struct perf_event *bp;
		struct arch_hw_breakpoint *info;

		bp = __this_cpu_read(bp_per_reg[i]);

		if (!bp)
			continue;

		info = counter_arch_bp(bp);

		if (!info->perf_single_step)
			continue;

		found = true;

		/*
		 * We shall invoke the user-defined callback function in the
		 * single stepping handler to conform to 'trigger-after-execute'
		 * semantics
		 */
		if (!(info->type & HW_BRK_TYPE_EXTRANEOUS_IRQ))
			perf_bp_event(bp, regs);

		info->perf_single_step = false;
		__set_breakpoint(i, counter_arch_bp(bp));
	}

	/*
	 * If the process was being single-stepped by ptrace, let the
	 * other single-step actions occur (e.g. generate SIGTRAP).
	 */
	if (!found || test_thread_flag(TIF_SINGLESTEP))
		return NOTIFY_DONE;

	return NOTIFY_STOP;
}
NOKPROBE_SYMBOL(single_step_dabr_instruction);

/*
 * Handle debug exception notifications.
 *
 * Called in atomic context.
 */
int hw_breakpoint_exceptions_notify(
		struct notifier_block *unused, unsigned long val, void *data)
{
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_DABR_MATCH:
		ret = hw_breakpoint_handler(data);
		break;
	case DIE_SSTEP:
		ret = single_step_dabr_instruction(data);
		break;
	}

	return ret;
}
NOKPROBE_SYMBOL(hw_breakpoint_exceptions_notify);

/*
 * Release the user breakpoints used by ptrace
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < nr_wp_slots(); i++) {
		unregister_hw_breakpoint(t->ptrace_bps[i]);
		t->ptrace_bps[i] = NULL;
	}
}

void hw_breakpoint_pmu_read(struct perf_event *bp)
{
	/* TODO */
}

void ptrace_triggered(struct perf_event *bp,
		      struct perf_sample_data *data, struct pt_regs *regs)
{
	struct perf_event_attr attr;

	/*
	 * Disable the breakpoint request here since ptrace has defined a
	 * one-shot behaviour for breakpoint exceptions in PPC64.
	 * The SIGTRAP signal is generated automatically for us in do_dabr().
	 * We don't have to do anything about that here.
	 */
	attr = bp->attr;
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}
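
/*
 * Illustrative sketch (not part of this file): the ptrace side of the
 * one-shot behaviour. A tracer arms a write watchpoint on the tracee
 * with PPC_PTRACE_SETHWDEBUG (the request that ends up wired to
 * ptrace_triggered() above) and waits for the resulting SIGTRAP.
 * Constants and struct layout follow the powerpc uapi headers as the
 * author understands them; error handling is trimmed for brevity.
 */
#if 0
#include <sys/ptrace.h>
#include <sys/wait.h>
#include <asm/ptrace.h>		/* struct ppc_hw_breakpoint */
#include <signal.h>
#include <string.h>
#include <unistd.h>

static long watched;	/* same address in parent and child after fork */

int main(void)
{
	pid_t child = fork();

	if (child == 0) {
		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
		raise(SIGSTOP);
		watched = 1;	/* first write traps; later writes do not */
		_exit(0);
	}

	waitpid(child, NULL, 0);

	struct ppc_hw_breakpoint bp;
	memset(&bp, 0, sizeof(bp));
	bp.version = PPC_DEBUG_CURRENT_VERSION;
	bp.trigger_type = PPC_BREAKPOINT_TRIGGER_WRITE;
	bp.addr_mode = PPC_BREAKPOINT_MODE_EXACT;
	bp.condition_mode = PPC_BREAKPOINT_CONDITION_NONE;
	bp.addr = (unsigned long)&watched;

	ptrace(PPC_PTRACE_SETHWDEBUG, child, NULL, &bp);
	ptrace(PTRACE_CONT, child, NULL, NULL);

	/* SIGTRAP arrives once; the breakpoint is then disabled. */
	waitpid(child, NULL, 0);
	ptrace(PTRACE_CONT, child, NULL, NULL);
	waitpid(child, NULL, 0);
	return 0;
}
#endif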