// SPDX-License-Identifier: GPL-2.0
#include <linux/percpu.h>
#include <linux/jump_label.h>
#include <asm/trace.h>
#include <asm/asm-prototypes.h>

#ifdef CONFIG_JUMP_LABEL
struct static_key opal_tracepoint_key = STATIC_KEY_INIT;

int opal_tracepoint_regfunc(void)
{
	static_key_slow_inc(&opal_tracepoint_key);
	return 0;
}

void opal_tracepoint_unregfunc(void)
{
	static_key_slow_dec(&opal_tracepoint_key);
}
#else
/*
 * We optimise OPAL calls by placing opal_tracepoint_refcount
 * directly in the TOC so we can check if the opal tracepoints are
 * enabled via a single load.
 */

/* NB: reg/unreg are called while guarded with the tracepoints_mutex */
extern long opal_tracepoint_refcount;

int opal_tracepoint_regfunc(void)
{
	opal_tracepoint_refcount++;
	return 0;
}

void opal_tracepoint_unregfunc(void)
{
	opal_tracepoint_refcount--;
}
#endif
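
/*
 * Illustration only (kept out of the build): with jump labels, a C
 * caller could gate the trace hooks on the key as sketched below; the
 * !CONFIG_JUMP_LABEL variant would test opal_tracepoint_refcount
 * instead. In the kernel proper this check lives in the OPAL call
 * assembly, and the wrapper below is hypothetical.
 */
#if 0
static void example_maybe_trace_entry(unsigned long opcode,
				      unsigned long *args)
{
	/* patched to a nop until opal_tracepoint_regfunc() enables it */
	if (static_key_false(&opal_tracepoint_key))
		__trace_opal_entry(opcode, args);
}
#endif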

/*
 * Since the tracing code might execute OPAL calls we need to guard against
 * recursion.
 */
static DEFINE_PER_CPU(unsigned int, opal_trace_depth);

void __trace_opal_entry(unsigned long opcode, unsigned long *args)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
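	/* held across the OPAL call; dropped in __trace_opal_exit() */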
	preempt_disable();
	trace_opal_entry(opcode, args);
	(*depth)--;

out:
	local_irq_restore(flags);
}

void __trace_opal_exit(long opcode, unsigned long retval)
{
	unsigned long flags;
	unsigned int *depth;

	local_irq_save(flags);

	depth = this_cpu_ptr(&opal_trace_depth);

	if (*depth)
		goto out;

	(*depth)++;
	trace_opal_exit(opcode, retval);
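	/* pairs with the preempt_disable() in __trace_opal_entry() */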
	preempt_enable();
	(*depth)--;

out:
	local_irq_restore(flags);
}
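
/*
 * Illustration only (kept out of the build): a hypothetical wrapper
 * showing how the entry/exit hooks are meant to bracket an OPAL call.
 * In the kernel proper this bracketing is driven from the OPAL call
 * assembly rather than from C.
 */
#if 0
static long example_traced_opal_call(unsigned long opcode,
				     unsigned long *args)
{
	long rc;

	__trace_opal_entry(opcode, args);
	rc = 0;			/* the actual firmware call would go here */
	__trace_opal_exit(opcode, rc);

	return rc;
}
#endif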