1/* pcr.c: Generic sparc64 performance counter infrastructure.
2 *
3 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
4 */
5#include <linux/kernel.h>
6#include <linux/export.h>
7#include <linux/init.h>
8#include <linux/irq.h>
9
10#include <linux/irq_work.h>
11#include <linux/ftrace.h>
12
13#include <asm/pil.h>
14#include <asm/pcr.h>
15#include <asm/nmi.h>
16#include <asm/spitfire.h>
17#include <asm/perfctr.h>
18
19/* This code is shared between various users of the performance
20 * counters. Users will be oprofile, pseudo-NMI watchdog, and the
21 * perf_event support layer.
22 */
23
24#define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
25#define PCR_N2_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
26 PCR_N2_TOE_OV1 | \
27 (2 << PCR_N2_SL1_SHIFT) | \
28 (0xff << PCR_N2_MASK1_SHIFT))
29
30u64 pcr_enable;
31unsigned int picl_shift;
32
33/* Performance counter interrupts run unmasked at PIL level 15.
34 * Therefore we can't do things like wakeups and other work
35 * that expects IRQ disabling to be adhered to in locking etc.
36 *
37 * Therefore in such situations we defer the work by signalling
38 * a lower level cpu IRQ.
39 */
40void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
41{
42 struct pt_regs *old_regs;
43
44 clear_softint(1 << PIL_DEFERRED_PCR_WORK);
45
46 old_regs = set_irq_regs(regs);
47 irq_enter();
48#ifdef CONFIG_IRQ_WORK
49 irq_work_run();
50#endif
51 irq_exit();
52 set_irq_regs(old_regs);
53}
54
55void arch_irq_work_raise(void)
56{
57 set_softint(1 << PIL_DEFERRED_PCR_WORK);
58}
59
60const struct pcr_ops *pcr_ops;
61EXPORT_SYMBOL_GPL(pcr_ops);
62
63static u64 direct_pcr_read(void)
64{
65 u64 val;
66
67 read_pcr(val);
68 return val;
69}
70
71static void direct_pcr_write(u64 val)
72{
73 write_pcr(val);
74}
75
76static const struct pcr_ops direct_pcr_ops = {
77 .read = direct_pcr_read,
78 .write = direct_pcr_write,
79};
80
81static void n2_pcr_write(u64 val)
82{
83 unsigned long ret;
84
85 if (val & PCR_N2_HTRACE) {
86 ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
87 if (ret != HV_EOK)
88 write_pcr(val);
89 } else
90 write_pcr(val);
91}
92
93static const struct pcr_ops n2_pcr_ops = {
94 .read = direct_pcr_read,
95 .write = n2_pcr_write,
96};
97
98static unsigned long perf_hsvc_group;
99static unsigned long perf_hsvc_major;
100static unsigned long perf_hsvc_minor;
101
102static int __init register_perf_hsvc(void)
103{
104 if (tlb_type == hypervisor) {
105 switch (sun4v_chip_type) {
106 case SUN4V_CHIP_NIAGARA1:
107 perf_hsvc_group = HV_GRP_NIAG_PERF;
108 break;
109
110 case SUN4V_CHIP_NIAGARA2:
111 perf_hsvc_group = HV_GRP_N2_CPU;
112 break;
113
114 case SUN4V_CHIP_NIAGARA3:
115 perf_hsvc_group = HV_GRP_KT_CPU;
116 break;
117
118 default:
119 return -ENODEV;
120 }
121
122
123 perf_hsvc_major = 1;
124 perf_hsvc_minor = 0;
125 if (sun4v_hvapi_register(perf_hsvc_group,
126 perf_hsvc_major,
127 &perf_hsvc_minor)) {
128 printk("perfmon: Could not register hvapi.\n");
129 return -ENODEV;
130 }
131 }
132 return 0;
133}
134
135static void __init unregister_perf_hsvc(void)
136{
137 if (tlb_type != hypervisor)
138 return;
139 sun4v_hvapi_unregister(perf_hsvc_group);
140}
141
142int __init pcr_arch_init(void)
143{
144 int err = register_perf_hsvc();
145
146 if (err)
147 return err;
148
149 switch (tlb_type) {
150 case hypervisor:
151 pcr_ops = &n2_pcr_ops;
152 pcr_enable = PCR_N2_ENABLE;
153 picl_shift = 2;
154 break;
155
156 case cheetah:
157 case cheetah_plus:
158 pcr_ops = &direct_pcr_ops;
159 pcr_enable = PCR_SUN4U_ENABLE;
160 break;
161
162 case spitfire:
163 /* UltraSPARC-I/II and derivatives lack a profile
164 * counter overflow interrupt so we can't make use of
165 * their hardware currently.
166 */
167 /* fallthrough */
168 default:
169 err = -ENODEV;
170 goto out_unregister;
171 }
172
173 return nmi_init();
174
175out_unregister:
176 unregister_perf_hsvc();
177 return err;
178}
1/* pcr.c: Generic sparc64 performance counter infrastructure.
2 *
3 * Copyright (C) 2009 David S. Miller (davem@davemloft.net)
4 */
5#include <linux/kernel.h>
6#include <linux/module.h>
7#include <linux/init.h>
8#include <linux/irq.h>
9
10#include <linux/irq_work.h>
11#include <linux/ftrace.h>
12
13#include <asm/pil.h>
14#include <asm/pcr.h>
15#include <asm/nmi.h>
16
17/* This code is shared between various users of the performance
18 * counters. Users will be oprofile, pseudo-NMI watchdog, and the
19 * perf_event support layer.
20 */
21
22#define PCR_SUN4U_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE)
23#define PCR_N2_ENABLE (PCR_PIC_PRIV | PCR_STRACE | PCR_UTRACE | \
24 PCR_N2_TOE_OV1 | \
25 (2 << PCR_N2_SL1_SHIFT) | \
26 (0xff << PCR_N2_MASK1_SHIFT))
27
28u64 pcr_enable;
29unsigned int picl_shift;
30
31/* Performance counter interrupts run unmasked at PIL level 15.
32 * Therefore we can't do things like wakeups and other work
33 * that expects IRQ disabling to be adhered to in locking etc.
34 *
35 * Therefore in such situations we defer the work by signalling
36 * a lower level cpu IRQ.
37 */
38void __irq_entry deferred_pcr_work_irq(int irq, struct pt_regs *regs)
39{
40 struct pt_regs *old_regs;
41
42 clear_softint(1 << PIL_DEFERRED_PCR_WORK);
43
44 old_regs = set_irq_regs(regs);
45 irq_enter();
46#ifdef CONFIG_IRQ_WORK
47 irq_work_run();
48#endif
49 irq_exit();
50 set_irq_regs(old_regs);
51}
52
53void arch_irq_work_raise(void)
54{
55 set_softint(1 << PIL_DEFERRED_PCR_WORK);
56}
57
58const struct pcr_ops *pcr_ops;
59EXPORT_SYMBOL_GPL(pcr_ops);
60
61static u64 direct_pcr_read(void)
62{
63 u64 val;
64
65 read_pcr(val);
66 return val;
67}
68
69static void direct_pcr_write(u64 val)
70{
71 write_pcr(val);
72}
73
74static const struct pcr_ops direct_pcr_ops = {
75 .read = direct_pcr_read,
76 .write = direct_pcr_write,
77};
78
79static void n2_pcr_write(u64 val)
80{
81 unsigned long ret;
82
83 if (val & PCR_N2_HTRACE) {
84 ret = sun4v_niagara2_setperf(HV_N2_PERF_SPARC_CTL, val);
85 if (ret != HV_EOK)
86 write_pcr(val);
87 } else
88 write_pcr(val);
89}
90
91static const struct pcr_ops n2_pcr_ops = {
92 .read = direct_pcr_read,
93 .write = n2_pcr_write,
94};
95
96static unsigned long perf_hsvc_group;
97static unsigned long perf_hsvc_major;
98static unsigned long perf_hsvc_minor;
99
100static int __init register_perf_hsvc(void)
101{
102 if (tlb_type == hypervisor) {
103 switch (sun4v_chip_type) {
104 case SUN4V_CHIP_NIAGARA1:
105 perf_hsvc_group = HV_GRP_NIAG_PERF;
106 break;
107
108 case SUN4V_CHIP_NIAGARA2:
109 perf_hsvc_group = HV_GRP_N2_CPU;
110 break;
111
112 case SUN4V_CHIP_NIAGARA3:
113 perf_hsvc_group = HV_GRP_KT_CPU;
114 break;
115
116 default:
117 return -ENODEV;
118 }
119
120
121 perf_hsvc_major = 1;
122 perf_hsvc_minor = 0;
123 if (sun4v_hvapi_register(perf_hsvc_group,
124 perf_hsvc_major,
125 &perf_hsvc_minor)) {
126 printk("perfmon: Could not register hvapi.\n");
127 return -ENODEV;
128 }
129 }
130 return 0;
131}
132
133static void __init unregister_perf_hsvc(void)
134{
135 if (tlb_type != hypervisor)
136 return;
137 sun4v_hvapi_unregister(perf_hsvc_group);
138}
139
140int __init pcr_arch_init(void)
141{
142 int err = register_perf_hsvc();
143
144 if (err)
145 return err;
146
147 switch (tlb_type) {
148 case hypervisor:
149 pcr_ops = &n2_pcr_ops;
150 pcr_enable = PCR_N2_ENABLE;
151 picl_shift = 2;
152 break;
153
154 case cheetah:
155 case cheetah_plus:
156 pcr_ops = &direct_pcr_ops;
157 pcr_enable = PCR_SUN4U_ENABLE;
158 break;
159
160 case spitfire:
161 /* UltraSPARC-I/II and derivatives lack a profile
162 * counter overflow interrupt so we can't make use of
163 * their hardware currently.
164 */
165 /* fallthrough */
166 default:
167 err = -ENODEV;
168 goto out_unregister;
169 }
170
171 return nmi_init();
172
173out_unregister:
174 unregister_perf_hsvc();
175 return err;
176}