/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Author: Jonathan Kim <jonathan.kim@amd.com>
 *
 */

#include <linux/perf_event.h>
#include <linux/init.h>
#include "amdgpu.h"
#include "amdgpu_pmu.h"
#include "df_v3_6.h"

#define PMU_NAME_SIZE 32

/* record to keep track of pmu entry per pmu type per device */
struct amdgpu_pmu_entry {
	struct list_head entry;
	struct amdgpu_device *adev;
	struct pmu pmu;
	unsigned int pmu_perf_type;
};

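/* all PMU entries created by this driver, across all devices */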
static LIST_HEAD(amdgpu_pmu_list);

/* initialize perf counter */
static int amdgpu_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* test the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* update the hw_perf_event struct with config data */
	hwc->config = event->attr.config;

	return 0;
}

/* start perf counter */
static void amdgpu_perf_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	switch (pe->pmu_perf_type) {
	case PERF_TYPE_AMDGPU_DF:
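		/*
		 * Unless the counter was already set up by amdgpu_perf_add()
		 * (PERF_EF_RELOAD), the first pmc_start call (last argument
		 * 1) asks the DF to add the counter; the second (last
		 * argument 0) actually starts it counting.
		 */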
		if (!(flags & PERF_EF_RELOAD))
			pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 1);

		pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 0);
		break;
	default:
		break;
	}

	perf_event_update_userpage(event);
}

/* read perf counter */
static void amdgpu_perf_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);
	u64 count, prev;

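	/*
	 * Retry until prev_count is stable: a concurrent reader may update
	 * it between our snapshot of prev and the hardware read.
	 */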
	do {
		prev = local64_read(&hwc->prev_count);

		switch (pe->pmu_perf_type) {
		case PERF_TYPE_AMDGPU_DF:
			pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->config,
							&count);
			break;
		default:
			count = 0;
			break;
		}
	} while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);

	local64_add(count - prev, &event->count);
}

/* stop perf counter */
static void amdgpu_perf_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	switch (pe->pmu_perf_type) {
	case PERF_TYPE_AMDGPU_DF:
		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 0);
		break;
	default:
		break;
	}

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	amdgpu_perf_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

/* add perf counter */
static int amdgpu_perf_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);
	int retval;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

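	/* reserve a hardware counter now; counting begins in amdgpu_perf_start() */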
	switch (pe->pmu_perf_type) {
	case PERF_TYPE_AMDGPU_DF:
		retval = pe->adev->df.funcs->pmc_start(pe->adev,
						hwc->config, 1);
		break;
	default:
		return 0;
	}

	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		amdgpu_perf_start(event, PERF_EF_RELOAD);

	return retval;
}

/* delete perf counter */
static void amdgpu_perf_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);

	amdgpu_perf_stop(event, PERF_EF_UPDATE);

	switch (pe->pmu_perf_type) {
	case PERF_TYPE_AMDGPU_DF:
		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 1);
		break;
	default:
		break;
	}

	perf_event_update_userpage(event);
}

/* vega20 pmus */

/* init pmu tracking per pmu type */
static int init_pmu_by_type(struct amdgpu_device *adev,
			const struct attribute_group *attr_groups[],
			char *pmu_type_name, char *pmu_file_prefix,
			unsigned int pmu_perf_type,
			unsigned int num_counters)
{
	char pmu_name[PMU_NAME_SIZE];
	struct amdgpu_pmu_entry *pmu_entry;
	int ret = 0;

	pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL);

	if (!pmu_entry)
		return -ENOMEM;

	pmu_entry->adev = adev;
	pmu_entry->pmu = (struct pmu){
		.event_init = amdgpu_perf_event_init,
		.add = amdgpu_perf_add,
		.del = amdgpu_perf_del,
		.start = amdgpu_perf_start,
		.stop = amdgpu_perf_stop,
		.read = amdgpu_perf_read,
		.task_ctx_nr = perf_invalid_context,
	};

	pmu_entry->pmu.attr_groups = attr_groups;
	pmu_entry->pmu_perf_type = pmu_perf_type;
	snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d",
		pmu_file_prefix, adev->ddev->primary->index);

	ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);

	if (ret) {
		kfree(pmu_entry);
		pr_warn("Error initializing AMDGPU %s PMUs.\n", pmu_type_name);
		return ret;
	}

	pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n",
		pmu_type_name, num_counters);

	list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list);

	return 0;
}

/* init amdgpu_pmu */
int amdgpu_pmu_init(struct amdgpu_device *adev)
{
	int ret = 0;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		/* init df */
		ret = init_pmu_by_type(adev, df_v3_6_attr_groups,
				"DF", "amdgpu_df", PERF_TYPE_AMDGPU_DF,
				DF_V3_6_MAX_COUNTERS);

		/* other pmu types go here */
		break;
	default:
		return 0;
	}

	return ret;
}

/* destroy all pmu data associated with target device */
void amdgpu_pmu_fini(struct amdgpu_device *adev)
{
	struct amdgpu_pmu_entry *pe, *temp;

	list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) {
		if (pe->adev == adev) {
			list_del(&pe->entry);
			perf_pmu_unregister(&pe->pmu);
			kfree(pe);
		}
	}
}
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/perf_event.h>
#include <linux/init.h>
#include "amdgpu.h"
#include "amdgpu_pmu.h"

#define PMU_NAME_SIZE 32
#define NUM_FORMATS_AMDGPU_PMU 4
#define NUM_FORMATS_DF_VEGA20 3
#define NUM_EVENTS_DF_VEGA20 8
#define NUM_EVENT_TYPES_VEGA20 1
#define NUM_EVENTS_VEGA20_XGMI 2
#define NUM_EVENTS_VEGA20_MAX NUM_EVENTS_VEGA20_XGMI
#define NUM_EVENT_TYPES_ARCTURUS 1
#define NUM_EVENTS_ARCTURUS_XGMI 6
#define NUM_EVENTS_ARCTURUS_MAX NUM_EVENTS_ARCTURUS_XGMI

struct amdgpu_pmu_event_attribute {
	struct device_attribute attr;
	const char *event_str;
	unsigned int type;
};

/* record to keep track of pmu entry per pmu type per device */
struct amdgpu_pmu_entry {
	struct list_head entry;
	struct amdgpu_device *adev;
	struct pmu pmu;
	unsigned int pmu_perf_type;
	char *pmu_type_name;
	char *pmu_file_prefix;
	struct attribute_group fmt_attr_group;
	struct amdgpu_pmu_event_attribute *fmt_attr;
	struct attribute_group evt_attr_group;
	struct amdgpu_pmu_event_attribute *evt_attr;
};

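/*
 * Show an event string in sysfs; events reached through the unified
 * per-device PMU also carry a "type" field selecting the handling IP.
 */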
static ssize_t amdgpu_pmu_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct amdgpu_pmu_event_attribute *amdgpu_pmu_attr;

	amdgpu_pmu_attr = container_of(attr, struct amdgpu_pmu_event_attribute,
				attr);

	if (!amdgpu_pmu_attr->type)
		return sprintf(buf, "%s\n", amdgpu_pmu_attr->event_str);

	return sprintf(buf, "%s,type=0x%x\n",
			amdgpu_pmu_attr->event_str, amdgpu_pmu_attr->type);
}

static LIST_HEAD(amdgpu_pmu_list);

struct amdgpu_pmu_attr {
	const char *name;
	const char *config;
};

struct amdgpu_pmu_type {
	const unsigned int type;
	const unsigned int num_of_type;
};

struct amdgpu_pmu_config {
	struct amdgpu_pmu_attr *formats;
	unsigned int num_formats;
	struct amdgpu_pmu_attr *events;
	unsigned int num_events;
	struct amdgpu_pmu_type *types;
	unsigned int num_types;
};

/*
 * Events fall under two categories:
 * - PMU typed
 *   Events in /sys/bus/event_source/devices/amdgpu_<pmu_type>_<dev_num> have
 *   performance counter operations handled by one IP <pmu_type>. Formats and
 *   events should be defined by <pmu_type>_<asic_type>_formats and
 *   <pmu_type>_<asic_type>_events respectively.
 *
 * - Event config typed
 *   Events in /sys/bus/event_source/devices/amdgpu_<dev_num> have performance
 *   counter operations that can be handled by multiple IPs dictated by their
 *   "type" format field. Formats and events should be defined by
 *   amdgpu_pmu_formats and <asic_type>_events respectively. Format field
 *   "type" is generated in amdgpu_pmu_event_show and defined in
 *   <asic_type>_event_config_types.
 */
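
/*
 * For example, on a device whose DRM minor is 0 (assumed here purely for
 * illustration), the two categories surface roughly as:
 *
 *   /sys/bus/event_source/devices/amdgpu_df_0/events/cake0_pcsout_txdata
 *   /sys/bus/event_source/devices/amdgpu_0/events/xgmi_link0_data_outbound
 *
 * and can be counted system-wide with the standard perf tooling, e.g.:
 *
 *   perf stat -e amdgpu_df_0/cake0_pcsout_txdata/ -a sleep 1
 *   perf stat -e amdgpu_0/xgmi_link0_data_outbound/ -a sleep 1
 */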

static struct amdgpu_pmu_attr amdgpu_pmu_formats[NUM_FORMATS_AMDGPU_PMU] = {
	{ .name = "event", .config = "config:0-7" },
	{ .name = "instance", .config = "config:8-15" },
	{ .name = "umask", .config = "config:16-23" },
	{ .name = "type", .config = "config:56-63" }
};
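
/*
 * Putting the formats together: a user-visible string such as
 * "event=0x7,instance=0x46,umask=0x2,type=0x1" (values here are purely
 * illustrative) packs into the 64-bit perf config as
 *
 *   config = (0x1ULL << 56) | (0x2 << 16) | (0x46 << 8) | 0x7;
 *
 * which amdgpu_perf_add() later splits apart again via
 * AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT/_MASK.
 */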

/* Vega20 events */
static struct amdgpu_pmu_attr vega20_events[NUM_EVENTS_VEGA20_MAX] = {
	{ .name = "xgmi_link0_data_outbound",
	  .config = "event=0x7,instance=0x46,umask=0x2" },
	{ .name = "xgmi_link1_data_outbound",
	  .config = "event=0x7,instance=0x47,umask=0x2" }
};

static struct amdgpu_pmu_type vega20_types[NUM_EVENT_TYPES_VEGA20] = {
	{ .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI,
	  .num_of_type = NUM_EVENTS_VEGA20_XGMI }
};

static struct amdgpu_pmu_config vega20_config = {
	.formats = amdgpu_pmu_formats,
	.num_formats = ARRAY_SIZE(amdgpu_pmu_formats),
	.events = vega20_events,
	.num_events = ARRAY_SIZE(vega20_events),
	.types = vega20_types,
	.num_types = ARRAY_SIZE(vega20_types)
};

/* Vega20 data fabric (DF) events */
static struct amdgpu_pmu_attr df_vega20_formats[NUM_FORMATS_DF_VEGA20] = {
	{ .name = "event", .config = "config:0-7" },
	{ .name = "instance", .config = "config:8-15" },
	{ .name = "umask", .config = "config:16-23" }
};

static struct amdgpu_pmu_attr df_vega20_events[NUM_EVENTS_DF_VEGA20] = {
	{ .name = "cake0_pcsout_txdata",
	  .config = "event=0x7,instance=0x46,umask=0x2" },
	{ .name = "cake1_pcsout_txdata",
	  .config = "event=0x7,instance=0x47,umask=0x2" },
	{ .name = "cake0_pcsout_txmeta",
	  .config = "event=0x7,instance=0x46,umask=0x4" },
	{ .name = "cake1_pcsout_txmeta",
	  .config = "event=0x7,instance=0x47,umask=0x4" },
	{ .name = "cake0_ftiinstat_reqalloc",
	  .config = "event=0xb,instance=0x46,umask=0x4" },
	{ .name = "cake1_ftiinstat_reqalloc",
	  .config = "event=0xb,instance=0x47,umask=0x4" },
	{ .name = "cake0_ftiinstat_rspalloc",
	  .config = "event=0xb,instance=0x46,umask=0x8" },
	{ .name = "cake1_ftiinstat_rspalloc",
	  .config = "event=0xb,instance=0x47,umask=0x8" }
};

static struct amdgpu_pmu_config df_vega20_config = {
	.formats = df_vega20_formats,
	.num_formats = ARRAY_SIZE(df_vega20_formats),
	.events = df_vega20_events,
	.num_events = ARRAY_SIZE(df_vega20_events),
	.types = NULL,
	.num_types = 0
};

/* Arcturus events */
static struct amdgpu_pmu_attr arcturus_events[NUM_EVENTS_ARCTURUS_MAX] = {
	{ .name = "xgmi_link0_data_outbound",
	  .config = "event=0x7,instance=0x4b,umask=0x2" },
	{ .name = "xgmi_link1_data_outbound",
	  .config = "event=0x7,instance=0x4c,umask=0x2" },
	{ .name = "xgmi_link2_data_outbound",
	  .config = "event=0x7,instance=0x4d,umask=0x2" },
	{ .name = "xgmi_link3_data_outbound",
	  .config = "event=0x7,instance=0x4e,umask=0x2" },
	{ .name = "xgmi_link4_data_outbound",
	  .config = "event=0x7,instance=0x4f,umask=0x2" },
	{ .name = "xgmi_link5_data_outbound",
	  .config = "event=0x7,instance=0x50,umask=0x2" }
};

static struct amdgpu_pmu_type arcturus_types[NUM_EVENT_TYPES_ARCTURUS] = {
	{ .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI,
	  .num_of_type = NUM_EVENTS_ARCTURUS_XGMI }
};

static struct amdgpu_pmu_config arcturus_config = {
	.formats = amdgpu_pmu_formats,
	.num_formats = ARRAY_SIZE(amdgpu_pmu_formats),
	.events = arcturus_events,
	.num_events = ARRAY_SIZE(arcturus_events),
	.types = arcturus_types,
	.num_types = ARRAY_SIZE(arcturus_types)
};

/* initialize perf counter */
static int amdgpu_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* test the event attr type for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* update the hw_perf_event struct with config data */
	hwc->config = event->attr.config;
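	/* the handling IP is not known yet; amdgpu_perf_add() resolves it */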
	hwc->config_base = AMDGPU_PMU_PERF_TYPE_NONE;

	return 0;
}

/* start perf counter */
static void amdgpu_perf_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);
	int target_cntr = 0;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
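		/*
		 * On a fresh start, reserve a DF counter slot and remember
		 * it in hwc->idx so stop/read address the same counter.
		 */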
		if (!(flags & PERF_EF_RELOAD)) {
			target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,
						hwc->config, 0 /* unused */,
						1 /* add counter */);
			if (target_cntr < 0)
				break;

			hwc->idx = target_cntr;
		}

		pe->adev->df.funcs->pmc_start(pe->adev, hwc->config,
						hwc->idx, 0);
		break;
	default:
		break;
	}

	perf_event_update_userpage(event);
}

/* read perf counter */
static void amdgpu_perf_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);
	u64 count, prev;

	do {
		prev = local64_read(&hwc->prev_count);

		switch (hwc->config_base) {
		case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
		case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
			pe->adev->df.funcs->pmc_get_count(pe->adev,
						hwc->config, hwc->idx, &count);
			break;
		default:
			count = 0;
			break;
		}
	} while (local64_cmpxchg(&hwc->prev_count, prev, count) != prev);

	local64_add(count - prev, &event->count);
}

/* stop perf counter */
static void amdgpu_perf_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx,
						0);
		break;
	default:
		break;
	}

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	amdgpu_perf_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

/* add perf counter */
static int amdgpu_perf_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int retval = 0, target_cntr;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);

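	/*
	 * Resolve which IP handles this event: a dedicated PMU implies its
	 * own config type, while the unified PMU encodes the type in the
	 * upper bits of the event config (the "type" format field).
	 */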
	switch (pe->pmu_perf_type) {
	case AMDGPU_PMU_PERF_TYPE_DF:
		hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF;
		break;
	case AMDGPU_PMU_PERF_TYPE_ALL:
		hwc->config_base = (hwc->config >>
					AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT) &
					AMDGPU_PMU_EVENT_CONFIG_TYPE_MASK;
		break;
	}

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
		target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,
						hwc->config, 0 /* unused */,
						1 /* add counter */);
		if (target_cntr < 0)
			retval = target_cntr;
		else
			hwc->idx = target_cntr;

		break;
	default:
		return 0;
	}

	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		amdgpu_perf_start(event, PERF_EF_RELOAD);

	return retval;
}

/* delete perf counter */
static void amdgpu_perf_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						struct amdgpu_pmu_entry,
						pmu);

	amdgpu_perf_stop(event, PERF_EF_UPDATE);

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx,
						1);
		break;
	default:
		break;
	}

	perf_event_update_userpage(event);
}

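/*
 * Populate attr_group->attrs[s_offset..e_offset) from events[], tagging
 * each attribute with the given event config type; amdgpu_pmu_event_show()
 * appends the type to the event string when it is set.
 */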
static void amdgpu_pmu_create_event_attrs_by_type(
				struct attribute_group *attr_group,
				struct amdgpu_pmu_event_attribute *pmu_attr,
				struct amdgpu_pmu_attr events[],
				int s_offset,
				int e_offset,
				unsigned int type)
{
	int i;

	pmu_attr += s_offset;

	for (i = s_offset; i < e_offset; i++) {
		attr_group->attrs[i] = &pmu_attr->attr.attr;
		sysfs_attr_init(&pmu_attr->attr.attr);
		pmu_attr->attr.attr.name = events[i].name;
		pmu_attr->attr.attr.mode = 0444;
		pmu_attr->attr.show = amdgpu_pmu_event_show;
		pmu_attr->event_str = events[i].config;
		pmu_attr->type = type;
		pmu_attr++;
	}
}

static void amdgpu_pmu_create_attrs(struct attribute_group *attr_group,
				struct amdgpu_pmu_event_attribute *pmu_attr,
				struct amdgpu_pmu_attr events[],
				int num_events)
{
	amdgpu_pmu_create_event_attrs_by_type(attr_group, pmu_attr, events, 0,
				num_events, AMDGPU_PMU_EVENT_CONFIG_TYPE_NONE);
}

static int amdgpu_pmu_alloc_pmu_attrs(
				struct attribute_group *fmt_attr_group,
				struct amdgpu_pmu_event_attribute **fmt_attr,
				struct attribute_group *evt_attr_group,
				struct amdgpu_pmu_event_attribute **evt_attr,
				struct amdgpu_pmu_config *config)
{
	*fmt_attr = kcalloc(config->num_formats, sizeof(**fmt_attr),
				GFP_KERNEL);

	if (!(*fmt_attr))
		return -ENOMEM;

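	/* one extra zeroed slot NULL-terminates each attribute array */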
	fmt_attr_group->attrs = kcalloc(config->num_formats + 1,
				sizeof(*fmt_attr_group->attrs), GFP_KERNEL);

	if (!fmt_attr_group->attrs)
		goto err_fmt_attr_grp;

	*evt_attr = kcalloc(config->num_events, sizeof(**evt_attr), GFP_KERNEL);

	if (!(*evt_attr))
		goto err_evt_attr;

	evt_attr_group->attrs = kcalloc(config->num_events + 1,
				sizeof(*evt_attr_group->attrs), GFP_KERNEL);

	if (!evt_attr_group->attrs)
		goto err_evt_attr_grp;

	return 0;
err_evt_attr_grp:
	kfree(*evt_attr);
err_evt_attr:
	kfree(fmt_attr_group->attrs);
err_fmt_attr_grp:
	kfree(*fmt_attr);
	return -ENOMEM;
}

/* init pmu tracking per pmu type */
static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
					struct amdgpu_pmu_config *config)
{
	const struct attribute_group *attr_groups[] = {
		&pmu_entry->fmt_attr_group,
		&pmu_entry->evt_attr_group,
		NULL
	};
	char pmu_name[PMU_NAME_SIZE];
	int ret = 0, total_num_events = 0;

	pmu_entry->pmu = (struct pmu){
		.event_init = amdgpu_perf_event_init,
		.add = amdgpu_perf_add,
		.del = amdgpu_perf_del,
		.start = amdgpu_perf_start,
		.stop = amdgpu_perf_stop,
		.read = amdgpu_perf_read,
		.task_ctx_nr = perf_invalid_context,
	};

	ret = amdgpu_pmu_alloc_pmu_attrs(&pmu_entry->fmt_attr_group,
					&pmu_entry->fmt_attr,
					&pmu_entry->evt_attr_group,
					&pmu_entry->evt_attr,
					config);

	if (ret)
		goto err_out;

	amdgpu_pmu_create_attrs(&pmu_entry->fmt_attr_group, pmu_entry->fmt_attr,
				config->formats, config->num_formats);

	if (pmu_entry->pmu_perf_type == AMDGPU_PMU_PERF_TYPE_ALL) {
		int i;

		for (i = 0; i < config->num_types; i++) {
			amdgpu_pmu_create_event_attrs_by_type(
					&pmu_entry->evt_attr_group,
					pmu_entry->evt_attr,
					config->events,
					total_num_events,
					total_num_events +
						config->types[i].num_of_type,
					config->types[i].type);
			total_num_events += config->types[i].num_of_type;
		}
	} else {
		amdgpu_pmu_create_attrs(&pmu_entry->evt_attr_group,
					pmu_entry->evt_attr,
					config->events, config->num_events);
		total_num_events = config->num_events;
	}

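	/* attr_groups lives on the stack; the registered pmu needs its own copy */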
	pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
					GFP_KERNEL);

	if (!pmu_entry->pmu.attr_groups) {
		ret = -ENOMEM;
		goto err_attr_group;
	}

	snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
		adev_to_drm(pmu_entry->adev)->primary->index);

	ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);

	if (ret)
		goto err_register;

	if (pmu_entry->pmu_perf_type != AMDGPU_PMU_PERF_TYPE_ALL)
		pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n",
			pmu_entry->pmu_type_name, total_num_events);
	else
		pr_info("Detected AMDGPU %d Perf Events.\n", total_num_events);

	list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list);

	return 0;
err_register:
	kfree(pmu_entry->pmu.attr_groups);
err_attr_group:
	kfree(pmu_entry->fmt_attr_group.attrs);
	kfree(pmu_entry->fmt_attr);
	kfree(pmu_entry->evt_attr_group.attrs);
	kfree(pmu_entry->evt_attr);
err_out:
	pr_warn("Error initializing AMDGPU %s PMUs.\n",
		pmu_entry->pmu_type_name);
	return ret;
}

/* destroy all pmu data associated with target device */
void amdgpu_pmu_fini(struct amdgpu_device *adev)
{
	struct amdgpu_pmu_entry *pe, *temp;

	list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) {
		if (pe->adev != adev)
			continue;
		list_del(&pe->entry);
		perf_pmu_unregister(&pe->pmu);
		kfree(pe->pmu.attr_groups);
		kfree(pe->fmt_attr_group.attrs);
		kfree(pe->fmt_attr);
		kfree(pe->evt_attr_group.attrs);
		kfree(pe->evt_attr);
		kfree(pe);
	}
}

static struct amdgpu_pmu_entry *create_pmu_entry(struct amdgpu_device *adev,
						unsigned int pmu_type,
						char *pmu_type_name,
						char *pmu_file_prefix)
{
	struct amdgpu_pmu_entry *pmu_entry;

	pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL);

	if (!pmu_entry)
		return pmu_entry;

	pmu_entry->adev = adev;
	pmu_entry->fmt_attr_group.name = "format";
	pmu_entry->fmt_attr_group.attrs = NULL;
	pmu_entry->evt_attr_group.name = "events";
	pmu_entry->evt_attr_group.attrs = NULL;
	pmu_entry->pmu_perf_type = pmu_type;
	pmu_entry->pmu_type_name = pmu_type_name;
	pmu_entry->pmu_file_prefix = pmu_file_prefix;

	return pmu_entry;
}

/* init amdgpu_pmu */
int amdgpu_pmu_init(struct amdgpu_device *adev)
{
	int ret = 0;
	struct amdgpu_pmu_entry *pmu_entry, *pmu_entry_df;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
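		/* Vega20 gets both a dedicated DF PMU and the unified amdgpu PMU */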
		pmu_entry_df = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_DF,
						"DF", "amdgpu_df");

		if (!pmu_entry_df)
			return -ENOMEM;

		ret = init_pmu_entry_by_type_and_add(pmu_entry_df,
						&df_vega20_config);

		if (ret) {
			kfree(pmu_entry_df);
			return ret;
		}

		pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL,
						"", "amdgpu");

		if (!pmu_entry) {
			amdgpu_pmu_fini(adev);
			return -ENOMEM;
		}

		ret = init_pmu_entry_by_type_and_add(pmu_entry,
						&vega20_config);

		if (ret) {
			kfree(pmu_entry);
			amdgpu_pmu_fini(adev);
			return ret;
		}

		break;
	case CHIP_ARCTURUS:
		pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL,
						"", "amdgpu");
		if (!pmu_entry)
			return -ENOMEM;

		ret = init_pmu_entry_by_type_and_add(pmu_entry,
						&arcturus_config);

		if (ret) {
			kfree(pmu_entry);
			return ret;
		}

		break;
	default:
		return 0;
	}

	return ret;
}