/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/perf_event.h>
#include <linux/init.h>
#include "amdgpu.h"
#include "amdgpu_pmu.h"

#define PMU_NAME_SIZE 32
#define NUM_FORMATS_AMDGPU_PMU 4
#define NUM_FORMATS_DF_VEGA20 3
#define NUM_EVENTS_DF_VEGA20 8
#define NUM_EVENT_TYPES_VEGA20 1
#define NUM_EVENTS_VEGA20_XGMI 2
#define NUM_EVENTS_VEGA20_MAX NUM_EVENTS_VEGA20_XGMI
#define NUM_EVENT_TYPES_ARCTURUS 1
#define NUM_EVENTS_ARCTURUS_XGMI 6
#define NUM_EVENTS_ARCTURUS_MAX NUM_EVENTS_ARCTURUS_XGMI

struct amdgpu_pmu_event_attribute {
	struct device_attribute attr;
	const char *event_str;
	unsigned int type;
};

/* record to keep track of pmu entry per pmu type per device */
struct amdgpu_pmu_entry {
	struct list_head entry;
	struct amdgpu_device *adev;
	struct pmu pmu;
	unsigned int pmu_perf_type;
	char *pmu_type_name;
	char *pmu_file_prefix;
	struct attribute_group fmt_attr_group;
	struct amdgpu_pmu_event_attribute *fmt_attr;
	struct attribute_group evt_attr_group;
	struct amdgpu_pmu_event_attribute *evt_attr;
};

static ssize_t amdgpu_pmu_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct amdgpu_pmu_event_attribute *amdgpu_pmu_attr;

	amdgpu_pmu_attr = container_of(attr, struct amdgpu_pmu_event_attribute,
								attr);

	if (!amdgpu_pmu_attr->type)
		return sprintf(buf, "%s\n", amdgpu_pmu_attr->event_str);

	return sprintf(buf, "%s,type=0x%x\n",
			amdgpu_pmu_attr->event_str, amdgpu_pmu_attr->type);
}
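
/*
 * Reading one of these event files through sysfs yields the config string,
 * with the IP type appended for event-config-typed PMUs. Illustrative
 * example, assuming device index 0 and
 * AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI == 0x1 (values from the tables below):
 *
 *   $ cat /sys/bus/event_source/devices/amdgpu_0/events/xgmi_link0_data_outbound
 *   event=0x7,instance=0x46,umask=0x2,type=0x1
 */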

static LIST_HEAD(amdgpu_pmu_list);


struct amdgpu_pmu_attr {
	const char *name;
	const char *config;
};

struct amdgpu_pmu_type {
	const unsigned int type;
	const unsigned int num_of_type;
};

struct amdgpu_pmu_config {
	struct amdgpu_pmu_attr *formats;
	unsigned int num_formats;
	struct amdgpu_pmu_attr *events;
	unsigned int num_events;
	struct amdgpu_pmu_type *types;
	unsigned int num_types;
};

/*
 * Events fall under two categories:
 * - PMU typed
 *   Events in /sys/bus/event_source/devices/amdgpu_<pmu_type>_<dev_num> have
 *   performance counter operations handled by one IP <pmu_type>. Formats and
 *   events should be defined by <pmu_type>_<asic_type>_formats and
 *   <pmu_type>_<asic_type>_events respectively.
 *
 * - Event config typed
 *   Events in /sys/bus/event_source/devices/amdgpu_<dev_num> have performance
 *   counter operations that can be handled by multiple IPs dictated by their
 *   "type" format field. Formats and events should be defined by
 *   amdgpu_pmu_formats and <asic_type>_events respectively. Format field
 *   "type" is generated in amdgpu_pmu_event_show and defined in
 *   <asic_type>_event_config_types.
 */
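
/*
 * A sketch of typical usage from user space with the perf tool, assuming a
 * Vega20 at device index 0 (the event names come from the tables in this
 * file; the tool resolves them through the sysfs entries described above):
 *
 *   # PMU-typed DF event
 *   perf stat -e amdgpu_df_0/cake0_pcsout_txdata/ -a sleep 1
 *
 *   # event-config-typed XGMI event
 *   perf stat -e amdgpu_0/xgmi_link0_data_outbound/ -a sleep 1
 */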

static struct amdgpu_pmu_attr amdgpu_pmu_formats[NUM_FORMATS_AMDGPU_PMU] = {
	{ .name = "event", .config = "config:0-7" },
	{ .name = "instance", .config = "config:8-15" },
	{ .name = "umask", .config = "config:16-23"},
	{ .name = "type", .config = "config:56-63"}
};
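
/*
 * Worked example of how the fields above pack into perf_event_attr.config
 * (illustrative; assumes AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI == 0x1):
 *
 *   event=0x7, instance=0x46, umask=0x2, type=0x1
 *   config = 0x7 | (0x46 << 8) | (0x2 << 16) | (0x1ULL << 56)
 *          = 0x0100000000024607
 *
 * amdgpu_perf_add() later recovers the type with
 * (config >> AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT) &
 * AMDGPU_PMU_EVENT_CONFIG_TYPE_MASK.
 */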

/* Vega20 events */
static struct amdgpu_pmu_attr vega20_events[NUM_EVENTS_VEGA20_MAX] = {
	{ .name = "xgmi_link0_data_outbound",
		.config = "event=0x7,instance=0x46,umask=0x2" },
	{ .name = "xgmi_link1_data_outbound",
		.config = "event=0x7,instance=0x47,umask=0x2" }
};

static struct amdgpu_pmu_type vega20_types[NUM_EVENT_TYPES_VEGA20] = {
	{ .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI,
		.num_of_type = NUM_EVENTS_VEGA20_XGMI }
};

static struct amdgpu_pmu_config vega20_config = {
	.formats = amdgpu_pmu_formats,
	.num_formats = ARRAY_SIZE(amdgpu_pmu_formats),
	.events = vega20_events,
	.num_events = ARRAY_SIZE(vega20_events),
	.types = vega20_types,
	.num_types = ARRAY_SIZE(vega20_types)
};

/* Vega20 data fabric (DF) events */
static struct amdgpu_pmu_attr df_vega20_formats[NUM_FORMATS_DF_VEGA20] = {
	{ .name = "event", .config = "config:0-7" },
	{ .name = "instance", .config = "config:8-15" },
	{ .name = "umask", .config = "config:16-23"}
};
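
/*
 * Note: the DF PMU is PMU typed, so its formats omit the "type" field;
 * amdgpu_perf_add() hardwires its config_base to
 * AMDGPU_PMU_EVENT_CONFIG_TYPE_DF instead.
 */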

static struct amdgpu_pmu_attr df_vega20_events[NUM_EVENTS_DF_VEGA20] = {
	{ .name = "cake0_pcsout_txdata",
		.config = "event=0x7,instance=0x46,umask=0x2" },
	{ .name = "cake1_pcsout_txdata",
		.config = "event=0x7,instance=0x47,umask=0x2" },
	{ .name = "cake0_pcsout_txmeta",
		.config = "event=0x7,instance=0x46,umask=0x4" },
	{ .name = "cake1_pcsout_txmeta",
		.config = "event=0x7,instance=0x47,umask=0x4" },
	{ .name = "cake0_ftiinstat_reqalloc",
		.config = "event=0xb,instance=0x46,umask=0x4" },
	{ .name = "cake1_ftiinstat_reqalloc",
		.config = "event=0xb,instance=0x47,umask=0x4" },
	{ .name = "cake0_ftiinstat_rspalloc",
		.config = "event=0xb,instance=0x46,umask=0x8" },
	{ .name = "cake1_ftiinstat_rspalloc",
		.config = "event=0xb,instance=0x47,umask=0x8" }
};

static struct amdgpu_pmu_config df_vega20_config = {
	.formats = df_vega20_formats,
	.num_formats = ARRAY_SIZE(df_vega20_formats),
	.events = df_vega20_events,
	.num_events = ARRAY_SIZE(df_vega20_events),
	.types = NULL,
	.num_types = 0
};

/* Arcturus events */
static struct amdgpu_pmu_attr arcturus_events[NUM_EVENTS_ARCTURUS_MAX] = {
	{ .name = "xgmi_link0_data_outbound",
		.config = "event=0x7,instance=0x4b,umask=0x2" },
	{ .name = "xgmi_link1_data_outbound",
		.config = "event=0x7,instance=0x4c,umask=0x2" },
	{ .name = "xgmi_link2_data_outbound",
		.config = "event=0x7,instance=0x4d,umask=0x2" },
	{ .name = "xgmi_link3_data_outbound",
		.config = "event=0x7,instance=0x4e,umask=0x2" },
	{ .name = "xgmi_link4_data_outbound",
		.config = "event=0x7,instance=0x4f,umask=0x2" },
	{ .name = "xgmi_link5_data_outbound",
		.config = "event=0x7,instance=0x50,umask=0x2" }
};

static struct amdgpu_pmu_type arcturus_types[NUM_EVENT_TYPES_ARCTURUS] = {
	{ .type = AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI,
		.num_of_type = NUM_EVENTS_ARCTURUS_XGMI }
};

static struct amdgpu_pmu_config arcturus_config = {
	.formats = amdgpu_pmu_formats,
	.num_formats = ARRAY_SIZE(amdgpu_pmu_formats),
	.events = arcturus_events,
	.num_events = ARRAY_SIZE(arcturus_events),
	.types = arcturus_types,
	.num_types = ARRAY_SIZE(arcturus_types)
};

/* initialize perf counter */
static int amdgpu_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/*
	 * check the event attr type; returning -ENOENT tells the perf core
	 * this PMU does not own the event, so enumeration moves on
	 */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* update the hw_perf_event struct with config data */
	hwc->config = event->attr.config;
	hwc->config_base = AMDGPU_PMU_PERF_TYPE_NONE;

	return 0;
}

/* start perf counter */
static void amdgpu_perf_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						  struct amdgpu_pmu_entry,
						  pmu);
	int target_cntr = 0;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	if ((!pe->adev->df.funcs) ||
	    (!pe->adev->df.funcs->pmc_start))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
		if (!(flags & PERF_EF_RELOAD)) {
			target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,
						hwc->config, 0 /* unused */,
						1 /* add counter */);
			if (target_cntr < 0)
				break;

			hwc->idx = target_cntr;
		}

		pe->adev->df.funcs->pmc_start(pe->adev, hwc->config,
								hwc->idx, 0);
		break;
	default:
		break;
	}

	perf_event_update_userpage(event);
}

/* read perf counter */
static void amdgpu_perf_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						  struct amdgpu_pmu_entry,
						  pmu);
	u64 count, prev;

	if ((!pe->adev->df.funcs) ||
	    (!pe->adev->df.funcs->pmc_get_count))
		return;

	prev = local64_read(&hwc->prev_count);
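	/*
	 * Lock-free update: re-read the hardware count until prev_count can
	 * be advanced from the value we sampled, so concurrent readers never
	 * account the same delta twice.
	 */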
	do {
		switch (hwc->config_base) {
		case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
		case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
			pe->adev->df.funcs->pmc_get_count(pe->adev,
						hwc->config, hwc->idx, &count);
			break;
		default:
			count = 0;
			break;
		}
	} while (!local64_try_cmpxchg(&hwc->prev_count, &prev, count));

	local64_add(count - prev, &event->count);
}

/* stop perf counter */
static void amdgpu_perf_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						  struct amdgpu_pmu_entry,
						  pmu);

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	if ((!pe->adev->df.funcs) ||
	    (!pe->adev->df.funcs->pmc_stop))
		return;

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx,
									0);
		break;
	default:
		break;
	}

	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	amdgpu_perf_read(event);
	hwc->state |= PERF_HES_UPTODATE;
}

/* add perf counter */
static int amdgpu_perf_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	int retval = 0, target_cntr;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						  struct amdgpu_pmu_entry,
						  pmu);

	if ((!pe->adev->df.funcs) ||
	    (!pe->adev->df.funcs->pmc_start))
		return -EINVAL;

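	/*
	 * Derive the IP backing this event: PMU-typed entries map directly to
	 * one IP, while event-config-typed entries carry the IP in config
	 * bits 56-63. E.g. config 0x0100000000024607 gives config_base
	 * (0x0100000000024607 >> 56) & 0xff == 0x1 (illustrative; assumes
	 * SHIFT == 56, MASK == 0xff and XGMI type == 0x1 in amdgpu_pmu.h).
	 */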
	switch (pe->pmu_perf_type) {
	case AMDGPU_PMU_PERF_TYPE_DF:
		hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF;
		break;
	case AMDGPU_PMU_PERF_TYPE_ALL:
		hwc->config_base = (hwc->config >>
					AMDGPU_PMU_EVENT_CONFIG_TYPE_SHIFT) &
					AMDGPU_PMU_EVENT_CONFIG_TYPE_MASK;
		break;
	}

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
		target_cntr = pe->adev->df.funcs->pmc_start(pe->adev,
						hwc->config, 0 /* unused */,
						1 /* add counter */);
		if (target_cntr < 0)
			retval = target_cntr;
		else
			hwc->idx = target_cntr;

		break;
	default:
		return 0;
	}

	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		amdgpu_perf_start(event, PERF_EF_RELOAD);

	return retval;
}

/* delete perf counter */
static void amdgpu_perf_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct amdgpu_pmu_entry *pe = container_of(event->pmu,
						  struct amdgpu_pmu_entry,
						  pmu);
	if ((!pe->adev->df.funcs) ||
	    (!pe->adev->df.funcs->pmc_stop))
		return;

	amdgpu_perf_stop(event, PERF_EF_UPDATE);

	switch (hwc->config_base) {
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_DF:
	case AMDGPU_PMU_EVENT_CONFIG_TYPE_XGMI:
		pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx,
									1);
		break;
	default:
		break;
	}

	perf_event_update_userpage(event);
}

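/*
 * Populate attrs[s_offset..e_offset) of the shared sysfs attribute array
 * from the matching slice of events[], tagging each attribute with the given
 * event-config type (shown as ",type=0x<type>" by amdgpu_pmu_event_show
 * when non-zero).
 */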
static void amdgpu_pmu_create_event_attrs_by_type(
				struct attribute_group *attr_group,
				struct amdgpu_pmu_event_attribute *pmu_attr,
				struct amdgpu_pmu_attr events[],
				int s_offset,
				int e_offset,
				unsigned int type)
{
	int i;

	pmu_attr += s_offset;

	for (i = s_offset; i < e_offset; i++) {
		attr_group->attrs[i] = &pmu_attr->attr.attr;
		sysfs_attr_init(&pmu_attr->attr.attr);
		pmu_attr->attr.attr.name = events[i].name;
		pmu_attr->attr.attr.mode = 0444;
		pmu_attr->attr.show = amdgpu_pmu_event_show;
		pmu_attr->event_str = events[i].config;
		pmu_attr->type = type;
		pmu_attr++;
	}
}

static void amdgpu_pmu_create_attrs(struct attribute_group *attr_group,
				struct amdgpu_pmu_event_attribute *pmu_attr,
				struct amdgpu_pmu_attr events[],
				int num_events)
{
	amdgpu_pmu_create_event_attrs_by_type(attr_group, pmu_attr, events, 0,
				num_events, AMDGPU_PMU_EVENT_CONFIG_TYPE_NONE);
}


static int amdgpu_pmu_alloc_pmu_attrs(
				struct attribute_group *fmt_attr_group,
				struct amdgpu_pmu_event_attribute **fmt_attr,
				struct attribute_group *evt_attr_group,
				struct amdgpu_pmu_event_attribute **evt_attr,
				struct amdgpu_pmu_config *config)
{
	*fmt_attr = kcalloc(config->num_formats, sizeof(**fmt_attr),
				GFP_KERNEL);

	if (!(*fmt_attr))
		return -ENOMEM;

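	/* sysfs attribute arrays are NULL-terminated, hence num + 1 slots */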
	fmt_attr_group->attrs = kcalloc(config->num_formats + 1,
				sizeof(*fmt_attr_group->attrs), GFP_KERNEL);

	if (!fmt_attr_group->attrs)
		goto err_fmt_attr_grp;

	*evt_attr = kcalloc(config->num_events, sizeof(**evt_attr), GFP_KERNEL);

	if (!(*evt_attr))
		goto err_evt_attr;

	evt_attr_group->attrs = kcalloc(config->num_events + 1,
				sizeof(*evt_attr_group->attrs), GFP_KERNEL);

	if (!evt_attr_group->attrs)
		goto err_evt_attr_grp;

	return 0;
err_evt_attr_grp:
	kfree(*evt_attr);
err_evt_attr:
	kfree(fmt_attr_group->attrs);
err_fmt_attr_grp:
	kfree(*fmt_attr);
	return -ENOMEM;
}

/* init pmu tracking per pmu type */
static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry,
			struct amdgpu_pmu_config *config)
{
	const struct attribute_group *attr_groups[] = {
		&pmu_entry->fmt_attr_group,
		&pmu_entry->evt_attr_group,
		NULL
	};
	char pmu_name[PMU_NAME_SIZE];
	int ret = 0, total_num_events = 0;

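	/*
	 * These counters are system-wide only; perf_invalid_context opts out
	 * of per-task contexts, so events cannot be attached to a task.
	 */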
	pmu_entry->pmu = (struct pmu){
		.event_init = amdgpu_perf_event_init,
		.add = amdgpu_perf_add,
		.del = amdgpu_perf_del,
		.start = amdgpu_perf_start,
		.stop = amdgpu_perf_stop,
		.read = amdgpu_perf_read,
		.task_ctx_nr = perf_invalid_context,
	};

	ret = amdgpu_pmu_alloc_pmu_attrs(&pmu_entry->fmt_attr_group,
					&pmu_entry->fmt_attr,
					&pmu_entry->evt_attr_group,
					&pmu_entry->evt_attr,
					config);

	if (ret)
		goto err_out;

	amdgpu_pmu_create_attrs(&pmu_entry->fmt_attr_group, pmu_entry->fmt_attr,
					config->formats, config->num_formats);

	if (pmu_entry->pmu_perf_type == AMDGPU_PMU_PERF_TYPE_ALL) {
		int i;

		for (i = 0; i < config->num_types; i++) {
			amdgpu_pmu_create_event_attrs_by_type(
					&pmu_entry->evt_attr_group,
					pmu_entry->evt_attr,
					config->events,
					total_num_events,
					total_num_events +
					config->types[i].num_of_type,
					config->types[i].type);
			total_num_events += config->types[i].num_of_type;
		}
	} else {
		amdgpu_pmu_create_attrs(&pmu_entry->evt_attr_group,
					pmu_entry->evt_attr,
					config->events, config->num_events);
		total_num_events = config->num_events;
	}

	pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
								GFP_KERNEL);

	if (!pmu_entry->pmu.attr_groups) {
		ret = -ENOMEM;
		goto err_attr_group;
	}

	snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix,
				adev_to_drm(pmu_entry->adev)->primary->index);

	ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1);

	if (ret)
		goto err_register;

	if (pmu_entry->pmu_perf_type != AMDGPU_PMU_PERF_TYPE_ALL)
		pr_info("Detected AMDGPU %s Counters. # of Counters = %d.\n",
				pmu_entry->pmu_type_name, total_num_events);
	else
		pr_info("Detected AMDGPU %d Perf Events.\n", total_num_events);

	list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list);

	return 0;
err_register:
	kfree(pmu_entry->pmu.attr_groups);
err_attr_group:
	kfree(pmu_entry->fmt_attr_group.attrs);
	kfree(pmu_entry->fmt_attr);
	kfree(pmu_entry->evt_attr_group.attrs);
	kfree(pmu_entry->evt_attr);
err_out:
	pr_warn("Error initializing AMDGPU %s PMUs.\n",
		pmu_entry->pmu_type_name);
	return ret;
}

/* destroy all pmu data associated with target device */
void amdgpu_pmu_fini(struct amdgpu_device *adev)
{
	struct amdgpu_pmu_entry *pe, *temp;

	list_for_each_entry_safe(pe, temp, &amdgpu_pmu_list, entry) {
		if (pe->adev != adev)
			continue;
		list_del(&pe->entry);
		perf_pmu_unregister(&pe->pmu);
		kfree(pe->pmu.attr_groups);
		kfree(pe->fmt_attr_group.attrs);
		kfree(pe->fmt_attr);
		kfree(pe->evt_attr_group.attrs);
		kfree(pe->evt_attr);
		kfree(pe);
	}
}

static struct amdgpu_pmu_entry *create_pmu_entry(struct amdgpu_device *adev,
						unsigned int pmu_type,
						char *pmu_type_name,
						char *pmu_file_prefix)
{
	struct amdgpu_pmu_entry *pmu_entry;

	pmu_entry = kzalloc(sizeof(struct amdgpu_pmu_entry), GFP_KERNEL);

	if (!pmu_entry)
		return pmu_entry;

	pmu_entry->adev = adev;
	pmu_entry->fmt_attr_group.name = "format";
	pmu_entry->fmt_attr_group.attrs = NULL;
	pmu_entry->evt_attr_group.name = "events";
	pmu_entry->evt_attr_group.attrs = NULL;
	pmu_entry->pmu_perf_type = pmu_type;
	pmu_entry->pmu_type_name = pmu_type_name;
	pmu_entry->pmu_file_prefix = pmu_file_prefix;

	return pmu_entry;
}

/* init amdgpu_pmu */
int amdgpu_pmu_init(struct amdgpu_device *adev)
{
	int ret = 0;
	struct amdgpu_pmu_entry *pmu_entry, *pmu_entry_df;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		pmu_entry_df = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_DF,
						"DF", "amdgpu_df");

		if (!pmu_entry_df)
			return -ENOMEM;

		ret = init_pmu_entry_by_type_and_add(pmu_entry_df,
						&df_vega20_config);

		if (ret) {
			kfree(pmu_entry_df);
			return ret;
		}

		pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL,
						"", "amdgpu");

		if (!pmu_entry) {
			amdgpu_pmu_fini(adev);
			return -ENOMEM;
		}

		ret = init_pmu_entry_by_type_and_add(pmu_entry,
						&vega20_config);

		if (ret) {
			kfree(pmu_entry);
			amdgpu_pmu_fini(adev);
			return ret;
		}

		break;
	case CHIP_ARCTURUS:
		pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL,
						"", "amdgpu");
		if (!pmu_entry)
			return -ENOMEM;

		ret = init_pmu_entry_by_type_and_add(pmu_entry,
					&arcturus_config);

		if (ret) {
			kfree(pmu_entry);
			return ret;
		}

		break;

	default:
		return 0;
	}

	return ret;
}