// SPDX-License-Identifier: GPL-2.0
/*
 * Synopsys DesignWare PCIe PMU driver
 *
 * Copyright (C) 2021-2023 Alibaba Inc.
 */

#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpuhotplug.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/smp.h>
#include <linux/sysfs.h>
#include <linux/types.h>

#define DWC_PCIE_VSEC_RAS_DES_ID		0x02
#define DWC_PCIE_EVENT_CNT_CTL			0x8

/*
 * Event Counter Data Select includes two parts:
 * - 27-24: Group number (4-bit: 0..0x7)
 * - 23-16: Event number (8-bit: 0..0x13) within the Group
 *
 * Put them together as in the TRM.
 */
#define DWC_PCIE_CNT_EVENT_SEL			GENMASK(27, 16)
#define DWC_PCIE_CNT_LANE_SEL			GENMASK(11, 8)
#define DWC_PCIE_CNT_STATUS			BIT(7)
#define DWC_PCIE_CNT_ENABLE			GENMASK(4, 2)
#define DWC_PCIE_PER_EVENT_OFF			0x1
#define DWC_PCIE_PER_EVENT_ON			0x3
#define DWC_PCIE_EVENT_CLEAR			GENMASK(1, 0)
#define DWC_PCIE_EVENT_PER_CLEAR		0x1

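/*
 * Illustrative example of the encoding above: lane event ID 0x605
 * (tx_nullified_tlp in the event table below) already packs the group
 * and event numbers, with group 0x6 landing in bits [27:24] and event
 * 0x05 in bits [23:16], so FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, 0x605)
 * writes the complete selector into EVENT_CNT_CTL in one step.
 */
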
#define DWC_PCIE_EVENT_CNT_DATA			0xC

#define DWC_PCIE_TIME_BASED_ANAL_CTL		0x10
#define DWC_PCIE_TIME_BASED_REPORT_SEL		GENMASK(31, 24)
#define DWC_PCIE_TIME_BASED_DURATION_SEL	GENMASK(15, 8)
#define DWC_PCIE_DURATION_MANUAL_CTL		0x0
#define DWC_PCIE_DURATION_1MS			0x1
#define DWC_PCIE_DURATION_10MS			0x2
#define DWC_PCIE_DURATION_100MS			0x3
#define DWC_PCIE_DURATION_1S			0x4
#define DWC_PCIE_DURATION_2S			0x5
#define DWC_PCIE_DURATION_4S			0x6
#define DWC_PCIE_DURATION_4US			0xFF
#define DWC_PCIE_TIME_BASED_TIMER_START		BIT(0)
#define DWC_PCIE_TIME_BASED_CNT_ENABLE		0x1

#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW	0x14
#define DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH	0x18

/* Event attributes */
#define DWC_PCIE_CONFIG_EVENTID			GENMASK(15, 0)
#define DWC_PCIE_CONFIG_TYPE			GENMASK(19, 16)
#define DWC_PCIE_CONFIG_LANE			GENMASK(27, 20)

#define DWC_PCIE_EVENT_ID(event)	FIELD_GET(DWC_PCIE_CONFIG_EVENTID, (event)->attr.config)
#define DWC_PCIE_EVENT_TYPE(event)	FIELD_GET(DWC_PCIE_CONFIG_TYPE, (event)->attr.config)
#define DWC_PCIE_EVENT_LANE(event)	FIELD_GET(DWC_PCIE_CONFIG_LANE, (event)->attr.config)

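/*
 * perf fills attr.config from the "format" strings published below; as
 * an illustrative pairing based on the event table later in this file,
 * a request such as "rx_memory_read,lane=1" ends up with eventid=0x70C
 * in bits [15:0], type=0x1 (DWC_PCIE_LANE_EVENT) in bits [19:16] and
 * lane=1 in bits [27:20].
 */
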
enum dwc_pcie_event_type {
	DWC_PCIE_TIME_BASE_EVENT,
	DWC_PCIE_LANE_EVENT,
	DWC_PCIE_EVENT_TYPE_MAX,
};

#define DWC_PCIE_LANE_EVENT_MAX_PERIOD		GENMASK_ULL(31, 0)
#define DWC_PCIE_MAX_PERIOD			GENMASK_ULL(63, 0)

struct dwc_pcie_pmu {
	struct pmu		pmu;
	struct pci_dev		*pdev;		/* Root Port device */
	u16			ras_des_offset;
	u32			nr_lanes;

	struct hlist_node	cpuhp_node;
	struct perf_event	*event[DWC_PCIE_EVENT_TYPE_MAX];
	int			on_cpu;
};

#define to_dwc_pcie_pmu(p) (container_of(p, struct dwc_pcie_pmu, pmu))

static int dwc_pcie_pmu_hp_state;
static struct list_head dwc_pcie_dev_info_head =
				LIST_HEAD_INIT(dwc_pcie_dev_info_head);
static bool notify;

struct dwc_pcie_dev_info {
	struct platform_device *plat_dev;
	struct pci_dev *pdev;
	struct list_head dev_node;
};

struct dwc_pcie_vendor_id {
	int vendor_id;
};

static const struct dwc_pcie_vendor_id dwc_pcie_vendor_ids[] = {
	{.vendor_id = PCI_VENDOR_ID_ALIBABA },
	{.vendor_id = PCI_VENDOR_ID_AMPERE },
	{.vendor_id = PCI_VENDOR_ID_QCOM },
	{} /* terminator */
};

static ssize_t cpumask_show(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(dev_get_drvdata(dev));

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pcie_pmu->on_cpu));
}
static DEVICE_ATTR_RO(cpumask);

static struct attribute *dwc_pcie_pmu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL
};

static struct attribute_group dwc_pcie_cpumask_attr_group = {
	.attrs = dwc_pcie_pmu_cpumask_attrs,
};

struct dwc_pcie_format_attr {
	struct device_attribute attr;
	u64 field;
	int config;
};

PMU_FORMAT_ATTR(eventid, "config:0-15");
PMU_FORMAT_ATTR(type, "config:16-19");
PMU_FORMAT_ATTR(lane, "config:20-27");

static struct attribute *dwc_pcie_format_attrs[] = {
	&format_attr_type.attr,
	&format_attr_eventid.attr,
	&format_attr_lane.attr,
	NULL,
};

static struct attribute_group dwc_pcie_format_attrs_group = {
	.name = "format",
	.attrs = dwc_pcie_format_attrs,
};

struct dwc_pcie_event_attr {
	struct device_attribute attr;
	enum dwc_pcie_event_type type;
	u16 eventid;
	u8 lane;
};

static ssize_t dwc_pcie_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dwc_pcie_event_attr *eattr;

	eattr = container_of(attr, typeof(*eattr), attr);

	if (eattr->type == DWC_PCIE_LANE_EVENT)
		return sysfs_emit(buf, "eventid=0x%x,type=0x%x,lane=?\n",
				  eattr->eventid, eattr->type);
	else if (eattr->type == DWC_PCIE_TIME_BASE_EVENT)
		return sysfs_emit(buf, "eventid=0x%x,type=0x%x\n",
				  eattr->eventid, eattr->type);

	return 0;
}

#define DWC_PCIE_EVENT_ATTR(_name, _type, _eventid, _lane)		\
	(&((struct dwc_pcie_event_attr[]) {{				\
		.attr = __ATTR(_name, 0444, dwc_pcie_event_show, NULL),	\
		.type = _type,						\
		.eventid = _eventid,					\
		.lane = _lane,						\
	}})[0].attr.attr)
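
/*
 * DWC_PCIE_EVENT_ATTR relies on a C99 compound literal: the one-element
 * array of struct dwc_pcie_event_attr is used at file scope, so it has
 * static storage duration and taking the address of its embedded
 * attribute stays valid for the lifetime of the module.
 */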

#define DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(_name, _eventid)		\
	DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_TIME_BASE_EVENT, _eventid, 0)
#define DWC_PCIE_PMU_LANE_EVENT_ATTR(_name, _eventid)			\
	DWC_PCIE_EVENT_ATTR(_name, DWC_PCIE_LANE_EVENT, _eventid, 0)

static struct attribute *dwc_pcie_pmu_time_event_attrs[] = {
	/* Group #0 */
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(one_cycle, 0x00),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_L0S, 0x01),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(RX_L0S, 0x02),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L0, 0x03),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1, 0x04),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_1, 0x05),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_2, 0x06),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(CFG_RCVRY, 0x07),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(TX_RX_L0S, 0x08),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(L1_AUX, 0x09),

	/* Group #1 */
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_pcie_tlp_data_payload, 0x20),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_pcie_tlp_data_payload, 0x21),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(tx_ccix_tlp_data_payload, 0x22),
	DWC_PCIE_PMU_TIME_BASE_EVENT_ATTR(rx_ccix_tlp_data_payload, 0x23),

	/*
	 * Leave it to the user to specify the lane ID to avoid generating
	 * a list of hundreds of events.
	 */
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ack_dllp, 0x600),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_update_fc_dllp, 0x601),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ack_dllp, 0x602),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_update_fc_dllp, 0x603),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_nullified_tlp, 0x604),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_nullified_tlp, 0x605),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_duplicate_tlp, 0x606),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_write, 0x700),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_memory_read, 0x701),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_write, 0x702),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_configuration_read, 0x703),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_write, 0x704),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_io_read, 0x705),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_without_data, 0x706),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_completion_with_data, 0x707),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_message_tlp, 0x708),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_atomic, 0x709),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_tlp_with_prefix, 0x70A),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_write, 0x70B),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_memory_read, 0x70C),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_write, 0x70F),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_io_read, 0x710),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_without_data, 0x711),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_completion_with_data, 0x712),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_message_tlp, 0x713),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_atomic, 0x714),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_tlp_with_prefix, 0x715),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(tx_ccix_tlp, 0x716),
	DWC_PCIE_PMU_LANE_EVENT_ATTR(rx_ccix_tlp, 0x717),
	NULL
};

static const struct attribute_group dwc_pcie_event_attrs_group = {
	.name = "events",
	.attrs = dwc_pcie_pmu_time_event_attrs,
};

static const struct attribute_group *dwc_pcie_attr_groups[] = {
	&dwc_pcie_event_attrs_group,
	&dwc_pcie_format_attrs_group,
	&dwc_pcie_cpumask_attr_group,
	NULL
};
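
/*
 * With these groups registered, each PMU instance is expected to show up
 * under /sys/bus/event_source/devices/dwc_rootport_<sbdf>/ with "events",
 * "format" and "cpumask" entries; that path follows the standard perf
 * sysfs layout rather than anything defined in this file.
 */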

static void dwc_pcie_pmu_lane_event_enable(struct dwc_pcie_pmu *pcie_pmu,
					   bool enable)
{
	struct pci_dev *pdev = pcie_pmu->pdev;
	u16 ras_des_offset = pcie_pmu->ras_des_offset;

	if (enable)
		pci_clear_and_set_config_dword(pdev,
					ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
					DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_ON);
	else
		pci_clear_and_set_config_dword(pdev,
					ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
					DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF);
}

static void dwc_pcie_pmu_time_based_event_enable(struct dwc_pcie_pmu *pcie_pmu,
					  bool enable)
{
	struct pci_dev *pdev = pcie_pmu->pdev;
	u16 ras_des_offset = pcie_pmu->ras_des_offset;

	pci_clear_and_set_config_dword(pdev,
				       ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL,
				       DWC_PCIE_TIME_BASED_TIMER_START, enable);
}

static u64 dwc_pcie_pmu_read_lane_event_counter(struct perf_event *event)
{
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	struct pci_dev *pdev = pcie_pmu->pdev;
	u16 ras_des_offset = pcie_pmu->ras_des_offset;
	u32 val;

	pci_read_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_DATA, &val);

	return val;
}

static u64 dwc_pcie_pmu_read_time_based_counter(struct perf_event *event)
{
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	struct pci_dev *pdev = pcie_pmu->pdev;
	int event_id = DWC_PCIE_EVENT_ID(event);
	u16 ras_des_offset = pcie_pmu->ras_des_offset;
	u32 lo, hi, ss;
	u64 val;

	/*
	 * The 64-bit value of the data counter is spread across two
	 * registers that are not synchronized. In order to read them
	 * atomically, ensure that the high 32 bits match before and after
	 * reading the low 32 bits.
	 */
	pci_read_config_dword(pdev,
		ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH, &hi);
	do {
		/* snapshot the high 32 bits */
		ss = hi;

		pci_read_config_dword(
			pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_LOW,
			&lo);
		pci_read_config_dword(
			pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_DATA_REG_HIGH,
			&hi);
	} while (hi != ss);

	val = ((u64)hi << 32) | lo;
	/*
	 * The Group#1 events measure the amount of data processed in 16-byte
	 * units. Simplify the end-user interface by multiplying the counter
	 * at the point of read.
	 */
	if (event_id >= 0x20 && event_id <= 0x23)
		val *= 16;

	return val;
}

static void dwc_pcie_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
	u64 delta, prev, now = 0;

	do {
		prev = local64_read(&hwc->prev_count);

		if (type == DWC_PCIE_LANE_EVENT)
			now = dwc_pcie_pmu_read_lane_event_counter(event);
		else if (type == DWC_PCIE_TIME_BASE_EVENT)
			now = dwc_pcie_pmu_read_time_based_counter(event);

	} while (local64_cmpxchg(&hwc->prev_count, prev, now) != prev);

	delta = (now - prev) & DWC_PCIE_MAX_PERIOD;
	/* 32-bit counter for Lane Event Counting */
	if (type == DWC_PCIE_LANE_EVENT)
		delta &= DWC_PCIE_LANE_EVENT_MAX_PERIOD;

	local64_add(delta, &event->count);
}
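
/*
 * Worked example of the wraparound arithmetic above (illustrative
 * values): for a lane event with prev = 0xFFFFFFF0 and now = 0x10 after
 * the 32-bit hardware counter wraps, (now - prev) underflows to
 * 0xFFFFFFFF00000020, and masking with DWC_PCIE_LANE_EVENT_MAX_PERIOD
 * yields the correct delta of 0x20.
 */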

static int dwc_pcie_pmu_event_init(struct perf_event *event)
{
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
	struct perf_event *sibling;
	u32 lane;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* We don't support sampling */
	if (is_sampling_event(event))
		return -EINVAL;

	/* We cannot support task-bound events */
	if (event->cpu < 0 || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->group_leader != event &&
	    !is_software_event(event->group_leader))
		return -EINVAL;

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu && !is_software_event(sibling))
			return -EINVAL;
	}

	if (type < 0 || type >= DWC_PCIE_EVENT_TYPE_MAX)
		return -EINVAL;

	if (type == DWC_PCIE_LANE_EVENT) {
		lane = DWC_PCIE_EVENT_LANE(event);
		if (lane < 0 || lane >= pcie_pmu->nr_lanes)
			return -EINVAL;
	}

	event->cpu = pcie_pmu->on_cpu;

	return 0;
}

static void dwc_pcie_pmu_event_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);

	hwc->state = 0;
	local64_set(&hwc->prev_count, 0);

	if (type == DWC_PCIE_LANE_EVENT)
		dwc_pcie_pmu_lane_event_enable(pcie_pmu, true);
	else if (type == DWC_PCIE_TIME_BASE_EVENT)
		dwc_pcie_pmu_time_based_event_enable(pcie_pmu, true);
}

static void dwc_pcie_pmu_event_stop(struct perf_event *event, int flags)
{
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
	struct hw_perf_event *hwc = &event->hw;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (type == DWC_PCIE_LANE_EVENT)
		dwc_pcie_pmu_lane_event_enable(pcie_pmu, false);
	else if (type == DWC_PCIE_TIME_BASE_EVENT)
		dwc_pcie_pmu_time_based_event_enable(pcie_pmu, false);

	dwc_pcie_pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int dwc_pcie_pmu_event_add(struct perf_event *event, int flags)
{
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	struct pci_dev *pdev = pcie_pmu->pdev;
	struct hw_perf_event *hwc = &event->hw;
	enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);
	int event_id = DWC_PCIE_EVENT_ID(event);
	int lane = DWC_PCIE_EVENT_LANE(event);
	u16 ras_des_offset = pcie_pmu->ras_des_offset;
	u32 ctrl;

	/* There is one counter per event type; reject if it is in use */
	if (pcie_pmu->event[type])
		return -ENOSPC;

	pcie_pmu->event[type] = event;
	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;

	if (type == DWC_PCIE_LANE_EVENT) {
		/* EVENT_COUNTER_DATA_REG must be cleared manually */
		ctrl = FIELD_PREP(DWC_PCIE_CNT_EVENT_SEL, event_id) |
			FIELD_PREP(DWC_PCIE_CNT_LANE_SEL, lane) |
			FIELD_PREP(DWC_PCIE_CNT_ENABLE, DWC_PCIE_PER_EVENT_OFF) |
			FIELD_PREP(DWC_PCIE_EVENT_CLEAR, DWC_PCIE_EVENT_PER_CLEAR);
		pci_write_config_dword(pdev, ras_des_offset + DWC_PCIE_EVENT_CNT_CTL,
				       ctrl);
	} else if (type == DWC_PCIE_TIME_BASE_EVENT) {
		/*
		 * TIME_BASED_ANAL_DATA_REG is a 64-bit register, so it can
		 * safely be used with any manually controlled duration. It
		 * is cleared when the next measurement starts.
		 */
		ctrl = FIELD_PREP(DWC_PCIE_TIME_BASED_REPORT_SEL, event_id) |
			FIELD_PREP(DWC_PCIE_TIME_BASED_DURATION_SEL,
				   DWC_PCIE_DURATION_MANUAL_CTL) |
			DWC_PCIE_TIME_BASED_CNT_ENABLE;
		pci_write_config_dword(
			pdev, ras_des_offset + DWC_PCIE_TIME_BASED_ANAL_CTL, ctrl);
	}

	if (flags & PERF_EF_START)
		dwc_pcie_pmu_event_start(event, PERF_EF_RELOAD);

	perf_event_update_userpage(event);

	return 0;
}

static void dwc_pcie_pmu_event_del(struct perf_event *event, int flags)
{
	struct dwc_pcie_pmu *pcie_pmu = to_dwc_pcie_pmu(event->pmu);
	enum dwc_pcie_event_type type = DWC_PCIE_EVENT_TYPE(event);

	dwc_pcie_pmu_event_stop(event, flags | PERF_EF_UPDATE);
	perf_event_update_userpage(event);
	pcie_pmu->event[type] = NULL;
}

static void dwc_pcie_pmu_remove_cpuhp_instance(void *hotplug_node)
{
	cpuhp_state_remove_instance_nocalls(dwc_pcie_pmu_hp_state, hotplug_node);
}

/*
 * Find the RAS DES capability device info bound to a PCI device.
 * @pdev: The PCI device.
 */
static struct dwc_pcie_dev_info *dwc_pcie_find_dev_info(struct pci_dev *pdev)
{
	struct dwc_pcie_dev_info *dev_info;

	list_for_each_entry(dev_info, &dwc_pcie_dev_info_head, dev_node)
		if (dev_info->pdev == pdev)
			return dev_info;

	return NULL;
}

static void dwc_pcie_unregister_pmu(void *data)
{
	struct dwc_pcie_pmu *pcie_pmu = data;

	perf_pmu_unregister(&pcie_pmu->pmu);
}

static bool dwc_pcie_match_des_cap(struct pci_dev *pdev)
{
	const struct dwc_pcie_vendor_id *vid;
	u16 vsec = 0;
	u32 val;

	if (!pci_is_pcie(pdev) || !(pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT))
		return false;

	for (vid = dwc_pcie_vendor_ids; vid->vendor_id; vid++) {
		vsec = pci_find_vsec_capability(pdev, vid->vendor_id,
						DWC_PCIE_VSEC_RAS_DES_ID);
		if (vsec)
			break;
	}
	if (!vsec)
		return false;

	pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
	if (PCI_VNDR_HEADER_REV(val) != 0x04)
		return false;

	pci_dbg(pdev,
		"Detected PCIe Vendor-Specific Extended Capability RAS DES\n");
	return true;
}

static void dwc_pcie_unregister_dev(struct dwc_pcie_dev_info *dev_info)
{
	platform_device_unregister(dev_info->plat_dev);
	list_del(&dev_info->dev_node);
	kfree(dev_info);
}

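/*
 * The platform device id computed below doubles as the PMU name suffix:
 * bits [31:16] carry the PCI segment (domain) number and bits [15:0]
 * the bus/device/function. As an illustration, a Root Port at
 * 0000:30:03.0 gets sbdf 0x3018, and its PMU is later named
 * "dwc_rootport_3018" in dwc_pcie_pmu_probe().
 */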
static int dwc_pcie_register_dev(struct pci_dev *pdev)
{
	struct platform_device *plat_dev;
	struct dwc_pcie_dev_info *dev_info;
	u32 sbdf;

	sbdf = (pci_domain_nr(pdev->bus) << 16) | PCI_DEVID(pdev->bus->number, pdev->devfn);
	plat_dev = platform_device_register_data(NULL, "dwc_pcie_pmu", sbdf,
						 pdev, sizeof(*pdev));

	if (IS_ERR(plat_dev))
		return PTR_ERR(plat_dev);

	dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
	if (!dev_info)
		return -ENOMEM;

	/* Cache the platform device to handle PCI device hotplug */
	dev_info->plat_dev = plat_dev;
	dev_info->pdev = pdev;
	list_add(&dev_info->dev_node, &dwc_pcie_dev_info_head);

	return 0;
}

static int dwc_pcie_pmu_notifier(struct notifier_block *nb,
				     unsigned long action, void *data)
{
	struct device *dev = data;
	struct pci_dev *pdev = to_pci_dev(dev);
	struct dwc_pcie_dev_info *dev_info;

	switch (action) {
	case BUS_NOTIFY_ADD_DEVICE:
		if (!dwc_pcie_match_des_cap(pdev))
			return NOTIFY_DONE;
		if (dwc_pcie_register_dev(pdev))
			return NOTIFY_BAD;
		break;
	case BUS_NOTIFY_DEL_DEVICE:
		dev_info = dwc_pcie_find_dev_info(pdev);
		if (!dev_info)
			return NOTIFY_DONE;
		dwc_pcie_unregister_dev(dev_info);
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block dwc_pcie_pmu_nb = {
	.notifier_call = dwc_pcie_pmu_notifier,
};

static int dwc_pcie_pmu_probe(struct platform_device *plat_dev)
{
	struct pci_dev *pdev = plat_dev->dev.platform_data;
	struct dwc_pcie_pmu *pcie_pmu;
	char *name;
	u32 sbdf, val;
	u16 vsec;
	int ret;

	vsec = pci_find_vsec_capability(pdev, pdev->vendor,
					DWC_PCIE_VSEC_RAS_DES_ID);
	pci_read_config_dword(pdev, vsec + PCI_VNDR_HEADER, &val);
	sbdf = plat_dev->id;
	name = devm_kasprintf(&plat_dev->dev, GFP_KERNEL, "dwc_rootport_%x", sbdf);
	if (!name)
		return -ENOMEM;

	pcie_pmu = devm_kzalloc(&plat_dev->dev, sizeof(*pcie_pmu), GFP_KERNEL);
	if (!pcie_pmu)
		return -ENOMEM;

	pcie_pmu->pdev = pdev;
	pcie_pmu->ras_des_offset = vsec;
	pcie_pmu->nr_lanes = pcie_get_width_cap(pdev);
	pcie_pmu->on_cpu = -1;
	pcie_pmu->pmu = (struct pmu){
		.name		= name,
		.parent		= &pdev->dev,
		.module		= THIS_MODULE,
		.attr_groups	= dwc_pcie_attr_groups,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
		.task_ctx_nr	= perf_invalid_context,
		.event_init	= dwc_pcie_pmu_event_init,
		.add		= dwc_pcie_pmu_event_add,
		.del		= dwc_pcie_pmu_event_del,
		.start		= dwc_pcie_pmu_event_start,
		.stop		= dwc_pcie_pmu_event_stop,
		.read		= dwc_pcie_pmu_event_update,
	};

	/* Add this instance to the list used by the offline callback */
	ret = cpuhp_state_add_instance(dwc_pcie_pmu_hp_state,
				       &pcie_pmu->cpuhp_node);
	if (ret) {
		pci_err(pdev, "Error %d registering hotplug @%x\n", ret, sbdf);
		return ret;
	}

	/* Unwind when the platform driver is removed */
	ret = devm_add_action_or_reset(&plat_dev->dev,
				       dwc_pcie_pmu_remove_cpuhp_instance,
				       &pcie_pmu->cpuhp_node);
	if (ret)
		return ret;

	ret = perf_pmu_register(&pcie_pmu->pmu, name, -1);
	if (ret) {
		pci_err(pdev, "Error %d registering PMU @%x\n", ret, sbdf);
		return ret;
	}
	ret = devm_add_action_or_reset(&plat_dev->dev, dwc_pcie_unregister_pmu,
				       pcie_pmu);
	if (ret)
		return ret;

	return 0;
}

static int dwc_pcie_pmu_online_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
	struct dwc_pcie_pmu *pcie_pmu;

	pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
	if (pcie_pmu->on_cpu == -1)
		pcie_pmu->on_cpu = cpumask_local_spread(
			0, dev_to_node(&pcie_pmu->pdev->dev));

	return 0;
}

static int dwc_pcie_pmu_offline_cpu(unsigned int cpu, struct hlist_node *cpuhp_node)
{
	struct dwc_pcie_pmu *pcie_pmu;
	struct pci_dev *pdev;
	unsigned int target;
	int node;

	pcie_pmu = hlist_entry_safe(cpuhp_node, struct dwc_pcie_pmu, cpuhp_node);
	/* Nothing to do if this CPU doesn't own the PMU */
	if (cpu != pcie_pmu->on_cpu)
		return 0;

	pcie_pmu->on_cpu = -1;
	pdev = pcie_pmu->pdev;
	node = dev_to_node(&pdev->dev);

	target = cpumask_any_and_but(cpumask_of_node(node), cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		target = cpumask_any_but(cpu_online_mask, cpu);

	if (target >= nr_cpu_ids) {
		pci_err(pdev, "There is no CPU to set\n");
		return 0;
	}

	/* This PMU does not support interrupts; just migrate the context. */
	perf_pmu_migrate_context(&pcie_pmu->pmu, cpu, target);
	pcie_pmu->on_cpu = target;

	return 0;
}

static struct platform_driver dwc_pcie_pmu_driver = {
	.probe = dwc_pcie_pmu_probe,
	.driver = {.name = "dwc_pcie_pmu",},
};

static int __init dwc_pcie_pmu_init(void)
{
	struct pci_dev *pdev = NULL;
	int ret;

	for_each_pci_dev(pdev) {
		if (!dwc_pcie_match_des_cap(pdev))
			continue;

		ret = dwc_pcie_register_dev(pdev);
		if (ret) {
			pci_dev_put(pdev);
			return ret;
		}
	}

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/dwc_pcie_pmu:online",
				      dwc_pcie_pmu_online_cpu,
				      dwc_pcie_pmu_offline_cpu);
	if (ret < 0)
		return ret;

	dwc_pcie_pmu_hp_state = ret;

	ret = platform_driver_register(&dwc_pcie_pmu_driver);
	if (ret)
		goto platform_driver_register_err;

	ret = bus_register_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
	if (ret)
		goto bus_register_notifier_err;
	notify = true;

	return 0;

bus_register_notifier_err:
	/* Undo the driver registration if the notifier could not be set up */
	platform_driver_unregister(&dwc_pcie_pmu_driver);
platform_driver_register_err:
	cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);

	return ret;
}

static void __exit dwc_pcie_pmu_exit(void)
{
	struct dwc_pcie_dev_info *dev_info, *tmp;

	if (notify)
		bus_unregister_notifier(&pci_bus_type, &dwc_pcie_pmu_nb);
	list_for_each_entry_safe(dev_info, tmp, &dwc_pcie_dev_info_head, dev_node)
		dwc_pcie_unregister_dev(dev_info);
	platform_driver_unregister(&dwc_pcie_pmu_driver);
	cpuhp_remove_multi_state(dwc_pcie_pmu_hp_state);
}

module_init(dwc_pcie_pmu_init);
module_exit(dwc_pcie_pmu_exit);

MODULE_DESCRIPTION("PMU driver for DesignWare Cores PCI Express Controller");
MODULE_AUTHOR("Shuai Xue <xueshuai@linux.alibaba.com>");
MODULE_LICENSE("GPL v2");
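
/*
 * A sketch of end-user usage, assuming a Root Port whose PMU registered
 * as "dwc_rootport_3018" (the suffix varies with the port's location):
 *
 *	perf stat -a -e dwc_rootport_3018/rx_pcie_tlp_data_payload/ sleep 1
 *	perf stat -a -e dwc_rootport_3018/tx_memory_write,lane=0/ sleep 1
 */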