v6.9.4
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/slab.h>
#include <linux/pci.h>
#include <asm/apicdef.h>
#include <asm/intel-family.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include <linux/perf_event.h>
#include "../perf_event.h"

#define UNCORE_PMU_NAME_LEN		32
#define UNCORE_PMU_HRTIMER_INTERVAL	(60LL * NSEC_PER_SEC)
#define UNCORE_SNB_IMC_HRTIMER_INTERVAL (5ULL * NSEC_PER_SEC)

#define UNCORE_FIXED_EVENT		0xff
#define UNCORE_PMC_IDX_MAX_GENERIC	8
#define UNCORE_PMC_IDX_MAX_FIXED	1
#define UNCORE_PMC_IDX_MAX_FREERUNNING	1
#define UNCORE_PMC_IDX_FIXED		UNCORE_PMC_IDX_MAX_GENERIC
#define UNCORE_PMC_IDX_FREERUNNING	(UNCORE_PMC_IDX_FIXED + \
					UNCORE_PMC_IDX_MAX_FIXED)
#define UNCORE_PMC_IDX_MAX		(UNCORE_PMC_IDX_FREERUNNING + \
					UNCORE_PMC_IDX_MAX_FREERUNNING)
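/*
 * With the definitions above, the counter index space is laid out as:
 * generic counters at indices 0..7, the fixed counter at index 8
 * (UNCORE_PMC_IDX_FIXED) and free-running counters at index 9
 * (UNCORE_PMC_IDX_FREERUNNING), giving UNCORE_PMC_IDX_MAX == 10.
 */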

#define UNCORE_PCI_DEV_FULL_DATA(dev, func, type, idx)	\
		((dev << 24) | (func << 16) | (type << 8) | idx)
#define UNCORE_PCI_DEV_DATA(type, idx)	((type << 8) | idx)
#define UNCORE_PCI_DEV_DEV(data)	((data >> 24) & 0xff)
#define UNCORE_PCI_DEV_FUNC(data)	((data >> 16) & 0xff)
#define UNCORE_PCI_DEV_TYPE(data)	((data >> 8) & 0xff)
#define UNCORE_PCI_DEV_IDX(data)	(data & 0xff)
#define UNCORE_EXTRA_PCI_DEV		0xff
#define UNCORE_EXTRA_PCI_DEV_MAX	4
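/*
 * UNCORE_PCI_DEV_FULL_DATA() packs a PCI device reference into a single
 * word: device number in bits 31-24, function in bits 23-16, uncore type
 * in bits 15-8 and box index in bits 7-0. The UNCORE_PCI_DEV_DEV/FUNC/
 * TYPE/IDX() macros extract the corresponding fields again.
 */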

#define UNCORE_EVENT_CONSTRAINT(c, n) EVENT_CONSTRAINT(c, n, 0xff)

#define UNCORE_IGNORE_END		-1

struct pci_extra_dev {
	struct pci_dev *dev[UNCORE_EXTRA_PCI_DEV_MAX];
};

struct intel_uncore_ops;
struct intel_uncore_pmu;
struct intel_uncore_box;
struct uncore_event_desc;
struct freerunning_counters;
struct intel_uncore_topology;

struct intel_uncore_type {
	const char *name;
	int num_counters;
	int num_boxes;
	int perf_ctr_bits;
	int fixed_ctr_bits;
	int num_freerunning_types;
	int type_id;
	unsigned perf_ctr;
	unsigned event_ctl;
	unsigned event_mask;
	unsigned event_mask_ext;
	unsigned fixed_ctr;
	unsigned fixed_ctl;
	unsigned box_ctl;
	u64 *box_ctls;	/* Unit ctrl addr of the first box of each die */
	union {
		unsigned msr_offset;
		unsigned mmio_offset;
	};
	unsigned mmio_map_size;
	unsigned num_shared_regs:8;
	unsigned single_fixed:1;
	unsigned pair_ctr_ctl:1;
	union {
		u64 *msr_offsets;
		u64 *pci_offsets;
		u64 *mmio_offsets;
	};
	unsigned *box_ids;
	struct event_constraint unconstrainted;
	struct event_constraint *constraints;
	struct intel_uncore_pmu *pmus;
	struct intel_uncore_ops *ops;
	struct uncore_event_desc *event_descs;
	struct freerunning_counters *freerunning;
	const struct attribute_group *attr_groups[4];
	const struct attribute_group **attr_update;
	struct pmu *pmu; /* for custom pmu ops */
	/*
	 * The uncore PMU stores the relevant platform topology configuration
	 * here to identify which platform component each PMON block of this
	 * type is supposed to monitor.
	 */
	struct intel_uncore_topology **topology;
	/*
	 * Optional callbacks for managing mapping of Uncore units to PMONs
	 */
	int (*get_topology)(struct intel_uncore_type *type);
	void (*set_mapping)(struct intel_uncore_type *type);
	void (*cleanup_mapping)(struct intel_uncore_type *type);
};

#define pmu_group attr_groups[0]
#define format_group attr_groups[1]
#define events_group attr_groups[2]

struct intel_uncore_ops {
	void (*init_box)(struct intel_uncore_box *);
	void (*exit_box)(struct intel_uncore_box *);
	void (*disable_box)(struct intel_uncore_box *);
	void (*enable_box)(struct intel_uncore_box *);
	void (*disable_event)(struct intel_uncore_box *, struct perf_event *);
	void (*enable_event)(struct intel_uncore_box *, struct perf_event *);
	u64 (*read_counter)(struct intel_uncore_box *, struct perf_event *);
	int (*hw_config)(struct intel_uncore_box *, struct perf_event *);
	struct event_constraint *(*get_constraint)(struct intel_uncore_box *,
						   struct perf_event *);
	void (*put_constraint)(struct intel_uncore_box *, struct perf_event *);
};

struct intel_uncore_pmu {
	struct pmu			pmu;
	char				name[UNCORE_PMU_NAME_LEN];
	int				pmu_idx;
	int				func_id;
	bool				registered;
	atomic_t			activeboxes;
	struct intel_uncore_type	*type;
	struct intel_uncore_box		**boxes;
};

struct intel_uncore_extra_reg {
	raw_spinlock_t lock;
	u64 config, config1, config2;
	atomic_t ref;
};

struct intel_uncore_box {
	int dieid;	/* Logical die ID */
	int n_active;	/* number of active events */
	int n_events;
	int cpu;	/* cpu to collect events */
	unsigned long flags;
	atomic_t refcnt;
	struct perf_event *events[UNCORE_PMC_IDX_MAX];
	struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
	struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
	unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
	u64 tags[UNCORE_PMC_IDX_MAX];
	struct pci_dev *pci_dev;
	struct intel_uncore_pmu *pmu;
	u64 hrtimer_duration; /* hrtimer timeout for this box */
	struct hrtimer hrtimer;
	struct list_head list;
	struct list_head active_list;
	void __iomem *io_addr;
	struct intel_uncore_extra_reg shared_regs[];
};

/* CFL uncore 8th cbox MSRs */
#define CFL_UNC_CBO_7_PERFEVTSEL0		0xf70
#define CFL_UNC_CBO_7_PER_CTR0			0xf76

#define UNCORE_BOX_FLAG_INITIATED		0
/* event config registers are 8 bytes apart */
#define UNCORE_BOX_FLAG_CTL_OFFS8		1
/* CFL 8th CBOX has different MSR space */
#define UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS	2

struct uncore_event_desc {
	struct device_attribute attr;
	const char *config;
};

struct freerunning_counters {
	unsigned int counter_base;
	unsigned int counter_offset;
	unsigned int box_offset;
	unsigned int num_counters;
	unsigned int bits;
	unsigned *box_offsets;
};

struct uncore_iio_topology {
	int pci_bus_no;
	int segment;
};

struct uncore_upi_topology {
	int die_to;
	int pmu_idx_to;
	int enabled;
};

struct intel_uncore_topology {
	int pmu_idx;
	union {
		void *untyped;
		struct uncore_iio_topology *iio;
		struct uncore_upi_topology *upi;
	};
};

struct pci2phy_map {
	struct list_head list;
	int segment;
	int pbus_to_dieid[256];
};

struct pci2phy_map *__find_pci2phy_map(int segment);
int uncore_pcibus_to_dieid(struct pci_bus *bus);
int uncore_die_to_segment(int die);
int uncore_device_to_die(struct pci_dev *dev);

ssize_t uncore_event_show(struct device *dev,
			  struct device_attribute *attr, char *buf);

static inline struct intel_uncore_pmu *dev_to_uncore_pmu(struct device *dev)
{
	return container_of(dev_get_drvdata(dev), struct intel_uncore_pmu, pmu);
}

#define to_device_attribute(n)	container_of(n, struct device_attribute, attr)
#define to_dev_ext_attribute(n)	container_of(n, struct dev_ext_attribute, attr)
#define attr_to_ext_attr(n)	to_dev_ext_attribute(to_device_attribute(n))

extern int __uncore_max_dies;
#define uncore_max_dies()	(__uncore_max_dies)

#define INTEL_UNCORE_EVENT_DESC(_name, _config)			\
{								\
	.attr	= __ATTR(_name, 0444, uncore_event_show, NULL),	\
	.config	= _config,					\
}

#define DEFINE_UNCORE_FORMAT_ATTR(_var, _name, _format)			\
static ssize_t __uncore_##_var##_show(struct device *dev,		\
				struct device_attribute *attr,		\
				char *page)				\
{									\
	BUILD_BUG_ON(sizeof(_format) >= PAGE_SIZE);			\
	return sprintf(page, _format "\n");				\
}									\
static struct device_attribute format_attr_##_var =			\
	__ATTR(_name, 0444, __uncore_##_var##_show, NULL)
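
/*
 * Illustrative usage (editor's example, not part of this header): the
 * uncore drivers typically instantiate the macro above as, e.g.,
 *
 *	DEFINE_UNCORE_FORMAT_ATTR(event, event, "config:0-7");
 *
 * which defines a read-only sysfs attribute named "event" whose show
 * routine prints the format string "config:0-7".
 */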

static inline bool uncore_pmc_fixed(int idx)
{
	return idx == UNCORE_PMC_IDX_FIXED;
}

static inline bool uncore_pmc_freerunning(int idx)
{
	return idx == UNCORE_PMC_IDX_FREERUNNING;
}

static inline bool uncore_mmio_is_valid_offset(struct intel_uncore_box *box,
					       unsigned long offset)
{
	if (offset < box->pmu->type->mmio_map_size)
		return true;

	pr_warn_once("perf uncore: Invalid offset 0x%lx exceeds mapped area of %s.\n",
		     offset, box->pmu->type->name);

	return false;
}

static inline
unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl +
	       box->pmu->type->mmio_offset * box->pmu->pmu_idx;
}

static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->box_ctl;
}

static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctl;
}

static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr;
}

static inline
unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
		return idx * 8 + box->pmu->type->event_ctl;

	return idx * 4 + box->pmu->type->event_ctl;
}

static inline
unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
{
	return idx * 8 + box->pmu->type->perf_ctr;
}

static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
{
	struct intel_uncore_pmu *pmu = box->pmu;
	return pmu->type->msr_offsets ?
		pmu->type->msr_offsets[pmu->pmu_idx] :
		pmu->type->msr_offset * pmu->pmu_idx;
}

static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->box_ctl)
		return 0;
	return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
{
	if (!box->pmu->type->fixed_ctl)
		return 0;
	return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
}

static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
}


/*
 * The uncore documentation assigns no event code to free-running
 * counters, but events still need to be defined for them. The events are
 * encoded as event code + umask code.
 *
 * The event code for all free-running counters is 0xff, the same as for
 * the fixed counters.
 *
 * The umask code distinguishes a fixed counter from a free-running
 * counter, and the different types of free-running counters from each
 * other.
 * - For fixed counters, the umask code is 0x0X, where X is the index of
 *   the fixed counter, starting from 0.
 * - Free-running counters use the rest of the umask space, in the format
 *   0xXY: X is the type of free-running counter, starting from 1, and Y
 *   is the index of the counter within that type, starting from 0.
 *
 * For example, Skylake server has three types of IIO free-running
 * counters: IO CLOCKS, BANDWIDTH and UTILIZATION counters. The event
 * code for all of them is 0xff.
 * 'ioclk' is the first counter of IO CLOCKS; IO CLOCKS is the first
 * type, whose umask codes start at 0x10, so 'ioclk' is encoded as
 * event=0xff,umask=0x10.
 * 'bw_in_port2' is the third counter of BANDWIDTH; BANDWIDTH is the
 * second type, whose umask codes start at 0x20, so 'bw_in_port2' is
 * encoded as event=0xff,umask=0x22.
 */
static inline unsigned int uncore_freerunning_idx(u64 config)
{
	return ((config >> 8) & 0xf);
}

#define UNCORE_FREERUNNING_UMASK_START		0x10

static inline unsigned int uncore_freerunning_type(u64 config)
{
	return ((((config >> 8) - UNCORE_FREERUNNING_UMASK_START) >> 4) & 0xf);
}
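
/*
 * Worked example (editor's illustration, not part of the original header):
 * 'bw_in_port2' on Skylake server is encoded as event=0xff,umask=0x22,
 * i.e. config = 0x22ff, so
 *   uncore_freerunning_idx(0x22ff)  = (0x22 & 0xf)                 = 2
 *   uncore_freerunning_type(0x22ff) = (((0x22 - 0x10) >> 4) & 0xf) = 1
 * which is the third counter (index 2) of the second free-running type
 * (BANDWIDTH), matching the encoding described above.
 */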

static inline
unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
					struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);
	struct intel_uncore_pmu *pmu = box->pmu;

	return pmu->type->freerunning[type].counter_base +
	       pmu->type->freerunning[type].counter_offset * idx +
	       (pmu->type->freerunning[type].box_offsets ?
	        pmu->type->freerunning[type].box_offsets[pmu->pmu_idx] :
	        pmu->type->freerunning[type].box_offset * pmu->pmu_idx);
}

static inline
unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PERFEVTSEL0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->event_ctl +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
		return CFL_UNC_CBO_7_PER_CTR0 +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
	} else {
		return box->pmu->type->perf_ctr +
		       (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
		       uncore_msr_box_offset(box);
	}
}

static inline
unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctl(box);
	else
		return uncore_msr_fixed_ctl(box);
}

static inline
unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_fixed_ctr(box);
	else
		return uncore_msr_fixed_ctr(box);
}

static inline
unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_event_ctl(box, idx);
	else
		return uncore_msr_event_ctl(box, idx);
}

static inline
unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
{
	if (box->pci_dev || box->io_addr)
		return uncore_pci_perf_ctr(box, idx);
	else
		return uncore_msr_perf_ctr(box, idx);
}

static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->perf_ctr_bits;
}

static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
{
	return box->pmu->type->fixed_ctr_bits;
}

static inline
unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
				     struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].bits;
}

static inline int uncore_num_freerunning(struct intel_uncore_box *box,
					 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);

	return box->pmu->type->freerunning[type].num_counters;
}

static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	return box->pmu->type->num_freerunning_types;
}

static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
						 struct perf_event *event)
{
	unsigned int type = uncore_freerunning_type(event->hw.config);
	unsigned int idx = uncore_freerunning_idx(event->hw.config);

	return (type < uncore_num_freerunning_types(box, event)) &&
	       (idx < uncore_num_freerunning(box, event));
}

static inline int uncore_num_counters(struct intel_uncore_box *box)
{
	return box->pmu->type->num_counters;
}

static inline bool is_freerunning_event(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	return ((cfg & UNCORE_FIXED_EVENT) == UNCORE_FIXED_EVENT) &&
	       (((cfg >> 8) & 0xff) >= UNCORE_FREERUNNING_UMASK_START);
}

/* Check and reject invalid config */
static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
					       struct perf_event *event)
{
	if (is_freerunning_event(event))
		return 0;

	return -EINVAL;
}

static inline void uncore_disable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->disable_event(box, event);
}

static inline void uncore_enable_event(struct intel_uncore_box *box,
				struct perf_event *event)
{
	box->pmu->type->ops->enable_event(box, event);
}

static inline u64 uncore_read_counter(struct intel_uncore_box *box,
				struct perf_event *event)
{
	return box->pmu->type->ops->read_counter(box, event);
}

static inline void uncore_box_init(struct intel_uncore_box *box)
{
	if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->init_box)
			box->pmu->type->ops->init_box(box);
	}
}

static inline void uncore_box_exit(struct intel_uncore_box *box)
{
	if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
		if (box->pmu->type->ops->exit_box)
			box->pmu->type->ops->exit_box(box);
	}
}

static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
{
	return (box->dieid < 0);
}

static inline struct intel_uncore_pmu *uncore_event_to_pmu(struct perf_event *event)
{
	return container_of(event->pmu, struct intel_uncore_pmu, pmu);
}

static inline struct intel_uncore_box *uncore_event_to_box(struct perf_event *event)
{
	return event->pmu_private;
}

struct intel_uncore_box *uncore_pmu_to_box(struct intel_uncore_pmu *pmu, int cpu);
u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
void uncore_mmio_exit_box(struct intel_uncore_box *box);
u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
			     struct perf_event *event);
void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
void uncore_pmu_event_start(struct perf_event *event, int flags);
void uncore_pmu_event_stop(struct perf_event *event, int flags);
int uncore_pmu_event_add(struct perf_event *event, int flags);
void uncore_pmu_event_del(struct perf_event *event, int flags);
void uncore_pmu_event_read(struct perf_event *event);
void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
struct event_constraint *
uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
void uncore_get_alias_name(char *pmu_name, struct intel_uncore_pmu *pmu);

extern struct intel_uncore_type *empty_uncore[];
extern struct intel_uncore_type **uncore_msr_uncores;
extern struct intel_uncore_type **uncore_pci_uncores;
extern struct intel_uncore_type **uncore_mmio_uncores;
extern struct pci_driver *uncore_pci_driver;
extern struct pci_driver *uncore_pci_sub_driver;
extern raw_spinlock_t pci2phy_map_lock;
extern struct list_head pci2phy_map_head;
extern struct pci_extra_dev *uncore_extra_pci_dev;
extern struct event_constraint uncore_constraint_empty;
extern int spr_uncore_units_ignore[];
extern int gnr_uncore_units_ignore[];

/* uncore_snb.c */
int snb_uncore_pci_init(void);
int ivb_uncore_pci_init(void);
int hsw_uncore_pci_init(void);
int bdw_uncore_pci_init(void);
int skl_uncore_pci_init(void);
void snb_uncore_cpu_init(void);
void nhm_uncore_cpu_init(void);
void skl_uncore_cpu_init(void);
void icl_uncore_cpu_init(void);
void tgl_uncore_cpu_init(void);
void adl_uncore_cpu_init(void);
void mtl_uncore_cpu_init(void);
void tgl_uncore_mmio_init(void);
void tgl_l_uncore_mmio_init(void);
void adl_uncore_mmio_init(void);
int snb_pci2phy_map_init(int devid);

/* uncore_snbep.c */
int snbep_uncore_pci_init(void);
void snbep_uncore_cpu_init(void);
int ivbep_uncore_pci_init(void);
void ivbep_uncore_cpu_init(void);
int hswep_uncore_pci_init(void);
void hswep_uncore_cpu_init(void);
int bdx_uncore_pci_init(void);
void bdx_uncore_cpu_init(void);
int knl_uncore_pci_init(void);
void knl_uncore_cpu_init(void);
int skx_uncore_pci_init(void);
void skx_uncore_cpu_init(void);
int snr_uncore_pci_init(void);
void snr_uncore_cpu_init(void);
void snr_uncore_mmio_init(void);
int icx_uncore_pci_init(void);
void icx_uncore_cpu_init(void);
void icx_uncore_mmio_init(void);
int spr_uncore_pci_init(void);
void spr_uncore_cpu_init(void);
void spr_uncore_mmio_init(void);
int gnr_uncore_pci_init(void);
void gnr_uncore_cpu_init(void);
void gnr_uncore_mmio_init(void);

/* uncore_nhmex.c */
void nhmex_uncore_cpu_init(void);