arch/powerpc/include/asm/perf_event_server.h (Linux v6.9.4)
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Performance event support - PowerPC classic/server specific definitions.
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 */

#include <linux/types.h>
#include <asm/hw_irq.h>
#include <linux/device.h>
#include <uapi/asm/perf_event.h>

/* Update perf_event_print_debug() if this changes */
#define MAX_HWEVENTS		8
#define MAX_EVENT_ALTERNATIVES	8
#define MAX_LIMITED_HWCOUNTERS	2

struct perf_event;

struct mmcr_regs {
	unsigned long mmcr0;
	unsigned long mmcr1;
	unsigned long mmcr2;
	unsigned long mmcra;
	unsigned long mmcr3;
};
/*
 * This struct provides the constants and functions needed to
 * describe the PMU on a particular POWER-family CPU.
 */
struct power_pmu {
	const char	*name;
	int		n_counter;
	int		max_alternatives;
	unsigned long	add_fields;
	unsigned long	test_adder;
	int		(*compute_mmcr)(u64 events[], int n_ev,
				unsigned int hwc[], struct mmcr_regs *mmcr,
				struct perf_event *pevents[], u32 flags);
	int		(*get_constraint)(u64 event_id, unsigned long *mskp,
				unsigned long *valp, u64 event_config1);
	int		(*get_alternatives)(u64 event_id, unsigned int flags,
				u64 alt[]);
	void		(*get_mem_data_src)(union perf_mem_data_src *dsrc,
				u32 flags, struct pt_regs *regs);
	void		(*get_mem_weight)(u64 *weight, u64 type);
	unsigned long	group_constraint_mask;
	unsigned long	group_constraint_val;
	u64             (*bhrb_filter_map)(u64 branch_sample_type);
	void            (*config_bhrb)(u64 pmu_bhrb_filter);
	void		(*disable_pmc)(unsigned int pmc, struct mmcr_regs *mmcr);
	int		(*limited_pmc_event)(u64 event_id);
	u32		flags;
	const struct attribute_group	**attr_groups;
	int		n_generic;
	int		*generic_events;
	u64		(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
			       [PERF_COUNT_HW_CACHE_OP_MAX]
			       [PERF_COUNT_HW_CACHE_RESULT_MAX];

	int		n_blacklist_ev;
	int		*blacklist_ev;
	/* BHRB entries in the PMU */
	int		bhrb_nr;
	/*
	 * Set this flag with `PERF_PMU_CAP_EXTENDED_REGS` if
	 * the PMU supports the extended perf regs capability.
	 */
	int		capabilities;
	/*
	 * Function to check the event code for reserved values.
	 * It takes a struct perf_event as input, since the event
	 * code could be spread across attr.config*.
	 */
	int		(*check_attr_config)(struct perf_event *ev);
};
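To make the fields above concrete, here is a minimal, hypothetical PMU description in the style of the real tables under arch/powerpc/perf/. Every name and number in it (the fictional_* identifiers, the 0x1e/0x02 event codes, the flag choice) is an illustrative assumption, not something defined by this header; it also assumes <linux/perf_event.h> for the PERF_COUNT_HW_* indices and ARRAY_SIZE(). A real driver additionally fills in get_alternatives, cache_events and genuine MMCR encoding logic.

/* Hypothetical sketch only -- not taken from any in-tree PMU driver. */
static int fictional_get_constraint(u64 event, unsigned long *maskp,
				    unsigned long *valp, u64 event_config1)
{
	*maskp = 0;	/* no cross-event constraints in this toy model */
	*valp = 0;
	return 0;
}

static int fictional_compute_mmcr(u64 events[], int n_ev, unsigned int hwc[],
				  struct mmcr_regs *mmcr,
				  struct perf_event *pevents[], u32 flags)
{
	/* A real callback encodes each event's selector into mmcr->mmcr0/1. */
	return 0;
}

static int fictional_generic_events[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	= 0x1e,	/* made-up event code */
	[PERF_COUNT_HW_INSTRUCTIONS]	= 0x02,	/* made-up event code */
};

static struct power_pmu fictional_pmu = {
	.name			= "FICTIONAL",
	.n_counter		= MAX_HWEVENTS,
	.max_alternatives	= MAX_EVENT_ALTERNATIVES,
	.compute_mmcr		= fictional_compute_mmcr,
	.get_constraint		= fictional_get_constraint,
	.flags			= PPMU_HAS_SIER | PPMU_ARCH_207S,
	.n_generic		= ARRAY_SIZE(fictional_generic_events),
	.generic_events		= fictional_generic_events,
};

A CPU setup path would then hand this table to register_power_pmu(&fictional_pmu), much as the in-tree POWER PMU init code does with its own tables.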

/*
 * Values for power_pmu.flags
 */
#define PPMU_LIMITED_PMC5_6	0x00000001 /* PMC5/6 have limited function */
#define PPMU_ALT_SIPR		0x00000002 /* uses alternate posn for SIPR/HV */
#define PPMU_NO_SIPR		0x00000004 /* no SIPR/HV in MMCRA at all */
#define PPMU_NO_CONT_SAMPLING	0x00000008 /* no continuous sampling */
#define PPMU_SIAR_VALID		0x00000010 /* Processor has SIAR Valid bit */
#define PPMU_HAS_SSLOT		0x00000020 /* Has sampled slot in MMCRA */
#define PPMU_HAS_SIER		0x00000040 /* Has SIER */
#define PPMU_ARCH_207S		0x00000080 /* PMC is architecture v2.07S */
#define PPMU_NO_SIAR		0x00000100 /* Do not use SIAR */
#define PPMU_ARCH_31		0x00000200 /* Has MMCR3, SIER2 and SIER3 */
#define PPMU_P10_DD1		0x00000400 /* Is power10 DD1 processor version */
#define PPMU_HAS_ATTR_CONFIG1	0x00000800 /* Using config1 attribute */

/*
 * Values for flags to get_alternatives()
 */
#define PPMU_LIMITED_PMC_OK	1	/* can put this on a limited PMC */
#define PPMU_LIMITED_PMC_REQD	2	/* have to put this on a limited PMC */
#define PPMU_ONLY_COUNT_RUN	4	/* only counting in run state */

int __init register_power_pmu(struct power_pmu *pmu);

struct pt_regs;
extern unsigned long perf_misc_flags(struct pt_regs *regs);
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long int read_bhrb(int n);

/*
 * Only override the default definitions in include/linux/perf_event.h
 * if we have hardware PMU support.
 */
#ifdef CONFIG_PPC_PERF_CTRS
#define perf_misc_flags(regs)	perf_misc_flags(regs)
#endif

/*
 * The power_pmu.get_constraint function returns a 32/64-bit value and
 * a 32/64-bit mask that express the constraints between this event_id and
 * other events.
 *
 * The value and mask are divided up into (non-overlapping) bitfields
 * of three different types:
 *
 * Select field: this expresses the constraint that some set of bits
 * in MMCR* needs to be set to a specific value for this event_id.  For a
 * select field, the mask contains 1s in every bit of the field, and
 * the value contains a unique value for each possible setting of the
 * MMCR* bits.  The constraint checking code will ensure that two events
 * that set the same field in their masks have the same value in their
 * value dwords.
 *
 * Add field: this expresses the constraint that there can be at most
 * N events in a particular class.  A field of k bits can be used for
 * N <= 2^(k-1) - 1.  The mask has the most significant bit of the field
 * set (and the other bits 0), and the value has only the least significant
 * bit of the field set.  In addition, the 'add_fields' and 'test_adder'
 * in the struct power_pmu for this processor come into play.  The
 * add_fields value contains 1 in the LSB of the field, and the
 * test_adder contains 2^(k-1) - 1 - N in the field.
 *
 * NAND field: this expresses the constraint that you may not have events
 * in all of a set of classes.  (For example, on PPC970, you can't select
 * events from the FPU, ISU and IDU simultaneously, although any two are
 * possible.)  For N classes, the field is N+1 bits wide, and each class
 * is assigned one bit from the least-significant N bits.  The mask has
 * only the most-significant bit set, and the value has only the bit
 * for the event_id's class set.  The test_adder has the least significant
 * bit set in the field.
 *
 * If an event_id is not subject to the constraint expressed by a particular
 * field, then it will have 0 in both the mask and value for that field.
 */
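As a worked illustration of the add-field arithmetic (the field layout and N below are invented for the example; the real accumulation and test live in power_check_constraints() in arch/powerpc/perf/core-book3s.c and also handle event alternatives): give the class a k = 3 bit field at bit 4 and allow at most N = 2 events, so test_adder carries 2^(k-1) - 1 - N = 1 in that field.

/* Illustrative only: a 3-bit add field at bit 4, at most 2 events allowed. */
#define EX_FIELD_SH	4
#define EX_FIELD_MSB	(0x4UL << EX_FIELD_SH)	/* constraint mask bit        */
#define EX_FIELD_LSB	(0x1UL << EX_FIELD_SH)	/* per-event constraint value */
#define EX_ADD_FIELDS	EX_FIELD_LSB		/* power_pmu.add_fields       */
#define EX_TEST_ADDER	(0x1UL << EX_FIELD_SH)	/* 2^(k-1) - 1 - N = 1        */

/* Returns 0 if n_ev events of this class fit, -1 if they overflow. */
static int example_add_field_check(int n_ev)
{
	unsigned long sum = 0;
	int i;

	for (i = 0; i < n_ev; i++)
		sum += EX_FIELD_LSB;	/* each event contributes the LSB */

	/* 1 or 2 events: no carry into the MSB; 3 events: carry -> reject. */
	return ((sum + EX_TEST_ADDER) & EX_FIELD_MSB) ? -1 : 0;
}

The NAND case rides on the same trick: each of the N classes owns one of the low N bits of an (N+1)-bit field, test_adder holds 1 in the field's LSB, and the carry reaches the field's most significant bit (the mask bit) when all N classes are selected together.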

extern ssize_t power_events_sysfs_show(struct device *dev,
				struct device_attribute *attr, char *page);

/*
 * EVENT_VAR() is the same as PMU_EVENT_VAR with a suffix.
 *
 * Having a suffix allows us to have aliases in sysfs - e.g. the generic
 * event 'cpu-cycles' can have two entries in sysfs: 'cpu-cycles' and
 * 'PM_CYC', where the latter is the name by which the event is known in
 * the POWER CPU specification.
 *
 * Similarly, some hardware and cache events use the same event code. E.g.
 * on POWER8, both "cache-references" and "L1-dcache-loads" refer
 * to the same event, PM_LD_REF_L1.  The suffix allows us to have two
 * sysfs objects for the same event and thus two entries/aliases in sysfs.
 */
#define	EVENT_VAR(_id, _suffix)		event_attr_##_id##_suffix
#define	EVENT_PTR(_id, _suffix)		&EVENT_VAR(_id, _suffix).attr.attr

#define	EVENT_ATTR(_name, _id, _suffix)					\
	PMU_EVENT_ATTR(_name, EVENT_VAR(_id, _suffix), _id,		\
			power_events_sysfs_show)

#define	GENERIC_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _g)
#define	GENERIC_EVENT_PTR(_id)		EVENT_PTR(_id, _g)

#define	CACHE_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _c)
#define	CACHE_EVENT_PTR(_id)		EVENT_PTR(_id, _c)

#define	POWER_EVENT_ATTR(_name, _id)	EVENT_ATTR(_name, _id, _p)
#define	POWER_EVENT_PTR(_id)		EVENT_PTR(_id, _p)
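For illustration, this is roughly how a driver under arch/powerpc/perf/ uses these macros to give one hardware event both a generic and a POWER-specific sysfs name. The raw event codes and the example_* identifiers are assumptions for the sketch, not values taken from any particular CPU, and PMU_EVENT_ATTR() is assumed available from <linux/perf_event.h>.

/* Illustrative sketch, not copied from any in-tree driver. */
enum {
	PM_CYC		= 0x1e,		/* illustrative event code */
	PM_LD_REF_L1	= 0x100fc,	/* illustrative event code */
};	/* enum (not #define) so the name survives stringification in __ATTR() */

GENERIC_EVENT_ATTR(cpu-cycles, PM_CYC);		/* sysfs name: cpu-cycles   */
POWER_EVENT_ATTR(PM_CYC, PM_CYC);		/* alias: PM_CYC, same code */

GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1);

static struct attribute *example_events_attr[] = {
	GENERIC_EVENT_PTR(PM_CYC),
	POWER_EVENT_PTR(PM_CYC),
	GENERIC_EVENT_PTR(PM_LD_REF_L1),
	CACHE_EVENT_PTR(PM_LD_REF_L1),
	NULL
};

static const struct attribute_group example_pmu_events_group = {
	.name	= "events",
	.attrs	= example_events_attr,
};
/* A driver would point power_pmu.attr_groups at a NULL-terminated list
 * containing &example_pmu_events_group. */

The distinct _g/_c/_p suffixes are what let GENERIC_EVENT_ATTR(cache-references, PM_LD_REF_L1) and CACHE_EVENT_ATTR(L1-dcache-loads, PM_LD_REF_L1) coexist: they expand to different event_attr_PM_LD_REF_L1_* variables even though they share one event code, so both names end up as aliases for the same event in sysfs.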