// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Extension
 * Facility
 *
 *  Copyright IBM Corp. 2022
 *  Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT	"pai_ext"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>
#include <asm/ctlreg.h>
#include <asm/pai.h>
#include <asm/debug.h>

#define	PAIE1_CB_SZ		0x200	/* Size of PAIE1 control block */
#define	PAIE1_CTRBLOCK_SZ	0x400	/* Size of PAIE1 counter blocks */

static debug_info_t *paiext_dbg;
static unsigned int paiext_cnt;	/* Extracted with QPACI instruction */

struct pai_userdata {
	u16 num;
	u64 value;
} __packed;

/* Create the PAI extension 1 control block area.
 * The PAI extension control block 1 is pointed to by lowcore
 * address 0x1508 for each CPU. This control block is 512 bytes in size
 * and requires a 512 byte boundary alignment.
 */
struct paiext_cb {		/* PAI extension 1 control block */
	u64 header;		/* Not used */
	u64 reserved1;
	u64 acc;		/* Addr to analytics counter control block */
	u8 reserved2[488];
} __packed;

struct paiext_map {
	unsigned long *area;		/* Area for CPU to store counters */
	struct pai_userdata *save;	/* Area to store non-zero counters */
	unsigned int active_events;	/* # of PAI Extension users */
	refcount_t refcnt;
	struct perf_event *event;	/* Perf event for sampling */
	struct paiext_cb *paiext_cb;	/* PAI extension control block area */
	struct list_head syswide_list;	/* List system-wide sampling events */
};

struct paiext_mapptr {
	struct paiext_map *mapptr;
};

static struct paiext_root {		/* Anchor to per CPU data */
	refcount_t refcnt;		/* Overall active events */
	struct paiext_mapptr __percpu *mapptr;
} paiext_root;

/* Free per CPU data when the last event is removed. */
static void paiext_root_free(void)
{
	if (refcount_dec_and_test(&paiext_root.refcnt)) {
		free_percpu(paiext_root.mapptr);
		paiext_root.mapptr = NULL;
	}
	debug_sprintf_event(paiext_dbg, 5, "%s root.refcount %d\n", __func__,
			    refcount_read(&paiext_root.refcnt));
}

/* On initialization of the first event, also allocate the per CPU data
 * dynamically. Start with an array of pointers sized to the maximum
 * number of possible CPUs, which might be larger than the number of
 * CPUs currently online.
 */
static int paiext_root_alloc(void)
{
	if (!refcount_inc_not_zero(&paiext_root.refcnt)) {
		/* The memory is already zeroed. */
		paiext_root.mapptr = alloc_percpu(struct paiext_mapptr);
		if (!paiext_root.mapptr) {
			/* Returning without refcnt adjustment is ok. The
			 * error code is handled by paiext_alloc() which
			 * decrements refcnt when an event cannot be
			 * created.
			 */
			return -ENOMEM;
		}
		refcount_set(&paiext_root.refcnt, 1);
	}
	return 0;
}

/* Protects against concurrent increments of the sampler and counter
 * members and prohibits concurrent execution of counting and sampling
 * events.
 * Ensures that the analytics counter block is deallocated only when both
 * the sampling and counting references on that CPU have dropped to zero.
 * For details see paiext_alloc().
 */
static DEFINE_MUTEX(paiext_reserve_mutex);

/* Free all memory allocated for event counting/sampling setup */
static void paiext_free(struct paiext_mapptr *mp)
{
	kfree(mp->mapptr->area);
	kfree(mp->mapptr->paiext_cb);
	kvfree(mp->mapptr->save);
	kfree(mp->mapptr);
	mp->mapptr = NULL;
}

/* Release the per CPU data when the last event using it is removed. */
static void paiext_event_destroy_cpu(struct perf_event *event, int cpu)
{
	struct paiext_mapptr *mp = per_cpu_ptr(paiext_root.mapptr, cpu);
	struct paiext_map *cpump = mp->mapptr;

	mutex_lock(&paiext_reserve_mutex);
	if (refcount_dec_and_test(&cpump->refcnt))	/* Last reference gone */
		paiext_free(mp);
	paiext_root_free();
	mutex_unlock(&paiext_reserve_mutex);
}

static void paiext_event_destroy(struct perf_event *event)
{
	int cpu;

	free_page(PAI_SAVE_AREA(event));
	if (event->cpu == -1) {
		struct cpumask *mask = PAI_CPU_MASK(event);

		for_each_cpu(cpu, mask)
			paiext_event_destroy_cpu(event, cpu);
		kfree(mask);
	} else {
		paiext_event_destroy_cpu(event, event->cpu);
	}
	debug_sprintf_event(paiext_dbg, 4, "%s cpu %d\n", __func__,
			    event->cpu);
}

/* Used to avoid races when checking concurrent access of counting and
 * sampling for pai_extension events.
 *
 * Only one instance of the event pai_ext/NNPA_ALL/ for sampling is
 * allowed, and while this event is running, no counting event is allowed.
 * Several counting events are allowed in parallel, but no sampling event
 * is allowed while one (or more) counting events are running.
 *
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other callback will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static int paiext_alloc_cpu(struct perf_event *event, int cpu)
{
	struct paiext_mapptr *mp;
	struct paiext_map *cpump;
	int rc;

	mutex_lock(&paiext_reserve_mutex);
	rc = paiext_root_alloc();
	if (rc)
		goto unlock;

	mp = per_cpu_ptr(paiext_root.mapptr, cpu);
	cpump = mp->mapptr;
	if (!cpump) {			/* Paiext_map allocated? */
		rc = -ENOMEM;
		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
		if (!cpump)
			goto undo;

		/* Allocate memory for the counter area and counter
		 * extraction. These are
		 * - a 512 byte block, requiring 512 byte boundary alignment.
		 * - a 1 KB block, requiring 1 KB boundary alignment.
		 * Only the first counting event has to allocate the area.
		 *
		 * Note: This works with commit 59bb47985c1d by default.
		 * Backporting this to kernels without this commit might
		 * need adjustment.
		 */
		mp->mapptr = cpump;
		cpump->area = kzalloc(PAIE1_CTRBLOCK_SZ, GFP_KERNEL);
		cpump->paiext_cb = kzalloc(PAIE1_CB_SZ, GFP_KERNEL);
		cpump->save = kvmalloc_array(paiext_cnt + 1,
					     sizeof(struct pai_userdata),
					     GFP_KERNEL);
		if (!cpump->save || !cpump->area || !cpump->paiext_cb) {
			paiext_free(mp);
			goto undo;
		}
		INIT_LIST_HEAD(&cpump->syswide_list);
		refcount_set(&cpump->refcnt, 1);
		rc = 0;
	} else {
		refcount_inc(&cpump->refcnt);
	}

undo:
	if (rc) {
		/* Error in allocation of event, decrement anchor. Since
		 * the event is not created, its destroy() function is never
		 * invoked. Adjust the reference counter for the anchor.
		 */
		paiext_root_free();
	}
unlock:
	mutex_unlock(&paiext_reserve_mutex);
	/* If rc is non-zero, no increment of counter/sampler was done. */
	return rc;
}

static int paiext_alloc(struct perf_event *event)
{
	struct cpumask *maskptr;
	int cpu, rc = -ENOMEM;

	maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
	if (!maskptr)
		goto out;

	for_each_online_cpu(cpu) {
		rc = paiext_alloc_cpu(event, cpu);
		if (rc) {
			for_each_cpu(cpu, maskptr)
				paiext_event_destroy_cpu(event, cpu);
			kfree(maskptr);
			goto out;
		}
		cpumask_set_cpu(cpu, maskptr);
	}

	/*
	 * On error the cpumask is freed and all per CPU events have been
	 * destroyed above. On success save the cpumask denoting which CPUs
	 * data structures have been allocated for. Release them in the
	 * paiext_event_destroy() callback function for this event.
	 */
	PAI_CPU_MASK(event) = maskptr;
	rc = 0;
out:
	return rc;
}

/* The PAI extension 1 control block supports up to 128 entries. Validate
 * the event number and, when it is valid, store the offset of the analytics
 * counter control block address within the PAIE1 control block.
 */
static int paiext_event_valid(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (cfg >= PAI_NNPA_BASE && cfg <= PAI_NNPA_BASE + paiext_cnt) {
		/* Offset NNPA in paiext_cb */
		event->hw.config_base = offsetof(struct paiext_cb, acc);
		return 0;
	}
	return -EINVAL;
}

/* Might be called on a different CPU than the one the event is intended for. */
static int paiext_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	int rc;

	/* PMU pai_ext registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI extension event must be valid and in supported range */
	rc = paiext_event_valid(event);
	if (rc)
		return rc;
	/* Allow only event NNPA_ALL for sampling. */
	if (a->sample_period && a->config != PAI_NNPA_BASE)
		return -EINVAL;
	/* Prohibit exclude_user event selection */
	if (a->exclude_user)
		return -EINVAL;
	/* Get a page to store last counter values for sampling */
	if (a->sample_period) {
		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
		if (!PAI_SAVE_AREA(event))
			return -ENOMEM;
	}

	if (event->cpu >= 0)
		rc = paiext_alloc_cpu(event, event->cpu);
	else
		rc = paiext_alloc(event);
	if (rc) {
		free_page(PAI_SAVE_AREA(event));
		return rc;
	}
	event->destroy = paiext_event_destroy;

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paiext_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which are the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	return 0;
}

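/* Return the value of the counter at index nr in the counter area. */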
static u64 paiext_getctr(unsigned long *area, int nr)
{
	return area[nr];
}

/* Read the counter values. Return value from location in buffer. For event
 * NNPA_ALL sum up all events.
 */
static u64 paiext_getdata(struct perf_event *event)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	u64 sum = 0;
	int i;

	if (event->attr.config != PAI_NNPA_BASE)
		return paiext_getctr(cpump->area,
				     event->attr.config - PAI_NNPA_BASE);

	for (i = 1; i <= paiext_cnt; i++)
		sum += paiext_getctr(cpump->area, i);

	return sum;
}

static u64 paiext_getall(struct perf_event *event)
{
	return paiext_getdata(event);
}

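/* Update the event count: accumulate the delta since the last read. */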
static void paiext_read(struct perf_event *event)
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = paiext_getall(event);
	local64_set(&event->hw.prev_count, new);
	delta = new - prev;
	local64_add(delta, &event->count);
}

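/* Start the event: take a snapshot of the counters when counting, or save
 * the current counter area and arm context switch handling when sampling.
 */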
static void paiext_start(struct perf_event *event, int flags)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	u64 sum;

	if (!event->attr.sample_period) {	/* Counting */
		sum = paiext_getall(event);	/* Get current value */
		local64_set(&event->hw.prev_count, sum);
	} else {				/* Sampling */
		memcpy((void *)PAI_SAVE_AREA(event), cpump->area,
		       PAIE1_CTRBLOCK_SZ);
		/* Enable context switch callback for system-wide sampling */
		if (!(event->attach_state & PERF_ATTACH_TASK)) {
			list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
			perf_sched_cb_inc(event->pmu);
		} else {
			cpump->event = event;
		}
	}
}

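/* Add the event to this CPU. The first active event installs the address of
 * the PAIE1 control block in the lowcore and enables the PAI extension
 * control bit in control register 0.
 */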
static int paiext_add(struct perf_event *event, int flags)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	struct paiext_cb *pcb = cpump->paiext_cb;

	if (++cpump->active_events == 1) {
		get_lowcore()->aicd = virt_to_phys(cpump->paiext_cb);
		pcb->acc = virt_to_phys(cpump->area) | 0x1;
		/* Enable CPU instruction lookup for PAIE1 control block */
		local_ctl_set_bit(0, CR0_PAI_EXTENSION_BIT);
	}
	if (flags & PERF_EF_START)
		paiext_start(event, PERF_EF_RELOAD);
	event->hw.state = 0;
	return 0;
}

static void paiext_have_sample(struct perf_event *, struct paiext_map *);
static void paiext_stop(struct perf_event *event, int flags)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;

	if (!event->attr.sample_period) {	/* Counting */
		paiext_read(event);
	} else {				/* Sampling */
		if (!(event->attach_state & PERF_ATTACH_TASK)) {
			list_del(PAI_SWLIST(event));
			perf_sched_cb_dec(event->pmu);
		} else {
			paiext_have_sample(event, cpump);
			cpump->event = NULL;
		}
	}
	event->hw.state = PERF_HES_STOPPED;
}

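/* Remove the event from this CPU. The last active event clears the control
 * register bit and the PAIE1 control block address in the lowcore.
 */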
static void paiext_del(struct perf_event *event, int flags)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	struct paiext_cb *pcb = cpump->paiext_cb;

	paiext_stop(event, PERF_EF_UPDATE);
	if (--cpump->active_events == 0) {
		/* Disable CPU instruction lookup for PAIE1 control block */
		local_ctl_clear_bit(0, CR0_PAI_EXTENSION_BIT);
		pcb->acc = 0;
		get_lowcore()->aicd = 0;
	}
}

/* Create raw data and save it in the buffer. Returns the number of bytes
 * copied. Saves only entries with a nonzero counter increment, each of
 * the form
 * 2 bytes: Number of counter
 * 8 bytes: Value of counter
 */
static size_t paiext_copy(struct pai_userdata *userdata, unsigned long *area,
			  unsigned long *area_old)
{
	int i, outidx = 0;

	for (i = 1; i <= paiext_cnt; i++) {
		u64 val = paiext_getctr(area, i);
		u64 val_old = paiext_getctr(area_old, i);

		if (val >= val_old)
			val -= val_old;
		else
			val = (~0ULL - val_old) + val + 1;
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(*userdata);
}

/* Write a sample when one or more counter values are nonzero.
 *
 * Note: The functions paiext_sched_task() and paiext_push_sample() are not
 * invoked after function paiext_del() has been called because of function
 * perf_sched_cb_dec().
 * The functions paiext_sched_task() and paiext_push_sample() are only
 * called when sampling is active. Function perf_sched_cb_inc()
 * has been invoked to install function paiext_sched_task() as callback
 * to run at context switch time (see paiext_start()).
 *
 * This causes functions perf_event_context_sched_out() and
 * perf_event_context_sched_in() to check whether the PMU has installed a
 * sched_task() callback. That callback is not active after paiext_del()
 * returns and has deleted the event on that CPU.
 */
static int paiext_push_sample(size_t rawsize, struct paiext_map *cpump,
			      struct perf_event *event)
{
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU)
		data.cpu_entry.cpu = smp_processor_id();
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		perf_sample_save_raw_data(&data, event, &raw);
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Save NNPA lowcore area after read in event */
	memcpy((void *)PAI_SAVE_AREA(event), cpump->area,
	       PAIE1_CTRBLOCK_SZ);
	return overflow;
}

/* Check if there is data to be saved on schedule out of a task. */
static void paiext_have_sample(struct perf_event *event,
			       struct paiext_map *cpump)
{
	size_t rawsize;

	if (!event)
		return;
	rawsize = paiext_copy(cpump->save, cpump->area,
			      (unsigned long *)PAI_SAVE_AREA(event));
	if (rawsize)			/* Incremented counters */
		paiext_push_sample(rawsize, cpump, event);
}

/* Check all system-wide sampling events on this CPU for data to be saved
 * on schedule out of a task.
 */
static void paiext_have_samples(void)
{
	struct paiext_mapptr *mp = this_cpu_ptr(paiext_root.mapptr);
	struct paiext_map *cpump = mp->mapptr;
	struct perf_event *event;

	list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
		paiext_have_sample(event, cpump);
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event NNPA_ALL is allowed.
 */
static void paiext_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * results on schedule_out and if page was dirty, save old values.
	 */
	if (!sched_in)
		paiext_have_samples();
}

/* Attribute definitions for the pai extension 1 interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1800 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0x1000 to 0x103e are used for PAI cryptography
 * counters.
 * Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paiext_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paiext_events_group = {
	.name = "events",
	.attrs = NULL,			/* Filled in attr_event_init() */
};

static struct attribute_group paiext_format_group = {
	.name = "format",
	.attrs = paiext_format_attr,
};

static const struct attribute_group *paiext_attr_groups[] = {
	&paiext_events_group,
	&paiext_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paiext = {
	.task_ctx_nr  = perf_hw_context,
	.event_init   = paiext_event_init,
	.add	      = paiext_add,
	.del	      = paiext_del,
	.start	      = paiext_start,
	.stop	      = paiext_stop,
	.read	      = paiext_read,
	.sched_task   = paiext_sched_task,
	.attr_groups  = paiext_attr_groups,
};

/* List of symbolic PAI extension 1 NNPA counter names. */
static const char * const paiext_ctrnames[] = {
	[0] = "NNPA_ALL",
	[1] = "NNPA_ADD",
	[2] = "NNPA_SUB",
	[3] = "NNPA_MUL",
	[4] = "NNPA_DIV",
	[5] = "NNPA_MIN",
	[6] = "NNPA_MAX",
	[7] = "NNPA_LOG",
	[8] = "NNPA_EXP",
	[9] = "NNPA_IBM_RESERVED_9",
	[10] = "NNPA_RELU",
	[11] = "NNPA_TANH",
	[12] = "NNPA_SIGMOID",
	[13] = "NNPA_SOFTMAX",
	[14] = "NNPA_BATCHNORM",
	[15] = "NNPA_MAXPOOL2D",
	[16] = "NNPA_AVGPOOL2D",
	[17] = "NNPA_LSTMACT",
	[18] = "NNPA_GRUACT",
	[19] = "NNPA_CONVOLUTION",
	[20] = "NNPA_MATMUL_OP",
	[21] = "NNPA_MATMUL_OP_BCAST23",
	[22] = "NNPA_SMALLBATCH",
	[23] = "NNPA_LARGEDIM",
	[24] = "NNPA_SMALLTENSOR",
	[25] = "NNPA_1MFRAME",
	[26] = "NNPA_2GFRAME",
	[27] = "NNPA_ACCESSEXCEPT",
	[28] = "NNPA_TRANSFORM",
	[29] = "NNPA_GELU",
	[30] = "NNPA_MOMENTS",
	[31] = "NNPA_LAYERNORM",
	[32] = "NNPA_MATMUL_OP_BCAST1",
	[33] = "NNPA_SQRT",
	[34] = "NNPA_INVSQRT",
	[35] = "NNPA_NORM",
	[36] = "NNPA_REDUCE",
};

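/* Free the dynamically allocated sysfs event attributes and the array
 * holding them.
 */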
static void __init attr_event_free(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;
	struct device_attribute *dap;
	int i;

	for (i = 0; i < num; i++) {
		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}

static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	/* Index at or beyond the end of the name array, no counter name
	 * available.
	 */
	if (num >= ARRAY_SIZE(paiext_ctrnames)) {
		attrs[num] = NULL;
		return 0;
	}

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_NNPA_BASE + num;
	pa->attr.attr.name = paiext_ctrnames[num];
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}

/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(paiext_cnt + 2, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i <= paiext_cnt; i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			attr_event_free(attrs, i);
			return ret;
		}
	}
	attrs[i] = NULL;
	paiext_events_group.attrs = attrs;
	return 0;
}

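/* Detect the PAI extension facility, determine the number of mapped NNPA
 * counters with QPACI, set up the sysfs attributes and the s390dbf debug
 * facility, and register the PMU.
 */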
static int __init paiext_init(void)
{
	struct qpaci_info_block ib;
	int rc = -ENOMEM;

	if (!test_facility(197))
		return 0;

	qpaci(&ib);
	paiext_cnt = ib.num_nnpa;
	if (paiext_cnt >= PAI_NNPA_MAXCTR)
		paiext_cnt = PAI_NNPA_MAXCTR;
	if (!paiext_cnt)
		return 0;

	rc = attr_event_init();
	if (rc) {
		pr_err("Creation of PMU " KMSG_COMPONENT " /sysfs failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	paiext_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!paiext_dbg) {
		pr_err("Registration of s390dbf " KMSG_COMPONENT " failed\n");
		rc = -ENOMEM;
		goto out_init;
	}
	debug_register_view(paiext_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paiext, KMSG_COMPONENT, -1);
	if (rc) {
		pr_err("Registration of " KMSG_COMPONENT " PMU failed with "
		       "rc=%i\n", rc);
		goto out_pmu;
	}

	return 0;

out_pmu:
	debug_unregister_view(paiext_dbg, &debug_sprintf_view);
	debug_unregister(paiext_dbg);
out_init:
	attr_event_free(paiext_events_group.attrs,
			ARRAY_SIZE(paiext_ctrnames) + 1);
	return rc;
}

device_initcall(paiext_init);
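
As an illustration (not part of the kernel file above), here is a minimal user-space sketch that counts the NNPA_ALL event through this PMU via perf_event_open(). It assumes the PMU has registered under the name pai_ext, so its dynamically assigned type number can be read from sysfs, and that NNPA_ALL corresponds to raw config 0x1800, following the "0x1800 + offset" numbering described in the attribute comment above.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

int main(void)
{
	struct perf_event_attr attr;
	unsigned int type;
	long long count;
	FILE *f;
	int fd;

	/* Read the dynamically assigned PMU type number of pai_ext. */
	f = fopen("/sys/bus/event_source/devices/pai_ext/type", "r");
	if (!f)
		return 1;
	if (fscanf(f, "%u", &type) != 1) {
		fclose(f);
		return 1;
	}
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = type;
	attr.config = 0x1800;	/* Assumed NNPA_ALL, see paiext_ctrnames[] */

	/* Counting mode (no sample_period), system-wide on CPU 0.
	 * Requires CAP_PERFMON or root.
	 */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;
	sleep(1);
	if (read(fd, &count, sizeof(count)) == (ssize_t)sizeof(count))
		printf("NNPA_ALL: %lld\n", count);
	close(fd);
	return 0;
}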