v6.13.7
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Performance event support - Processor Activity Instrumentation Facility
  4 *
  5 *  Copyright IBM Corp. 2022
  6 *  Author(s): Thomas Richter <tmricht@linux.ibm.com>
  7 */
  8#define KMSG_COMPONENT	"pai_crypto"
  9#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
 10
 11#include <linux/kernel.h>
 12#include <linux/kernel_stat.h>
 13#include <linux/percpu.h>
 14#include <linux/notifier.h>
 15#include <linux/init.h>
 16#include <linux/export.h>
 17#include <linux/io.h>
 18#include <linux/perf_event.h>
 19#include <asm/ctlreg.h>
 20#include <asm/pai.h>
 21#include <asm/debug.h>
 22
 23static debug_info_t *cfm_dbg;
 24static unsigned int paicrypt_cnt;	/* Size of the mapped counter sets */
 25					/* extracted with QPACI instruction */
 26
 27DEFINE_STATIC_KEY_FALSE(pai_key);
 28
 29struct pai_userdata {
 30	u16 num;
 31	u64 value;
 32} __packed;
 33
 34struct paicrypt_map {
 35	unsigned long *page;		/* Page for CPU to store counters */
 36	struct pai_userdata *save;	/* Page to store non-zero counters */
 37	unsigned int active_events;	/* # of PAI crypto users */
 38	refcount_t refcnt;		/* Reference count mapped buffers */
 39	struct perf_event *event;	/* Perf event for sampling */
 40	struct list_head syswide_list;	/* List system-wide sampling events */
 41};
 42
 43struct paicrypt_mapptr {
 44	struct paicrypt_map *mapptr;
 45};
 46
 47static struct paicrypt_root {		/* Anchor to per CPU data */
 48	refcount_t refcnt;		/* Overall active events */
 49	struct paicrypt_mapptr __percpu *mapptr;
 50} paicrypt_root;
 51
 52/* Free per CPU data when the last event is removed. */
 53static void paicrypt_root_free(void)
 54{
 55	if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
 56		free_percpu(paicrypt_root.mapptr);
 57		paicrypt_root.mapptr = NULL;
 58	}
 59	debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
 60			    refcount_read(&paicrypt_root.refcnt));
 61}
 62
 63/*
 64 * On initialization of the first event also allocate per CPU data dynamically.
 65 * Start with an array of pointers, the array size is the maximum number of
 66 * CPUs possible, which might be larger than the number of CPUs currently
 67 * online.
 68 */
 69static int paicrypt_root_alloc(void)
 70{
 71	if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
 72		/* The memory is already zeroed. */
 73		paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
 74		if (!paicrypt_root.mapptr)
 75			return -ENOMEM;
 76		refcount_set(&paicrypt_root.refcnt, 1);
 77	}
 78	return 0;
 79}
 80
 81/* Release the PMU if event is the last perf event */
 82static DEFINE_MUTEX(pai_reserve_mutex);
 83
 84/* Adjust usage counters and remove allocated memory when all users are
 85 * gone.
 86 */
 87static void paicrypt_event_destroy_cpu(struct perf_event *event, int cpu)
 88{
 89	struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
 90	struct paicrypt_map *cpump = mp->mapptr;
 91
 92	mutex_lock(&pai_reserve_mutex);
 93	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d "
 94			    "refcnt %u\n", __func__, event->attr.config,
 95			    event->cpu, cpump->active_events,
 96			    refcount_read(&cpump->refcnt));
 97	if (refcount_dec_and_test(&cpump->refcnt)) {
 98		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
 99				    __func__, (unsigned long)cpump->page,
100				    cpump->save);
101		free_page((unsigned long)cpump->page);
102		kvfree(cpump->save);
103		kfree(cpump);
104		mp->mapptr = NULL;
105	}
106	paicrypt_root_free();
107	mutex_unlock(&pai_reserve_mutex);
108}
109
110static void paicrypt_event_destroy(struct perf_event *event)
111{
112	int cpu;
113
114	static_branch_dec(&pai_key);
115	free_page(PAI_SAVE_AREA(event));
116	if (event->cpu == -1) {
117		struct cpumask *mask = PAI_CPU_MASK(event);
118
119		for_each_cpu(cpu, mask)
120			paicrypt_event_destroy_cpu(event, cpu);
121		kfree(mask);
122	} else {
123		paicrypt_event_destroy_cpu(event, event->cpu);
124	}
125}
126
127static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
128{
129	if (kernel)
130		nr += PAI_CRYPTO_MAXCTR;
131	return page[nr];
132}
133
134/* Read the counter values. Return value from location in CMP. For event
135 * CRYPTO_ALL sum up all events.
136 */
137static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
138{
139	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
140	struct paicrypt_map *cpump = mp->mapptr;
141	u64 sum = 0;
142	int i;
143
144	if (event->attr.config != PAI_CRYPTO_BASE) {
145		return paicrypt_getctr(cpump->page,
146				       event->attr.config - PAI_CRYPTO_BASE,
147				       kernel);
148	}
149
150	for (i = 1; i <= paicrypt_cnt; i++) {
151		u64 val = paicrypt_getctr(cpump->page, i, kernel);
152
153		if (!val)
154			continue;
155		sum += val;
156	}
157	return sum;
158}
159
160static u64 paicrypt_getall(struct perf_event *event)
161{
162	u64 sum = 0;
163
164	if (!event->attr.exclude_kernel)
165		sum += paicrypt_getdata(event, true);
166	if (!event->attr.exclude_user)
167		sum += paicrypt_getdata(event, false);
168
169	return sum;
170}
171
172/* Check concurrent access of counting and sampling for crypto events.
173 * This function is called in process context and it is safe to block.
174 * When the event initialization function fails, no other callback will
175 * be invoked.
176 *
177 * Allocate the memory for the event.
178 */
179static struct paicrypt_map *paicrypt_busy(struct perf_event *event, int cpu)
180{
181	struct paicrypt_map *cpump = NULL;
182	struct paicrypt_mapptr *mp;
183	int rc;
184
185	mutex_lock(&pai_reserve_mutex);
186
187	/* Allocate root node */
188	rc = paicrypt_root_alloc();
189	if (rc)
190		goto unlock;
191
192	/* Allocate node for this event */
193	mp = per_cpu_ptr(paicrypt_root.mapptr, cpu);
194	cpump = mp->mapptr;
195	if (!cpump) {			/* Paicrypt_map allocated? */
196		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
197		if (!cpump) {
198			rc = -ENOMEM;
199			goto free_root;
200		}
201		INIT_LIST_HEAD(&cpump->syswide_list);
202	}
203
204	/* Allocate memory for counter page and counter extraction.
205	 * Only the first counting event has to allocate a page.
206	 */
207	if (cpump->page) {
208		refcount_inc(&cpump->refcnt);
209		goto unlock;
210	}
211
212	rc = -ENOMEM;
213	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
214	if (!cpump->page)
215		goto free_paicrypt_map;
216	cpump->save = kvmalloc_array(paicrypt_cnt + 1,
217				     sizeof(struct pai_userdata), GFP_KERNEL);
218	if (!cpump->save) {
219		free_page((unsigned long)cpump->page);
220		cpump->page = NULL;
221		goto free_paicrypt_map;
222	}
223
224	/* Set reference count */
225	rc = 0;
226	refcount_set(&cpump->refcnt, 1);
227	mp->mapptr = cpump;
228	debug_sprintf_event(cfm_dbg, 5, "%s users %d refcnt %u page %#lx "
229			    "save %p rc %d\n", __func__, cpump->active_events,
230			    refcount_read(&cpump->refcnt),
231			    (unsigned long)cpump->page, cpump->save, rc);
232	goto unlock;
233
234free_paicrypt_map:
235	/* Undo memory allocation */
236	kfree(cpump);
237	mp->mapptr = NULL;
238free_root:
239	paicrypt_root_free();
240unlock:
241	mutex_unlock(&pai_reserve_mutex);
242	return rc ? ERR_PTR(rc) : cpump;
243}
244
245static int paicrypt_event_init_all(struct perf_event *event)
246{
247	struct paicrypt_map *cpump;
248	struct cpumask *maskptr;
249	int cpu, rc = -ENOMEM;
250
251	maskptr = kzalloc(sizeof(*maskptr), GFP_KERNEL);
252	if (!maskptr)
253		goto out;
254
255	for_each_online_cpu(cpu) {
256		cpump = paicrypt_busy(event, cpu);
257		if (IS_ERR(cpump)) {
258			for_each_cpu(cpu, maskptr)
259				paicrypt_event_destroy_cpu(event, cpu);
260			kfree(maskptr);
261			rc = PTR_ERR(cpump);
262			goto out;
263		}
264		cpumask_set_cpu(cpu, maskptr);
265	}
266
267	/*
268	 * On error the cpumask is freed and all events have been destroyed.
269	 * Otherwise save the cpumask denoting which CPUs data structures
270	 * have been allocated for. Release them in the paicrypt_event_destroy()
271	 * callback function for this event.
272	 */
273	PAI_CPU_MASK(event) = maskptr;
274	rc = 0;
275out:
276	return rc;
277}
278
279/* Might be called on a different CPU than the one the event is intended for. */
280static int paicrypt_event_init(struct perf_event *event)
281{
282	struct perf_event_attr *a = &event->attr;
283	struct paicrypt_map *cpump;
284	int rc = 0;
285
286	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
287	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
288		return -ENOENT;
289	/* PAI crypto event must be in valid range */
290	if (a->config < PAI_CRYPTO_BASE ||
291	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
292		return -EINVAL;
293	/* Allow only CRYPTO_ALL for sampling */
294	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
295		return -EINVAL;
296	/* Get a page to store last counter values for sampling */
297	if (a->sample_period) {
298		PAI_SAVE_AREA(event) = get_zeroed_page(GFP_KERNEL);
299		if (!PAI_SAVE_AREA(event)) {
300			rc = -ENOMEM;
301			goto out;
302		}
303	}
304
305	if (event->cpu >= 0) {
306		cpump = paicrypt_busy(event, event->cpu);
307		if (IS_ERR(cpump))
308			rc = PTR_ERR(cpump);
309	} else {
310		rc = paicrypt_event_init_all(event);
311	}
312	if (rc) {
313		free_page(PAI_SAVE_AREA(event));
314		goto out;
315	}
316	event->destroy = paicrypt_event_destroy;
317
318	if (a->sample_period) {
319		a->sample_period = 1;
320		a->freq = 0;
321		/* Register for paicrypt_sched_task() to be called */
322		event->attach_state |= PERF_ATTACH_SCHED_CB;
323		/* Add raw data which contains the memory mapped counters */
324		a->sample_type |= PERF_SAMPLE_RAW;
325		/* Turn off inheritance */
326		a->inherit = 0;
327	}
328
329	static_branch_inc(&pai_key);
330out:
331	return rc;
332}
333
334static void paicrypt_read(struct perf_event *event)
335{
336	u64 prev, new, delta;
337
338	prev = local64_read(&event->hw.prev_count);
339	new = paicrypt_getall(event);
340	local64_set(&event->hw.prev_count, new);
341	delta = (prev <= new) ? new - prev
342			      : (-1ULL - prev) + new + 1;	 /* overflow */
343	local64_add(delta, &event->count);
344}
345
346static void paicrypt_start(struct perf_event *event, int flags)
347{
348	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
349	struct paicrypt_map *cpump = mp->mapptr;
350	u64 sum;
351
352	if (!event->attr.sample_period) {	/* Counting */
353		sum = paicrypt_getall(event);	/* Get current value */
354		local64_set(&event->hw.prev_count, sum);
355	} else {				/* Sampling */
356		memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
357		/* Enable context switch callback for system-wide sampling */
358		if (!(event->attach_state & PERF_ATTACH_TASK)) {
359			list_add_tail(PAI_SWLIST(event), &cpump->syswide_list);
360			perf_sched_cb_inc(event->pmu);
361		} else {
362			cpump->event = event;
363		}
364	}
365}
366
367static int paicrypt_add(struct perf_event *event, int flags)
368{
369	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
370	struct paicrypt_map *cpump = mp->mapptr;
371	unsigned long ccd;
372
373	if (++cpump->active_events == 1) {
374		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
375		WRITE_ONCE(get_lowcore()->ccd, ccd);
376		local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
377	}
378	if (flags & PERF_EF_START)
379		paicrypt_start(event, PERF_EF_RELOAD);
380	event->hw.state = 0;
381	return 0;
382}
383
384static void paicrypt_have_sample(struct perf_event *, struct paicrypt_map *);
385static void paicrypt_stop(struct perf_event *event, int flags)
386{
387	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
388	struct paicrypt_map *cpump = mp->mapptr;
389
390	if (!event->attr.sample_period) {	/* Counting */
391		paicrypt_read(event);
392	} else {				/* Sampling */
393		if (!(event->attach_state & PERF_ATTACH_TASK)) {
394			perf_sched_cb_dec(event->pmu);
395			list_del(PAI_SWLIST(event));
396		} else {
397			paicrypt_have_sample(event, cpump);
398			cpump->event = NULL;
399		}
400	}
401	event->hw.state = PERF_HES_STOPPED;
402}
403
404static void paicrypt_del(struct perf_event *event, int flags)
405{
406	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
407	struct paicrypt_map *cpump = mp->mapptr;
408
409	paicrypt_stop(event, PERF_EF_UPDATE);
410	if (--cpump->active_events == 0) {
411		local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
412		WRITE_ONCE(get_lowcore()->ccd, 0);
413	}
414}
415
416/* Create raw data and save it in the buffer. Calculate the delta for each
417 * counter between this invocation and the last invocation.
418 * Returns number of bytes copied.
419 * Saves only entries with positive counter difference of the form
420 * 2 bytes: Number of counter
421 * 8 bytes: Value of counter
422 */
423static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
424			    unsigned long *page_old, bool exclude_user,
425			    bool exclude_kernel)
426{
427	int i, outidx = 0;
428
429	for (i = 1; i <= paicrypt_cnt; i++) {
430		u64 val = 0, val_old = 0;
431
432		if (!exclude_kernel) {
433			val += paicrypt_getctr(page, i, true);
434			val_old += paicrypt_getctr(page_old, i, true);
435		}
436		if (!exclude_user) {
437			val += paicrypt_getctr(page, i, false);
438			val_old += paicrypt_getctr(page_old, i, false);
439		}
440		if (val >= val_old)
441			val -= val_old;
442		else
443			val = (~0ULL - val_old) + val + 1;
444		if (val) {
445			userdata[outidx].num = i;
446			userdata[outidx].value = val;
447			outidx++;
448		}
449	}
450	return outidx * sizeof(struct pai_userdata);
451}
452
453static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
454				struct perf_event *event)
455{
456	struct perf_sample_data data;
457	struct perf_raw_record raw;
458	struct pt_regs regs;
459	int overflow;
460
461	/* Setup perf sample */
462	memset(&regs, 0, sizeof(regs));
463	memset(&raw, 0, sizeof(raw));
464	memset(&data, 0, sizeof(data));
465	perf_sample_data_init(&data, 0, event->hw.last_period);
466	if (event->attr.sample_type & PERF_SAMPLE_TID) {
467		data.tid_entry.pid = task_tgid_nr(current);
468		data.tid_entry.tid = task_pid_nr(current);
469	}
470	if (event->attr.sample_type & PERF_SAMPLE_TIME)
471		data.time = event->clock();
472	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
473		data.id = event->id;
474	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
475		data.cpu_entry.cpu = smp_processor_id();
476		data.cpu_entry.reserved = 0;
477	}
478	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
479		raw.frag.size = rawsize;
480		raw.frag.data = cpump->save;
481		perf_sample_save_raw_data(&data, event, &raw);
482	}
483
484	overflow = perf_event_overflow(event, &data, &regs);
485	perf_event_update_userpage(event);
486	/* Save crypto counter lowcore page after reading event data. */
487	memcpy((void *)PAI_SAVE_AREA(event), cpump->page, PAGE_SIZE);
488	return overflow;
489}
490
491/* Check if there is data to be saved on schedule out of a task. */
492static void paicrypt_have_sample(struct perf_event *event,
493				 struct paicrypt_map *cpump)
494{
495	size_t rawsize;
496
497	if (!event)		/* No event active */
498		return;
499	rawsize = paicrypt_copy(cpump->save, cpump->page,
500				(unsigned long *)PAI_SAVE_AREA(event),
501				event->attr.exclude_user,
502				event->attr.exclude_kernel);
503	if (rawsize)			/* Any counters incremented? */
504		paicrypt_push_sample(rawsize, cpump, event);
505}
506
507/* Check if there is data to be saved on schedule out of a task. */
508static void paicrypt_have_samples(void)
509{
510	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
511	struct paicrypt_map *cpump = mp->mapptr;
512	struct perf_event *event;
513
514	list_for_each_entry(event, &cpump->syswide_list, hw.tp_list)
515		paicrypt_have_sample(event, cpump);
516}
517
518/* Called on schedule-in and schedule-out. No access to event structure,
519 * but for sampling only event CRYPTO_ALL is allowed.
520 */
521static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
522{
523	/* We started with a clean page on event installation. So read out
524	 * results on schedule_out and if page was dirty, save old values.
525	 */
526	if (!sched_in)
527		paicrypt_have_samples();
528}
529
530/* Attribute definitions for paicrypt interface. As with other CPU
531 * Measurement Facilities, there is one attribute per mapped counter.
532 * The number of mapped counters may vary per machine generation. Use
533 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 534 * to determine the number of mapped counters. The instruction returns
535 * a positive number, which is the highest number of supported counters.
536 * All counters less than this number are also supported, there are no
537 * holes. A returned number of zero means no support for mapped counters.
538 *
539 * The identification of the counter is a unique number. The chosen range
540 * is 0x1000 + offset in mapped kernel page.
 541 * All CPU Measurement Facility counter identifiers must be unique and
542 * the numbers from 0 to 496 are already used for the CPU Measurement
543 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
544 * used for the CPU Measurement Sampling facility.
545 */
546PMU_FORMAT_ATTR(event, "config:0-63");
547
548static struct attribute *paicrypt_format_attr[] = {
549	&format_attr_event.attr,
550	NULL,
551};
552
553static struct attribute_group paicrypt_events_group = {
554	.name = "events",
555	.attrs = NULL			/* Filled in attr_event_init() */
556};
557
558static struct attribute_group paicrypt_format_group = {
559	.name = "format",
560	.attrs = paicrypt_format_attr,
561};
562
563static const struct attribute_group *paicrypt_attr_groups[] = {
564	&paicrypt_events_group,
565	&paicrypt_format_group,
566	NULL,
567};
568
569/* Performance monitoring unit for mapped counters */
570static struct pmu paicrypt = {
571	.task_ctx_nr  = perf_hw_context,
572	.event_init   = paicrypt_event_init,
573	.add	      = paicrypt_add,
574	.del	      = paicrypt_del,
575	.start	      = paicrypt_start,
576	.stop	      = paicrypt_stop,
577	.read	      = paicrypt_read,
578	.sched_task   = paicrypt_sched_task,
579	.attr_groups  = paicrypt_attr_groups
580};
581
582/* List of symbolic PAI counter names. */
583static const char * const paicrypt_ctrnames[] = {
584	[0] = "CRYPTO_ALL",
585	[1] = "KM_DEA",
586	[2] = "KM_TDEA_128",
587	[3] = "KM_TDEA_192",
588	[4] = "KM_ENCRYPTED_DEA",
589	[5] = "KM_ENCRYPTED_TDEA_128",
590	[6] = "KM_ENCRYPTED_TDEA_192",
591	[7] = "KM_AES_128",
592	[8] = "KM_AES_192",
593	[9] = "KM_AES_256",
594	[10] = "KM_ENCRYPTED_AES_128",
595	[11] = "KM_ENCRYPTED_AES_192",
596	[12] = "KM_ENCRYPTED_AES_256",
597	[13] = "KM_XTS_AES_128",
598	[14] = "KM_XTS_AES_256",
599	[15] = "KM_XTS_ENCRYPTED_AES_128",
600	[16] = "KM_XTS_ENCRYPTED_AES_256",
601	[17] = "KMC_DEA",
602	[18] = "KMC_TDEA_128",
603	[19] = "KMC_TDEA_192",
604	[20] = "KMC_ENCRYPTED_DEA",
605	[21] = "KMC_ENCRYPTED_TDEA_128",
606	[22] = "KMC_ENCRYPTED_TDEA_192",
607	[23] = "KMC_AES_128",
608	[24] = "KMC_AES_192",
609	[25] = "KMC_AES_256",
610	[26] = "KMC_ENCRYPTED_AES_128",
611	[27] = "KMC_ENCRYPTED_AES_192",
612	[28] = "KMC_ENCRYPTED_AES_256",
613	[29] = "KMC_PRNG",
614	[30] = "KMA_GCM_AES_128",
615	[31] = "KMA_GCM_AES_192",
616	[32] = "KMA_GCM_AES_256",
617	[33] = "KMA_GCM_ENCRYPTED_AES_128",
618	[34] = "KMA_GCM_ENCRYPTED_AES_192",
619	[35] = "KMA_GCM_ENCRYPTED_AES_256",
620	[36] = "KMF_DEA",
621	[37] = "KMF_TDEA_128",
622	[38] = "KMF_TDEA_192",
623	[39] = "KMF_ENCRYPTED_DEA",
624	[40] = "KMF_ENCRYPTED_TDEA_128",
625	[41] = "KMF_ENCRYPTED_TDEA_192",
626	[42] = "KMF_AES_128",
627	[43] = "KMF_AES_192",
628	[44] = "KMF_AES_256",
629	[45] = "KMF_ENCRYPTED_AES_128",
630	[46] = "KMF_ENCRYPTED_AES_192",
631	[47] = "KMF_ENCRYPTED_AES_256",
632	[48] = "KMCTR_DEA",
633	[49] = "KMCTR_TDEA_128",
634	[50] = "KMCTR_TDEA_192",
635	[51] = "KMCTR_ENCRYPTED_DEA",
636	[52] = "KMCTR_ENCRYPTED_TDEA_128",
637	[53] = "KMCTR_ENCRYPTED_TDEA_192",
638	[54] = "KMCTR_AES_128",
639	[55] = "KMCTR_AES_192",
640	[56] = "KMCTR_AES_256",
641	[57] = "KMCTR_ENCRYPTED_AES_128",
642	[58] = "KMCTR_ENCRYPTED_AES_192",
643	[59] = "KMCTR_ENCRYPTED_AES_256",
644	[60] = "KMO_DEA",
645	[61] = "KMO_TDEA_128",
646	[62] = "KMO_TDEA_192",
647	[63] = "KMO_ENCRYPTED_DEA",
648	[64] = "KMO_ENCRYPTED_TDEA_128",
649	[65] = "KMO_ENCRYPTED_TDEA_192",
650	[66] = "KMO_AES_128",
651	[67] = "KMO_AES_192",
652	[68] = "KMO_AES_256",
653	[69] = "KMO_ENCRYPTED_AES_128",
654	[70] = "KMO_ENCRYPTED_AES_192",
655	[71] = "KMO_ENCRYPTED_AES_256",
656	[72] = "KIMD_SHA_1",
657	[73] = "KIMD_SHA_256",
658	[74] = "KIMD_SHA_512",
659	[75] = "KIMD_SHA3_224",
660	[76] = "KIMD_SHA3_256",
661	[77] = "KIMD_SHA3_384",
662	[78] = "KIMD_SHA3_512",
663	[79] = "KIMD_SHAKE_128",
664	[80] = "KIMD_SHAKE_256",
665	[81] = "KIMD_GHASH",
666	[82] = "KLMD_SHA_1",
667	[83] = "KLMD_SHA_256",
668	[84] = "KLMD_SHA_512",
669	[85] = "KLMD_SHA3_224",
670	[86] = "KLMD_SHA3_256",
671	[87] = "KLMD_SHA3_384",
672	[88] = "KLMD_SHA3_512",
673	[89] = "KLMD_SHAKE_128",
674	[90] = "KLMD_SHAKE_256",
675	[91] = "KMAC_DEA",
676	[92] = "KMAC_TDEA_128",
677	[93] = "KMAC_TDEA_192",
678	[94] = "KMAC_ENCRYPTED_DEA",
679	[95] = "KMAC_ENCRYPTED_TDEA_128",
680	[96] = "KMAC_ENCRYPTED_TDEA_192",
681	[97] = "KMAC_AES_128",
682	[98] = "KMAC_AES_192",
683	[99] = "KMAC_AES_256",
684	[100] = "KMAC_ENCRYPTED_AES_128",
685	[101] = "KMAC_ENCRYPTED_AES_192",
686	[102] = "KMAC_ENCRYPTED_AES_256",
687	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
688	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
689	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
690	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
691	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
692	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
693	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
694	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
695	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
696	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
697	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
698	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
699	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
700	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
701	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
702	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
703	[119] = "PCC_SCALAR_MULTIPLY_P256",
704	[120] = "PCC_SCALAR_MULTIPLY_P384",
705	[121] = "PCC_SCALAR_MULTIPLY_P521",
706	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
707	[123] = "PCC_SCALAR_MULTIPLY_ED448",
708	[124] = "PCC_SCALAR_MULTIPLY_X25519",
709	[125] = "PCC_SCALAR_MULTIPLY_X448",
710	[126] = "PRNO_SHA_512_DRNG",
711	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
712	[128] = "PRNO_TRNG",
713	[129] = "KDSA_ECDSA_VERIFY_P256",
714	[130] = "KDSA_ECDSA_VERIFY_P384",
715	[131] = "KDSA_ECDSA_VERIFY_P521",
716	[132] = "KDSA_ECDSA_SIGN_P256",
717	[133] = "KDSA_ECDSA_SIGN_P384",
718	[134] = "KDSA_ECDSA_SIGN_P521",
719	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
720	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
721	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
722	[138] = "KDSA_EDDSA_VERIFY_ED25519",
723	[139] = "KDSA_EDDSA_VERIFY_ED448",
724	[140] = "KDSA_EDDSA_SIGN_ED25519",
725	[141] = "KDSA_EDDSA_SIGN_ED448",
726	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
727	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
728	[144] = "PCKMO_ENCRYPT_DEA_KEY",
729	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
730	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
731	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
732	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
733	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
734	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
735	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
736	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
737	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
738	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
739	[155] = "IBM_RESERVED_155",
740	[156] = "IBM_RESERVED_156",
741	[157] = "KM_FULL_XTS_AES_128",
742	[158] = "KM_FULL_XTS_AES_256",
743	[159] = "KM_FULL_XTS_ENCRYPTED_AES_128",
744	[160] = "KM_FULL_XTS_ENCRYPTED_AES_256",
745	[161] = "KMAC_HMAC_SHA_224",
746	[162] = "KMAC_HMAC_SHA_256",
747	[163] = "KMAC_HMAC_SHA_384",
748	[164] = "KMAC_HMAC_SHA_512",
749	[165] = "KMAC_HMAC_ENCRYPTED_SHA_224",
750	[166] = "KMAC_HMAC_ENCRYPTED_SHA_256",
751	[167] = "KMAC_HMAC_ENCRYPTED_SHA_384",
752	[168] = "KMAC_HMAC_ENCRYPTED_SHA_512",
753	[169] = "PCKMO_ENCRYPT_HMAC_512_KEY",
754	[170] = "PCKMO_ENCRYPT_HMAC_1024_KEY",
755	[171] = "PCKMO_ENCRYPT_AES_XTS_128",
756	[172] = "PCKMO_ENCRYPT_AES_XTS_256",
757};
758
759static void __init attr_event_free(struct attribute **attrs, int num)
760{
761	struct perf_pmu_events_attr *pa;
762	int i;
763
764	for (i = 0; i < num; i++) {
765		struct device_attribute *dap;
766
767		dap = container_of(attrs[i], struct device_attribute, attr);
768		pa = container_of(dap, struct perf_pmu_events_attr, attr);
769		kfree(pa);
770	}
771	kfree(attrs);
772}
773
774static int __init attr_event_init_one(struct attribute **attrs, int num)
775{
776	struct perf_pmu_events_attr *pa;
777
778	/* Index larger than array_size, no counter name available */
779	if (num >= ARRAY_SIZE(paicrypt_ctrnames)) {
780		attrs[num] = NULL;
781		return 0;
782	}
783
784	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
785	if (!pa)
786		return -ENOMEM;
787
788	sysfs_attr_init(&pa->attr.attr);
789	pa->id = PAI_CRYPTO_BASE + num;
790	pa->attr.attr.name = paicrypt_ctrnames[num];
791	pa->attr.attr.mode = 0444;
792	pa->attr.show = cpumf_events_sysfs_show;
793	pa->attr.store = NULL;
794	attrs[num] = &pa->attr.attr;
795	return 0;
796}
797
798/* Create PMU sysfs event attributes on the fly. */
799static int __init attr_event_init(void)
800{
801	struct attribute **attrs;
802	int ret, i;
803
804	attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL);
805	if (!attrs)
806		return -ENOMEM;
807	for (i = 0; i <= paicrypt_cnt; i++) {
808		ret = attr_event_init_one(attrs, i);
809		if (ret) {
810			attr_event_free(attrs, i);
811			return ret;
812		}
813	}
814	attrs[i] = NULL;
815	paicrypt_events_group.attrs = attrs;
816	return 0;
817}
818
819static int __init paicrypt_init(void)
820{
821	struct qpaci_info_block ib;
822	int rc;
823
824	if (!test_facility(196))
825		return 0;
826
827	qpaci(&ib);
828	paicrypt_cnt = ib.num_cc;
829	if (paicrypt_cnt == 0)
830		return 0;
831	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR) {
832		pr_err("Too many PMU pai_crypto counters %d\n", paicrypt_cnt);
833		return -E2BIG;
834	}
835
836	rc = attr_event_init();		/* Export known PAI crypto events */
837	if (rc) {
838		pr_err("Creation of PMU pai_crypto /sysfs failed\n");
839		return rc;
840	}
841
842	/* Setup s390dbf facility */
843	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
844	if (!cfm_dbg) {
845		pr_err("Registration of s390dbf pai_crypto failed\n");
846		return -ENOMEM;
847	}
848	debug_register_view(cfm_dbg, &debug_sprintf_view);
849
850	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
851	if (rc) {
852		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
853		       rc);
854		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
855		debug_unregister(cfm_dbg);
856		return rc;
857	}
858	return 0;
859}
860
861device_initcall(paicrypt_init);
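The PMU above is registered with perf_pmu_register() as "pai_crypto" and accepts PERF_TYPE_RAW events whose config lies in the range PAI_CRYPTO_BASE .. PAI_CRYPTO_BASE + paicrypt_cnt; per the attribute comment, the identifiers start at 0x1000, so config 0x1000 selects CRYPTO_ALL. A hedged user-space sketch (not part of the kernel sources above; the CPU number, sleep-based workload and permission handling are illustrative assumptions) of opening and reading such a counting event:

/*
 * Hedged sketch: open the CRYPTO_ALL counter exported by this PMU on one
 * CPU and read it once. Assumes an s390 machine with the PAI crypto
 * facility and sufficient perf_event_open() privileges.
 */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_RAW;
	attr.config = 0x1000;		/* PAI_CRYPTO_BASE == CRYPTO_ALL */
	attr.disabled = 1;
	attr.exclude_kernel = 1;	/* count user-space crypto use only */

	/* System-wide counting on CPU 0; pid == -1, cpu == 0. */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}
	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	sleep(1);			/* replace with a real crypto workload */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("CRYPTO_ALL: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}

The event names exported by attr_event_init() appear under /sys/bus/event_source/devices/pai_crypto/events/, so the same counters should also be reachable symbolically, e.g. with perf stat -e pai_crypto/KM_AES_128/.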
v6.8
  1// SPDX-License-Identifier: GPL-2.0
  2/*
  3 * Performance event support - Processor Activity Instrumentation Facility
  4 *
  5 *  Copyright IBM Corp. 2022
  6 *  Author(s): Thomas Richter <tmricht@linux.ibm.com>
  7 */
  8#define KMSG_COMPONENT	"pai_crypto"
  9#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt
 10
 11#include <linux/kernel.h>
 12#include <linux/kernel_stat.h>
 13#include <linux/percpu.h>
 14#include <linux/notifier.h>
 15#include <linux/init.h>
 16#include <linux/export.h>
 17#include <linux/io.h>
 18#include <linux/perf_event.h>
 19#include <asm/ctlreg.h>
 20#include <asm/pai.h>
 21#include <asm/debug.h>
 22
 23static debug_info_t *cfm_dbg;
 24static unsigned int paicrypt_cnt;	/* Size of the mapped counter sets */
 25					/* extracted with QPACI instruction */
 26
 27DEFINE_STATIC_KEY_FALSE(pai_key);
 28
 29struct pai_userdata {
 30	u16 num;
 31	u64 value;
 32} __packed;
 33
 34struct paicrypt_map {
 35	unsigned long *page;		/* Page for CPU to store counters */
 36	struct pai_userdata *save;	/* Page to store non-zero counters */
 37	unsigned int active_events;	/* # of PAI crypto users */
 38	refcount_t refcnt;		/* Reference count mapped buffers */
 39	enum paievt_mode mode;		/* Type of event */
 40	struct perf_event *event;	/* Perf event for sampling */
 41};
 42
 43struct paicrypt_mapptr {
 44	struct paicrypt_map *mapptr;
 45};
 46
 47static struct paicrypt_root {		/* Anchor to per CPU data */
 48	refcount_t refcnt;		/* Overall active events */
 49	struct paicrypt_mapptr __percpu *mapptr;
 50} paicrypt_root;
 51
 52/* Free per CPU data when the last event is removed. */
 53static void paicrypt_root_free(void)
 54{
 55	if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
 56		free_percpu(paicrypt_root.mapptr);
 57		paicrypt_root.mapptr = NULL;
 58	}
 59	debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
 60			    refcount_read(&paicrypt_root.refcnt));
 61}
 62
 63/*
 64 * On initialization of the first event also allocate per CPU data dynamically.
 65 * Start with an array of pointers, the array size is the maximum number of
 66 * CPUs possible, which might be larger than the number of CPUs currently
 67 * online.
 68 */
 69static int paicrypt_root_alloc(void)
 70{
 71	if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
 72		/* The memory is already zeroed. */
 73		paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
 74		if (!paicrypt_root.mapptr)
 75			return -ENOMEM;
 76		refcount_set(&paicrypt_root.refcnt, 1);
 77	}
 78	return 0;
 79}
 80
 81/* Release the PMU if event is the last perf event */
 82static DEFINE_MUTEX(pai_reserve_mutex);
 83
 84/* Adjust usage counters and remove allocated memory when all users are
 85 * gone.
 86 */
 87static void paicrypt_event_destroy(struct perf_event *event)
 88{
 89	struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr,
 90						 event->cpu);
 91	struct paicrypt_map *cpump = mp->mapptr;
 92
 93	cpump->event = NULL;
 94	static_branch_dec(&pai_key);
 95	mutex_lock(&pai_reserve_mutex);
 96	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
 97			    " mode %d refcnt %u\n", __func__,
 98			    event->attr.config, event->cpu,
 99			    cpump->active_events, cpump->mode,
100			    refcount_read(&cpump->refcnt));
101	if (refcount_dec_and_test(&cpump->refcnt)) {
102		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
103				    __func__, (unsigned long)cpump->page,
104				    cpump->save);
105		free_page((unsigned long)cpump->page);
106		kvfree(cpump->save);
107		kfree(cpump);
108		mp->mapptr = NULL;
109	}
110	paicrypt_root_free();
111	mutex_unlock(&pai_reserve_mutex);
112}
113
114static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
115{
116	if (kernel)
117		nr += PAI_CRYPTO_MAXCTR;
118	return page[nr];
119}
120
121/* Read the counter values. Return value from location in CMP. For event
122 * CRYPTO_ALL sum up all events.
123 */
124static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
125{
126	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
127	struct paicrypt_map *cpump = mp->mapptr;
128	u64 sum = 0;
129	int i;
130
131	if (event->attr.config != PAI_CRYPTO_BASE) {
132		return paicrypt_getctr(cpump->page,
133				       event->attr.config - PAI_CRYPTO_BASE,
134				       kernel);
135	}
136
137	for (i = 1; i <= paicrypt_cnt; i++) {
138		u64 val = paicrypt_getctr(cpump->page, i, kernel);
139
140		if (!val)
141			continue;
142		sum += val;
143	}
144	return sum;
145}
146
147static u64 paicrypt_getall(struct perf_event *event)
148{
149	u64 sum = 0;
150
151	if (!event->attr.exclude_kernel)
152		sum += paicrypt_getdata(event, true);
153	if (!event->attr.exclude_user)
154		sum += paicrypt_getdata(event, false);
155
156	return sum;
157}
158
159/* Used to avoid races in checking concurrent access of counting and
160 * sampling for crypto events
161 *
162 * Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is
163 * allowed and when this event is running, no counting event is allowed.
164 * Several counting events are allowed in parallel, but no sampling event
165 * is allowed while one (or more) counting events are running.
166 *
167 * This function is called in process context and it is safe to block.
168 * When the event initialization function fails, no other callback will
169 * be invoked.
170 *
171 * Allocate the memory for the event.
172 */
173static struct paicrypt_map *paicrypt_busy(struct perf_event *event)
174{
175	struct perf_event_attr *a = &event->attr;
176	struct paicrypt_map *cpump = NULL;
177	struct paicrypt_mapptr *mp;
178	int rc;
179
180	mutex_lock(&pai_reserve_mutex);
181
182	/* Allocate root node */
183	rc = paicrypt_root_alloc();
184	if (rc)
185		goto unlock;
186
187	/* Allocate node for this event */
188	mp = per_cpu_ptr(paicrypt_root.mapptr, event->cpu);
189	cpump = mp->mapptr;
190	if (!cpump) {			/* Paicrypt_map allocated? */
191		cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
192		if (!cpump) {
193			rc = -ENOMEM;
194			goto free_root;
195		}
196	}
197
198	if (a->sample_period) {		/* Sampling requested */
199		if (cpump->mode != PAI_MODE_NONE)
200			rc = -EBUSY;	/* ... sampling/counting active */
201	} else {			/* Counting requested */
202		if (cpump->mode == PAI_MODE_SAMPLING)
203			rc = -EBUSY;	/* ... and sampling active */
204	}
205	/*
206	 * This error case triggers when there is a conflict:
207	 * Either sampling requested and counting already active, or vice
208	 * versa. Therefore the struct paicrypt_map for this CPU is
209	 * needed or the error could not have occurred. Only adjust root
210	 * node refcount.
211	 */
212	if (rc)
213		goto free_root;
214
215	/* Allocate memory for counter page and counter extraction.
216	 * Only the first counting event has to allocate a page.
217	 */
218	if (cpump->page) {
219		refcount_inc(&cpump->refcnt);
220		goto unlock;
221	}
222
223	rc = -ENOMEM;
224	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
225	if (!cpump->page)
226		goto free_paicrypt_map;
227	cpump->save = kvmalloc_array(paicrypt_cnt + 1,
228				     sizeof(struct pai_userdata), GFP_KERNEL);
229	if (!cpump->save) {
230		free_page((unsigned long)cpump->page);
231		cpump->page = NULL;
232		goto free_paicrypt_map;
233	}
234
235	/* Set mode and reference count */
236	rc = 0;
237	refcount_set(&cpump->refcnt, 1);
238	cpump->mode = a->sample_period ? PAI_MODE_SAMPLING : PAI_MODE_COUNTING;
239	mp->mapptr = cpump;
240	debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
241			    " mode %d refcnt %u page %#lx save %p rc %d\n",
242			    __func__, a->sample_period, cpump->active_events,
243			    cpump->mode, refcount_read(&cpump->refcnt),
244			    (unsigned long)cpump->page, cpump->save, rc);
245	goto unlock;
246
247free_paicrypt_map:
248	kfree(cpump);
249	mp->mapptr = NULL;
250free_root:
251	paicrypt_root_free();
252
253unlock:
254	mutex_unlock(&pai_reserve_mutex);
255	return rc ? ERR_PTR(rc) : cpump;
256}
257
258/* Might be called on a different CPU than the one the event is intended for. */
259static int paicrypt_event_init(struct perf_event *event)
260{
261	struct perf_event_attr *a = &event->attr;
262	struct paicrypt_map *cpump;
263
264	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
265	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
266		return -ENOENT;
267	/* PAI crypto event must be in valid range */
268	if (a->config < PAI_CRYPTO_BASE ||
269	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
270		return -EINVAL;
271	/* Allow only CPU wide operation, no process context for now. */
272	if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
273		return -ENOENT;
274	/* Allow only CRYPTO_ALL for sampling. */
275	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
276		return -EINVAL;
277
278	cpump = paicrypt_busy(event);
279	if (IS_ERR(cpump))
280		return PTR_ERR(cpump);
281
282	event->destroy = paicrypt_event_destroy;
283
284	if (a->sample_period) {
285		a->sample_period = 1;
286		a->freq = 0;
287		/* Register for paicrypt_sched_task() to be called */
288		event->attach_state |= PERF_ATTACH_SCHED_CB;
289		/* Add raw data which contains the memory mapped counters */
290		a->sample_type |= PERF_SAMPLE_RAW;
291		/* Turn off inheritance */
292		a->inherit = 0;
293	}
294
295	static_branch_inc(&pai_key);
296	return 0;
297}
298
299static void paicrypt_read(struct perf_event *event)
300{
301	u64 prev, new, delta;
302
303	prev = local64_read(&event->hw.prev_count);
304	new = paicrypt_getall(event);
305	local64_set(&event->hw.prev_count, new);
306	delta = (prev <= new) ? new - prev
307			      : (-1ULL - prev) + new + 1;	 /* overflow */
308	local64_add(delta, &event->count);
309}
310
311static void paicrypt_start(struct perf_event *event, int flags)
312{
313	u64 sum;
314
315	/* Event initialization sets last_tag to 0. When later on the events
316	 * are deleted and re-added, do not reset the event count value to zero.
317	 * Events are added, deleted and re-added when 2 or more events
318	 * are active at the same time.
319	 */
320	if (!event->attr.sample_period) {	/* Counting */
321		if (!event->hw.last_tag) {
322			event->hw.last_tag = 1;
323			sum = paicrypt_getall(event);	/* Get current value */
324			local64_set(&event->hw.prev_count, sum);
325		}
326	} else {				/* Sampling */
327		perf_sched_cb_inc(event->pmu);
328	}
329}
330
331static int paicrypt_add(struct perf_event *event, int flags)
332{
333	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
334	struct paicrypt_map *cpump = mp->mapptr;
335	unsigned long ccd;
336
337	if (++cpump->active_events == 1) {
338		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
339		WRITE_ONCE(S390_lowcore.ccd, ccd);
340		local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
341	}
342	cpump->event = event;
343	if (flags & PERF_EF_START)
344		paicrypt_start(event, PERF_EF_RELOAD);
345	event->hw.state = 0;
346	return 0;
347}
348
349static void paicrypt_stop(struct perf_event *event, int flags)
350{
351	if (!event->attr.sample_period)	/* Counting */
352		paicrypt_read(event);
353	else				/* Sampling */
354		perf_sched_cb_dec(event->pmu);
355	event->hw.state = PERF_HES_STOPPED;
356}
357
358static void paicrypt_del(struct perf_event *event, int flags)
359{
360	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
361	struct paicrypt_map *cpump = mp->mapptr;
362
363	paicrypt_stop(event, PERF_EF_UPDATE);
364	if (--cpump->active_events == 0) {
365		local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
366		WRITE_ONCE(S390_lowcore.ccd, 0);
367	}
368}
369
370/* Create raw data and save it in the buffer. Returns number of bytes copied.
371 * Saves only positive counter entries of the form
372 * 2 bytes: Number of counter
373 * 8 bytes: Value of counter
374 */
375static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
376			    bool exclude_user, bool exclude_kernel)
377{
378	int i, outidx = 0;
379
380	for (i = 1; i <= paicrypt_cnt; i++) {
381		u64 val = 0;
382
383		if (!exclude_kernel)
384			val += paicrypt_getctr(page, i, true);
385		if (!exclude_user)
386			val += paicrypt_getctr(page, i, false);
387		if (val) {
388			userdata[outidx].num = i;
389			userdata[outidx].value = val;
390			outidx++;
391		}
392	}
393	return outidx * sizeof(struct pai_userdata);
394}
395
396static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
397				struct perf_event *event)
398{
399	struct perf_sample_data data;
400	struct perf_raw_record raw;
401	struct pt_regs regs;
402	int overflow;
403
404	/* Setup perf sample */
405	memset(&regs, 0, sizeof(regs));
406	memset(&raw, 0, sizeof(raw));
407	memset(&data, 0, sizeof(data));
408	perf_sample_data_init(&data, 0, event->hw.last_period);
409	if (event->attr.sample_type & PERF_SAMPLE_TID) {
410		data.tid_entry.pid = task_tgid_nr(current);
411		data.tid_entry.tid = task_pid_nr(current);
412	}
413	if (event->attr.sample_type & PERF_SAMPLE_TIME)
414		data.time = event->clock();
415	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
416		data.id = event->id;
417	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
418		data.cpu_entry.cpu = smp_processor_id();
419		data.cpu_entry.reserved = 0;
420	}
421	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
422		raw.frag.size = rawsize;
423		raw.frag.data = cpump->save;
424		perf_sample_save_raw_data(&data, &raw);
425	}
426
427	overflow = perf_event_overflow(event, &data, &regs);
428	perf_event_update_userpage(event);
429	/* Clear lowcore page after read */
430	memset(cpump->page, 0, PAGE_SIZE);
431	return overflow;
432}
433
434/* Check if there is data to be saved on schedule out of a task. */
435static int paicrypt_have_sample(void)
436{
437	struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
438	struct paicrypt_map *cpump = mp->mapptr;
439	struct perf_event *event = cpump->event;
440	size_t rawsize;
441	int rc = 0;
442
443	if (!event)		/* No event active */
444		return 0;
445	rawsize = paicrypt_copy(cpump->save, cpump->page,
446				cpump->event->attr.exclude_user,
447				cpump->event->attr.exclude_kernel);
448	if (rawsize)			/* Any counters incremented? */
449		rc = paicrypt_push_sample(rawsize, cpump, event);
450	return rc;
451}
452
453/* Called on schedule-in and schedule-out. No access to event structure,
454 * but for sampling only event CRYPTO_ALL is allowed.
455 */
456static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
457{
458	/* We started with a clean page on event installation. So read out
459	 * results on schedule_out and if page was dirty, clear values.
460	 */
461	if (!sched_in)
462		paicrypt_have_sample();
463}
464
465/* Attribute definitions for paicrypt interface. As with other CPU
466 * Measurement Facilities, there is one attribute per mapped counter.
467 * The number of mapped counters may vary per machine generation. Use
468 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 469 * to determine the number of mapped counters. The instruction returns
470 * a positive number, which is the highest number of supported counters.
471 * All counters less than this number are also supported, there are no
472 * holes. A returned number of zero means no support for mapped counters.
473 *
474 * The identification of the counter is a unique number. The chosen range
475 * is 0x1000 + offset in mapped kernel page.
 476 * All CPU Measurement Facility counter identifiers must be unique and
477 * the numbers from 0 to 496 are already used for the CPU Measurement
478 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
479 * used for the CPU Measurement Sampling facility.
480 */
481PMU_FORMAT_ATTR(event, "config:0-63");
482
483static struct attribute *paicrypt_format_attr[] = {
484	&format_attr_event.attr,
485	NULL,
486};
487
488static struct attribute_group paicrypt_events_group = {
489	.name = "events",
490	.attrs = NULL			/* Filled in attr_event_init() */
491};
492
493static struct attribute_group paicrypt_format_group = {
494	.name = "format",
495	.attrs = paicrypt_format_attr,
496};
497
498static const struct attribute_group *paicrypt_attr_groups[] = {
499	&paicrypt_events_group,
500	&paicrypt_format_group,
501	NULL,
502};
503
504/* Performance monitoring unit for mapped counters */
505static struct pmu paicrypt = {
506	.task_ctx_nr  = perf_invalid_context,
507	.event_init   = paicrypt_event_init,
508	.add	      = paicrypt_add,
509	.del	      = paicrypt_del,
510	.start	      = paicrypt_start,
511	.stop	      = paicrypt_stop,
512	.read	      = paicrypt_read,
513	.sched_task   = paicrypt_sched_task,
514	.attr_groups  = paicrypt_attr_groups
515};
516
517/* List of symbolic PAI counter names. */
518static const char * const paicrypt_ctrnames[] = {
519	[0] = "CRYPTO_ALL",
520	[1] = "KM_DEA",
521	[2] = "KM_TDEA_128",
522	[3] = "KM_TDEA_192",
523	[4] = "KM_ENCRYPTED_DEA",
524	[5] = "KM_ENCRYPTED_TDEA_128",
525	[6] = "KM_ENCRYPTED_TDEA_192",
526	[7] = "KM_AES_128",
527	[8] = "KM_AES_192",
528	[9] = "KM_AES_256",
529	[10] = "KM_ENCRYPTED_AES_128",
530	[11] = "KM_ENCRYPTED_AES_192",
531	[12] = "KM_ENCRYPTED_AES_256",
532	[13] = "KM_XTS_AES_128",
533	[14] = "KM_XTS_AES_256",
534	[15] = "KM_XTS_ENCRYPTED_AES_128",
535	[16] = "KM_XTS_ENCRYPTED_AES_256",
536	[17] = "KMC_DEA",
537	[18] = "KMC_TDEA_128",
538	[19] = "KMC_TDEA_192",
539	[20] = "KMC_ENCRYPTED_DEA",
540	[21] = "KMC_ENCRYPTED_TDEA_128",
541	[22] = "KMC_ENCRYPTED_TDEA_192",
542	[23] = "KMC_AES_128",
543	[24] = "KMC_AES_192",
544	[25] = "KMC_AES_256",
545	[26] = "KMC_ENCRYPTED_AES_128",
546	[27] = "KMC_ENCRYPTED_AES_192",
547	[28] = "KMC_ENCRYPTED_AES_256",
548	[29] = "KMC_PRNG",
549	[30] = "KMA_GCM_AES_128",
550	[31] = "KMA_GCM_AES_192",
551	[32] = "KMA_GCM_AES_256",
552	[33] = "KMA_GCM_ENCRYPTED_AES_128",
553	[34] = "KMA_GCM_ENCRYPTED_AES_192",
554	[35] = "KMA_GCM_ENCRYPTED_AES_256",
555	[36] = "KMF_DEA",
556	[37] = "KMF_TDEA_128",
557	[38] = "KMF_TDEA_192",
558	[39] = "KMF_ENCRYPTED_DEA",
559	[40] = "KMF_ENCRYPTED_TDEA_128",
560	[41] = "KMF_ENCRYPTED_TDEA_192",
561	[42] = "KMF_AES_128",
562	[43] = "KMF_AES_192",
563	[44] = "KMF_AES_256",
564	[45] = "KMF_ENCRYPTED_AES_128",
565	[46] = "KMF_ENCRYPTED_AES_192",
566	[47] = "KMF_ENCRYPTED_AES_256",
567	[48] = "KMCTR_DEA",
568	[49] = "KMCTR_TDEA_128",
569	[50] = "KMCTR_TDEA_192",
570	[51] = "KMCTR_ENCRYPTED_DEA",
571	[52] = "KMCTR_ENCRYPTED_TDEA_128",
572	[53] = "KMCTR_ENCRYPTED_TDEA_192",
573	[54] = "KMCTR_AES_128",
574	[55] = "KMCTR_AES_192",
575	[56] = "KMCTR_AES_256",
576	[57] = "KMCTR_ENCRYPTED_AES_128",
577	[58] = "KMCTR_ENCRYPTED_AES_192",
578	[59] = "KMCTR_ENCRYPTED_AES_256",
579	[60] = "KMO_DEA",
580	[61] = "KMO_TDEA_128",
581	[62] = "KMO_TDEA_192",
582	[63] = "KMO_ENCRYPTED_DEA",
583	[64] = "KMO_ENCRYPTED_TDEA_128",
584	[65] = "KMO_ENCRYPTED_TDEA_192",
585	[66] = "KMO_AES_128",
586	[67] = "KMO_AES_192",
587	[68] = "KMO_AES_256",
588	[69] = "KMO_ENCRYPTED_AES_128",
589	[70] = "KMO_ENCRYPTED_AES_192",
590	[71] = "KMO_ENCRYPTED_AES_256",
591	[72] = "KIMD_SHA_1",
592	[73] = "KIMD_SHA_256",
593	[74] = "KIMD_SHA_512",
594	[75] = "KIMD_SHA3_224",
595	[76] = "KIMD_SHA3_256",
596	[77] = "KIMD_SHA3_384",
597	[78] = "KIMD_SHA3_512",
598	[79] = "KIMD_SHAKE_128",
599	[80] = "KIMD_SHAKE_256",
600	[81] = "KIMD_GHASH",
601	[82] = "KLMD_SHA_1",
602	[83] = "KLMD_SHA_256",
603	[84] = "KLMD_SHA_512",
604	[85] = "KLMD_SHA3_224",
605	[86] = "KLMD_SHA3_256",
606	[87] = "KLMD_SHA3_384",
607	[88] = "KLMD_SHA3_512",
608	[89] = "KLMD_SHAKE_128",
609	[90] = "KLMD_SHAKE_256",
610	[91] = "KMAC_DEA",
611	[92] = "KMAC_TDEA_128",
612	[93] = "KMAC_TDEA_192",
613	[94] = "KMAC_ENCRYPTED_DEA",
614	[95] = "KMAC_ENCRYPTED_TDEA_128",
615	[96] = "KMAC_ENCRYPTED_TDEA_192",
616	[97] = "KMAC_AES_128",
617	[98] = "KMAC_AES_192",
618	[99] = "KMAC_AES_256",
619	[100] = "KMAC_ENCRYPTED_AES_128",
620	[101] = "KMAC_ENCRYPTED_AES_192",
621	[102] = "KMAC_ENCRYPTED_AES_256",
622	[103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
623	[104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
624	[105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
625	[106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
626	[107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
627	[108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
628	[109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
629	[110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
630	[111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
631	[112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
632	[113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
633	[114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
634	[115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
635	[116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
636	[117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
637	[118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
638	[119] = "PCC_SCALAR_MULTIPLY_P256",
639	[120] = "PCC_SCALAR_MULTIPLY_P384",
640	[121] = "PCC_SCALAR_MULTIPLY_P521",
641	[122] = "PCC_SCALAR_MULTIPLY_ED25519",
642	[123] = "PCC_SCALAR_MULTIPLY_ED448",
643	[124] = "PCC_SCALAR_MULTIPLY_X25519",
644	[125] = "PCC_SCALAR_MULTIPLY_X448",
645	[126] = "PRNO_SHA_512_DRNG",
646	[127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
647	[128] = "PRNO_TRNG",
648	[129] = "KDSA_ECDSA_VERIFY_P256",
649	[130] = "KDSA_ECDSA_VERIFY_P384",
650	[131] = "KDSA_ECDSA_VERIFY_P521",
651	[132] = "KDSA_ECDSA_SIGN_P256",
652	[133] = "KDSA_ECDSA_SIGN_P384",
653	[134] = "KDSA_ECDSA_SIGN_P521",
654	[135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
655	[136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
656	[137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
657	[138] = "KDSA_EDDSA_VERIFY_ED25519",
658	[139] = "KDSA_EDDSA_VERIFY_ED448",
659	[140] = "KDSA_EDDSA_SIGN_ED25519",
660	[141] = "KDSA_EDDSA_SIGN_ED448",
661	[142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
662	[143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
663	[144] = "PCKMO_ENCRYPT_DEA_KEY",
664	[145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
665	[146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
666	[147] = "PCKMO_ENCRYPT_AES_128_KEY",
667	[148] = "PCKMO_ENCRYPT_AES_192_KEY",
668	[149] = "PCKMO_ENCRYPT_AES_256_KEY",
669	[150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
670	[151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
671	[152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
672	[153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
673	[154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
674	[155] = "IBM_RESERVED_155",
675	[156] = "IBM_RESERVED_156",
676};
677
678static void __init attr_event_free(struct attribute **attrs, int num)
679{
680	struct perf_pmu_events_attr *pa;
681	int i;
682
683	for (i = 0; i < num; i++) {
684		struct device_attribute *dap;
685
686		dap = container_of(attrs[i], struct device_attribute, attr);
687		pa = container_of(dap, struct perf_pmu_events_attr, attr);
688		kfree(pa);
689	}
690	kfree(attrs);
691}
692
693static int __init attr_event_init_one(struct attribute **attrs, int num)
694{
695	struct perf_pmu_events_attr *pa;
696
697	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
698	if (!pa)
699		return -ENOMEM;
700
701	sysfs_attr_init(&pa->attr.attr);
702	pa->id = PAI_CRYPTO_BASE + num;
703	pa->attr.attr.name = paicrypt_ctrnames[num];
704	pa->attr.attr.mode = 0444;
705	pa->attr.show = cpumf_events_sysfs_show;
706	pa->attr.store = NULL;
707	attrs[num] = &pa->attr.attr;
708	return 0;
709}
710
711/* Create PMU sysfs event attributes on the fly. */
712static int __init attr_event_init(void)
713{
714	struct attribute **attrs;
715	int ret, i;
716
717	attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs),
718			      GFP_KERNEL);
719	if (!attrs)
720		return -ENOMEM;
721	for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
722		ret = attr_event_init_one(attrs, i);
723		if (ret) {
724			attr_event_free(attrs, i - 1);
725			return ret;
726		}
727	}
728	attrs[i] = NULL;
729	paicrypt_events_group.attrs = attrs;
730	return 0;
731}
732
733static int __init paicrypt_init(void)
734{
735	struct qpaci_info_block ib;
736	int rc;
737
738	if (!test_facility(196))
739		return 0;
740
741	qpaci(&ib);
742	paicrypt_cnt = ib.num_cc;
743	if (paicrypt_cnt == 0)
744		return 0;
745	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR)
746		paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1;
747
748	rc = attr_event_init();		/* Export known PAI crypto events */
749	if (rc) {
750		pr_err("Creation of PMU pai_crypto /sysfs failed\n");
751		return rc;
752	}
753
754	/* Setup s390dbf facility */
755	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
756	if (!cfm_dbg) {
757		pr_err("Registration of s390dbf pai_crypto failed\n");
758		return -ENOMEM;
759	}
760	debug_register_view(cfm_dbg, &debug_sprintf_view);
761
762	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
763	if (rc) {
764		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
765		       rc);
766		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
767		debug_unregister(cfm_dbg);
768		return rc;
769	}
770	return 0;
771}
772
773device_initcall(paicrypt_init);
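When sampling, both versions of the driver above emit the changed counters as PERF_SAMPLE_RAW data built by paicrypt_copy(): a sequence of packed struct pai_userdata entries, i.e. 2 bytes of counter number followed by 8 bytes of counter value. A minimal decoding sketch for the user-space side (the helper name and the synthetic input are assumptions, not taken from the sources above):

/* Hedged sketch: decode the PERF_SAMPLE_RAW payload produced by this PMU. */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct pai_userdata {
	uint16_t num;			/* counter number (offset from the 0x1000 base) */
	uint64_t value;			/* counter value/delta */
} __attribute__((packed));		/* 10 bytes, matches the kernel's __packed layout */

static void decode_pai_raw(const void *raw, size_t rawsize)
{
	const unsigned char *p = raw;
	struct pai_userdata entry;
	size_t off;

	for (off = 0; off + sizeof(entry) <= rawsize; off += sizeof(entry)) {
		memcpy(&entry, p + off, sizeof(entry));	/* copy out, payload may be unaligned */
		printf("counter %u value %llu\n", (unsigned)entry.num,
		       (unsigned long long)entry.value);
	}
}

int main(void)
{
	/* Synthetic example payload: one entry for counter 7 (KM_AES_128) with value 42. */
	struct pai_userdata sample = { .num = 7, .value = 42 };

	decode_pai_raw(&sample, sizeof(sample));
	return 0;
}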