1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Performance event support - Processor Activity Instrumentation Facility
4 *
5 * Copyright IBM Corp. 2022
6 * Author(s): Thomas Richter <tmricht@linux.ibm.com>
7 */
8#define KMSG_COMPONENT "pai_crypto"
9#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
10
11#include <linux/kernel.h>
12#include <linux/kernel_stat.h>
13#include <linux/percpu.h>
14#include <linux/notifier.h>
15#include <linux/init.h>
16#include <linux/export.h>
17#include <linux/io.h>
18#include <linux/perf_event.h>
19#include <asm/ctlreg.h>
20#include <asm/pai.h>
21#include <asm/debug.h>
22
23static debug_info_t *cfm_dbg;
24static unsigned int paicrypt_cnt; /* Size of the mapped counter sets */
25 /* extracted with QPACI instruction */
26
27DEFINE_STATIC_KEY_FALSE(pai_key);
28
29struct pai_userdata {
30 u16 num;
31 u64 value;
32} __packed;
33
34struct paicrypt_map {
35 unsigned long *page; /* Page for CPU to store counters */
36 struct pai_userdata *save; /* Page to store non-zero counters */
37 unsigned int active_events; /* # of PAI crypto users */
38 refcount_t refcnt; /* Reference count mapped buffers */
39 enum paievt_mode mode; /* Type of event */
40 struct perf_event *event; /* Perf event for sampling */
41};
42
43struct paicrypt_mapptr {
44 struct paicrypt_map *mapptr;
45};
46
47static struct paicrypt_root { /* Anchor to per CPU data */
48 refcount_t refcnt; /* Overall active events */
49 struct paicrypt_mapptr __percpu *mapptr;
50} paicrypt_root;
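/*
 * Data layout (descriptive sketch): paicrypt_root anchors a per CPU array
 * of struct paicrypt_mapptr; each entry points to a struct paicrypt_map
 * which holds the counter page updated by the hardware and the save buffer
 * used to extract non-zero counters for sampling.
 *
 *   paicrypt_root.mapptr (per CPU) -> paicrypt_mapptr.mapptr
 *                                  -> paicrypt_map { page, save, ... }
 */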
51
52/* Free per CPU data when the last event is removed. */
53static void paicrypt_root_free(void)
54{
55 if (refcount_dec_and_test(&paicrypt_root.refcnt)) {
56 free_percpu(paicrypt_root.mapptr);
57 paicrypt_root.mapptr = NULL;
58 }
59 debug_sprintf_event(cfm_dbg, 5, "%s root.refcount %d\n", __func__,
60 refcount_read(&paicrypt_root.refcnt));
61}
62
63/*
64 * On initialization of the first event, also allocate the per CPU data
65 * dynamically. Start with an array of pointers; the array size is the
66 * maximum number of CPUs possible, which might be larger than the number
67 * of CPUs currently online.
68 */
69static int paicrypt_root_alloc(void)
70{
71 if (!refcount_inc_not_zero(&paicrypt_root.refcnt)) {
72 /* The memory is already zeroed. */
73 paicrypt_root.mapptr = alloc_percpu(struct paicrypt_mapptr);
74 if (!paicrypt_root.mapptr)
75 return -ENOMEM;
76 refcount_set(&paicrypt_root.refcnt, 1);
77 }
78 return 0;
79}
80
81/* Release the PMU if event is the last perf event */
82static DEFINE_MUTEX(pai_reserve_mutex);
83
84/* Adjust usage counters and remove allocated memory when all users are
85 * gone.
86 */
87static void paicrypt_event_destroy(struct perf_event *event)
88{
89 struct paicrypt_mapptr *mp = per_cpu_ptr(paicrypt_root.mapptr,
90 event->cpu);
91 struct paicrypt_map *cpump = mp->mapptr;
92
93 cpump->event = NULL;
94 static_branch_dec(&pai_key);
95 mutex_lock(&pai_reserve_mutex);
96 debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
97 " mode %d refcnt %u\n", __func__,
98 event->attr.config, event->cpu,
99 cpump->active_events, cpump->mode,
100 refcount_read(&cpump->refcnt));
101 if (refcount_dec_and_test(&cpump->refcnt)) {
102 debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
103 __func__, (unsigned long)cpump->page,
104 cpump->save);
105 free_page((unsigned long)cpump->page);
106 kvfree(cpump->save);
107 kfree(cpump);
108 mp->mapptr = NULL;
109 }
110 paicrypt_root_free();
111 mutex_unlock(&pai_reserve_mutex);
112}
113
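/*
 * Return one counter value from the mapped counter page. The page holds
 * two counter sets: user space counters start at index 0 and the kernel
 * space counters follow at offset PAI_CRYPTO_MAXCTR (descriptive note
 * derived from the index arithmetic below and from paicrypt_add()).
 */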
114static u64 paicrypt_getctr(unsigned long *page, int nr, bool kernel)
115{
116 if (kernel)
117 nr += PAI_CRYPTO_MAXCTR;
118 return page[nr];
119}
120
121/* Read the counter values. Return the value from its location in the
122 * mapped counter page. For event CRYPTO_ALL sum up all events.
123 */
124static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
125{
126 struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
127 struct paicrypt_map *cpump = mp->mapptr;
128 u64 sum = 0;
129 int i;
130
131 if (event->attr.config != PAI_CRYPTO_BASE) {
132 return paicrypt_getctr(cpump->page,
133 event->attr.config - PAI_CRYPTO_BASE,
134 kernel);
135 }
136
137 for (i = 1; i <= paicrypt_cnt; i++) {
138 u64 val = paicrypt_getctr(cpump->page, i, kernel);
139
140 if (!val)
141 continue;
142 sum += val;
143 }
144 return sum;
145}
146
147static u64 paicrypt_getall(struct perf_event *event)
148{
149 u64 sum = 0;
150
151 if (!event->attr.exclude_kernel)
152 sum += paicrypt_getdata(event, true);
153 if (!event->attr.exclude_user)
154 sum += paicrypt_getdata(event, false);
155
156 return sum;
157}
158
159/* Used to avoid races when checking for concurrent counting and sampling
160 * access to crypto events.
161 *
162 * Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is
163 * allowed and when this event is running, no counting event is allowed.
164 * Several counting events are allowed in parallel, but no sampling event
165 * is allowed while one (or more) counting events are running.
166 *
167 * This function is called in process context and it is safe to block.
168 * When the event initialization function fails, no other callback will
169 * be invoked.
170 *
171 * Allocate the memory for the event.
172 */
173static struct paicrypt_map *paicrypt_busy(struct perf_event *event)
174{
175 struct perf_event_attr *a = &event->attr;
176 struct paicrypt_map *cpump = NULL;
177 struct paicrypt_mapptr *mp;
178 int rc;
179
180 mutex_lock(&pai_reserve_mutex);
181
182 /* Allocate root node */
183 rc = paicrypt_root_alloc();
184 if (rc)
185 goto unlock;
186
187 /* Allocate node for this event */
188 mp = per_cpu_ptr(paicrypt_root.mapptr, event->cpu);
189 cpump = mp->mapptr;
190 if (!cpump) { /* Paicrypt_map allocated? */
191 cpump = kzalloc(sizeof(*cpump), GFP_KERNEL);
192 if (!cpump) {
193 rc = -ENOMEM;
194 goto free_root;
195 }
196 }
197
198 if (a->sample_period) { /* Sampling requested */
199 if (cpump->mode != PAI_MODE_NONE)
200 rc = -EBUSY; /* ... sampling/counting active */
201 } else { /* Counting requested */
202 if (cpump->mode == PAI_MODE_SAMPLING)
203 rc = -EBUSY; /* ... and sampling active */
204 }
205 /*
206 * This error case triggers when there is a conflict:
207 * Either sampling requested and counting already active, or vice
208 * versa. Therefore the struct paicrypt_map for this CPU must already
209 * exist, or the error could not have occurred. Only adjust root
210 * node refcount.
211 */
212 if (rc)
213 goto free_root;
214
215 /* Allocate memory for counter page and counter extraction.
216 * Only the first counting event has to allocate a page.
217 */
218 if (cpump->page) {
219 refcount_inc(&cpump->refcnt);
220 goto unlock;
221 }
222
223 rc = -ENOMEM;
224 cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
225 if (!cpump->page)
226 goto free_paicrypt_map;
227 cpump->save = kvmalloc_array(paicrypt_cnt + 1,
228 sizeof(struct pai_userdata), GFP_KERNEL);
229 if (!cpump->save) {
230 free_page((unsigned long)cpump->page);
231 cpump->page = NULL;
232 goto free_paicrypt_map;
233 }
234
235 /* Set mode and reference count */
236 rc = 0;
237 refcount_set(&cpump->refcnt, 1);
238 cpump->mode = a->sample_period ? PAI_MODE_SAMPLING : PAI_MODE_COUNTING;
239 mp->mapptr = cpump;
240 debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
241 " mode %d refcnt %u page %#lx save %p rc %d\n",
242 __func__, a->sample_period, cpump->active_events,
243 cpump->mode, refcount_read(&cpump->refcnt),
244 (unsigned long)cpump->page, cpump->save, rc);
245 goto unlock;
246
247free_paicrypt_map:
248 kfree(cpump);
249 mp->mapptr = NULL;
250free_root:
251 paicrypt_root_free();
252
253unlock:
254 mutex_unlock(&pai_reserve_mutex);
255 return rc ? ERR_PTR(rc) : cpump;
256}
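/*
 * Mode exclusion enforced in paicrypt_busy() above (descriptive summary):
 *
 *   existing mode \ new event | counting | sampling
 *   PAI_MODE_NONE             |    ok    |    ok
 *   PAI_MODE_COUNTING         |    ok    |  -EBUSY
 *   PAI_MODE_SAMPLING         |  -EBUSY  |  -EBUSY
 */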
257
258/* Might be called on a different CPU than the one the event is intended for. */
259static int paicrypt_event_init(struct perf_event *event)
260{
261 struct perf_event_attr *a = &event->attr;
262 struct paicrypt_map *cpump;
263
264 /* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
265 if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
266 return -ENOENT;
267 /* PAI crypto event must be in valid range */
268 if (a->config < PAI_CRYPTO_BASE ||
269 a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
270 return -EINVAL;
271 /* Allow only CPU wide operation, no process context for now. */
272 if ((event->attach_state & PERF_ATTACH_TASK) || event->cpu == -1)
273 return -ENOENT;
274 /* Allow only CRYPTO_ALL for sampling. */
275 if (a->sample_period && a->config != PAI_CRYPTO_BASE)
276 return -EINVAL;
277
278 cpump = paicrypt_busy(event);
279 if (IS_ERR(cpump))
280 return PTR_ERR(cpump);
281
282 event->destroy = paicrypt_event_destroy;
283
284 if (a->sample_period) {
285 a->sample_period = 1;
286 a->freq = 0;
287 /* Register for paicrypt_sched_task() to be called */
288 event->attach_state |= PERF_ATTACH_SCHED_CB;
289 /* Add raw data which contains the memory mapped counters */
290 a->sample_type |= PERF_SAMPLE_RAW;
291 /* Turn off inheritance */
292 a->inherit = 0;
293 }
294
295 static_branch_inc(&pai_key);
296 return 0;
297}
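/*
 * Illustrative usage, not part of the driver: the PMU only accepts CPU
 * wide events, so counting is typically started system wide, for example
 *
 *   perf stat -e pai_crypto/KM_AES_128/ -a -- sleep 1
 *
 * while sampling is only accepted for the CRYPTO_ALL event, for example
 *
 *   perf record -e pai_crypto/CRYPTO_ALL/ -a -c 1
 *
 * because paicrypt_event_init() rejects a sample_period together with any
 * config other than PAI_CRYPTO_BASE.
 */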
298
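/*
 * Update the event count with the difference to the previously read value.
 * A smaller new value indicates a 64-bit wraparound which the second
 * branch below compensates for, e.g. prev = 0xfffffffffffffffe and
 * new = 1 yield delta = 3.
 */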
299static void paicrypt_read(struct perf_event *event)
300{
301 u64 prev, new, delta;
302
303 prev = local64_read(&event->hw.prev_count);
304 new = paicrypt_getall(event);
305 local64_set(&event->hw.prev_count, new);
306 delta = (prev <= new) ? new - prev
307 : (-1ULL - prev) + new + 1; /* overflow */
308 local64_add(delta, &event->count);
309}
310
311static void paicrypt_start(struct perf_event *event, int flags)
312{
313 u64 sum;
314
315 /* Event initialization sets last_tag to 0. When the events are later
316 * deleted and re-added, do not reset the event count value to zero.
317 * Events are added, deleted and re-added when 2 or more events
318 * are active at the same time.
319 */
320 if (!event->attr.sample_period) { /* Counting */
321 if (!event->hw.last_tag) {
322 event->hw.last_tag = 1;
323 sum = paicrypt_getall(event); /* Get current value */
324 local64_set(&event->hw.prev_count, sum);
325 }
326 } else { /* Sampling */
327 perf_sched_cb_inc(event->pmu);
328 }
329}
330
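/*
 * The first event added on a CPU programs the physical address of the
 * counter page (tagged with PAI_CRYPTO_KERNEL_OFFSET) into the lowcore CCD
 * field and enables counting by setting the CR0 cryptography counter bit;
 * the last event removed in paicrypt_del() undoes both steps.
 */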
331static int paicrypt_add(struct perf_event *event, int flags)
332{
333 struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
334 struct paicrypt_map *cpump = mp->mapptr;
335 unsigned long ccd;
336
337 if (++cpump->active_events == 1) {
338 ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
339 WRITE_ONCE(S390_lowcore.ccd, ccd);
340 local_ctl_set_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
341 }
342 cpump->event = event;
343 if (flags & PERF_EF_START)
344 paicrypt_start(event, PERF_EF_RELOAD);
345 event->hw.state = 0;
346 return 0;
347}
348
349static void paicrypt_stop(struct perf_event *event, int flags)
350{
351 if (!event->attr.sample_period) /* Counting */
352 paicrypt_read(event);
353 else /* Sampling */
354 perf_sched_cb_dec(event->pmu);
355 event->hw.state = PERF_HES_STOPPED;
356}
357
358static void paicrypt_del(struct perf_event *event, int flags)
359{
360 struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
361 struct paicrypt_map *cpump = mp->mapptr;
362
363 paicrypt_stop(event, PERF_EF_UPDATE);
364 if (--cpump->active_events == 0) {
365 local_ctl_clear_bit(0, CR0_CRYPTOGRAPHY_COUNTER_BIT);
366 WRITE_ONCE(S390_lowcore.ccd, 0);
367 }
368}
369
370/* Create raw data and save it in buffer. Returns number of bytes copied.
371 * Saves only non-zero counter entries of the form
372 * 2 bytes: counter number
373 * 8 bytes: counter value
374 */
375static size_t paicrypt_copy(struct pai_userdata *userdata, unsigned long *page,
376 bool exclude_user, bool exclude_kernel)
377{
378 int i, outidx = 0;
379
380 for (i = 1; i <= paicrypt_cnt; i++) {
381 u64 val = 0;
382
383 if (!exclude_kernel)
384 val += paicrypt_getctr(page, i, true);
385 if (!exclude_user)
386 val += paicrypt_getctr(page, i, false);
387 if (val) {
388 userdata[outidx].num = i;
389 userdata[outidx].value = val;
390 outidx++;
391 }
392 }
393 return outidx * sizeof(struct pai_userdata);
394}
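/*
 * Illustrative raw data layout produced by paicrypt_copy() for two
 * non-zero counters, e.g. KM_AES_128 (counter number 7) incremented five
 * times and KIMD_SHA_256 (counter number 73) incremented twice:
 *
 *   offset  0: u16 num = 7,  u64 value = 5
 *   offset 10: u16 num = 73, u64 value = 2
 *
 * struct pai_userdata is __packed, so each entry occupies 10 bytes.
 */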
395
396static int paicrypt_push_sample(size_t rawsize, struct paicrypt_map *cpump,
397 struct perf_event *event)
398{
399 struct perf_sample_data data;
400 struct perf_raw_record raw;
401 struct pt_regs regs;
402 int overflow;
403
404 /* Setup perf sample */
405 memset(&regs, 0, sizeof(regs));
406 memset(&raw, 0, sizeof(raw));
407 memset(&data, 0, sizeof(data));
408 perf_sample_data_init(&data, 0, event->hw.last_period);
409 if (event->attr.sample_type & PERF_SAMPLE_TID) {
410 data.tid_entry.pid = task_tgid_nr(current);
411 data.tid_entry.tid = task_pid_nr(current);
412 }
413 if (event->attr.sample_type & PERF_SAMPLE_TIME)
414 data.time = event->clock();
415 if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
416 data.id = event->id;
417 if (event->attr.sample_type & PERF_SAMPLE_CPU) {
418 data.cpu_entry.cpu = smp_processor_id();
419 data.cpu_entry.reserved = 0;
420 }
421 if (event->attr.sample_type & PERF_SAMPLE_RAW) {
422 raw.frag.size = rawsize;
423 raw.frag.data = cpump->save;
424 perf_sample_save_raw_data(&data, &raw);
425 }
426
427 overflow = perf_event_overflow(event, &data, &regs);
428 perf_event_update_userpage(event);
429 /* Clear counter page after read */
430 memset(cpump->page, 0, PAGE_SIZE);
431 return overflow;
432}
433
434/* Check if there is data to be saved on schedule out of a task. */
435static int paicrypt_have_sample(void)
436{
437 struct paicrypt_mapptr *mp = this_cpu_ptr(paicrypt_root.mapptr);
438 struct paicrypt_map *cpump = mp->mapptr;
439 struct perf_event *event = cpump->event;
440 size_t rawsize;
441 int rc = 0;
442
443 if (!event) /* No event active */
444 return 0;
445 rawsize = paicrypt_copy(cpump->save, cpump->page,
446 cpump->event->attr.exclude_user,
447 cpump->event->attr.exclude_kernel);
448 if (rawsize) /* Incremented counters present */
449 rc = paicrypt_push_sample(rawsize, cpump, event);
450 return rc;
451}
452
453/* Called on schedule-in and schedule-out. No access to event structure,
454 * but for sampling only event CRYPTO_ALL is allowed.
455 */
456static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
457{
458 /* We started with a clean page on event installation. So read out
459 * results on schedule_out and if page was dirty, clear values.
460 */
461 if (!sched_in)
462 paicrypt_have_sample();
463}
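/*
 * Note: this callback only runs while a sampling event has incremented the
 * scheduler callback count via perf_sched_cb_inc() in paicrypt_start();
 * counting events never reach this path.
 */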
464
465/* Attribute definitions for paicrypt interface. As with other CPU
466 * Measurement Facilities, there is one attribute per mapped counter.
467 * The number of mapped counters may vary per machine generation. Use
468 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
469 * to determine the number of mapped counters. The instruction returns
470 * a positive number, which is the highest number of supported counters.
471 * All counters less than this number are also supported, there are no
472 * holes. A returned number of zero means no support for mapped counters.
473 *
474 * The identification of the counter is a unique number. The chosen range
475 * is 0x1000 + offset in mapped kernel page.
476 * All CPU Measurement Facility counter identifiers must be unique and
477 * the numbers from 0 to 496 are already used for the CPU Measurement
478 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
479 * used for the CPU Measurement Sampling facility.
480 */
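/*
 * Example of the resulting identifiers (descriptive): CRYPTO_ALL maps to
 * 0x1000 (PAI_CRYPTO_BASE), KM_DEA to 0x1001, KM_TDEA_128 to 0x1002, and
 * so on, following the indices in paicrypt_ctrnames[] below.
 */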
481PMU_FORMAT_ATTR(event, "config:0-63");
482
483static struct attribute *paicrypt_format_attr[] = {
484 &format_attr_event.attr,
485 NULL,
486};
487
488static struct attribute_group paicrypt_events_group = {
489 .name = "events",
490 .attrs = NULL /* Filled in attr_event_init() */
491};
492
493static struct attribute_group paicrypt_format_group = {
494 .name = "format",
495 .attrs = paicrypt_format_attr,
496};
497
498static const struct attribute_group *paicrypt_attr_groups[] = {
499 &paicrypt_events_group,
500 &paicrypt_format_group,
501 NULL,
502};
503
504/* Performance monitoring unit for mapped counters */
505static struct pmu paicrypt = {
506 .task_ctx_nr = perf_invalid_context,
507 .event_init = paicrypt_event_init,
508 .add = paicrypt_add,
509 .del = paicrypt_del,
510 .start = paicrypt_start,
511 .stop = paicrypt_stop,
512 .read = paicrypt_read,
513 .sched_task = paicrypt_sched_task,
514 .attr_groups = paicrypt_attr_groups
515};
516
517/* List of symbolic PAI counter names. */
518static const char * const paicrypt_ctrnames[] = {
519 [0] = "CRYPTO_ALL",
520 [1] = "KM_DEA",
521 [2] = "KM_TDEA_128",
522 [3] = "KM_TDEA_192",
523 [4] = "KM_ENCRYPTED_DEA",
524 [5] = "KM_ENCRYPTED_TDEA_128",
525 [6] = "KM_ENCRYPTED_TDEA_192",
526 [7] = "KM_AES_128",
527 [8] = "KM_AES_192",
528 [9] = "KM_AES_256",
529 [10] = "KM_ENCRYPTED_AES_128",
530 [11] = "KM_ENCRYPTED_AES_192",
531 [12] = "KM_ENCRYPTED_AES_256",
532 [13] = "KM_XTS_AES_128",
533 [14] = "KM_XTS_AES_256",
534 [15] = "KM_XTS_ENCRYPTED_AES_128",
535 [16] = "KM_XTS_ENCRYPTED_AES_256",
536 [17] = "KMC_DEA",
537 [18] = "KMC_TDEA_128",
538 [19] = "KMC_TDEA_192",
539 [20] = "KMC_ENCRYPTED_DEA",
540 [21] = "KMC_ENCRYPTED_TDEA_128",
541 [22] = "KMC_ENCRYPTED_TDEA_192",
542 [23] = "KMC_AES_128",
543 [24] = "KMC_AES_192",
544 [25] = "KMC_AES_256",
545 [26] = "KMC_ENCRYPTED_AES_128",
546 [27] = "KMC_ENCRYPTED_AES_192",
547 [28] = "KMC_ENCRYPTED_AES_256",
548 [29] = "KMC_PRNG",
549 [30] = "KMA_GCM_AES_128",
550 [31] = "KMA_GCM_AES_192",
551 [32] = "KMA_GCM_AES_256",
552 [33] = "KMA_GCM_ENCRYPTED_AES_128",
553 [34] = "KMA_GCM_ENCRYPTED_AES_192",
554 [35] = "KMA_GCM_ENCRYPTED_AES_256",
555 [36] = "KMF_DEA",
556 [37] = "KMF_TDEA_128",
557 [38] = "KMF_TDEA_192",
558 [39] = "KMF_ENCRYPTED_DEA",
559 [40] = "KMF_ENCRYPTED_TDEA_128",
560 [41] = "KMF_ENCRYPTED_TDEA_192",
561 [42] = "KMF_AES_128",
562 [43] = "KMF_AES_192",
563 [44] = "KMF_AES_256",
564 [45] = "KMF_ENCRYPTED_AES_128",
565 [46] = "KMF_ENCRYPTED_AES_192",
566 [47] = "KMF_ENCRYPTED_AES_256",
567 [48] = "KMCTR_DEA",
568 [49] = "KMCTR_TDEA_128",
569 [50] = "KMCTR_TDEA_192",
570 [51] = "KMCTR_ENCRYPTED_DEA",
571 [52] = "KMCTR_ENCRYPTED_TDEA_128",
572 [53] = "KMCTR_ENCRYPTED_TDEA_192",
573 [54] = "KMCTR_AES_128",
574 [55] = "KMCTR_AES_192",
575 [56] = "KMCTR_AES_256",
576 [57] = "KMCTR_ENCRYPTED_AES_128",
577 [58] = "KMCTR_ENCRYPTED_AES_192",
578 [59] = "KMCTR_ENCRYPTED_AES_256",
579 [60] = "KMO_DEA",
580 [61] = "KMO_TDEA_128",
581 [62] = "KMO_TDEA_192",
582 [63] = "KMO_ENCRYPTED_DEA",
583 [64] = "KMO_ENCRYPTED_TDEA_128",
584 [65] = "KMO_ENCRYPTED_TDEA_192",
585 [66] = "KMO_AES_128",
586 [67] = "KMO_AES_192",
587 [68] = "KMO_AES_256",
588 [69] = "KMO_ENCRYPTED_AES_128",
589 [70] = "KMO_ENCRYPTED_AES_192",
590 [71] = "KMO_ENCRYPTED_AES_256",
591 [72] = "KIMD_SHA_1",
592 [73] = "KIMD_SHA_256",
593 [74] = "KIMD_SHA_512",
594 [75] = "KIMD_SHA3_224",
595 [76] = "KIMD_SHA3_256",
596 [77] = "KIMD_SHA3_384",
597 [78] = "KIMD_SHA3_512",
598 [79] = "KIMD_SHAKE_128",
599 [80] = "KIMD_SHAKE_256",
600 [81] = "KIMD_GHASH",
601 [82] = "KLMD_SHA_1",
602 [83] = "KLMD_SHA_256",
603 [84] = "KLMD_SHA_512",
604 [85] = "KLMD_SHA3_224",
605 [86] = "KLMD_SHA3_256",
606 [87] = "KLMD_SHA3_384",
607 [88] = "KLMD_SHA3_512",
608 [89] = "KLMD_SHAKE_128",
609 [90] = "KLMD_SHAKE_256",
610 [91] = "KMAC_DEA",
611 [92] = "KMAC_TDEA_128",
612 [93] = "KMAC_TDEA_192",
613 [94] = "KMAC_ENCRYPTED_DEA",
614 [95] = "KMAC_ENCRYPTED_TDEA_128",
615 [96] = "KMAC_ENCRYPTED_TDEA_192",
616 [97] = "KMAC_AES_128",
617 [98] = "KMAC_AES_192",
618 [99] = "KMAC_AES_256",
619 [100] = "KMAC_ENCRYPTED_AES_128",
620 [101] = "KMAC_ENCRYPTED_AES_192",
621 [102] = "KMAC_ENCRYPTED_AES_256",
622 [103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA",
623 [104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128",
624 [105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192",
625 [106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA",
626 [107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128",
627 [108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192",
628 [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128",
629 [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192",
630 [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256",
631 [112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128",
632 [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192",
633 [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A",
634 [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128",
635 [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256",
636 [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128",
637 [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256",
638 [119] = "PCC_SCALAR_MULTIPLY_P256",
639 [120] = "PCC_SCALAR_MULTIPLY_P384",
640 [121] = "PCC_SCALAR_MULTIPLY_P521",
641 [122] = "PCC_SCALAR_MULTIPLY_ED25519",
642 [123] = "PCC_SCALAR_MULTIPLY_ED448",
643 [124] = "PCC_SCALAR_MULTIPLY_X25519",
644 [125] = "PCC_SCALAR_MULTIPLY_X448",
645 [126] = "PRNO_SHA_512_DRNG",
646 [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO",
647 [128] = "PRNO_TRNG",
648 [129] = "KDSA_ECDSA_VERIFY_P256",
649 [130] = "KDSA_ECDSA_VERIFY_P384",
650 [131] = "KDSA_ECDSA_VERIFY_P521",
651 [132] = "KDSA_ECDSA_SIGN_P256",
652 [133] = "KDSA_ECDSA_SIGN_P384",
653 [134] = "KDSA_ECDSA_SIGN_P521",
654 [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256",
655 [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384",
656 [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521",
657 [138] = "KDSA_EDDSA_VERIFY_ED25519",
658 [139] = "KDSA_EDDSA_VERIFY_ED448",
659 [140] = "KDSA_EDDSA_SIGN_ED25519",
660 [141] = "KDSA_EDDSA_SIGN_ED448",
661 [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519",
662 [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448",
663 [144] = "PCKMO_ENCRYPT_DEA_KEY",
664 [145] = "PCKMO_ENCRYPT_TDEA_128_KEY",
665 [146] = "PCKMO_ENCRYPT_TDEA_192_KEY",
666 [147] = "PCKMO_ENCRYPT_AES_128_KEY",
667 [148] = "PCKMO_ENCRYPT_AES_192_KEY",
668 [149] = "PCKMO_ENCRYPT_AES_256_KEY",
669 [150] = "PCKMO_ENCRYPT_ECC_P256_KEY",
670 [151] = "PCKMO_ENCRYPT_ECC_P384_KEY",
671 [152] = "PCKMO_ENCRYPT_ECC_P521_KEY",
672 [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY",
673 [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY",
674 [155] = "IBM_RESERVED_155",
675 [156] = "IBM_RESERVED_156",
676};
677
678static void __init attr_event_free(struct attribute **attrs, int num)
679{
680 struct perf_pmu_events_attr *pa;
681 int i;
682
683 for (i = 0; i < num; i++) {
684 struct device_attribute *dap;
685
686 dap = container_of(attrs[i], struct device_attribute, attr);
687 pa = container_of(dap, struct perf_pmu_events_attr, attr);
688 kfree(pa);
689 }
690 kfree(attrs);
691}
692
693static int __init attr_event_init_one(struct attribute **attrs, int num)
694{
695 struct perf_pmu_events_attr *pa;
696
697 pa = kzalloc(sizeof(*pa), GFP_KERNEL);
698 if (!pa)
699 return -ENOMEM;
700
701 sysfs_attr_init(&pa->attr.attr);
702 pa->id = PAI_CRYPTO_BASE + num;
703 pa->attr.attr.name = paicrypt_ctrnames[num];
704 pa->attr.attr.mode = 0444;
705 pa->attr.show = cpumf_events_sysfs_show;
706 pa->attr.store = NULL;
707 attrs[num] = &pa->attr.attr;
708 return 0;
709}
710
711/* Create PMU sysfs event attributes on the fly. */
712static int __init attr_event_init(void)
713{
714 struct attribute **attrs;
715 int ret, i;
716
717 attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs),
718 GFP_KERNEL);
719 if (!attrs)
720 return -ENOMEM;
721 for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
722 ret = attr_event_init_one(attrs, i);
723 if (ret) {
724 attr_event_free(attrs, i); /* Free the i entries already set up */
725 return ret;
726 }
727 }
728 attrs[i] = NULL;
729 paicrypt_events_group.attrs = attrs;
730 return 0;
731}
732
733static int __init paicrypt_init(void)
734{
735 struct qpaci_info_block ib;
736 int rc;
737
738 if (!test_facility(196))
739 return 0;
740
741 qpaci(&ib);
742 paicrypt_cnt = ib.num_cc;
743 if (paicrypt_cnt == 0)
744 return 0;
745 if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR)
746 paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1;
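	/*
	 * Descriptive note: num_cc reports the highest supported counter
	 * number; it is clamped here so counter numbers stay below
	 * PAI_CRYPTO_MAXCTR, matching the kernel/user index split used by
	 * paicrypt_getctr().
	 */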
747
748 rc = attr_event_init(); /* Export known PAI crypto events */
749 if (rc) {
750 pr_err("Creation of PMU pai_crypto /sysfs failed\n");
751 return rc;
752 }
753
754 /* Setup s390dbf facility */
755 cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
756 if (!cfm_dbg) {
757 pr_err("Registration of s390dbf pai_crypto failed\n");
758 return -ENOMEM;
759 }
760 debug_register_view(cfm_dbg, &debug_sprintf_view);
761
762 rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
763 if (rc) {
764 pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
765 rc);
766 debug_unregister_view(cfm_dbg, &debug_sprintf_view);
767 debug_unregister(cfm_dbg);
768 return rc;
769 }
770 return 0;
771}
772
773device_initcall(paicrypt_init);