// SPDX-License-Identifier: GPL-2.0
/*
 * Performance event support - Processor Activity Instrumentation Facility
 *
 * Copyright IBM Corp. 2022
 * Author(s): Thomas Richter <tmricht@linux.ibm.com>
 */
#define KMSG_COMPONENT	"pai_crypto"
#define pr_fmt(fmt)	KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/percpu.h>
#include <linux/notifier.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/io.h>
#include <linux/perf_event.h>

#include <asm/ctl_reg.h>
#include <asm/pai.h>
#include <asm/debug.h>

static debug_info_t *cfm_dbg;
static unsigned int paicrypt_cnt;	/* Size of the mapped counter sets */
					/* extracted with QPACI instruction */

DEFINE_STATIC_KEY_FALSE(pai_key);

struct pai_userdata {
	u16 num;
	u64 value;
} __packed;

struct paicrypt_map {
	unsigned long *page;		/* Page for CPU to store counters */
	struct pai_userdata *save;	/* Page to store non-zero counters */
	unsigned int active_events;	/* # of PAI crypto users */
	unsigned int refcnt;		/* Reference count mapped buffers */
	enum paievt_mode mode;		/* Type of event */
	struct perf_event *event;	/* Perf event for sampling */
};

static DEFINE_PER_CPU(struct paicrypt_map, paicrypt_map);

/* Release the PMU if event is the last perf event */
static DEFINE_MUTEX(pai_reserve_mutex);

/* Adjust usage counters and remove allocated memory when all users are
 * gone.
 */
static void paicrypt_event_destroy(struct perf_event *event)
{
	struct paicrypt_map *cpump = per_cpu_ptr(&paicrypt_map, event->cpu);

	cpump->event = NULL;
	static_branch_dec(&pai_key);
	mutex_lock(&pai_reserve_mutex);
	debug_sprintf_event(cfm_dbg, 5, "%s event %#llx cpu %d users %d"
			    " mode %d refcnt %d\n", __func__,
			    event->attr.config, event->cpu,
			    cpump->active_events, cpump->mode, cpump->refcnt);
	if (!--cpump->refcnt) {
		debug_sprintf_event(cfm_dbg, 4, "%s page %#lx save %p\n",
				    __func__, (unsigned long)cpump->page,
				    cpump->save);
		free_page((unsigned long)cpump->page);
		cpump->page = NULL;
		kvfree(cpump->save);
		cpump->save = NULL;
		cpump->mode = PAI_MODE_NONE;
	}
	mutex_unlock(&pai_reserve_mutex);
}

static u64 paicrypt_getctr(struct paicrypt_map *cpump, int nr, bool kernel)
{
	if (kernel)
		nr += PAI_CRYPTO_MAXCTR;
	return cpump->page[nr];
}
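
/* Layout of the per CPU counter page accessed by paicrypt_getctr() above.
 * This is a reading aid derived from the code; the exact split between the
 * two halves is defined by PAI_CRYPTO_MAXCTR in asm/pai.h:
 *
 *	page[nr]			user space count of counter nr
 *	page[PAI_CRYPTO_MAXCTR + nr]	kernel space count of counter nr
 *
 * For example counter 7 (KM_AES_128) is accumulated in page[7] for user
 * space execution and in page[PAI_CRYPTO_MAXCTR + 7] for kernel space
 * execution.
 */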

/* Read the counter values. Return value from location in CMP. For event
 * CRYPTO_ALL sum up all events.
 */
static u64 paicrypt_getdata(struct perf_event *event, bool kernel)
{
	struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
	u64 sum = 0;
	int i;

	if (event->attr.config != PAI_CRYPTO_BASE) {
		return paicrypt_getctr(cpump,
				       event->attr.config - PAI_CRYPTO_BASE,
				       kernel);
	}

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = paicrypt_getctr(cpump, i, kernel);

		if (!val)
			continue;
		sum += val;
	}
	return sum;
}

static u64 paicrypt_getall(struct perf_event *event)
{
	u64 sum = 0;

	if (!event->attr.exclude_kernel)
		sum += paicrypt_getdata(event, true);
	if (!event->attr.exclude_user)
		sum += paicrypt_getdata(event, false);

	return sum;
}

/* Used to avoid races in checking concurrent access of counting and
 * sampling for crypto events.
 *
 * Only one instance of event pai_crypto/CRYPTO_ALL/ for sampling is
 * allowed and when this event is running, no counting event is allowed.
 * Several counting events are allowed in parallel, but no sampling event
 * is allowed while one (or more) counting events are running.
 *
 * This function is called in process context and it is safe to block.
 * When the event initialization function fails, no other callback will
 * be invoked.
 *
 * Allocate the memory for the event.
 */
static int paicrypt_busy(struct perf_event_attr *a, struct paicrypt_map *cpump)
{
	int rc = 0;

	mutex_lock(&pai_reserve_mutex);
	if (a->sample_period) {		/* Sampling requested */
		if (cpump->mode != PAI_MODE_NONE)
			rc = -EBUSY;	/* ... sampling/counting active */
	} else {			/* Counting requested */
		if (cpump->mode == PAI_MODE_SAMPLING)
			rc = -EBUSY;	/* ... and sampling active */
	}
	if (rc)
		goto unlock;

	/* Allocate memory for counter page and counter extraction.
	 * Only the first counting event has to allocate a page.
	 */
	if (cpump->page)
		goto unlock;

	rc = -ENOMEM;
	cpump->page = (unsigned long *)get_zeroed_page(GFP_KERNEL);
	if (!cpump->page)
		goto unlock;
	cpump->save = kvmalloc_array(paicrypt_cnt + 1,
				     sizeof(struct pai_userdata), GFP_KERNEL);
	if (!cpump->save) {
		free_page((unsigned long)cpump->page);
		cpump->page = NULL;
		goto unlock;
	}
	rc = 0;

unlock:
	/* If rc is non-zero, do not set mode and reference count */
	if (!rc) {
		cpump->refcnt++;
		cpump->mode = a->sample_period ? PAI_MODE_SAMPLING
					       : PAI_MODE_COUNTING;
	}
	debug_sprintf_event(cfm_dbg, 5, "%s sample_period %#llx users %d"
			    " mode %d refcnt %d page %#lx save %p rc %d\n",
			    __func__, a->sample_period, cpump->active_events,
			    cpump->mode, cpump->refcnt,
			    (unsigned long)cpump->page, cpump->save, rc);
	mutex_unlock(&pai_reserve_mutex);
	return rc;
}
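
/* The policy enforced by paicrypt_busy() above, expressed with perf tool
 * invocations. Illustrative only and not part of this driver; it assumes
 * the perf tool and the pai_crypto sysfs event names are available:
 *
 *	perf stat -e pai_crypto/KM_AES_128/ -a ...	counting, several
 *							such events may run
 *							in parallel
 *	perf record -e pai_crypto/CRYPTO_ALL/ -a ...	sampling, at most one
 *							instance per CPU
 *
 * Starting the sampling event while a counting event runs on the same CPU
 * (or vice versa) makes paicrypt_busy() return -EBUSY for the new event.
 */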

/* Might be called on different CPU than the one the event is intended for. */
static int paicrypt_event_init(struct perf_event *event)
{
	struct perf_event_attr *a = &event->attr;
	struct paicrypt_map *cpump;
	int rc;

	/* PAI crypto PMU registered as PERF_TYPE_RAW, check event type */
	if (a->type != PERF_TYPE_RAW && event->pmu->type != a->type)
		return -ENOENT;
	/* PAI crypto event must be in valid range */
	if (a->config < PAI_CRYPTO_BASE ||
	    a->config > PAI_CRYPTO_BASE + paicrypt_cnt)
		return -EINVAL;
	/* Allow only CPU wide operation, no process context for now. */
	if (event->hw.target || event->cpu == -1)
		return -ENOENT;
	/* Allow only CRYPTO_ALL for sampling. */
	if (a->sample_period && a->config != PAI_CRYPTO_BASE)
		return -EINVAL;

	cpump = per_cpu_ptr(&paicrypt_map, event->cpu);
	rc = paicrypt_busy(a, cpump);
	if (rc)
		return rc;

	/* Event initialization sets last_tag to 0. When later on the events
	 * are deleted and re-added, do not reset the event count value to
	 * zero. Events are added, deleted and re-added when 2 or more events
	 * are active at the same time.
	 */
	event->hw.last_tag = 0;
	cpump->event = event;
	event->destroy = paicrypt_event_destroy;

	if (a->sample_period) {
		a->sample_period = 1;
		a->freq = 0;
		/* Register for paicrypt_sched_task() to be called */
		event->attach_state |= PERF_ATTACH_SCHED_CB;
		/* Add raw data which contain the memory mapped counters */
		a->sample_type |= PERF_SAMPLE_RAW;
		/* Turn off inheritance */
		a->inherit = 0;
	}

	static_branch_inc(&pai_key);
	return 0;
}

static void paicrypt_read(struct perf_event *event)
{
	u64 prev, new, delta;

	prev = local64_read(&event->hw.prev_count);
	new = paicrypt_getall(event);
	local64_set(&event->hw.prev_count, new);
	delta = (prev <= new) ? new - prev
			      : (-1ULL - prev) + new + 1;	/* overflow */
	local64_add(delta, &event->count);
}

static void paicrypt_start(struct perf_event *event, int flags)
{
	u64 sum;

	if (!event->hw.last_tag) {
		event->hw.last_tag = 1;
		sum = paicrypt_getall(event);		/* Get current value */
		local64_set(&event->count, 0);
		local64_set(&event->hw.prev_count, sum);
	}
}

static int paicrypt_add(struct perf_event *event, int flags)
{
	struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
	unsigned long ccd;

	if (++cpump->active_events == 1) {
		ccd = virt_to_phys(cpump->page) | PAI_CRYPTO_KERNEL_OFFSET;
		WRITE_ONCE(S390_lowcore.ccd, ccd);
		__ctl_set_bit(0, 50);
	}
	cpump->event = event;
	if (flags & PERF_EF_START && !event->attr.sample_period) {
		/* Only counting needs initial counter value */
		paicrypt_start(event, PERF_EF_RELOAD);
	}
	event->hw.state = 0;
	if (event->attr.sample_period)
		perf_sched_cb_inc(event->pmu);
	return 0;
}

static void paicrypt_stop(struct perf_event *event, int flags)
{
	paicrypt_read(event);
	event->hw.state = PERF_HES_STOPPED;
}

static void paicrypt_del(struct perf_event *event, int flags)
{
	struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);

	if (event->attr.sample_period)
		perf_sched_cb_dec(event->pmu);
	if (!event->attr.sample_period)
		/* Only counting needs to read counter */
		paicrypt_stop(event, PERF_EF_UPDATE);
	if (--cpump->active_events == 0) {
		__ctl_clear_bit(0, 50);
		WRITE_ONCE(S390_lowcore.ccd, 0);
	}
}
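
/* Minimal user space counting sketch for this PMU. Illustrative only, not
 * part of this driver; error handling is omitted and the dynamic PMU type
 * is assumed to have been read from
 * /sys/bus/event_source/devices/pai_crypto/type:
 *
 *	struct perf_event_attr attr = {
 *		.type = pai_crypto_type,	// from the sysfs "type" file
 *		.size = sizeof(attr),
 *		.config = 0x1007,		// PAI_CRYPTO_BASE + 7: KM_AES_128
 *	};
 *	// pid == -1 and cpu >= 0: paicrypt_event_init() rejects per task
 *	// events and cpu == -1.
 *	int fd = syscall(__NR_perf_event_open, &attr, -1, cpu, -1, 0);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *	...
 *	read(fd, &count, sizeof(count));	// u64 counter value
 */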

/* Create raw data and save it in buffer. Returns number of bytes copied.
 * Saves only positive counter entries of the form
 *	2 bytes: Number of counter
 *	8 bytes: Value of counter
 */
static size_t paicrypt_copy(struct pai_userdata *userdata,
			    struct paicrypt_map *cpump,
			    bool exclude_user, bool exclude_kernel)
{
	int i, outidx = 0;

	for (i = 1; i <= paicrypt_cnt; i++) {
		u64 val = 0;

		if (!exclude_kernel)
			val += paicrypt_getctr(cpump, i, true);
		if (!exclude_user)
			val += paicrypt_getctr(cpump, i, false);
		if (val) {
			userdata[outidx].num = i;
			userdata[outidx].value = val;
			outidx++;
		}
	}
	return outidx * sizeof(struct pai_userdata);
}

static int paicrypt_push_sample(void)
{
	struct paicrypt_map *cpump = this_cpu_ptr(&paicrypt_map);
	struct perf_event *event = cpump->event;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	size_t rawsize;
	int overflow;

	if (!cpump->event)		/* No event active */
		return 0;
	rawsize = paicrypt_copy(cpump->save, cpump,
				cpump->event->attr.exclude_user,
				cpump->event->attr.exclude_kernel);
	if (!rawsize)			/* No incremented counters */
		return 0;

	/* Setup perf sample */
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));
	memset(&data, 0, sizeof(data));
	perf_sample_data_init(&data, 0, event->hw.last_period);
	if (event->attr.sample_type & PERF_SAMPLE_TID) {
		data.tid_entry.pid = task_tgid_nr(current);
		data.tid_entry.tid = task_pid_nr(current);
	}
	if (event->attr.sample_type & PERF_SAMPLE_TIME)
		data.time = event->clock();
	if (event->attr.sample_type & (PERF_SAMPLE_ID | PERF_SAMPLE_IDENTIFIER))
		data.id = event->id;
	if (event->attr.sample_type & PERF_SAMPLE_CPU) {
		data.cpu_entry.cpu = smp_processor_id();
		data.cpu_entry.reserved = 0;
	}
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = rawsize;
		raw.frag.data = cpump->save;
		raw.size = raw.frag.size;
		data.raw = &raw;
		data.sample_flags |= PERF_SAMPLE_RAW;
	}

	overflow = perf_event_overflow(event, &data, &regs);
	perf_event_update_userpage(event);
	/* Clear lowcore page after read */
	memset(cpump->page, 0, PAGE_SIZE);
	return overflow;
}

/* Called on schedule-in and schedule-out. No access to event structure,
 * but for sampling only event CRYPTO_ALL is allowed.
 */
static void paicrypt_sched_task(struct perf_event_pmu_context *pmu_ctx, bool sched_in)
{
	/* We started with a clean page on event installation. So read out
	 * results on schedule_out and if page was dirty, clear values.
	 */
	if (!sched_in)
		paicrypt_push_sample();
}
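
/* The PERF_SAMPLE_RAW payload pushed by paicrypt_push_sample() is an array
 * of struct pai_userdata entries as produced by paicrypt_copy(). A consumer
 * could decode the raw data roughly as follows (illustrative sketch, not
 * part of this driver; "raw" and "rawsize" stand for the raw data area and
 * its size taken from the sample record):
 *
 *	struct pai_userdata *ud = raw;
 *
 *	for (i = 0; i < rawsize / sizeof(*ud); i++)
 *		printf("counter %u value %llu\n", ud[i].num, ud[i].value);
 *
 * Each entry names a counter number (index into paicrypt_ctrnames[]) and
 * the sum of its user and/or kernel space count, depending on the
 * exclude_user/exclude_kernel settings of the sampling event. Counters
 * that did not increment are not part of the payload.
 */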

/* Attribute definitions for paicrypt interface. As with other CPU
 * Measurement Facilities, there is one attribute per mapped counter.
 * The number of mapped counters may vary per machine generation. Use
 * the QUERY PROCESSOR ACTIVITY COUNTER INFORMATION (QPACI) instruction
 * to determine the number of mapped counters. The instruction returns
 * a positive number, which is the highest number of supported counters.
 * All counters less than this number are also supported, there are no
 * holes. A returned number of zero means no support for mapped counters.
 *
 * The identification of the counter is a unique number. The chosen range
 * is 0x1000 + offset in mapped kernel page.
 * All CPU Measurement Facility counter identifiers must be unique and
 * the numbers from 0 to 496 are already used for the CPU Measurement
 * Counter facility. Numbers 0xb0000, 0xbc000 and 0xbd000 are already
 * used for the CPU Measurement Sampling facility.
 */
PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *paicrypt_format_attr[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group paicrypt_events_group = {
	.name = "events",
	.attrs = NULL			/* Filled in attr_event_init() */
};

static struct attribute_group paicrypt_format_group = {
	.name = "format",
	.attrs = paicrypt_format_attr,
};

static const struct attribute_group *paicrypt_attr_groups[] = {
	&paicrypt_events_group,
	&paicrypt_format_group,
	NULL,
};

/* Performance monitoring unit for mapped counters */
static struct pmu paicrypt = {
	.task_ctx_nr  = perf_invalid_context,
	.event_init   = paicrypt_event_init,
	.add	      = paicrypt_add,
	.del	      = paicrypt_del,
	.start	      = paicrypt_start,
	.stop	      = paicrypt_stop,
	.read	      = paicrypt_read,
	.sched_task   = paicrypt_sched_task,
	.attr_groups  = paicrypt_attr_groups
};

/* List of symbolic PAI counter names. */
static const char * const paicrypt_ctrnames[] = {
	[0] = "CRYPTO_ALL",
	[1] = "KM_DEA",
	[2] = "KM_TDEA_128",
	[3] = "KM_TDEA_192",
	[4] = "KM_ENCRYPTED_DEA",
	[5] = "KM_ENCRYPTED_TDEA_128",
	[6] = "KM_ENCRYPTED_TDEA_192",
	[7] = "KM_AES_128",
	[8] = "KM_AES_192",
	[9] = "KM_AES_256",
	[10] = "KM_ENCRYPTED_AES_128",
	[11] = "KM_ENCRYPTED_AES_192",
	[12] = "KM_ENCRYPTED_AES_256",
	[13] = "KM_XTS_AES_128",
	[14] = "KM_XTS_AES_256",
	[15] = "KM_XTS_ENCRYPTED_AES_128",
	[16] = "KM_XTS_ENCRYPTED_AES_256",
	[17] = "KMC_DEA",
	[18] = "KMC_TDEA_128",
	[19] = "KMC_TDEA_192",
	[20] = "KMC_ENCRYPTED_DEA",
	[21] = "KMC_ENCRYPTED_TDEA_128",
	[22] = "KMC_ENCRYPTED_TDEA_192",
	[23] = "KMC_AES_128",
	[24] = "KMC_AES_192",
	[25] = "KMC_AES_256",
	[26] = "KMC_ENCRYPTED_AES_128",
	[27] = "KMC_ENCRYPTED_AES_192",
	[28] = "KMC_ENCRYPTED_AES_256",
	[29] = "KMC_PRNG",
	[30] = "KMA_GCM_AES_128",
	[31] = "KMA_GCM_AES_192",
	[32] = "KMA_GCM_AES_256",
	[33] = "KMA_GCM_ENCRYPTED_AES_128",
	[34] = "KMA_GCM_ENCRYPTED_AES_192",
	[35] = "KMA_GCM_ENCRYPTED_AES_256",
	[36] = "KMF_DEA",
	[37] = "KMF_TDEA_128",
	[38] = "KMF_TDEA_192",
	[39] = "KMF_ENCRYPTED_DEA",
	[40] = "KMF_ENCRYPTED_TDEA_128",
	[41] = "KMF_ENCRYPTED_TDEA_192",
	[42] = "KMF_AES_128",
	[43] = "KMF_AES_192",
	[44] = "KMF_AES_256",
	[45] = "KMF_ENCRYPTED_AES_128",
	[46] = "KMF_ENCRYPTED_AES_192",
	[47] = "KMF_ENCRYPTED_AES_256",
	[48] = "KMCTR_DEA",
	[49] = "KMCTR_TDEA_128",
	[50] = "KMCTR_TDEA_192",
	[51] = "KMCTR_ENCRYPTED_DEA",
	[52] = "KMCTR_ENCRYPTED_TDEA_128",
	[53] = "KMCTR_ENCRYPTED_TDEA_192",
	[54] = "KMCTR_AES_128",
	[55] = "KMCTR_AES_192",
	[56] = "KMCTR_AES_256",
	[57] = "KMCTR_ENCRYPTED_AES_128",
	[58] = "KMCTR_ENCRYPTED_AES_192",
	[59] = "KMCTR_ENCRYPTED_AES_256",
	[60] = "KMO_DEA",
	[61] = "KMO_TDEA_128",
	[62] = "KMO_TDEA_192",
	[63] = "KMO_ENCRYPTED_DEA",
	[64] = "KMO_ENCRYPTED_TDEA_128",
	[65] = "KMO_ENCRYPTED_TDEA_192",
	[66] = "KMO_AES_128",
	[67] = "KMO_AES_192",
	[68] = "KMO_AES_256",
	[69] = "KMO_ENCRYPTED_AES_128",
	[70] = "KMO_ENCRYPTED_AES_192",
[71] = "KMO_ENCRYPTED_AES_256", 515 [72] = "KIMD_SHA_1", 516 [73] = "KIMD_SHA_256", 517 [74] = "KIMD_SHA_512", 518 [75] = "KIMD_SHA3_224", 519 [76] = "KIMD_SHA3_256", 520 [77] = "KIMD_SHA3_384", 521 [78] = "KIMD_SHA3_512", 522 [79] = "KIMD_SHAKE_128", 523 [80] = "KIMD_SHAKE_256", 524 [81] = "KIMD_GHASH", 525 [82] = "KLMD_SHA_1", 526 [83] = "KLMD_SHA_256", 527 [84] = "KLMD_SHA_512", 528 [85] = "KLMD_SHA3_224", 529 [86] = "KLMD_SHA3_256", 530 [87] = "KLMD_SHA3_384", 531 [88] = "KLMD_SHA3_512", 532 [89] = "KLMD_SHAKE_128", 533 [90] = "KLMD_SHAKE_256", 534 [91] = "KMAC_DEA", 535 [92] = "KMAC_TDEA_128", 536 [93] = "KMAC_TDEA_192", 537 [94] = "KMAC_ENCRYPTED_DEA", 538 [95] = "KMAC_ENCRYPTED_TDEA_128", 539 [96] = "KMAC_ENCRYPTED_TDEA_192", 540 [97] = "KMAC_AES_128", 541 [98] = "KMAC_AES_192", 542 [99] = "KMAC_AES_256", 543 [100] = "KMAC_ENCRYPTED_AES_128", 544 [101] = "KMAC_ENCRYPTED_AES_192", 545 [102] = "KMAC_ENCRYPTED_AES_256", 546 [103] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_DEA", 547 [104] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_128", 548 [105] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_TDEA_192", 549 [106] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_DEA", 550 [107] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_128", 551 [108] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_TDEA_192", 552 [109] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_128", 553 [110] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_192", 554 [111] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_AES_256", 555 [112] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_128", 556 [113] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_192", 557 [114] = "PCC_COMPUTE_LAST_BLOCK_CMAC_USING_ENCRYPTED_AES_256A", 558 [115] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_128", 559 [116] = "PCC_COMPUTE_XTS_PARAMETER_USING_AES_256", 560 [117] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_128", 561 [118] = "PCC_COMPUTE_XTS_PARAMETER_USING_ENCRYPTED_AES_256", 562 [119] = "PCC_SCALAR_MULTIPLY_P256", 563 [120] = "PCC_SCALAR_MULTIPLY_P384", 564 [121] = "PCC_SCALAR_MULTIPLY_P521", 565 [122] = "PCC_SCALAR_MULTIPLY_ED25519", 566 [123] = "PCC_SCALAR_MULTIPLY_ED448", 567 [124] = "PCC_SCALAR_MULTIPLY_X25519", 568 [125] = "PCC_SCALAR_MULTIPLY_X448", 569 [126] = "PRNO_SHA_512_DRNG", 570 [127] = "PRNO_TRNG_QUERY_RAW_TO_CONDITIONED_RATIO", 571 [128] = "PRNO_TRNG", 572 [129] = "KDSA_ECDSA_VERIFY_P256", 573 [130] = "KDSA_ECDSA_VERIFY_P384", 574 [131] = "KDSA_ECDSA_VERIFY_P521", 575 [132] = "KDSA_ECDSA_SIGN_P256", 576 [133] = "KDSA_ECDSA_SIGN_P384", 577 [134] = "KDSA_ECDSA_SIGN_P521", 578 [135] = "KDSA_ENCRYPTED_ECDSA_SIGN_P256", 579 [136] = "KDSA_ENCRYPTED_ECDSA_SIGN_P384", 580 [137] = "KDSA_ENCRYPTED_ECDSA_SIGN_P521", 581 [138] = "KDSA_EDDSA_VERIFY_ED25519", 582 [139] = "KDSA_EDDSA_VERIFY_ED448", 583 [140] = "KDSA_EDDSA_SIGN_ED25519", 584 [141] = "KDSA_EDDSA_SIGN_ED448", 585 [142] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED25519", 586 [143] = "KDSA_ENCRYPTED_EDDSA_SIGN_ED448", 587 [144] = "PCKMO_ENCRYPT_DEA_KEY", 588 [145] = "PCKMO_ENCRYPT_TDEA_128_KEY", 589 [146] = "PCKMO_ENCRYPT_TDEA_192_KEY", 590 [147] = "PCKMO_ENCRYPT_AES_128_KEY", 591 [148] = "PCKMO_ENCRYPT_AES_192_KEY", 592 [149] = "PCKMO_ENCRYPT_AES_256_KEY", 593 [150] = "PCKMO_ENCRYPT_ECC_P256_KEY", 594 [151] = "PCKMO_ENCRYPT_ECC_P384_KEY", 595 [152] = "PCKMO_ENCRYPT_ECC_P521_KEY", 596 [153] = "PCKMO_ENCRYPT_ECC_ED25519_KEY", 597 [154] = "PCKMO_ENCRYPT_ECC_ED448_KEY", 598 [155] = "IBM_RESERVED_155", 599 [156] = "IBM_RESERVED_156", 600 }; 601 602 static void __init attr_event_free(struct attribute **attrs, int num) 603 { 604 struct 

static void __init attr_event_free(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;
	int i;

	for (i = 0; i < num; i++) {
		struct device_attribute *dap;

		dap = container_of(attrs[i], struct device_attribute, attr);
		pa = container_of(dap, struct perf_pmu_events_attr, attr);
		kfree(pa);
	}
	kfree(attrs);
}

static int __init attr_event_init_one(struct attribute **attrs, int num)
{
	struct perf_pmu_events_attr *pa;

	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
	if (!pa)
		return -ENOMEM;

	sysfs_attr_init(&pa->attr.attr);
	pa->id = PAI_CRYPTO_BASE + num;
	pa->attr.attr.name = paicrypt_ctrnames[num];
	pa->attr.attr.mode = 0444;
	pa->attr.show = cpumf_events_sysfs_show;
	pa->attr.store = NULL;
	attrs[num] = &pa->attr.attr;
	return 0;
}

/* Create PMU sysfs event attributes on the fly. */
static int __init attr_event_init(void)
{
	struct attribute **attrs;
	int ret, i;

	attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs),
			      GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;
	for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
		ret = attr_event_init_one(attrs, i);
		if (ret) {
			/* Free the i attributes set up so far; attrs[i]
			 * itself was not allocated.
			 */
			attr_event_free(attrs, i);
			return ret;
		}
	}
	attrs[i] = NULL;
	paicrypt_events_group.attrs = attrs;
	return 0;
}

static int __init paicrypt_init(void)
{
	struct qpaci_info_block ib;
	int rc;

	if (!test_facility(196))
		return 0;

	qpaci(&ib);
	paicrypt_cnt = ib.num_cc;
	if (paicrypt_cnt == 0)
		return 0;
	if (paicrypt_cnt >= PAI_CRYPTO_MAXCTR)
		paicrypt_cnt = PAI_CRYPTO_MAXCTR - 1;

	rc = attr_event_init();		/* Export known PAI crypto events */
	if (rc) {
		pr_err("Creation of PMU pai_crypto /sysfs failed\n");
		return rc;
	}

	/* Setup s390dbf facility */
	cfm_dbg = debug_register(KMSG_COMPONENT, 2, 256, 128);
	if (!cfm_dbg) {
		pr_err("Registration of s390dbf pai_crypto failed\n");
		return -ENOMEM;
	}
	debug_register_view(cfm_dbg, &debug_sprintf_view);

	rc = perf_pmu_register(&paicrypt, "pai_crypto", -1);
	if (rc) {
		pr_err("Registering the pai_crypto PMU failed with rc=%i\n",
		       rc);
		debug_unregister_view(cfm_dbg, &debug_sprintf_view);
		debug_unregister(cfm_dbg);
		return rc;
	}
	return 0;
}

device_initcall(paicrypt_init);
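
/* The PMU is only registered when the PAI crypto facility is present:
 * paicrypt_init() silently returns when facility bit 196 is not installed
 * or when QPACI reports zero counters. A quick availability check from
 * user space (illustrative):
 *
 *	# ls /sys/bus/event_source/devices/ | grep pai_crypto
 *
 * No output means the facility is not available on this machine or the
 * running kernel does not provide this PMU.
 */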