// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2017 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

/* Human-readable error strings */
#define CCP_MAX_ERROR_CODE	64
static char *ccp_error_codes[] = {
	"",
	"ILLEGAL_ENGINE",
	"ILLEGAL_KEY_ID",
	"ILLEGAL_FUNCTION_TYPE",
	"ILLEGAL_FUNCTION_MODE",
	"ILLEGAL_FUNCTION_ENCRYPT",
	"ILLEGAL_FUNCTION_SIZE",
	"Zlib_MISSING_INIT_EOM",
	"ILLEGAL_FUNCTION_RSVD",
	"ILLEGAL_BUFFER_LENGTH",
	"VLSB_FAULT",
	"ILLEGAL_MEM_ADDR",
	"ILLEGAL_MEM_SEL",
	"ILLEGAL_CONTEXT_ID",
	"ILLEGAL_KEY_ADDR",
	"0xF Reserved",
	"Zlib_ILLEGAL_MULTI_QUEUE",
	"Zlib_ILLEGAL_JOBID_CHANGE",
	"CMD_TIMEOUT",
	"IDMA0_AXI_SLVERR",
	"IDMA0_AXI_DECERR",
	"0x15 Reserved",
	"IDMA1_AXI_SLAVE_FAULT",
	"IDMA1_AIXI_DECERR",
	"0x18 Reserved",
	"ZLIBVHB_AXI_SLVERR",
	"ZLIBVHB_AXI_DECERR",
	"0x1B Reserved",
	"ZLIB_UNEXPECTED_EOM",
	"ZLIB_EXTRA_DATA",
	"ZLIB_BTYPE",
	"ZLIB_UNDEFINED_SYMBOL",
	"ZLIB_UNDEFINED_DISTANCE_S",
	"ZLIB_CODE_LENGTH_SYMBOL",
	"ZLIB _VHB_ILLEGAL_FETCH",
	"ZLIB_UNCOMPRESSED_LEN",
	"ZLIB_LIMIT_REACHED",
	"ZLIB_CHECKSUM_MISMATCH0",
	"ODMA0_AXI_SLVERR",
	"ODMA0_AXI_DECERR",
	"0x28 Reserved",
	"ODMA1_AXI_SLVERR",
	"ODMA1_AXI_DECERR",
};

void ccp_log_error(struct ccp_device *d, unsigned int e)
{
	if (WARN_ON(e >= CCP_MAX_ERROR_CODE))
		return;

	if (e < ARRAY_SIZE(ccp_error_codes))
		dev_err(d->dev, "CCP error %d: %s\n", e, ccp_error_codes[e]);
	else
		dev_err(d->dev, "CCP error %d: Unknown Error\n", e);
}

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;
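
/*
 * Illustrative sketch only, not used by the driver: one way a reader
 * would follow the locking rule documented above -- take ccp_unit_lock
 * for reading first, and only then take ccp_rr_lock before touching the
 * round-robin pointer. The helper name is made up for the example;
 * ccp_get_device() below is the real user of this pattern and also
 * advances the pointer.
 */
static __maybe_unused struct ccp_device *ccp_peek_next_unit(void)
{
	struct ccp_device *dp = NULL;
	unsigned long flags;

	read_lock_irqsave(&ccp_unit_lock, flags);	/* unit lock first */
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);		/* then the RR lock */
		dp = ccp_rr;				/* peek, don't advance */
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}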

/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
		/* ccp_unit_lock is read/write; any read access
		 * will be suspended while we make changes to the
		 * list and RR pointer.
		 */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}
	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

int ccp_register_rng(struct ccp_device *ccp)
{
	int ret = 0;

	dev_dbg(ccp->dev, "Registering RNG...\n");
	/* Register an RNG */
	ccp->hwrng.name = ccp->rngname;
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret)
		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

	return ret;
}

void ccp_unregister_rng(struct ccp_device *ccp)
{
	if (ccp->hwrng.name)
		hwrng_unregister(&ccp->hwrng);
}

static struct ccp_device *ccp_get_device(void)
{
	unsigned long flags;
	struct ccp_device *dp = NULL;

	/* We round-robin through the unit list.
	 * The (ccp_rr) pointer refers to the next unit to use.
	 */
	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&ccp_unit_lock, flags);
	ret = list_empty(&ccp_units);
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list;
 * otherwise zero if no CCP device is present
 */
unsigned int ccp_version(void)
{
	struct ccp_device *dp;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
		ret = dp->vdata->version;
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);
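
/*
 * Illustrative sketch only, not part of the driver: how a client (for
 * example a crypto algorithm module) might use the exported helpers
 * above to decide whether a CCP is available before registering any
 * CCP-backed services. The function name is made up for the example;
 * real clients live in the ccp-crypto modules.
 */
static int __maybe_unused ccp_client_probe_example(void)
{
	/* Bail out early when no CCP unit has been registered */
	if (ccp_present() != 0)
		return -ENODEV;

	/* The raw version data can be used to gate feature support */
	pr_debug("CCP version data: 0x%x\n", ccp_version());

	return 0;
}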

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * queued only if the CCP_CMD_MAY_BACKLOG flag is set, in which
 * case -EBUSY is returned.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS or
 *   the return code is -EBUSY and CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp;
	unsigned long flags;
	unsigned int i;
	int ret;

	/* Some commands might need to be sent to a specific device */
	ccp = cmd->ccp ? cmd->ccp : ccp_get_device();

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		if (cmd->flags & CCP_CMD_MAY_BACKLOG) {
			ret = -EBUSY;
			list_add_tail(&cmd->entry, &ccp->backlog);
		} else {
			ret = -ENOSPC;
		}
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
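
/*
 * Illustrative sketch only, not part of the driver: the caller-side
 * protocol described in the ccp_enqueue_cmd() kernel-doc above. The
 * result struct, callback and helper names are made up for the
 * example; real callers (the ccp-crypto modules) also fill in the
 * engine-specific fields of struct ccp_cmd before submitting it.
 */
struct ccp_example_result {
	struct completion completion;
	int err;
};

static void __maybe_unused ccp_example_callback(void *data, int err)
{
	struct ccp_example_result *res = data;

	/* -EINPROGRESS only means the cmd advanced out of the backlog */
	if (err == -EINPROGRESS)
		return;

	/* Any other value is the final result of the operation */
	res->err = err;
	complete(&res->completion);
}

static int __maybe_unused ccp_example_submit(struct ccp_cmd *cmd)
{
	struct ccp_example_result res;
	int ret;

	init_completion(&res.completion);
	cmd->flags = CCP_CMD_MAY_BACKLOG;	/* allow backlogging */
	cmd->callback = ccp_example_callback;
	cmd->data = &res;

	ret = ccp_enqueue_cmd(cmd);
	if (ret != -EINPROGRESS && ret != -EBUSY)
		return ret;			/* cmd was not queued */

	/* The callback signals the final result of the operation */
	wait_for_completion(&res.completion);

	return res.err;
}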

static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);

	complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - the kernel thread routine that manages a CCP command queue
 *
 * @data: thread-specific data
 */
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @sp: sp_device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;
	ccp->sp = sp;
	ccp->axcache = sp->axcache;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->sb_mutex);
	ccp->sb_count = KSB_COUNT;
	ccp->sb_start = 0;

	/* Initialize the wait queues */
	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", sp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", sp->ord);

	return ccp;
}

int ccp_trng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct ccp_device *ccp = container_of(rng, struct ccp_device, hwrng);
	u32 trng_value;
	int len = min_t(int, sizeof(trng_value), max);

	/* Locking is provided by the caller so we can update device
	 * hwrng-related fields safely
	 */
	trng_value = ioread32(ccp->io_regs + TRNG_OUT_REG);
	if (!trng_value) {
		/* Zero is returned if no data is available or if a
		 * bad-entropy error is present. Assume an error if
		 * we exceed TRNG_RETRIES reads of zero.
		 */
		if (ccp->hwrng_retries++ > TRNG_RETRIES)
			return -EIO;

		return 0;
	}

	/* Reset the counter and save the rng value */
	ccp->hwrng_retries = 0;
	memcpy(data, &trng_value, len);

	return len;
}

#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}

int ccp_dev_suspend(struct sp_device *sp, pm_message_t state)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 1;

	/* Wake all the queue kthreads to prepare for suspend */
	for (i = 0; i < ccp->cmd_q_count; i++)
		wake_up_process(ccp->cmd_q[i].kthread);

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* Wait for all queue kthreads to say they're done */
	while (!ccp_queues_suspended(ccp))
		wait_event_interruptible(ccp->suspend_queue,
					 ccp_queues_suspended(ccp));

	return 0;
}

int ccp_dev_resume(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->suspending = 0;

	/* Wake up all the kthreads */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		ccp->cmd_q[i].suspended = 0;
		wake_up_process(ccp->cmd_q[i].kthread);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return 0;
}
#endif

int ccp_dev_init(struct sp_device *sp)
{
	struct device *dev = sp->dev;
	struct ccp_device *ccp;
	int ret;

	ret = -ENOMEM;
	ccp = ccp_alloc_struct(sp);
	if (!ccp)
		goto e_err;
	sp->ccp_data = ccp;

	ccp->vdata = (struct ccp_vdata *)sp->dev_vdata->ccp_vdata;
	if (!ccp->vdata || !ccp->vdata->version) {
		ret = -ENODEV;
		dev_err(dev, "missing driver data\n");
		goto e_err;
	}

	ccp->use_tasklet = sp->use_tasklet;

	ccp->io_regs = sp->io_map + ccp->vdata->offset;
	if (ccp->vdata->setup)
		ccp->vdata->setup(ccp);

	ret = ccp->vdata->perform->init(ccp);
	if (ret)
		goto e_err;

	dev_notice(dev, "ccp enabled\n");

	return 0;

e_err:
	sp->ccp_data = NULL;

	dev_notice(dev, "ccp initialization failed\n");

	return ret;
}

void ccp_dev_destroy(struct sp_device *sp)
{
	struct ccp_device *ccp = sp->ccp_data;

	if (!ccp)
		return;

	ccp->vdata->perform->destroy(ccp);
}