/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

/* Human-readable error strings */
static char *ccp_error_codes[] = {
	"",
	"ERR 01: ILLEGAL_ENGINE",
	"ERR 02: ILLEGAL_KEY_ID",
	"ERR 03: ILLEGAL_FUNCTION_TYPE",
	"ERR 04: ILLEGAL_FUNCTION_MODE",
	"ERR 05: ILLEGAL_FUNCTION_ENCRYPT",
	"ERR 06: ILLEGAL_FUNCTION_SIZE",
	"ERR 07: Zlib_MISSING_INIT_EOM",
	"ERR 08: ILLEGAL_FUNCTION_RSVD",
	"ERR 09: ILLEGAL_BUFFER_LENGTH",
	"ERR 10: VLSB_FAULT",
	"ERR 11: ILLEGAL_MEM_ADDR",
	"ERR 12: ILLEGAL_MEM_SEL",
	"ERR 13: ILLEGAL_CONTEXT_ID",
	"ERR 14: ILLEGAL_KEY_ADDR",
	"ERR 15: 0xF Reserved",
	"ERR 16: Zlib_ILLEGAL_MULTI_QUEUE",
	"ERR 17: Zlib_ILLEGAL_JOBID_CHANGE",
	"ERR 18: CMD_TIMEOUT",
	"ERR 19: IDMA0_AXI_SLVERR",
	"ERR 20: IDMA0_AXI_DECERR",
	"ERR 21: 0x15 Reserved",
	"ERR 22: IDMA1_AXI_SLAVE_FAULT",
	"ERR 23: IDMA1_AXI_DECERR",
	"ERR 24: 0x18 Reserved",
	"ERR 25: ZLIBVHB_AXI_SLVERR",
	"ERR 26: ZLIBVHB_AXI_DECERR",
	"ERR 27: 0x1B Reserved",
	"ERR 28: ZLIB_UNEXPECTED_EOM",
	"ERR 29: ZLIB_EXTRA_DATA",
	"ERR 30: ZLIB_BTYPE",
	"ERR 31: ZLIB_UNDEFINED_SYMBOL",
	"ERR 32: ZLIB_UNDEFINED_DISTANCE_S",
	"ERR 33: ZLIB_CODE_LENGTH_SYMBOL",
	"ERR 34: ZLIB_VHB_ILLEGAL_FETCH",
	"ERR 35: ZLIB_UNCOMPRESSED_LEN",
	"ERR 36: ZLIB_LIMIT_REACHED",
	"ERR 37: ZLIB_CHECKSUM_MISMATCH0",
	"ERR 38: ODMA0_AXI_SLVERR",
	"ERR 39: ODMA0_AXI_DECERR",
	"ERR 40: 0x28 Reserved",
	"ERR 41: ODMA1_AXI_SLVERR",
	"ERR 42: ODMA1_AXI_DECERR",
	"ERR 43: LSB_PARITY_ERR",
};

void ccp_log_error(struct ccp_device *d, int e)
{
	/* Guard against error codes beyond the table of known strings */
	if (WARN_ON(e < 0 || e >= (int)ARRAY_SIZE(ccp_error_codes)))
		return;

	dev_err(d->dev, "CCP error: %s (0x%x)\n", ccp_error_codes[e], e);
}

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/* Ever-increasing value to produce unique unit numbers */
static atomic_t ccp_unit_ordinal;
static unsigned int ccp_increment_unit_ordinal(void)
{
	return atomic_inc_return(&ccp_unit_ordinal);
}
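
/*
 * Illustrative sketch only (not part of this driver): a hypothetical helper
 * showing the documented lock ordering. The CCP list may be examined while
 * holding ccp_unit_lock for reading; the RR spinlock would additionally be
 * taken (always after the unit lock) only if ccp_rr itself were updated, as
 * ccp_get_device() below does. The helper name is an assumption.
 */
static unsigned int __maybe_unused ccp_example_unit_count(void)
{
	struct ccp_device *ccp;
	unsigned long flags;
	unsigned int n = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	list_for_each_entry(ccp, &ccp_units, entry)
		n++;
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return n;
}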
/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
		/* ccp_unit_lock is read/write; any read access
		 * will be suspended while we make changes to the
		 * list and RR pointer.
		 */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}
	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

int ccp_register_rng(struct ccp_device *ccp)
{
	int ret = 0;

	dev_dbg(ccp->dev, "Registering RNG...\n");
	/* Register an RNG */
	ccp->hwrng.name = ccp->rngname;
	ccp->hwrng.read = ccp_trng_read;
	ret = hwrng_register(&ccp->hwrng);
	if (ret)
		dev_err(ccp->dev, "error registering hwrng (%d)\n", ret);

	return ret;
}

void ccp_unregister_rng(struct ccp_device *ccp)
{
	if (ccp->hwrng.name)
		hwrng_unregister(&ccp->hwrng);
}

static struct ccp_device *ccp_get_device(void)
{
	unsigned long flags;
	struct ccp_device *dp = NULL;

	/* We round-robin through the unit list.
	 * The (ccp_rr) pointer refers to the next unit to use.
	 */
	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&ccp_unit_lock, flags);
	ret = list_empty(&ccp_units);
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);
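
/*
 * Illustrative sketch only (not part of this driver): how an external user
 * of the exported API (declared in <linux/ccp.h>) might probe for a CCP
 * before making use of it. ccp_version() is defined just below; the
 * function name used here is an assumption.
 */
static int __maybe_unused ccp_example_check(void)
{
	if (ccp_present() != 0)
		return -ENODEV;	/* no CCP unit registered */

	pr_info("CCP available, device version %u\n", ccp_version());

	return 0;
}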
/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list,
 * or zero if no CCP device is present.
 */
unsigned int ccp_version(void)
{
	struct ccp_device *dp;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
		ret = dp->vdata->version;
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd is
 * placed on the backlog only if the CCP_CMD_MAY_BACKLOG flag is
 * set, in which case -EBUSY is returned.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or of advancement out of the backlog. If the cmd
 * has advanced out of the backlog, the "err" value passed to the
 * callback will be -EINPROGRESS. Any other "err" value passed to
 * the callback is the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS, or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp = ccp_get_device();
	unsigned long flags;
	unsigned int i;
	int ret;

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		ret = -EBUSY;
		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
			list_add_tail(&cmd->entry, &ccp->backlog);
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
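
/*
 * Illustrative sketch only (not part of this driver): how a hypothetical
 * caller might submit a command per the contract documented above. The
 * engine-specific fields of struct ccp_cmd are elided; only the callback
 * and backlog handling is shown. The names ccp_example_* are assumptions.
 */
struct ccp_example_ctx {
	struct completion done;
	int err;
};

static void ccp_example_callback(void *data, int err)
{
	struct ccp_example_ctx *ctx = data;

	if (err == -EINPROGRESS)	/* advanced out of the backlog */
		return;

	ctx->err = err;			/* final result of the operation */
	complete(&ctx->done);
}

static int __maybe_unused ccp_example_submit(struct ccp_cmd *cmd)
{
	struct ccp_example_ctx ctx;
	int ret;

	init_completion(&ctx.done);
	cmd->callback = ccp_example_callback;
	cmd->data = &ctx;
	cmd->flags |= CCP_CMD_MAY_BACKLOG;

	ret = ccp_enqueue_cmd(cmd);
	if (ret != -EINPROGRESS && ret != -EBUSY)
		return ret;		/* the cmd was not queued */

	wait_for_completion(&ctx.done);

	return ctx.err;
}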
static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);
	complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - the kernel thread that services a CCP queue
 *
 * @data: thread-specific data (the ccp_cmd_queue to service)
 */
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->sb_mutex);
	ccp->sb_count = KSB_COUNT;
	ccp->sb_start = 0;

	/* Initialize the wait queues */
	init_waitqueue_head(&ccp->sb_queue);
	init_waitqueue_head(&ccp->suspend_queue);

	ccp->ord = ccp_increment_unit_ordinal();
	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);

	return ccp;
}
Assume an error if 505 * we exceed TRNG_RETRIES reads of zero. 506 */ 507 if (ccp->hwrng_retries++ > TRNG_RETRIES) 508 return -EIO; 509 510 return 0; 511 } 512 513 /* Reset the counter and save the rng value */ 514 ccp->hwrng_retries = 0; 515 memcpy(data, &trng_value, len); 516 517 return len; 518 } 519 520 #ifdef CONFIG_PM 521 bool ccp_queues_suspended(struct ccp_device *ccp) 522 { 523 unsigned int suspended = 0; 524 unsigned long flags; 525 unsigned int i; 526 527 spin_lock_irqsave(&ccp->cmd_lock, flags); 528 529 for (i = 0; i < ccp->cmd_q_count; i++) 530 if (ccp->cmd_q[i].suspended) 531 suspended++; 532 533 spin_unlock_irqrestore(&ccp->cmd_lock, flags); 534 535 return ccp->cmd_q_count == suspended; 536 } 537 #endif 538 539 static int __init ccp_mod_init(void) 540 { 541 #ifdef CONFIG_X86 542 int ret; 543 544 ret = ccp_pci_init(); 545 if (ret) 546 return ret; 547 548 /* Don't leave the driver loaded if init failed */ 549 if (ccp_present() != 0) { 550 ccp_pci_exit(); 551 return -ENODEV; 552 } 553 554 return 0; 555 #endif 556 557 #ifdef CONFIG_ARM64 558 int ret; 559 560 ret = ccp_platform_init(); 561 if (ret) 562 return ret; 563 564 /* Don't leave the driver loaded if init failed */ 565 if (ccp_present() != 0) { 566 ccp_platform_exit(); 567 return -ENODEV; 568 } 569 570 return 0; 571 #endif 572 573 return -ENODEV; 574 } 575 576 static void __exit ccp_mod_exit(void) 577 { 578 #ifdef CONFIG_X86 579 ccp_pci_exit(); 580 #endif 581 582 #ifdef CONFIG_ARM64 583 ccp_platform_exit(); 584 #endif 585 } 586 587 module_init(ccp_mod_init); 588 module_exit(ccp_mod_exit); 589