/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2013,2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/spinlock_types.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/hw_random.h>
#include <linux/cpu.h>
#ifdef CONFIG_X86
#include <asm/cpu_device_id.h>
#endif
#include <linux/ccp.h>

#include "ccp-dev.h"

MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.0.0");
MODULE_DESCRIPTION("AMD Cryptographic Coprocessor driver");

struct ccp_tasklet_data {
	struct completion completion;
	struct ccp_cmd *cmd;
};

/* List of CCPs, CCP count, read-write access lock, and access functions
 *
 * Lock structure: get ccp_unit_lock for reading whenever we need to
 * examine the CCP list. While holding it for reading we can acquire
 * the RR lock to update the round-robin next-CCP pointer. The unit lock
 * must be acquired before the RR lock.
 *
 * If the unit-lock is acquired for writing, we have total control over
 * the list, so there's no value in getting the RR lock.
 */
static DEFINE_RWLOCK(ccp_unit_lock);
static LIST_HEAD(ccp_units);

/* Round-robin counter */
static DEFINE_SPINLOCK(ccp_rr_lock);
static struct ccp_device *ccp_rr;

/* Ever-increasing value to produce unique unit numbers */
static atomic_t ccp_unit_ordinal;
unsigned int ccp_increment_unit_ordinal(void)
{
	return atomic_inc_return(&ccp_unit_ordinal);
}

/**
 * ccp_add_device - add a CCP device to the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Put this CCP on the unit list, which makes it available
 * for use.
 */
void ccp_add_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	list_add_tail(&ccp->entry, &ccp_units);
	if (!ccp_rr)
		/* We already have the list lock (we're first) so this
		 * pointer can't change on us. Set its initial value.
		 */
		ccp_rr = ccp;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

/**
 * ccp_del_device - remove a CCP device from the list
 *
 * @ccp: ccp_device struct pointer
 *
 * Remove this unit from the list of devices. If the next device
 * up for use is this one, adjust the pointer. If this is the last
 * device, NULL the pointer.
 */
void ccp_del_device(struct ccp_device *ccp)
{
	unsigned long flags;

	write_lock_irqsave(&ccp_unit_lock, flags);
	if (ccp_rr == ccp) {
		/* ccp_unit_lock is read/write; any read access
		 * will be suspended while we make changes to the
		 * list and RR pointer.
		 */
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
	}
	list_del(&ccp->entry);
	if (list_empty(&ccp_units))
		ccp_rr = NULL;
	write_unlock_irqrestore(&ccp_unit_lock, flags);
}

static struct ccp_device *ccp_get_device(void)
{
	unsigned long flags;
	struct ccp_device *dp = NULL;

	/* We round-robin through the unit list.
	 * The (ccp_rr) pointer refers to the next unit to use.
	 */
	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		spin_lock(&ccp_rr_lock);
		dp = ccp_rr;
		if (list_is_last(&ccp_rr->entry, &ccp_units))
			ccp_rr = list_first_entry(&ccp_units, struct ccp_device,
						  entry);
		else
			ccp_rr = list_next_entry(ccp_rr, entry);
		spin_unlock(&ccp_rr_lock);
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return dp;
}

/**
 * ccp_present - check if a CCP device is present
 *
 * Returns zero if a CCP device is present, -ENODEV otherwise.
 */
int ccp_present(void)
{
	unsigned long flags;
	int ret;

	read_lock_irqsave(&ccp_unit_lock, flags);
	ret = list_empty(&ccp_units);
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret ? -ENODEV : 0;
}
EXPORT_SYMBOL_GPL(ccp_present);

/**
 * ccp_version - get the version of the CCP device
 *
 * Returns the version from the first unit on the list;
 * otherwise zero if no CCP device is present
 */
unsigned int ccp_version(void)
{
	struct ccp_device *dp;
	unsigned long flags;
	int ret = 0;

	read_lock_irqsave(&ccp_unit_lock, flags);
	if (!list_empty(&ccp_units)) {
		dp = list_first_entry(&ccp_units, struct ccp_device, entry);
		ret = dp->vdata->version;
	}
	read_unlock_irqrestore(&ccp_unit_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_version);
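
/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * client might use the two exported helpers above before submitting work.
 * The function name ccp_client_check() is hypothetical.
 *
 *	static int ccp_client_check(void)
 *	{
 *		if (ccp_present() != 0)
 *			return -ENODEV;		// no CCP unit registered
 *
 *		// Version (vdata->version) of the first registered unit
 *		return ccp_version();
 *	}
 */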

/**
 * ccp_enqueue_cmd - queue an operation for processing by the CCP
 *
 * @cmd: ccp_cmd struct to be processed
 *
 * Queue a cmd to be processed by the CCP. If queueing the cmd
 * would exceed the defined length of the cmd queue, the cmd will
 * only be queued if the CCP_CMD_MAY_BACKLOG flag is set, in which
 * case the return code will be -EBUSY.
 *
 * The callback routine specified in the ccp_cmd struct will be
 * called to notify the caller of completion (if the cmd was not
 * backlogged) or advancement out of the backlog. If the cmd has
 * advanced out of the backlog the "err" value of the callback
 * will be -EINPROGRESS. Any other "err" value during callback is
 * the result of the operation.
 *
 * The cmd has been successfully queued if:
 *   the return code is -EINPROGRESS, or
 *   the return code is -EBUSY and the CCP_CMD_MAY_BACKLOG flag is set
 */
int ccp_enqueue_cmd(struct ccp_cmd *cmd)
{
	struct ccp_device *ccp = ccp_get_device();
	unsigned long flags;
	unsigned int i;
	int ret;

	if (!ccp)
		return -ENODEV;

	/* Caller must supply a callback routine */
	if (!cmd->callback)
		return -EINVAL;

	cmd->ccp = ccp;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	i = ccp->cmd_q_count;

	if (ccp->cmd_count >= MAX_CMD_QLEN) {
		ret = -EBUSY;
		if (cmd->flags & CCP_CMD_MAY_BACKLOG)
			list_add_tail(&cmd->entry, &ccp->backlog);
	} else {
		ret = -EINPROGRESS;
		ccp->cmd_count++;
		list_add_tail(&cmd->entry, &ccp->cmd);

		/* Find an idle queue */
		if (!ccp->suspending) {
			for (i = 0; i < ccp->cmd_q_count; i++) {
				if (ccp->cmd_q[i].active)
					continue;

				break;
			}
		}
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);

	return ret;
}
EXPORT_SYMBOL_GPL(ccp_enqueue_cmd);
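
/*
 * Illustrative only (not part of the driver): a minimal sketch of how a
 * caller might submit a command through ccp_enqueue_cmd() and wait for the
 * completion callback. The names my_ccp_ctx, my_ccp_callback and
 * my_ccp_submit are hypothetical; the real in-kernel users live in the
 * ccp-crypto-* files.
 *
 *	struct my_ccp_ctx {
 *		struct completion done;
 *		int err;
 *	};
 *
 *	static void my_ccp_callback(void *data, int err)
 *	{
 *		struct my_ccp_ctx *ctx = data;
 *
 *		if (err == -EINPROGRESS)	// advanced out of the backlog,
 *			return;			// final callback still to come
 *
 *		ctx->err = err;			// result of the operation
 *		complete(&ctx->done);
 *	}
 *
 *	static int my_ccp_submit(struct ccp_cmd *cmd, struct my_ccp_ctx *ctx)
 *	{
 *		int ret;
 *
 *		init_completion(&ctx->done);
 *		cmd->callback = my_ccp_callback;
 *		cmd->data = ctx;
 *		cmd->flags |= CCP_CMD_MAY_BACKLOG;
 *
 *		ret = ccp_enqueue_cmd(cmd);
 *		if (ret != -EINPROGRESS && ret != -EBUSY)
 *			return ret;		// not queued
 *
 *		wait_for_completion(&ctx->done);
 *		return ctx->err;
 *	}
 */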

static void ccp_do_cmd_backlog(struct work_struct *work)
{
	struct ccp_cmd *cmd = container_of(work, struct ccp_cmd, work);
	struct ccp_device *ccp = cmd->ccp;
	unsigned long flags;
	unsigned int i;

	cmd->callback(cmd->data, -EINPROGRESS);

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	ccp->cmd_count++;
	list_add_tail(&cmd->entry, &ccp->cmd);

	/* Find an idle queue */
	for (i = 0; i < ccp->cmd_q_count; i++) {
		if (ccp->cmd_q[i].active)
			continue;

		break;
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	/* If we found an idle queue, wake it up */
	if (i < ccp->cmd_q_count)
		wake_up_process(ccp->cmd_q[i].kthread);
}

static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)
{
	struct ccp_device *ccp = cmd_q->ccp;
	struct ccp_cmd *cmd = NULL;
	struct ccp_cmd *backlog = NULL;
	unsigned long flags;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	cmd_q->active = 0;

	if (ccp->suspending) {
		cmd_q->suspended = 1;

		spin_unlock_irqrestore(&ccp->cmd_lock, flags);
		wake_up_interruptible(&ccp->suspend_queue);

		return NULL;
	}

	if (ccp->cmd_count) {
		cmd_q->active = 1;

		cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
		list_del(&cmd->entry);

		ccp->cmd_count--;
	}

	if (!list_empty(&ccp->backlog)) {
		backlog = list_first_entry(&ccp->backlog, struct ccp_cmd,
					   entry);
		list_del(&backlog->entry);
	}

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	if (backlog) {
		INIT_WORK(&backlog->work, ccp_do_cmd_backlog);
		schedule_work(&backlog->work);
	}

	return cmd;
}

static void ccp_do_cmd_complete(unsigned long data)
{
	struct ccp_tasklet_data *tdata = (struct ccp_tasklet_data *)data;
	struct ccp_cmd *cmd = tdata->cmd;

	cmd->callback(cmd->data, cmd->ret);
	complete(&tdata->completion);
}

/**
 * ccp_cmd_queue_thread - create a kernel thread to manage a CCP queue
 *
 * @data: thread-specific data
 */
int ccp_cmd_queue_thread(void *data)
{
	struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;
	struct ccp_cmd *cmd;
	struct ccp_tasklet_data tdata;
	struct tasklet_struct tasklet;

	tasklet_init(&tasklet, ccp_do_cmd_complete, (unsigned long)&tdata);

	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		schedule();

		set_current_state(TASK_INTERRUPTIBLE);

		cmd = ccp_dequeue_cmd(cmd_q);
		if (!cmd)
			continue;

		__set_current_state(TASK_RUNNING);

		/* Execute the command */
		cmd->ret = ccp_run_cmd(cmd_q, cmd);

		/* Schedule the completion callback */
		tdata.cmd = cmd;
		init_completion(&tdata.completion);
		tasklet_schedule(&tasklet);
		wait_for_completion(&tdata.completion);
	}

	__set_current_state(TASK_RUNNING);

	return 0;
}
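
/*
 * Illustrative only (not part of this file): a minimal sketch of how the
 * per-queue kthread running ccp_cmd_queue_thread() might be started by the
 * version-specific init code. The actual creation is done elsewhere in the
 * driver (the ccp-dev-v* files) and may differ; cmd_q is assumed to be a
 * fully initialized ccp_cmd_queue.
 *
 *	struct task_struct *kthread;
 *
 *	kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
 *				 "%s-q%u", ccp->name, cmd_q->id);
 *	if (IS_ERR(kthread))
 *		return PTR_ERR(kthread);
 *
 *	cmd_q->kthread = kthread;
 *	wake_up_process(kthread);	// sleeps until ccp_enqueue_cmd() wakes it
 */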

/**
 * ccp_alloc_struct - allocate and initialize the ccp_device struct
 *
 * @dev: device struct of the CCP
 */
struct ccp_device *ccp_alloc_struct(struct device *dev)
{
	struct ccp_device *ccp;

	ccp = devm_kzalloc(dev, sizeof(*ccp), GFP_KERNEL);
	if (!ccp)
		return NULL;
	ccp->dev = dev;

	INIT_LIST_HEAD(&ccp->cmd);
	INIT_LIST_HEAD(&ccp->backlog);

	spin_lock_init(&ccp->cmd_lock);
	mutex_init(&ccp->req_mutex);
	mutex_init(&ccp->sb_mutex);
	ccp->sb_count = KSB_COUNT;
	ccp->sb_start = 0;

	ccp->ord = ccp_increment_unit_ordinal();
	snprintf(ccp->name, MAX_CCP_NAME_LEN, "ccp-%u", ccp->ord);
	snprintf(ccp->rngname, MAX_CCP_NAME_LEN, "ccp-%u-rng", ccp->ord);

	return ccp;
}

#ifdef CONFIG_PM
bool ccp_queues_suspended(struct ccp_device *ccp)
{
	unsigned int suspended = 0;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&ccp->cmd_lock, flags);

	for (i = 0; i < ccp->cmd_q_count; i++)
		if (ccp->cmd_q[i].suspended)
			suspended++;

	spin_unlock_irqrestore(&ccp->cmd_lock, flags);

	return ccp->cmd_q_count == suspended;
}
#endif

static int __init ccp_mod_init(void)
{
#ifdef CONFIG_X86
	int ret;

	ret = ccp_pci_init();
	if (ret)
		return ret;

	/* Don't leave the driver loaded if init failed */
	if (ccp_present() != 0) {
		ccp_pci_exit();
		return -ENODEV;
	}

	return 0;
#endif

#ifdef CONFIG_ARM64
	int ret;

	ret = ccp_platform_init();
	if (ret)
		return ret;

	/* Don't leave the driver loaded if init failed */
	if (ccp_present() != 0) {
		ccp_platform_exit();
		return -ENODEV;
	}

	return 0;
#endif

	return -ENODEV;
}

static void __exit ccp_mod_exit(void)
{
#ifdef CONFIG_X86
	ccp_pci_exit();
#endif

#ifdef CONFIG_ARM64
	ccp_platform_exit();
#endif
}

module_init(ccp_mod_init);
module_exit(ccp_mod_exit);