/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2017 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* Allocate the requested number of contiguous LSB slots
 * from the LSB bitmap. Look in the private range for this
 * queue first; failing that, check the public area.
 * If no space is available, wait around.
 * Return: first slot number
 */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
        struct ccp_device *ccp;
        int start;

        /* First look at the map for the queue */
        if (cmd_q->lsb >= 0) {
                start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
                                                        LSB_SIZE,
                                                        0, count, 0);
                if (start < LSB_SIZE) {
                        bitmap_set(cmd_q->lsbmap, start, count);
                        return start + cmd_q->lsb * LSB_SIZE;
                }
        }

        /* No joy; try to get an entry from the shared blocks */
        ccp = cmd_q->ccp;
        for (;;) {
                mutex_lock(&ccp->sb_mutex);

                start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
                                                        MAX_LSB_CNT * LSB_SIZE,
                                                        0,
                                                        count, 0);
                if (start <= MAX_LSB_CNT * LSB_SIZE) {
                        bitmap_set(ccp->lsbmap, start, count);

                        mutex_unlock(&ccp->sb_mutex);
                        return start;
                }

                ccp->sb_avail = 0;

                mutex_unlock(&ccp->sb_mutex);

                /* Wait for KSB entries to become available */
                if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
                        return 0;
        }
}
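/* Note (illustration only): slot numbers handed back by ccp_lsb_alloc() are
 * indices into the global LSB space. A hit in the queue's private region is
 * translated to a global index as start + cmd_q->lsb * LSB_SIZE (region
 * number times slots per region), while shared allocations already return a
 * global index, so callers can treat both cases uniformly.
 */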
/* Free a number of LSB slots from the bitmap, starting at
 * the indicated starting slot number.
 */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
                         unsigned int count)
{
        if (!start)
                return;

        if (cmd_q->lsb == start) {
                /* An entry from the private LSB */
                bitmap_clear(cmd_q->lsbmap, start, count);
        } else {
                /* From the shared LSBs */
                struct ccp_device *ccp = cmd_q->ccp;

                mutex_lock(&ccp->sb_mutex);
                bitmap_clear(ccp->lsbmap, start, count);
                ccp->sb_avail = 1;
                mutex_unlock(&ccp->sb_mutex);
                wake_up_interruptible_all(&ccp->sb_queue);
        }
}

/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
union ccp_function {
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } aes;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 rsvd:5;
                u16 type:2;
        } aes_xts;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } des3;
        struct {
                u16 rsvd1:10;
                u16 type:4;
                u16 rsvd2:1;
        } sha;
        struct {
                u16 mode:3;
                u16 size:12;
        } rsa;
        struct {
                u16 byteswap:2;
                u16 bitwise:3;
                u16 reflect:2;
                u16 rsvd:8;
        } pt;
        struct {
                u16 rsvd:13;
        } zlib;
        struct {
                u16 size:10;
                u16 type:2;
                u16 mode:3;
        } ecc;
        u16 raw;
};

#define CCP_AES_SIZE(p)         ((p)->aes.size)
#define CCP_AES_ENCRYPT(p)      ((p)->aes.encrypt)
#define CCP_AES_MODE(p)         ((p)->aes.mode)
#define CCP_AES_TYPE(p)         ((p)->aes.type)
#define CCP_XTS_SIZE(p)         ((p)->aes_xts.size)
#define CCP_XTS_TYPE(p)         ((p)->aes_xts.type)
#define CCP_XTS_ENCRYPT(p)      ((p)->aes_xts.encrypt)
#define CCP_DES3_SIZE(p)        ((p)->des3.size)
#define CCP_DES3_ENCRYPT(p)     ((p)->des3.encrypt)
#define CCP_DES3_MODE(p)        ((p)->des3.mode)
#define CCP_DES3_TYPE(p)        ((p)->des3.type)
#define CCP_SHA_TYPE(p)         ((p)->sha.type)
#define CCP_RSA_SIZE(p)         ((p)->rsa.size)
#define CCP_PT_BYTESWAP(p)      ((p)->pt.byteswap)
#define CCP_PT_BITWISE(p)       ((p)->pt.bitwise)
#define CCP_ECC_MODE(p)         ((p)->ecc.mode)
#define CCP_ECC_AFFINE(p)       ((p)->ecc.one)

/* Word 0 */
#define CCP5_CMD_DW0(p)         ((p)->dw0)
#define CCP5_CMD_SOC(p)         (CCP5_CMD_DW0(p).soc)
#define CCP5_CMD_IOC(p)         (CCP5_CMD_DW0(p).ioc)
#define CCP5_CMD_INIT(p)        (CCP5_CMD_DW0(p).init)
#define CCP5_CMD_EOM(p)         (CCP5_CMD_DW0(p).eom)
#define CCP5_CMD_FUNCTION(p)    (CCP5_CMD_DW0(p).function)
#define CCP5_CMD_ENGINE(p)      (CCP5_CMD_DW0(p).engine)
#define CCP5_CMD_PROT(p)        (CCP5_CMD_DW0(p).prot)

/* Word 1 */
#define CCP5_CMD_DW1(p)         ((p)->length)
#define CCP5_CMD_LEN(p)         (CCP5_CMD_DW1(p))

/* Word 2 */
#define CCP5_CMD_DW2(p)         ((p)->src_lo)
#define CCP5_CMD_SRC_LO(p)      (CCP5_CMD_DW2(p))

/* Word 3 */
#define CCP5_CMD_DW3(p)         ((p)->dw3)
#define CCP5_CMD_SRC_MEM(p)     ((p)->dw3.src_mem)
#define CCP5_CMD_SRC_HI(p)      ((p)->dw3.src_hi)
#define CCP5_CMD_LSB_ID(p)      ((p)->dw3.lsb_cxt_id)
#define CCP5_CMD_FIX_SRC(p)     ((p)->dw3.fixed)

/* Words 4/5 */
#define CCP5_CMD_DW4(p)         ((p)->dw4)
#define CCP5_CMD_DST_LO(p)      (CCP5_CMD_DW4(p).dst_lo)
#define CCP5_CMD_DW5(p)         ((p)->dw5.fields.dst_hi)
#define CCP5_CMD_DST_HI(p)      (CCP5_CMD_DW5(p))
#define CCP5_CMD_DST_MEM(p)     ((p)->dw5.fields.dst_mem)
#define CCP5_CMD_FIX_DST(p)     ((p)->dw5.fields.fixed)
#define CCP5_CMD_SHA_LO(p)      ((p)->dw4.sha_len_lo)
#define CCP5_CMD_SHA_HI(p)      ((p)->dw5.sha_len_hi)

/* Word 6/7 */
#define CCP5_CMD_DW6(p)         ((p)->key_lo)
#define CCP5_CMD_KEY_LO(p)      (CCP5_CMD_DW6(p))
#define CCP5_CMD_DW7(p)         ((p)->dw7)
#define CCP5_CMD_KEY_HI(p)      ((p)->dw7.key_hi)
#define CCP5_CMD_KEY_MEM(p)     ((p)->dw7.key_mem)
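/* Note: the accessors above all operate on a struct ccp5_desc, the
 * eight 32-bit word descriptor that ccp5_do_cmd() copies into the queue
 * ring: word 0 holds the control bits, engine and function code, word 1
 * the length, words 2/3 the source address and memory type, words 4/5
 * the destination (or the SHA message length), and words 6/7 the key
 * address and its memory type.
 */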
static inline u32 low_address(unsigned long addr)
{
        return (u64)addr & 0x0ffffffff;
}

static inline u32 high_address(unsigned long addr)
{
        return ((u64)addr >> 32) & 0x00000ffff;
}

static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
        unsigned int head_idx, n;
        u32 head_lo, queue_start;

        queue_start = low_address(cmd_q->qdma_tail);
        head_lo = ioread32(cmd_q->reg_head_lo);
        head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);

        n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;

        return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
}

static int ccp5_do_cmd(struct ccp5_desc *desc,
                       struct ccp_cmd_queue *cmd_q)
{
        __le32 *mP;
        u32 *dP;
        u32 tail;
        int i;
        int ret = 0;

        cmd_q->total_ops++;

        if (CCP5_CMD_SOC(desc)) {
                CCP5_CMD_IOC(desc) = 1;
                CCP5_CMD_SOC(desc) = 0;
        }
        mutex_lock(&cmd_q->q_mutex);

        mP = (__le32 *)&cmd_q->qbase[cmd_q->qidx];
        dP = (u32 *)desc;
        for (i = 0; i < 8; i++)
                mP[i] = cpu_to_le32(dP[i]); /* handle endianness */

        cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

        /* The data used by this command must be flushed to memory */
        wmb();

        /* Write the new tail address back to the queue register */
        tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
        iowrite32(tail, cmd_q->reg_tail_lo);

        /* Turn the queue back on using our cached control register */
        iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
        mutex_unlock(&cmd_q->q_mutex);

        if (CCP5_CMD_IOC(desc)) {
                /* Wait for the job to complete */
                ret = wait_event_interruptible(cmd_q->int_queue,
                                               cmd_q->int_rcvd);
                if (ret || cmd_q->cmd_error) {
                        /* Log the error and flush the queue by
                         * moving the head pointer
                         */
                        if (cmd_q->cmd_error)
                                ccp_log_error(cmd_q->ccp,
                                              cmd_q->cmd_error);
                        iowrite32(tail, cmd_q->reg_head_lo);
                        if (!ret)
                                ret = -EIO;
                }
                cmd_q->int_rcvd = 0;
        }

        return ret;
}

static int ccp5_perform_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_AES_ENCRYPT(&function) = op->u.aes.action;
        CCP_AES_MODE(&function) = op->u.aes.mode;
        CCP_AES_TYPE(&function) = op->u.aes.type;
        CCP_AES_SIZE(&function) = op->u.aes.size;

        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}
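/* The remaining ccp5_perform_*() helpers below all follow the same shape:
 * build a struct ccp5_desc on the stack, select the engine, encode the
 * engine-specific function bits via union ccp_function, point the source,
 * destination and key words at system memory or an LSB slot, and hand the
 * descriptor to ccp5_do_cmd() for submission on the caller's queue.
 */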
static int ccp5_perform_xts_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_xts_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_XTS_TYPE(&function) = op->u.xts.type;
        CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
        CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_sha(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_sha_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 1;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_SHA_TYPE(&function) = op->u.sha.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        if (op->eom) {
                CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits);
                CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits);
        } else {
                CCP5_CMD_SHA_LO(&desc) = 0;
                CCP5_CMD_SHA_HI(&desc) = 0;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_des3(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_3des_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, sizeof(struct ccp5_desc));

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
        CCP_DES3_MODE(&function) = op->u.des3.mode;
        CCP_DES3_TYPE(&function) = op->u.des3.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}
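/* Unlike the symmetric-cipher helpers above, the RSA operation below takes
 * its key (the exponent) directly from system memory rather than from an
 * LSB slot, as the external-memory key address and CCP_MEMTYPE_SYSTEM key
 * memory type in the descriptor show.
 */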
static int ccp5_perform_rsa(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_rsa_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;

        /* Source is from external memory */
        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Destination is in external memory */
        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Key (Exponent) is in external memory */
        CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
        CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_passthru(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        struct ccp_dma_info *saddr = &op->src.u.dma;
        struct ccp_dma_info *daddr = &op->dst.u.dma;

        op->cmd_q->total_pt_ops++;

        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
        CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        /* Length of source data is always 256 bytes */
        if (op->src.type == CCP_MEMTYPE_SYSTEM)
                CCP5_CMD_LEN(&desc) = saddr->length;
        else
                CCP5_CMD_LEN(&desc) = daddr->length;

        if (op->src.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
                CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

                if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                        CCP5_CMD_LSB_ID(&desc) = op->sb_key;
        } else {
                u32 key_addr = op->src.u.sb * CCP_SB_BYTES;

                CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_SRC_HI(&desc) = 0;
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
                CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
        } else {
                u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;

                CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_DST_HI(&desc) = 0;
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}
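/* Note: the pass-through descriptors built above are what the rest of the
 * driver is expected to use for shuttling keys and context between system
 * memory and LSB slots; a CCP_MEMTYPE_SB source or destination address is a
 * byte offset into the LSB, computed as slot index times CCP_SB_BYTES.
 */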
static int ccp5_perform_ecc(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_ecc_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        function.ecc.mode = op->u.ecc.function;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
{
        int q_mask = 1 << cmd_q->id;
        int queues = 0;
        int j;

        /* Build a bit mask to know which LSBs this queue has access to.
         * Don't bother with segment 0 as it has special privileges.
         */
        for (j = 1; j < MAX_LSB_CNT; j++) {
                if (status & q_mask)
                        bitmap_set(cmd_q->lsbmask, j, 1);
                status >>= LSB_REGION_WIDTH;
        }
        queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
        dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
                cmd_q->id, queues);

        return queues ? 0 : -EINVAL;
}

static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
                                        int lsb_cnt, int n_lsbs,
                                        unsigned long *lsb_pub)
{
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int bitno;
        int qlsb_wgt;
        int i;

        /* For each queue:
         * If the count of potential LSBs available to a queue matches the
         * ordinal given to us in lsb_cnt:
         * Copy the mask of possible LSBs for this queue into "qlsb";
         * For each bit in qlsb, see if the corresponding bit in the
         * aggregation mask is set; if so, we have a match.
         * If we have a match, clear the bit in the aggregation to
         * mark it as no longer available.
         * If there is no match, clear the bit in qlsb and keep looking.
         */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);

                if (qlsb_wgt == lsb_cnt) {
                        bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);

                        bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        while (bitno < MAX_LSB_CNT) {
                                if (test_bit(bitno, lsb_pub)) {
                                        /* We found an available LSB
                                         * that this queue can access
                                         */
                                        cmd_q->lsb = bitno;
                                        bitmap_clear(lsb_pub, bitno, 1);
                                        dev_dbg(ccp->dev,
                                                "Queue %d gets LSB %d\n",
                                                i, bitno);
                                        break;
                                }
                                bitmap_clear(qlsb, bitno, 1);
                                bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        }
                        if (bitno >= MAX_LSB_CNT)
                                return -EINVAL;
                        n_lsbs--;
                }
        }
        return n_lsbs;
}
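/* Illustrative example of the assignment order used below: if queue 0 can
 * reach only region 3 while queue 1 can reach regions 3 and 4, the pass
 * with lsb_cnt == 1 hands region 3 to queue 0 first, so the later pass
 * with lsb_cnt == 2 can still give queue 1 its own region 4.
 */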
/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared resources.
 */
static int ccp_assign_lsbs(struct ccp_device *ccp)
{
        DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int n_lsbs = 0;
        int bitno;
        int i, lsb_cnt;
        int rc = 0;

        bitmap_zero(lsb_pub, MAX_LSB_CNT);

        /* Create an aggregate bitmap to get a total count of available LSBs */
        for (i = 0; i < ccp->cmd_q_count; i++)
                bitmap_or(lsb_pub,
                          lsb_pub, ccp->cmd_q[i].lsbmask,
                          MAX_LSB_CNT);

        n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

        if (n_lsbs >= ccp->cmd_q_count) {
                /* We have enough LSBS to give every queue a private LSB.
                 * Brute force search to start with the queues that are more
                 * constrained in LSB choice. When an LSB is privately
                 * assigned, it is removed from the public mask.
                 * This is an ugly N squared algorithm with some optimization.
                 */
                for (lsb_cnt = 1;
                     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
                     lsb_cnt++) {
                        rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
                                                          lsb_pub);
                        if (rc < 0)
                                return -EINVAL;
                        n_lsbs = rc;
                }
        }

        rc = 0;
        /* What's left of the LSBs, according to the public mask, now become
         * shared. Any zero bits in the lsb_pub mask represent an LSB region
         * that can't be used as a shared resource, so mark the LSB slots for
         * them as "in use".
         */
        bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);

        bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        while (bitno < MAX_LSB_CNT) {
                bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
                bitmap_set(qlsb, bitno, 1);
                bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        }

        return rc;
}

static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_irq_bh(unsigned long data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;
        u32 status;
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                status = ioread32(cmd_q->reg_interrupt_status);

                if (status) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((status & INT_ERROR) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(status, cmd_q->reg_interrupt_status);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }
        ccp5_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;

        ccp5_disable_queue_interrupts(ccp);
        ccp->total_interrupts++;
        if (ccp->use_tasklet)
                tasklet_schedule(&ccp->irq_tasklet);
        else
                ccp5_irq_bh((unsigned long)ccp);
        return IRQ_HANDLED;
}
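/* Bring-up sequence for a v5 CCP, as implemented below: discover the
 * available queues from Q_MASK_REG, give each one a DMA pool and a
 * descriptor ring, hook up the IRQ (optionally deferring work to a
 * tasklet), mirror the private LSB access masks into the public registers,
 * assign private and shared LSB regions, start one kthread per queue, and
 * finally register the RNG, DMA engine and debugfs entries.
 */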
static int ccp5_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, qim, i;
        u64 status;
        u32 status_lo, status_hi;
        int ret;

        /* Find available queues */
        qim = 0;
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; i < MAX_HW_QUEUES; i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
                         ccp->name, i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;
                mutex_init(&cmd_q->q_mutex);

                /* Page alignment satisfies our needs for N <= 128 */
                BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
                cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
                cmd_q->qbase = dma_zalloc_coherent(dev, cmd_q->qsize,
                                                   &cmd_q->qbase_dma,
                                                   GFP_KERNEL);
                if (!cmd_q->qbase) {
                        dev_err(dev, "unable to allocate command queue\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q->qidx = 0;
                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_control = ccp->io_regs +
                                     CMD5_Q_STATUS_INCR * (i + 1);
                cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
                cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
                cmd_q->reg_int_enable = cmd_q->reg_control +
                                        CMD5_Q_INT_ENABLE_BASE;
                cmd_q->reg_interrupt_status = cmd_q->reg_control +
                                              CMD5_Q_INTERRUPT_STATUS_BASE;
                cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
                cmd_q->reg_int_status = cmd_q->reg_control +
                                        CMD5_Q_INT_STATUS_BASE;
                cmd_q->reg_dma_status = cmd_q->reg_control +
                                        CMD5_Q_DMA_STATUS_BASE;
                cmd_q->reg_dma_read_status = cmd_q->reg_control +
                                             CMD5_Q_DMA_READ_STATUS_BASE;
                cmd_q->reg_dma_write_status = cmd_q->reg_control +
                                              CMD5_Q_DMA_WRITE_STATUS_BASE;

                init_waitqueue_head(&cmd_q->int_queue);

                dev_dbg(dev, "queue #%u available\n", i);
        }

        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }

        /* Turn off the queues and disable interrupts until ready */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol = 0; /* Start with nothing */
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
        }

        dev_dbg(dev, "Requesting an IRQ...\n");
        /* Request an irq */
        ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }
        /* Initialize the ISR tasklet */
        if (ccp->use_tasklet)
                tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
                             (unsigned long)ccp);

        dev_dbg(dev, "Loading LSB map...\n");
        /* Copy the private LSB mask to the public registers */
        status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
        iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
        iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
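        /* The combined 64-bit mask assembled below is walked by
         * ccp_find_lsb_regions() in LSB_REGION_WIDTH-bit chunks, one per
         * LSB region, to work out which regions each queue may access.
         */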
        status = ((u64)status_hi << 30) | (u64)status_lo;

        dev_dbg(dev, "Configuring virtual queues...\n");
        /* Configure size of each virtual queue accessible to host */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                u32 dma_addr_lo;
                u32 dma_addr_hi;

                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
                cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;

                cmd_q->qdma_tail = cmd_q->qbase_dma;
                dma_addr_lo = low_address(cmd_q->qdma_tail);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);

                dma_addr_hi = high_address(cmd_q->qdma_tail);
                cmd_q->qcontrol |= (dma_addr_hi << 16);
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                /* Find the LSB regions accessible to the queue */
                ccp_find_lsb_regions(cmd_q, status);
                cmd_q->lsb = -1; /* Unassigned value */
        }

        dev_dbg(dev, "Assigning LSBs...\n");
        ret = ccp_assign_lsbs(ccp);
        if (ret) {
                dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
                goto e_irq;
        }

        /* Optimization: pre-allocate LSB slots for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
                ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
        }

        dev_dbg(dev, "Starting threads...\n");
        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "%s-q%u", ccp->name, cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        dev_dbg(dev, "Enabling interrupts...\n");
        ccp5_enable_queue_interrupts(ccp);

        dev_dbg(dev, "Registering device...\n");
        /* Put this on the unit list to make it available */
        ccp_add_device(ccp);

        ret = ccp_register_rng(ccp);
        if (ret)
                goto e_kthread;

        /* Register the DMA engine support */
        ret = ccp_dmaengine_register(ccp);
        if (ret)
                goto e_hwrng;

        /* Set up debugfs entries */
        ccp5_debugfs_setup(ccp);

        return 0;

e_hwrng:
        ccp_unregister_rng(ccp);

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

e_irq:
        sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}
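/* Teardown mirrors ccp5_init() in reverse: unregister the DMA engine and
 * RNG, take the device off the unit list, quiesce the queues and their
 * interrupts, stop the per-queue kthreads, release the IRQ and descriptor
 * rings, and fail any commands still queued or backlogged with -ENODEV.
 */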
static void ccp5_destroy(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int i;

        /* Unregister the DMA engine */
        ccp_dmaengine_unregister(ccp);

        /* Unregister the RNG */
        ccp_unregister_rng(ccp);

        /* Remove this device from the list of available units first */
        ccp_del_device(ccp);

        /* We're in the process of tearing down the entire driver;
         * when all the devices are gone clean up debugfs
         */
        if (ccp_present())
                ccp5_debugfs_destroy();

        /* Disable and clear interrupts */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                /* Turn off the run bit */
                iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        sp_free_ccp_irq(ccp->sp, ccp);

        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];
                dma_free_coherent(dev, cmd_q->qsize, cmd_q->qbase,
                                  cmd_q->qbase_dma);
        }

        /* Flush the cmd and backlog queue */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}

static void ccp5_config(struct ccp_device *ccp)
{
        /* Public side */
        iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
}

static void ccp5other_config(struct ccp_device *ccp)
{
        int i;
        u32 rnd;

        /* We own all of the queues on the NTB CCP */

        iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
        iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
        for (i = 0; i < 12; i++) {
                rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
                iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
        }

        iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
        iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
        iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);

        iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);

        iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);

        ccp5_config(ccp);
}

/* Version 5 adds some function, but is essentially the same as v3 */
static const struct ccp_actions ccp5_actions = {
        .aes = ccp5_perform_aes,
        .xts_aes = ccp5_perform_xts_aes,
        .sha = ccp5_perform_sha,
        .des3 = ccp5_perform_des3,
        .rsa = ccp5_perform_rsa,
        .passthru = ccp5_perform_passthru,
        .ecc = ccp5_perform_ecc,
        .sballoc = ccp_lsb_alloc,
        .sbfree = ccp_lsb_free,
        .init = ccp5_init,
        .destroy = ccp5_destroy,
        .get_free_slots = ccp5_get_free_slots,
};

const struct ccp_vdata ccpv5a = {
        .version = CCP_VERSION(5, 0),
        .setup = ccp5_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv5b = {
        .version = CCP_VERSION(5, 0),
        .dma_chan_attr = DMA_PRIVATE,
        .setup = ccp5other_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};