// SPDX-License-Identifier: GPL-2.0-only
/*
 * AMD Cryptographic Coprocessor (CCP) driver
 *
 * Copyright (C) 2016,2019 Advanced Micro Devices, Inc.
 *
 * Author: Gary R Hook <gary.hook@amd.com>
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/compiler.h>
#include <linux/ccp.h>

#include "ccp-dev.h"

/* Allocate the requested number of contiguous LSB slots
 * from the LSB bitmap. Look in the private range for this
 * queue first; failing that, check the public area.
 * If no space is available, wait around.
 * Return: first slot number
 */
static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)
{
        struct ccp_device *ccp;
        int start;

        /* First look at the map for the queue */
        if (cmd_q->lsb >= 0) {
                start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,
                                                        LSB_SIZE,
                                                        0, count, 0);
                if (start < LSB_SIZE) {
                        bitmap_set(cmd_q->lsbmap, start, count);
                        return start + cmd_q->lsb * LSB_SIZE;
                }
        }

        /* No joy; try to get an entry from the shared blocks */
        ccp = cmd_q->ccp;
        for (;;) {
                mutex_lock(&ccp->sb_mutex);

                start = (u32)bitmap_find_next_zero_area(ccp->lsbmap,
                                                        MAX_LSB_CNT * LSB_SIZE,
                                                        0,
                                                        count, 0);
                /* bitmap_find_next_zero_area() returns >= the map size
                 * when no area is found, so only accept a strictly
                 * in-range start
                 */
                if (start < MAX_LSB_CNT * LSB_SIZE) {
                        bitmap_set(ccp->lsbmap, start, count);

                        mutex_unlock(&ccp->sb_mutex);
                        return start;
                }

                ccp->sb_avail = 0;

                mutex_unlock(&ccp->sb_mutex);

                /* Wait for KSB entries to become available */
                if (wait_event_interruptible(ccp->sb_queue, ccp->sb_avail))
                        return 0;
        }
}
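
/*
 * Usage sketch (hypothetical caller, for illustration only; the real
 * callers are reached through the ccp_actions table at the bottom of
 * this file).  Slot 0 is never handed out -- segment 0 is reserved --
 * so a return of 0 unambiguously means the wait was interrupted:
 *
 *	u32 slot = ccp_lsb_alloc(cmd_q, 2);
 *	if (!slot)
 *		return -EINTR;
 *	// ... use slots [slot, slot + 1] ...
 *	ccp_lsb_free(cmd_q, slot, 2);
 *
 * Worked example of the private-path numbering, assuming LSB_SIZE is 16
 * slots per segment (as defined in ccp-dev.h): a queue that owns
 * segment 3 and finds free bits at offset 2 of its private map returns
 * slot 3 * 16 + 2 = 50.
 */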

/* Free a number of LSB slots from the bitmap, starting at
 * the indicated starting slot number.
 */
static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,
                         unsigned int count)
{
        if (!start)
                return;

        if (cmd_q->lsb >= 0 &&
            start >= (unsigned int)cmd_q->lsb * LSB_SIZE &&
            start < ((unsigned int)cmd_q->lsb + 1) * LSB_SIZE) {
                /* An entry from the private LSB: ccp_lsb_alloc() returns
                 * a global slot number, so map it back into this queue's
                 * private bitmap
                 */
                bitmap_clear(cmd_q->lsbmap,
                             start - cmd_q->lsb * LSB_SIZE, count);
        } else {
                /* From the shared LSBs */
                struct ccp_device *ccp = cmd_q->ccp;

                mutex_lock(&ccp->sb_mutex);
                bitmap_clear(ccp->lsbmap, start, count);
                ccp->sb_avail = 1;
                mutex_unlock(&ccp->sb_mutex);
                wake_up_interruptible_all(&ccp->sb_queue);
        }
}

/* CCP version 5: Union to define the function field (cmd_reg1/dword0) */
union ccp_function {
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } aes;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 rsvd:5;
                u16 type:2;
        } aes_xts;
        struct {
                u16 size:7;
                u16 encrypt:1;
                u16 mode:5;
                u16 type:2;
        } des3;
        struct {
                u16 rsvd1:10;
                u16 type:4;
                u16 rsvd2:1;
        } sha;
        struct {
                u16 mode:3;
                u16 size:12;
        } rsa;
        struct {
                u16 byteswap:2;
                u16 bitwise:3;
                u16 reflect:2;
                u16 rsvd:8;
        } pt;
        struct {
                u16 rsvd:13;
        } zlib;
        struct {
                u16 size:10;
                u16 type:2;
                u16 mode:3;
        } ecc;
        u16 raw;
};

#define CCP_AES_SIZE(p)         ((p)->aes.size)
#define CCP_AES_ENCRYPT(p)      ((p)->aes.encrypt)
#define CCP_AES_MODE(p)         ((p)->aes.mode)
#define CCP_AES_TYPE(p)         ((p)->aes.type)
#define CCP_XTS_SIZE(p)         ((p)->aes_xts.size)
#define CCP_XTS_TYPE(p)         ((p)->aes_xts.type)
#define CCP_XTS_ENCRYPT(p)      ((p)->aes_xts.encrypt)
#define CCP_DES3_SIZE(p)        ((p)->des3.size)
#define CCP_DES3_ENCRYPT(p)     ((p)->des3.encrypt)
#define CCP_DES3_MODE(p)        ((p)->des3.mode)
#define CCP_DES3_TYPE(p)        ((p)->des3.type)
#define CCP_SHA_TYPE(p)         ((p)->sha.type)
#define CCP_RSA_SIZE(p)         ((p)->rsa.size)
#define CCP_PT_BYTESWAP(p)      ((p)->pt.byteswap)
#define CCP_PT_BITWISE(p)       ((p)->pt.bitwise)
#define CCP_ECC_MODE(p)         ((p)->ecc.mode)
#define CCP_ECC_AFFINE(p)       ((p)->ecc.one)

/* Word 0 */
#define CCP5_CMD_DW0(p)         ((p)->dw0)
#define CCP5_CMD_SOC(p)         (CCP5_CMD_DW0(p).soc)
#define CCP5_CMD_IOC(p)         (CCP5_CMD_DW0(p).ioc)
#define CCP5_CMD_INIT(p)        (CCP5_CMD_DW0(p).init)
#define CCP5_CMD_EOM(p)         (CCP5_CMD_DW0(p).eom)
#define CCP5_CMD_FUNCTION(p)    (CCP5_CMD_DW0(p).function)
#define CCP5_CMD_ENGINE(p)      (CCP5_CMD_DW0(p).engine)
#define CCP5_CMD_PROT(p)        (CCP5_CMD_DW0(p).prot)

/* Word 1 */
#define CCP5_CMD_DW1(p)         ((p)->length)
#define CCP5_CMD_LEN(p)         (CCP5_CMD_DW1(p))

/* Word 2 */
#define CCP5_CMD_DW2(p)         ((p)->src_lo)
#define CCP5_CMD_SRC_LO(p)      (CCP5_CMD_DW2(p))

/* Word 3 */
#define CCP5_CMD_DW3(p)         ((p)->dw3)
#define CCP5_CMD_SRC_MEM(p)     ((p)->dw3.src_mem)
#define CCP5_CMD_SRC_HI(p)      ((p)->dw3.src_hi)
#define CCP5_CMD_LSB_ID(p)      ((p)->dw3.lsb_cxt_id)
#define CCP5_CMD_FIX_SRC(p)     ((p)->dw3.fixed)

/* Words 4/5 */
#define CCP5_CMD_DW4(p)         ((p)->dw4)
#define CCP5_CMD_DST_LO(p)      (CCP5_CMD_DW4(p).dst_lo)
#define CCP5_CMD_DW5(p)         ((p)->dw5.fields.dst_hi)
#define CCP5_CMD_DST_HI(p)      (CCP5_CMD_DW5(p))
#define CCP5_CMD_DST_MEM(p)     ((p)->dw5.fields.dst_mem)
#define CCP5_CMD_FIX_DST(p)     ((p)->dw5.fields.fixed)
#define CCP5_CMD_SHA_LO(p)      ((p)->dw4.sha_len_lo)
#define CCP5_CMD_SHA_HI(p)      ((p)->dw5.sha_len_hi)

/* Word 6/7 */
#define CCP5_CMD_DW6(p)         ((p)->key_lo)
#define CCP5_CMD_KEY_LO(p)      (CCP5_CMD_DW6(p))
#define CCP5_CMD_DW7(p)         ((p)->dw7)
#define CCP5_CMD_KEY_HI(p)      ((p)->dw7.key_hi)
#define CCP5_CMD_KEY_MEM(p)     ((p)->dw7.key_mem)
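
/*
 * Illustration of how the bitfields above pack into the 16-bit function
 * field of a descriptor, taking the aes view as the example (this assumes
 * the little-endian bitfield layout the driver relies on; bit 15 is
 * unused):
 *
 *	bits 14..13	type
 *	bits 12..8	mode
 *	bit  7		encrypt
 *	bits 6..0	size
 *
 * e.g. type = 1, mode = 2, encrypt = 1, size = 0 packs to
 * (1 << 13) | (2 << 8) | (1 << 7) = 0x2280 in function.raw.
 */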

static inline u32 low_address(unsigned long addr)
{
        return (u64)addr & 0x0ffffffff;
}

static inline u32 high_address(unsigned long addr)
{
        return ((u64)addr >> 32) & 0x00000ffff;
}

static unsigned int ccp5_get_free_slots(struct ccp_cmd_queue *cmd_q)
{
        unsigned int head_idx, n;
        u32 head_lo, queue_start;

        queue_start = low_address(cmd_q->qdma_tail);
        head_lo = ioread32(cmd_q->reg_head_lo);
        head_idx = (head_lo - queue_start) / sizeof(struct ccp5_desc);

        n = head_idx + COMMANDS_PER_QUEUE - cmd_q->qidx - 1;

        return n % COMMANDS_PER_QUEUE; /* Always one unused spot */
}

static int ccp5_do_cmd(struct ccp5_desc *desc,
                       struct ccp_cmd_queue *cmd_q)
{
        __le32 *mP;
        u32 *dP;
        u32 tail;
        int i;
        int ret = 0;

        cmd_q->total_ops++;

        if (CCP5_CMD_SOC(desc)) {
                CCP5_CMD_IOC(desc) = 1;
                CCP5_CMD_SOC(desc) = 0;
        }
        mutex_lock(&cmd_q->q_mutex);

        /* The queue memory is little-endian; the descriptor is CPU-native */
        mP = (__le32 *) &cmd_q->qbase[cmd_q->qidx];
        dP = (u32 *) desc;
        for (i = 0; i < 8; i++)
                mP[i] = cpu_to_le32(dP[i]); /* handle endianness */

        cmd_q->qidx = (cmd_q->qidx + 1) % COMMANDS_PER_QUEUE;

        /* The data used by this command must be flushed to memory */
        wmb();

        /* Write the new tail address back to the queue register */
        tail = low_address(cmd_q->qdma_tail + cmd_q->qidx * Q_DESC_SIZE);
        iowrite32(tail, cmd_q->reg_tail_lo);

        /* Turn the queue back on using our cached control register */
        iowrite32(cmd_q->qcontrol | CMD5_Q_RUN, cmd_q->reg_control);
        mutex_unlock(&cmd_q->q_mutex);

        if (CCP5_CMD_IOC(desc)) {
                /* Wait for the job to complete */
                ret = wait_event_interruptible(cmd_q->int_queue,
                                               cmd_q->int_rcvd);
                if (ret || cmd_q->cmd_error) {
                        /* Log the error and flush the queue by
                         * moving the head pointer
                         */
                        if (cmd_q->cmd_error)
                                ccp_log_error(cmd_q->ccp,
                                              cmd_q->cmd_error);
                        iowrite32(tail, cmd_q->reg_head_lo);
                        if (!ret)
                                ret = -EIO;
                }
                cmd_q->int_rcvd = 0;
        }

        return ret;
}
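
/*
 * Worked example of the ring accounting in ccp5_get_free_slots() and the
 * tail update in ccp5_do_cmd(), using an assumed COMMANDS_PER_QUEUE of 32
 * (the arithmetic is identical for the real value): if hardware has
 * consumed descriptors up to head_idx == 5 and software will write the
 * next descriptor at qidx == 2, then
 *
 *	n = (5 + 32 - 2 - 1) % 32 = 2
 *
 * i.e. 29 descriptors are still outstanding and, with one slot always
 * left unused to distinguish a full ring from an empty one, only two
 * more may be queued.
 */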

static int ccp5_perform_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_AES;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_AES_ENCRYPT(&function) = op->u.aes.action;
        CCP_AES_MODE(&function) = op->u.aes.mode;
        CCP_AES_TYPE(&function) = op->u.aes.type;
        CCP_AES_SIZE(&function) = op->u.aes.size;

        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_xts_aes(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_xts_aes_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_XTS_AES_128;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_XTS_TYPE(&function) = op->u.xts.type;
        CCP_XTS_ENCRYPT(&function) = op->u.xts.action;
        CCP_XTS_SIZE(&function) = op->u.xts.unit_size;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_sha(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_sha_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_SHA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 1;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_SHA_TYPE(&function) = op->u.sha.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        if (op->eom) {
                CCP5_CMD_SHA_LO(&desc) = lower_32_bits(op->u.sha.msg_bits);
                CCP5_CMD_SHA_HI(&desc) = upper_32_bits(op->u.sha.msg_bits);
        } else {
                CCP5_CMD_SHA_LO(&desc) = 0;
                CCP5_CMD_SHA_HI(&desc) = 0;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}
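
/*
 * Illustration of the finalization rule above: hashing a 192-byte message
 * in three 64-byte passes submits the first two passes with op->eom == 0
 * (the SHA length words stay zero) and the final pass with op->eom == 1
 * and op->u.sha.msg_bits == 192 * 8 == 1536, split across the two 32-bit
 * SHA length words of the descriptor.
 */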

static int ccp5_perform_des3(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        u32 key_addr = op->sb_key * LSB_ITEM_SIZE;

        op->cmd_q->total_3des_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, sizeof(struct ccp5_desc));

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_DES3;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = op->init;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_DES3_ENCRYPT(&function) = op->u.des3.action;
        CCP_DES3_MODE(&function) = op->u.des3.mode;
        CCP_DES3_TYPE(&function) = op->u.des3.type;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_KEY_LO(&desc) = lower_32_bits(key_addr);
        CCP5_CMD_KEY_HI(&desc) = 0;
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SB;
        CCP5_CMD_LSB_ID(&desc) = op->sb_ctx;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_rsa(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_rsa_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_RSA;

        CCP5_CMD_SOC(&desc) = op->soc;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_RSA_SIZE(&function) = (op->u.rsa.mod_size + 7) >> 3;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->u.rsa.input_len;

        /* Source is from external memory */
        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Destination is in external memory */
        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        /* Key (Exponent) is in external memory */
        CCP5_CMD_KEY_LO(&desc) = ccp_addr_lo(&op->exp.u.dma);
        CCP5_CMD_KEY_HI(&desc) = ccp_addr_hi(&op->exp.u.dma);
        CCP5_CMD_KEY_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}
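
/*
 * The RSA size field above holds the modulus length in bytes, rounded up
 * from the bit count supplied by the caller: a 2048-bit modulus gives
 * (2048 + 7) >> 3 == 256.
 */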

static int ccp5_perform_passthru(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;
        struct ccp_dma_info *saddr = &op->src.u.dma;
        struct ccp_dma_info *daddr = &op->dst.u.dma;

        op->cmd_q->total_pt_ops++;

        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_PASSTHRU;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = op->eom;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        CCP_PT_BYTESWAP(&function) = op->u.passthru.byte_swap;
        CCP_PT_BITWISE(&function) = op->u.passthru.bit_mod;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        /* Take the length from whichever side is in system memory */
        if (op->src.type == CCP_MEMTYPE_SYSTEM)
                CCP5_CMD_LEN(&desc) = saddr->length;
        else
                CCP5_CMD_LEN(&desc) = daddr->length;

        if (op->src.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
                CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

                if (op->u.passthru.bit_mod != CCP_PASSTHRU_BITWISE_NOOP)
                        CCP5_CMD_LSB_ID(&desc) = op->sb_key;
        } else {
                u32 key_addr = op->src.u.sb * CCP_SB_BYTES;

                CCP5_CMD_SRC_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_SRC_HI(&desc) = 0;
                CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        if (op->dst.type == CCP_MEMTYPE_SYSTEM) {
                CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
                CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;
        } else {
                u32 key_addr = op->dst.u.sb * CCP_SB_BYTES;

                CCP5_CMD_DST_LO(&desc) = lower_32_bits(key_addr);
                CCP5_CMD_DST_HI(&desc) = 0;
                CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SB;
        }

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp5_perform_ecc(struct ccp_op *op)
{
        struct ccp5_desc desc;
        union ccp_function function;

        op->cmd_q->total_ecc_ops++;

        /* Zero out all the fields of the command desc */
        memset(&desc, 0, Q_DESC_SIZE);

        CCP5_CMD_ENGINE(&desc) = CCP_ENGINE_ECC;

        CCP5_CMD_SOC(&desc) = 0;
        CCP5_CMD_IOC(&desc) = 1;
        CCP5_CMD_INIT(&desc) = 0;
        CCP5_CMD_EOM(&desc) = 1;
        CCP5_CMD_PROT(&desc) = 0;

        function.raw = 0;
        function.ecc.mode = op->u.ecc.function;
        CCP5_CMD_FUNCTION(&desc) = function.raw;

        CCP5_CMD_LEN(&desc) = op->src.u.dma.length;

        CCP5_CMD_SRC_LO(&desc) = ccp_addr_lo(&op->src.u.dma);
        CCP5_CMD_SRC_HI(&desc) = ccp_addr_hi(&op->src.u.dma);
        CCP5_CMD_SRC_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        CCP5_CMD_DST_LO(&desc) = ccp_addr_lo(&op->dst.u.dma);
        CCP5_CMD_DST_HI(&desc) = ccp_addr_hi(&op->dst.u.dma);
        CCP5_CMD_DST_MEM(&desc) = CCP_MEMTYPE_SYSTEM;

        return ccp5_do_cmd(&desc, op->cmd_q);
}

static int ccp_find_lsb_regions(struct ccp_cmd_queue *cmd_q, u64 status)
{
        int q_mask = 1 << cmd_q->id;
        int queues = 0;
        int j;

        /* Build a bit mask to know which LSBs this queue has access to.
         * Don't bother with segment 0 as it has special privileges.
         */
        for (j = 1; j < MAX_LSB_CNT; j++) {
                if (status & q_mask)
                        bitmap_set(cmd_q->lsbmask, j, 1);
                status >>= LSB_REGION_WIDTH;
        }
        queues = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);
        dev_dbg(cmd_q->ccp->dev, "Queue %d can access %d LSB regions\n",
                cmd_q->id, queues);

        return queues ? 0 : -EINVAL;
}

static int ccp_find_and_assign_lsb_to_q(struct ccp_device *ccp,
                                        int lsb_cnt, int n_lsbs,
                                        unsigned long *lsb_pub)
{
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int bitno;
        int qlsb_wgt;
        int i;

        /* For each queue:
         * If the count of potential LSBs available to a queue matches the
         * ordinal given to us in lsb_cnt:
         * Copy the mask of possible LSBs for this queue into "qlsb";
         * For each bit in qlsb, see if the corresponding bit in the
         * aggregation mask is set; if so, we have a match.
         * If we have a match, clear the bit in the aggregation to
         * mark it as no longer available.
         * If there is no match, clear the bit in qlsb and keep looking.
         */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                qlsb_wgt = bitmap_weight(cmd_q->lsbmask, MAX_LSB_CNT);

                if (qlsb_wgt == lsb_cnt) {
                        bitmap_copy(qlsb, cmd_q->lsbmask, MAX_LSB_CNT);

                        bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        while (bitno < MAX_LSB_CNT) {
                                if (test_bit(bitno, lsb_pub)) {
                                        /* We found an available LSB
                                         * that this queue can access
                                         */
                                        cmd_q->lsb = bitno;
                                        bitmap_clear(lsb_pub, bitno, 1);
                                        dev_dbg(ccp->dev,
                                                "Queue %d gets LSB %d\n",
                                                i, bitno);
                                        break;
                                }
                                bitmap_clear(qlsb, bitno, 1);
                                bitno = find_first_bit(qlsb, MAX_LSB_CNT);
                        }
                        if (bitno >= MAX_LSB_CNT)
                                return -EINVAL;
                        n_lsbs--;
                }
        }
        return n_lsbs;
}
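
/*
 * Sketch of the mask decoding in ccp_find_lsb_regions() above: the
 * combined status value carries one group of LSB_REGION_WIDTH queue bits
 * per LSB region.  For queue id 2, q_mask == 0x4; if bit 2 of region j's
 * group is set, the queue may use region j.  Segment 0 is never mapped
 * because it is reserved for privileged use.
 */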

/* For each queue, from the most- to least-constrained:
 * find an LSB that can be assigned to the queue. If there are N queues that
 * can only use M LSBs, where N > M, fail; otherwise, every queue will get a
 * dedicated LSB. Remaining LSB regions become a shared resource.
 * If we have fewer LSBs than queues, all LSB regions become shared resources.
 */
static int ccp_assign_lsbs(struct ccp_device *ccp)
{
        DECLARE_BITMAP(lsb_pub, MAX_LSB_CNT);
        DECLARE_BITMAP(qlsb, MAX_LSB_CNT);
        int n_lsbs = 0;
        int bitno;
        int i, lsb_cnt;
        int rc = 0;

        bitmap_zero(lsb_pub, MAX_LSB_CNT);

        /* Create an aggregate bitmap to get a total count of available LSBs */
        for (i = 0; i < ccp->cmd_q_count; i++)
                bitmap_or(lsb_pub,
                          lsb_pub, ccp->cmd_q[i].lsbmask,
                          MAX_LSB_CNT);

        n_lsbs = bitmap_weight(lsb_pub, MAX_LSB_CNT);

        if (n_lsbs >= ccp->cmd_q_count) {
                /* We have enough LSBS to give every queue a private LSB.
                 * Brute force search to start with the queues that are more
                 * constrained in LSB choice. When an LSB is privately
                 * assigned, it is removed from the public mask.
                 * This is an ugly N squared algorithm with some optimization.
                 */
                for (lsb_cnt = 1;
                     n_lsbs && (lsb_cnt <= MAX_LSB_CNT);
                     lsb_cnt++) {
                        rc = ccp_find_and_assign_lsb_to_q(ccp, lsb_cnt, n_lsbs,
                                                          lsb_pub);
                        if (rc < 0)
                                return -EINVAL;
                        n_lsbs = rc;
                }
        }

        rc = 0;
        /* What's left of the LSBs, according to the public mask, now become
         * shared. Any zero bits in the lsb_pub mask represent an LSB region
         * that can't be used as a shared resource, so mark the LSB slots for
         * them as "in use".
         */
        bitmap_copy(qlsb, lsb_pub, MAX_LSB_CNT);

        bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        while (bitno < MAX_LSB_CNT) {
                bitmap_set(ccp->lsbmap, bitno * LSB_SIZE, LSB_SIZE);
                bitmap_set(qlsb, bitno, 1);
                bitno = find_first_zero_bit(qlsb, MAX_LSB_CNT);
        }

        return rc;
}

static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(0x0, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
{
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++)
                iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[i].reg_int_enable);
}

static void ccp5_irq_bh(unsigned long data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;
        u32 status;
        unsigned int i;

        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

                status = ioread32(cmd_q->reg_interrupt_status);

                if (status) {
                        cmd_q->int_status = status;
                        cmd_q->q_status = ioread32(cmd_q->reg_status);
                        cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

                        /* On error, only save the first error value */
                        if ((status & INT_ERROR) && !cmd_q->cmd_error)
                                cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

                        cmd_q->int_rcvd = 1;

                        /* Acknowledge the interrupt and wake the kthread */
                        iowrite32(status, cmd_q->reg_interrupt_status);
                        wake_up_interruptible(&cmd_q->int_queue);
                }
        }
        ccp5_enable_queue_interrupts(ccp);
}

static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
        struct ccp_device *ccp = (struct ccp_device *)data;

        ccp5_disable_queue_interrupts(ccp);
        ccp->total_interrupts++;
        if (ccp->use_tasklet)
                tasklet_schedule(&ccp->irq_tasklet);
        else
                ccp5_irq_bh((unsigned long)ccp);
        return IRQ_HANDLED;
}
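
/*
 * Interrupt flow, in brief: ccp5_irq_handler() only masks the queue
 * interrupts and counts the event; reading the per-queue status,
 * capturing the first error and waking any waiters all happen in
 * ccp5_irq_bh(), run inline or deferred to the tasklet when
 * ccp->use_tasklet is set.  Interrupts are re-enabled only at the end of
 * the bottom half, so a burst of completions collapses into a single
 * pass over the queues.
 */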

static int ccp5_init(struct ccp_device *ccp)
{
        struct device *dev = ccp->dev;
        struct ccp_cmd_queue *cmd_q;
        struct dma_pool *dma_pool;
        char dma_pool_name[MAX_DMAPOOL_NAME_LEN];
        unsigned int qmr, i;
        u64 status;
        u32 status_lo, status_hi;
        int ret;

        /* Find available queues */
        qmr = ioread32(ccp->io_regs + Q_MASK_REG);
        for (i = 0; (i < MAX_HW_QUEUES) && (ccp->cmd_q_count < ccp->max_q_count); i++) {
                if (!(qmr & (1 << i)))
                        continue;

                /* Allocate a dma pool for this queue */
                snprintf(dma_pool_name, sizeof(dma_pool_name), "%s_q%d",
                         ccp->name, i);
                dma_pool = dma_pool_create(dma_pool_name, dev,
                                           CCP_DMAPOOL_MAX_SIZE,
                                           CCP_DMAPOOL_ALIGN, 0);
                if (!dma_pool) {
                        dev_err(dev, "unable to allocate dma pool\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q = &ccp->cmd_q[ccp->cmd_q_count];
                ccp->cmd_q_count++;

                cmd_q->ccp = ccp;
                cmd_q->id = i;
                cmd_q->dma_pool = dma_pool;
                mutex_init(&cmd_q->q_mutex);

                /* Page alignment satisfies our needs for N <= 128 */
                BUILD_BUG_ON(COMMANDS_PER_QUEUE > 128);
                cmd_q->qsize = Q_SIZE(Q_DESC_SIZE);
                cmd_q->qbase = dmam_alloc_coherent(dev, cmd_q->qsize,
                                                   &cmd_q->qbase_dma,
                                                   GFP_KERNEL);
                if (!cmd_q->qbase) {
                        dev_err(dev, "unable to allocate command queue\n");
                        ret = -ENOMEM;
                        goto e_pool;
                }

                cmd_q->qidx = 0;
                /* Preset some register values and masks that are queue
                 * number dependent
                 */
                cmd_q->reg_control = ccp->io_regs +
                                     CMD5_Q_STATUS_INCR * (i + 1);
                cmd_q->reg_tail_lo = cmd_q->reg_control + CMD5_Q_TAIL_LO_BASE;
                cmd_q->reg_head_lo = cmd_q->reg_control + CMD5_Q_HEAD_LO_BASE;
                cmd_q->reg_int_enable = cmd_q->reg_control +
                                        CMD5_Q_INT_ENABLE_BASE;
                cmd_q->reg_interrupt_status = cmd_q->reg_control +
                                              CMD5_Q_INTERRUPT_STATUS_BASE;
                cmd_q->reg_status = cmd_q->reg_control + CMD5_Q_STATUS_BASE;
                cmd_q->reg_int_status = cmd_q->reg_control +
                                        CMD5_Q_INT_STATUS_BASE;
                cmd_q->reg_dma_status = cmd_q->reg_control +
                                        CMD5_Q_DMA_STATUS_BASE;
                cmd_q->reg_dma_read_status = cmd_q->reg_control +
                                             CMD5_Q_DMA_READ_STATUS_BASE;
                cmd_q->reg_dma_write_status = cmd_q->reg_control +
                                              CMD5_Q_DMA_WRITE_STATUS_BASE;

                init_waitqueue_head(&cmd_q->int_queue);

                dev_dbg(dev, "queue #%u available\n", i);
        }

        if (ccp->cmd_q_count == 0) {
                dev_notice(dev, "no command queues available\n");
                ret = -EIO;
                goto e_pool;
        }

        /* Turn off the queues and disable interrupts until ready */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol = 0; /* Start with nothing */
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
        }

        dev_dbg(dev, "Requesting an IRQ...\n");
        /* Request an irq */
        ret = sp_request_ccp_irq(ccp->sp, ccp5_irq_handler, ccp->name, ccp);
        if (ret) {
                dev_err(dev, "unable to allocate an IRQ\n");
                goto e_pool;
        }
        /* Initialize the ISR tasklet */
        if (ccp->use_tasklet)
                tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
                             (unsigned long)ccp);

        dev_dbg(dev, "Loading LSB map...\n");
        /* Copy the private LSB mask to the public registers */
        status_lo = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        status_hi = ioread32(ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);
        iowrite32(status_lo, ccp->io_regs + LSB_PUBLIC_MASK_LO_OFFSET);
        iowrite32(status_hi, ccp->io_regs + LSB_PUBLIC_MASK_HI_OFFSET);
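        /*
         * The queue access map is 40 bits wide: eight regions times
         * LSB_REGION_WIDTH queue bits.  The low register carries the
         * first 30 bits and the high register the remaining 10, which
         * matches the 0x3FFFFFFF/0x3FF private-mask writes in
         * ccp5other_config() below and explains the << 30 used to
         * recombine the two halves here.
         */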
        status = ((u64)status_hi << 30) | (u64)status_lo;

        dev_dbg(dev, "Configuring virtual queues...\n");
        /* Configure size of each virtual queue accessible to host */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                u32 dma_addr_lo;
                u32 dma_addr_hi;

                cmd_q = &ccp->cmd_q[i];

                cmd_q->qcontrol &= ~(CMD5_Q_SIZE << CMD5_Q_SHIFT);
                cmd_q->qcontrol |= QUEUE_SIZE_VAL << CMD5_Q_SHIFT;

                cmd_q->qdma_tail = cmd_q->qbase_dma;
                dma_addr_lo = low_address(cmd_q->qdma_tail);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_tail_lo);
                iowrite32((u32)dma_addr_lo, cmd_q->reg_head_lo);

                dma_addr_hi = high_address(cmd_q->qdma_tail);
                cmd_q->qcontrol |= (dma_addr_hi << 16);
                iowrite32(cmd_q->qcontrol, cmd_q->reg_control);

                /* Find the LSB regions accessible to the queue */
                ccp_find_lsb_regions(cmd_q, status);
                cmd_q->lsb = -1; /* Unassigned value */
        }

        dev_dbg(dev, "Assigning LSBs...\n");
        ret = ccp_assign_lsbs(ccp);
        if (ret) {
                dev_err(dev, "Unable to assign LSBs (%d)\n", ret);
                goto e_irq;
        }

        /* Optimization: pre-allocate LSB slots for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                ccp->cmd_q[i].sb_key = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
                ccp->cmd_q[i].sb_ctx = ccp_lsb_alloc(&ccp->cmd_q[i], 2);
        }

        dev_dbg(dev, "Starting threads...\n");
        /* Create a kthread for each queue */
        for (i = 0; i < ccp->cmd_q_count; i++) {
                struct task_struct *kthread;

                cmd_q = &ccp->cmd_q[i];

                kthread = kthread_create(ccp_cmd_queue_thread, cmd_q,
                                         "%s-q%u", ccp->name, cmd_q->id);
                if (IS_ERR(kthread)) {
                        dev_err(dev, "error creating queue thread (%ld)\n",
                                PTR_ERR(kthread));
                        ret = PTR_ERR(kthread);
                        goto e_kthread;
                }

                cmd_q->kthread = kthread;
                wake_up_process(kthread);
        }

        dev_dbg(dev, "Enabling interrupts...\n");
        ccp5_enable_queue_interrupts(ccp);

        dev_dbg(dev, "Registering device...\n");
        /* Put this on the unit list to make it available */
        ccp_add_device(ccp);

        ret = ccp_register_rng(ccp);
        if (ret)
                goto e_kthread;

        /* Register the DMA engine support */
        ret = ccp_dmaengine_register(ccp);
        if (ret)
                goto e_hwrng;

#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
        /* Set up debugfs entries */
        ccp5_debugfs_setup(ccp);
#endif

        return 0;

e_hwrng:
        ccp_unregister_rng(ccp);

e_kthread:
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

e_irq:
        sp_free_ccp_irq(ccp->sp, ccp);

e_pool:
        for (i = 0; i < ccp->cmd_q_count; i++)
                dma_pool_destroy(ccp->cmd_q[i].dma_pool);

        return ret;
}
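
/*
 * Note on the unwind order in ccp5_init() above: each label releases only
 * what was set up before the failure point -- e_hwrng unregisters the
 * RNG, e_kthread stops any queue threads already started, e_irq frees the
 * interrupt and e_pool destroys the per-queue DMA pools.  The descriptor
 * rings themselves are device-managed (dmam_alloc_coherent) and need no
 * explicit release here.
 */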

static void ccp5_destroy(struct ccp_device *ccp)
{
        struct ccp_cmd_queue *cmd_q;
        struct ccp_cmd *cmd;
        unsigned int i;

        /* Unregister the DMA engine */
        ccp_dmaengine_unregister(ccp);

        /* Unregister the RNG */
        ccp_unregister_rng(ccp);

        /* Remove this device from the list of available units first */
        ccp_del_device(ccp);

#ifdef CONFIG_CRYPTO_DEV_CCP_DEBUGFS
        /* We're in the process of tearing down the entire driver;
         * when all the devices are gone clean up debugfs
         */
        if (!ccp_present())
                ccp5_debugfs_destroy();
#endif

        /* Disable and clear interrupts */
        ccp5_disable_queue_interrupts(ccp);
        for (i = 0; i < ccp->cmd_q_count; i++) {
                cmd_q = &ccp->cmd_q[i];

                /* Turn off the run bit */
                iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);

                /* Clear the interrupt status */
                iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
                ioread32(cmd_q->reg_int_status);
                ioread32(cmd_q->reg_status);
        }

        /* Stop the queue kthreads */
        for (i = 0; i < ccp->cmd_q_count; i++)
                if (ccp->cmd_q[i].kthread)
                        kthread_stop(ccp->cmd_q[i].kthread);

        sp_free_ccp_irq(ccp->sp, ccp);

        /* Flush the cmd and backlog queue */
        while (!list_empty(&ccp->cmd)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->cmd, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
        while (!list_empty(&ccp->backlog)) {
                /* Invoke the callback directly with an error code */
                cmd = list_first_entry(&ccp->backlog, struct ccp_cmd, entry);
                list_del(&cmd->entry);
                cmd->callback(cmd->data, -ENODEV);
        }
}

static void ccp5_config(struct ccp_device *ccp)
{
        /* Public side */
        iowrite32(0x0, ccp->io_regs + CMD5_REQID_CONFIG_OFFSET);
}

static void ccp5other_config(struct ccp_device *ccp)
{
        int i;
        u32 rnd;

        /* We own all of the queues on the NTB CCP */

        iowrite32(0x00012D57, ccp->io_regs + CMD5_TRNG_CTL_OFFSET);
        iowrite32(0x00000003, ccp->io_regs + CMD5_CONFIG_0_OFFSET);
        for (i = 0; i < 12; i++) {
                rnd = ioread32(ccp->io_regs + TRNG_OUT_REG);
                iowrite32(rnd, ccp->io_regs + CMD5_AES_MASK_OFFSET);
        }

        iowrite32(0x0000001F, ccp->io_regs + CMD5_QUEUE_MASK_OFFSET);
        iowrite32(0x00005B6D, ccp->io_regs + CMD5_QUEUE_PRIO_OFFSET);
        iowrite32(0x00000000, ccp->io_regs + CMD5_CMD_TIMEOUT_OFFSET);

        iowrite32(0x3FFFFFFF, ccp->io_regs + LSB_PRIVATE_MASK_LO_OFFSET);
        iowrite32(0x000003FF, ccp->io_regs + LSB_PRIVATE_MASK_HI_OFFSET);

        iowrite32(0x00108823, ccp->io_regs + CMD5_CLK_GATE_CTL_OFFSET);

        ccp5_config(ccp);
}

/* Version 5 adds some functionality, but the action interface is
 * essentially the same as v3
 */
static const struct ccp_actions ccp5_actions = {
        .aes = ccp5_perform_aes,
        .xts_aes = ccp5_perform_xts_aes,
        .sha = ccp5_perform_sha,
        .des3 = ccp5_perform_des3,
        .rsa = ccp5_perform_rsa,
        .passthru = ccp5_perform_passthru,
        .ecc = ccp5_perform_ecc,
        .sballoc = ccp_lsb_alloc,
        .sbfree = ccp_lsb_free,
        .init = ccp5_init,
        .destroy = ccp5_destroy,
        .get_free_slots = ccp5_get_free_slots,
};

const struct ccp_vdata ccpv5a = {
        .version = CCP_VERSION(5, 0),
        .setup = ccp5_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};

const struct ccp_vdata ccpv5b = {
        .version = CCP_VERSION(5, 0),
        .dma_chan_attr = DMA_PRIVATE,
        .setup = ccp5other_config,
        .perform = &ccp5_actions,
        .offset = 0x0,
        .rsamax = CCP5_RSA_MAX_WIDTH,
};
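
/*
 * Both v5 variants report CCP_VERSION(5, 0) (the version macro in
 * ccp-dev.h packs major and minor into a single value); they differ only
 * in setup -- ccp5other_config() additionally programs the TRNG control,
 * queue and LSB masks for the secondary device -- and in DMA channel
 * policy, where ccpv5b marks its channels DMA_PRIVATE.
 */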