/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */

#include <linux/debugfs.h>

#include "cipher.h"
#include "util.h"

/* offset of SPU_OFIFO_CTRL register */
#define SPU_OFIFO_CTRL		0x40
#define SPU_FIFO_WATERMARK	0x1FF

/**
 * spu_sg_at_offset() - Find the scatterlist entry at a given distance from the
 * start of a scatterlist.
 * @sg:         [in]  Start of a scatterlist
 * @skip:       [in]  Distance from the start of the scatterlist, in bytes
 * @sge:        [out] Scatterlist entry at skip bytes from start
 * @sge_offset: [out] Number of bytes from start of sge buffer to get to
 *                    requested distance.
 *
 * Return: 0 if entry found at requested distance
 *         < 0 otherwise
 */
int spu_sg_at_offset(struct scatterlist *sg, unsigned int skip,
		     struct scatterlist **sge, unsigned int *sge_offset)
{
	/* byte index from start of sg to the end of the previous entry */
	unsigned int index = 0;
	/* byte index from start of sg to the end of the current entry */
	unsigned int next_index;

	next_index = sg->length;
	while (next_index <= skip) {
		sg = sg_next(sg);
		index = next_index;
		if (!sg)
			return -EINVAL;
		next_index += sg->length;
	}

	*sge_offset = skip - index;
	*sge = sg;
	return 0;
}
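
/*
 * spu_sg_at_offset() example: given a scatterlist whose first two entries
 * describe buffers of 16 and 32 bytes, a call with skip == 20 returns 0 with
 * *sge pointing at the second entry and *sge_offset == 4, since byte 20 lies
 * 4 bytes into that entry. If skip is at or past the end of the list, the
 * walk runs off the chain and -EINVAL is returned.
 */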

/* Copy len bytes of sg data, starting at offset skip, to a dest buffer */
void sg_copy_part_to_buf(struct scatterlist *src, u8 *dest,
			 unsigned int len, unsigned int skip)
{
	size_t copied;
	unsigned int nents = sg_nents(src);

	copied = sg_pcopy_to_buffer(src, nents, dest, len, skip);
	if (copied != len) {
		flow_log("%s copied %u bytes of %u requested. ",
			 __func__, (u32)copied, len);
		flow_log("sg with %u entries and skip %u\n", nents, skip);
	}
}

/*
 * Copy data into a scatterlist starting at a specified offset in the
 * scatterlist. Specifically, copy len bytes of data in the buffer src
 * into the scatterlist dest, starting skip bytes into the scatterlist.
 */
void sg_copy_part_from_buf(struct scatterlist *dest, u8 *src,
			   unsigned int len, unsigned int skip)
{
	size_t copied;
	unsigned int nents = sg_nents(dest);

	copied = sg_pcopy_from_buffer(dest, nents, src, len, skip);
	if (copied != len) {
		flow_log("%s copied %u bytes of %u requested. ",
			 __func__, (u32)copied, len);
		flow_log("sg with %u entries and skip %u\n", nents, skip);
	}
}

/**
 * spu_sg_count() - Determine number of elements in scatterlist to provide a
 * specified number of bytes.
 * @sg_list: scatterlist to examine
 * @skip:    index of starting point
 * @nbytes:  consider elements of scatterlist until reaching this number of
 *	     bytes
 *
 * Return: the number of sg entries contributing to nbytes of data
 */
int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)
{
	struct scatterlist *sg;
	int sg_nents = 0;
	unsigned int offset;

	if (!sg_list)
		return 0;

	if (spu_sg_at_offset(sg_list, skip, &sg, &offset) < 0)
		return 0;

	while (sg && (nbytes > 0)) {
		sg_nents++;
		nbytes -= (sg->length - offset);
		offset = 0;
		sg = sg_next(sg);
	}
	return sg_nents;
}

/**
 * spu_msg_sg_add() - Copy scatterlist entries from one sg to another, up to a
 * given length.
 * @to_sg:      scatterlist to copy to
 * @from_sg:    scatterlist to copy from
 * @from_skip:  number of bytes to skip in from_sg. Non-zero when a previous
 *		request included part of the buffer in an entry in from_sg.
 *		Assumes from_skip < from_sg->length.
 * @from_nents: number of entries in from_sg
 * @length:     number of bytes to copy. May reach this limit before
 *		exhausting from_sg.
 *
 * Copies the entries themselves, not the data in the entries. Assumes to_sg
 * has enough entries. Does not limit the size of an individual buffer in
 * to_sg.
 *
 * to_sg, from_sg, skip are all updated to end of copy
 *
 * Return: Number of bytes copied
 */
u32 spu_msg_sg_add(struct scatterlist **to_sg,
		   struct scatterlist **from_sg, u32 *from_skip,
		   u8 from_nents, u32 length)
{
	struct scatterlist *sg;	/* an entry in from_sg */
	struct scatterlist *to = *to_sg;
	struct scatterlist *from = *from_sg;
	u32 skip = *from_skip;
	u32 offset;
	int i;
	u32 entry_len = 0;
	u32 frag_len = 0;	/* length of entry added to to_sg */
	u32 copied = 0;		/* number of bytes copied so far */

	if (length == 0)
		return 0;

	for_each_sg(from, sg, from_nents, i) {
		/* number of bytes in this from entry not yet used */
		entry_len = sg->length - skip;
		frag_len = min(entry_len, length - copied);
		offset = sg->offset + skip;
		if (frag_len)
			sg_set_page(to++, sg_page(sg), frag_len, offset);
		copied += frag_len;
		if (copied == entry_len) {
			/* used up all of from entry */
			skip = 0;	/* start at beginning of next entry */
		}
		if (copied == length)
			break;
	}
	*to_sg = to;
	*from_sg = sg;
	if (frag_len < entry_len)
		*from_skip = skip + frag_len;
	else
		*from_skip = 0;

	return copied;
}

void add_to_ctr(u8 *ctr_pos, unsigned int increment)
{
	__be64 *high_be = (__be64 *)ctr_pos;
	__be64 *low_be = high_be + 1;
	u64 orig_low = __be64_to_cpu(*low_be);
	u64 new_low = orig_low + (u64)increment;

	*low_be = __cpu_to_be64(new_low);
	if (new_low < orig_low)
		/* there was a carry from the low 8 bytes */
		*high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
}
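
/*
 * add_to_ctr() treats ctr_pos as a 128-bit big-endian counter stored as two
 * __be64 words, so the buffer is assumed to be 16 bytes. For example, if the
 * low 8 bytes are all 0xff, adding 1 wraps the low word to zero and carries
 * one into the high word, matching the big-endian counter layout used by
 * CTR mode.
 */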

struct sdesc {
	struct shash_desc shash;
	char ctx[];
};

/* do a synchronous decrypt operation */
int do_decrypt(char *alg_name,
	       void *key_ptr, unsigned int key_len,
	       void *iv_ptr, void *src_ptr, void *dst_ptr,
	       unsigned int block_len)
{
	struct scatterlist sg_in[1], sg_out[1];
	struct crypto_blkcipher *tfm =
	    crypto_alloc_blkcipher(alg_name, 0, CRYPTO_ALG_ASYNC);
	struct blkcipher_desc desc = {.tfm = tfm, .flags = 0 };
	int ret = 0;
	void *iv;
	int ivsize;

	flow_log("%s() name:%s block_len:%u\n", __func__, alg_name, block_len);

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_blkcipher_setkey((void *)tfm, key_ptr, key_len);

	sg_init_table(sg_in, 1);
	sg_set_buf(sg_in, src_ptr, block_len);

	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst_ptr, block_len);

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, iv_ptr, ivsize);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, block_len);
	crypto_free_blkcipher(tfm);

	if (ret < 0)
		pr_err("aes_decrypt failed %d\n", ret);

	return ret;
}

/**
 * do_shash() - Do a synchronous hash operation in software
 * @name:      The name of the hash algorithm
 * @result:    Buffer where digest is to be written
 * @data1:     First part of data to hash. May be NULL.
 * @data1_len: Length of data1, in bytes
 * @data2:     Second part of data to hash. May be NULL.
 * @data2_len: Length of data2, in bytes
 * @key:       Key (if keyed hash)
 * @key_len:   Length of key, in bytes (or 0 if non-keyed hash)
 *
 * Note that the crypto API will not select this driver's own transform because
 * this driver only registers asynchronous algos.
 *
 * Return: 0 if hash successfully stored in result
 *         < 0 otherwise
 */
int do_shash(unsigned char *name, unsigned char *result,
	     const u8 *data1, unsigned int data1_len,
	     const u8 *data2, unsigned int data2_len,
	     const u8 *key, unsigned int key_len)
{
	int rc;
	unsigned int size;
	struct crypto_shash *hash;
	struct sdesc *sdesc;

	hash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(hash)) {
		rc = PTR_ERR(hash);
		pr_err("%s: Crypto %s allocation error %d", __func__, name, rc);
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc) {
		rc = -ENOMEM;
		pr_err("%s: Memory allocation failure", __func__);
		goto do_shash_err;
	}
	sdesc->shash.tfm = hash;
	sdesc->shash.flags = 0x0;

	if (key_len > 0) {
		rc = crypto_shash_setkey(hash, key, key_len);
		if (rc) {
			pr_err("%s: Could not setkey %s shash", __func__, name);
			goto do_shash_err;
		}
	}

	rc = crypto_shash_init(&sdesc->shash);
	if (rc) {
		pr_err("%s: Could not init %s shash", __func__, name);
		goto do_shash_err;
	}
	rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
	if (rc) {
		pr_err("%s: Could not update1", __func__);
		goto do_shash_err;
	}
	if (data2 && data2_len) {
		rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
		if (rc) {
			pr_err("%s: Could not update2", __func__);
			goto do_shash_err;
		}
	}
	rc = crypto_shash_final(&sdesc->shash, result);
	if (rc)
		pr_err("%s: Could not generate %s hash", __func__, name);

do_shash_err:
	crypto_free_shash(hash);
	kfree(sdesc);

	return rc;
}
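
/*
 * do_shash() usage example: an unkeyed digest over a single buffer can be
 * computed with something like
 *
 *	rc = do_shash((unsigned char *)"sha256", digest,
 *		      data, data_len, NULL, 0, NULL, 0);
 *
 * where digest must be large enough for the chosen algorithm (32 bytes for
 * sha256). Passing a key and non-zero key_len selects the keyed form.
 */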

/* Dump len bytes of a scatterlist starting at skip bytes into the sg */
void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len)
{
	u8 dbuf[16];
	unsigned int idx = skip;
	unsigned int num_out = 0;	/* number of bytes dumped so far */
	unsigned int count;

	if (packet_debug_logging) {
		while (num_out < len) {
			count = (len - num_out > 16) ? 16 : len - num_out;
			sg_copy_part_to_buf(sg, dbuf, count, idx);
			num_out += count;
			print_hex_dump(KERN_ALERT, " sg: ", DUMP_PREFIX_NONE,
				       4, 1, dbuf, count, false);
			idx += 16;
		}
	}
	if (debug_logging_sleep)
		msleep(debug_logging_sleep);
}

/* Returns the name for a given cipher alg/mode */
char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
{
	switch (alg) {
	case CIPHER_ALG_RC4:
		return "rc4";
	case CIPHER_ALG_AES:
		switch (mode) {
		case CIPHER_MODE_CBC:
			return "cbc(aes)";
		case CIPHER_MODE_ECB:
			return "ecb(aes)";
		case CIPHER_MODE_OFB:
			return "ofb(aes)";
		case CIPHER_MODE_CFB:
			return "cfb(aes)";
		case CIPHER_MODE_CTR:
			return "ctr(aes)";
		case CIPHER_MODE_XTS:
			return "xts(aes)";
		case CIPHER_MODE_GCM:
			return "gcm(aes)";
		default:
			return "aes";
		}
		break;
	case CIPHER_ALG_DES:
		switch (mode) {
		case CIPHER_MODE_CBC:
			return "cbc(des)";
		case CIPHER_MODE_ECB:
			return "ecb(des)";
		case CIPHER_MODE_CTR:
			return "ctr(des)";
		default:
			return "des";
		}
		break;
	case CIPHER_ALG_3DES:
		switch (mode) {
		case CIPHER_MODE_CBC:
			return "cbc(des3_ede)";
		case CIPHER_MODE_ECB:
			return "ecb(des3_ede)";
		case CIPHER_MODE_CTR:
			return "ctr(des3_ede)";
		default:
			return "3des";
		}
		break;
	default:
		return "other";
	}
}
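
/*
 * For example, spu_alg_name(CIPHER_ALG_AES, CIPHER_MODE_XTS) returns
 * "xts(aes)", the mode(algorithm) convention used by the kernel crypto API.
 * These strings label the per-algorithm cipher counts in the debugfs stats
 * below.
 */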

static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct device_private *ipriv;
	char *buf;
	ssize_t ret, out_offset, out_count;
	int i;
	u32 fifo_len;
	u32 spu_ofifo_ctrl;
	u32 alg;
	u32 mode;
	u32 op_cnt;

	out_count = 2048;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ipriv = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Number of SPUs.........%u\n",
			       ipriv->spu.num_spu);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Current sessions.......%u\n",
			       atomic_read(&ipriv->session_count));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Session count..........%u\n",
			       atomic_read(&ipriv->stream_count));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Cipher setkey..........%u\n",
			       atomic_read(&ipriv->setkey_cnt[SPU_OP_CIPHER]));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Cipher Ops.............%u\n",
			       atomic_read(&ipriv->op_counts[SPU_OP_CIPHER]));
	for (alg = 0; alg < CIPHER_ALG_LAST; alg++) {
		for (mode = 0; mode < CIPHER_MODE_LAST; mode++) {
			op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]);
			if (op_cnt) {
				out_offset += snprintf(buf + out_offset,
						       out_count - out_offset,
						       " %-13s%11u\n",
						       spu_alg_name(alg, mode), op_cnt);
			}
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Hash Ops...............%u\n",
			       atomic_read(&ipriv->op_counts[SPU_OP_HASH]));
	for (alg = 0; alg < HASH_ALG_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->hash_cnt[alg]);
		if (op_cnt) {
			out_offset += snprintf(buf + out_offset,
					       out_count - out_offset,
					       " %-13s%11u\n",
					       hash_alg_name[alg], op_cnt);
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "HMAC setkey............%u\n",
			       atomic_read(&ipriv->setkey_cnt[SPU_OP_HMAC]));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "HMAC Ops...............%u\n",
			       atomic_read(&ipriv->op_counts[SPU_OP_HMAC]));
	for (alg = 0; alg < HASH_ALG_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->hmac_cnt[alg]);
		if (op_cnt) {
			out_offset += snprintf(buf + out_offset,
					       out_count - out_offset,
					       " %-13s%11u\n",
					       hash_alg_name[alg], op_cnt);
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "AEAD setkey............%u\n",
			       atomic_read(&ipriv->setkey_cnt[SPU_OP_AEAD]));

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "AEAD Ops...............%u\n",
			       atomic_read(&ipriv->op_counts[SPU_OP_AEAD]));
	for (alg = 0; alg < AEAD_TYPE_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->aead_cnt[alg]);
		if (op_cnt) {
			out_offset += snprintf(buf + out_offset,
					       out_count - out_offset,
					       " %-13s%11u\n",
					       aead_alg_name[alg], op_cnt);
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Bytes of req data......%llu\n",
			       (u64)atomic64_read(&ipriv->bytes_out));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Bytes of resp data.....%llu\n",
			       (u64)atomic64_read(&ipriv->bytes_in));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Mailbox full...........%u\n",
			       atomic_read(&ipriv->mb_no_spc));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Mailbox send failures..%u\n",
			       atomic_read(&ipriv->mb_send_fail));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			       "Check ICV errors.......%u\n",
			       atomic_read(&ipriv->bad_icv));
	if (ipriv->spu.spu_type == SPU_TYPE_SPUM)
		for (i = 0; i < ipriv->spu.num_spu; i++) {
			spu_ofifo_ctrl = ioread32(ipriv->spu.reg_vbase[i] +
						  SPU_OFIFO_CTRL);
			fifo_len = spu_ofifo_ctrl & SPU_FIFO_WATERMARK;
			out_offset += snprintf(buf + out_offset,
					       out_count - out_offset,
					       "SPU %d output FIFO high water.....%u\n",
					       i, fifo_len);
		}

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations spu_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = spu_debugfs_read,
};

/*
 * Create the debug FS directories. If the top-level directory has not yet
 * been created, create it now. Create a stats file in this directory for
 * a SPU.
 */
void spu_setup_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	if (!iproc_priv.debugfs_dir)
		iproc_priv.debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
							    NULL);

	if (!iproc_priv.debugfs_stats)
		/* Create file with permissions S_IRUSR */
		debugfs_create_file("stats", 0400, iproc_priv.debugfs_dir,
				    &iproc_priv, &spu_debugfs_stats);
}
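
/*
 * With debugfs mounted at its usual location, the stats file created above
 * is typically found at /sys/kernel/debug/<KBUILD_MODNAME>/stats and is
 * readable only by root (mode 0400).
 */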
562 * 563 * @val: value to write (up to max of unsigned int) 564 * @buf: (pointer to) buffer to write the value 565 * @len: number of bytes to use (0 to 255) 566 * 567 */ 568 void format_value_ccm(unsigned int val, u8 *buf, u8 len) 569 { 570 int i; 571 572 /* First clear full output buffer */ 573 memset(buf, 0, len); 574 575 /* Then, starting from right side, fill in with data */ 576 for (i = 0; i < len; i++) { 577 buf[len - i - 1] = (val >> (8 * i)) & 0xff; 578 if (i >= 3) 579 break; /* Only handle up to 32 bits of 'val' */ 580 } 581 } 582