/*
 * Copyright 2016 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation (the "GPL").
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 (GPLv2) for more details.
 *
 * You should have received a copy of the GNU General Public License
 * version 2 (GPLv2) along with this source code.
 */

#include <linux/debugfs.h>

#include "cipher.h"
#include "util.h"

/* offset of SPU_OFIFO_CTRL register */
#define SPU_OFIFO_CTRL		0x40
#define SPU_FIFO_WATERMARK	0x1FF

/**
 * spu_sg_at_offset() - Find the scatterlist entry at a given distance from the
 * start of a scatterlist.
 * @sg: [in] Start of a scatterlist
 * @skip: [in] Distance from the start of the scatterlist, in bytes
 * @sge: [out] Scatterlist entry at skip bytes from start
 * @sge_offset: [out] Number of bytes from start of sge buffer to get to
 *	requested distance.
 *
 * Return: 0 if entry found at requested distance
 *	   < 0 otherwise
 */
int spu_sg_at_offset(struct scatterlist *sg, unsigned int skip,
		     struct scatterlist **sge, unsigned int *sge_offset)
{
	/* byte index from start of sg to the end of the previous entry */
	unsigned int index = 0;
	/* byte index from start of sg to the end of the current entry */
	unsigned int next_index;

	next_index = sg->length;
	while (next_index <= skip) {
		sg = sg_next(sg);
		index = next_index;
		if (!sg)
			return -EINVAL;
		next_index += sg->length;
	}

	*sge_offset = skip - index;
	*sge = sg;
	return 0;
}

/* Copy len bytes of sg data, starting at offset skip, to a dest buffer */
void sg_copy_part_to_buf(struct scatterlist *src, u8 *dest,
			 unsigned int len, unsigned int skip)
{
	size_t copied;
	unsigned int nents = sg_nents(src);

	copied = sg_pcopy_to_buffer(src, nents, dest, len, skip);
	if (copied != len) {
		flow_log("%s copied %u bytes of %u requested. ",
			 __func__, (u32)copied, len);
		flow_log("sg with %u entries and skip %u\n", nents, skip);
	}
}

/*
 * Copy data into a scatterlist starting at a specified offset in the
 * scatterlist. Specifically, copy len bytes of data in the buffer src
 * into the scatterlist dest, starting skip bytes into the scatterlist.
 */
void sg_copy_part_from_buf(struct scatterlist *dest, u8 *src,
			   unsigned int len, unsigned int skip)
{
	size_t copied;
	unsigned int nents = sg_nents(dest);

	copied = sg_pcopy_from_buffer(dest, nents, src, len, skip);
	if (copied != len) {
		flow_log("%s copied %u bytes of %u requested. ",
			 __func__, (u32)copied, len);
		flow_log("sg with %u entries and skip %u\n", nents, skip);
	}
}

/**
 * spu_sg_count() - Determine number of elements in scatterlist to provide a
 * specified number of bytes.
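 *
 * For instance (illustrative values only): given three 64-byte entries with
 * skip = 32 and nbytes = 100, the result would be 3, since the first entry
 * supplies the 32 bytes remaining after the skip, the second 64, and the
 * third the final 4.
 *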
 * @sg_list: scatterlist to examine
 * @skip: index of starting point
 * @nbytes: consider elements of scatterlist until reaching this number of
 *	bytes
 *
 * Return: the number of sg entries contributing to nbytes of data
 */
int spu_sg_count(struct scatterlist *sg_list, unsigned int skip, int nbytes)
{
	struct scatterlist *sg;
	int sg_nents = 0;
	unsigned int offset;

	if (!sg_list)
		return 0;

	if (spu_sg_at_offset(sg_list, skip, &sg, &offset) < 0)
		return 0;

	while (sg && (nbytes > 0)) {
		sg_nents++;
		nbytes -= (sg->length - offset);
		offset = 0;
		sg = sg_next(sg);
	}
	return sg_nents;
}

/**
 * spu_msg_sg_add() - Copy scatterlist entries from one sg to another, up to a
 * given length.
 * @to_sg: scatterlist to copy to
 * @from_sg: scatterlist to copy from
 * @from_skip: number of bytes to skip in from_sg. Non-zero when a previous
 *	request consumed only part of the first entry in from_sg.
 *	Assumes from_skip < from_sg->length.
 * @from_nents: number of entries in from_sg
 * @length: number of bytes to copy. May reach this limit before exhausting
 *	from_sg.
 *
 * Copies the entries themselves, not the data in the entries. Assumes to_sg
 * has enough entries. Does not limit the size of an individual buffer in
 * to_sg.
 *
 * to_sg, from_sg, and from_skip are all updated to the end of the copy.
 *
 * Return: Number of bytes copied
 */
u32 spu_msg_sg_add(struct scatterlist **to_sg,
		   struct scatterlist **from_sg, u32 *from_skip,
		   u8 from_nents, u32 length)
{
	struct scatterlist *sg;	/* an entry in from_sg */
	struct scatterlist *to = *to_sg;
	struct scatterlist *from = *from_sg;
	u32 skip = *from_skip;
	u32 offset;
	int i;
	u32 entry_len = 0;
	u32 frag_len = 0;	/* length of entry added to to_sg */
	u32 copied = 0;		/* number of bytes copied so far */

	if (length == 0)
		return 0;

	for_each_sg(from, sg, from_nents, i) {
		/* number of bytes in this from entry not yet used */
		entry_len = sg->length - skip;
		frag_len = min(entry_len, length - copied);
		offset = sg->offset + skip;
		if (frag_len)
			sg_set_page(to++, sg_page(sg), frag_len, offset);
		copied += frag_len;
		if (copied == entry_len) {
			/* used up all of from entry */
			skip = 0;	/* start at beginning of next entry */
		}
		if (copied == length)
			break;
	}
	*to_sg = to;
	*from_sg = sg;
	if (frag_len < entry_len)
		*from_skip = skip + frag_len;
	else
		*from_skip = 0;

	return copied;
}

/*
 * Add increment to a 16-byte big-endian counter value, carrying from the
 * low 64 bits into the high 64 bits on overflow.
 */
void add_to_ctr(u8 *ctr_pos, unsigned int increment)
{
	__be64 *high_be = (__be64 *)ctr_pos;
	__be64 *low_be = high_be + 1;
	u64 orig_low = __be64_to_cpu(*low_be);
	u64 new_low = orig_low + (u64)increment;

	*low_be = __cpu_to_be64(new_low);
	if (new_low < orig_low)
		/* there was a carry from the low 8 bytes */
		*high_be = __cpu_to_be64(__be64_to_cpu(*high_be) + 1);
}

struct sdesc {
	struct shash_desc shash;
	char ctx[];
};

/**
 * do_shash() - Do a synchronous hash operation in software
 * @name: The name of the hash algorithm
 * @result: Buffer where digest is to be written
 * @data1: First part of data to hash. May be NULL.
 * @data1_len: Length of data1, in bytes
 * @data2: Second part of data to hash. May be NULL.
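 *	(If data2 is NULL or data2_len is 0, only data1 is hashed; this
 *	matches the conditional second update in the function body.)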
 * @data2_len: Length of data2, in bytes
 * @key: Key (if keyed hash)
 * @key_len: Length of key, in bytes (or 0 if non-keyed hash)
 *
 * Note that the crypto API will not select this driver's own transform because
 * this driver only registers asynchronous algos.
 *
 * Return: 0 if hash successfully stored in result
 *	   < 0 otherwise
 */
int do_shash(unsigned char *name, unsigned char *result,
	     const u8 *data1, unsigned int data1_len,
	     const u8 *data2, unsigned int data2_len,
	     const u8 *key, unsigned int key_len)
{
	int rc;
	unsigned int size;
	struct crypto_shash *hash;
	struct sdesc *sdesc;

	hash = crypto_alloc_shash(name, 0, 0);
	if (IS_ERR(hash)) {
		rc = PTR_ERR(hash);
		pr_err("%s: Crypto %s allocation error %d\n",
		       __func__, name, rc);
		return rc;
	}

	size = sizeof(struct shash_desc) + crypto_shash_descsize(hash);
	sdesc = kmalloc(size, GFP_KERNEL);
	if (!sdesc) {
		rc = -ENOMEM;
		goto do_shash_err;
	}
	sdesc->shash.tfm = hash;

	if (key_len > 0) {
		rc = crypto_shash_setkey(hash, key, key_len);
		if (rc) {
			pr_err("%s: Could not setkey %s shash\n",
			       __func__, name);
			goto do_shash_err;
		}
	}

	rc = crypto_shash_init(&sdesc->shash);
	if (rc) {
		pr_err("%s: Could not init %s shash\n", __func__, name);
		goto do_shash_err;
	}
	rc = crypto_shash_update(&sdesc->shash, data1, data1_len);
	if (rc) {
		pr_err("%s: Could not update1\n", __func__);
		goto do_shash_err;
	}
	if (data2 && data2_len) {
		rc = crypto_shash_update(&sdesc->shash, data2, data2_len);
		if (rc) {
			pr_err("%s: Could not update2\n", __func__);
			goto do_shash_err;
		}
	}
	rc = crypto_shash_final(&sdesc->shash, result);
	if (rc)
		pr_err("%s: Could not generate %s hash\n", __func__, name);

do_shash_err:
	crypto_free_shash(hash);
	kfree(sdesc);

	return rc;
}

/* Dump len bytes of a scatterlist starting at skip bytes into the sg */
void __dump_sg(struct scatterlist *sg, unsigned int skip, unsigned int len)
{
	u8 dbuf[16];
	unsigned int idx = skip;
	unsigned int num_out = 0;	/* number of bytes dumped so far */
	unsigned int count;

	if (packet_debug_logging) {
		while (num_out < len) {
			count = (len - num_out > 16) ? 16 : len - num_out;
			sg_copy_part_to_buf(sg, dbuf, count, idx);
			num_out += count;
			print_hex_dump(KERN_ALERT, " sg: ", DUMP_PREFIX_NONE,
				       4, 1, dbuf, count, false);
			idx += 16;
		}
	}
	if (debug_logging_sleep)
		msleep(debug_logging_sleep);
}

/* Returns the name for a given cipher alg/mode */
char *spu_alg_name(enum spu_cipher_alg alg, enum spu_cipher_mode mode)
{
	switch (alg) {
	case CIPHER_ALG_RC4:
		return "rc4";
	case CIPHER_ALG_AES:
		switch (mode) {
		case CIPHER_MODE_CBC:
			return "cbc(aes)";
		case CIPHER_MODE_ECB:
			return "ecb(aes)";
		case CIPHER_MODE_OFB:
			return "ofb(aes)";
		case CIPHER_MODE_CFB:
			return "cfb(aes)";
		case CIPHER_MODE_CTR:
			return "ctr(aes)";
		case CIPHER_MODE_XTS:
			return "xts(aes)";
		case CIPHER_MODE_GCM:
			return "gcm(aes)";
		default:
			return "aes";
		}
		break;
	case CIPHER_ALG_DES:
		switch (mode) {
		case CIPHER_MODE_CBC:
			return "cbc(des)";
		case CIPHER_MODE_ECB:
			return "ecb(des)";
		case CIPHER_MODE_CTR:
			return "ctr(des)";
		default:
			return "des";
		}
		break;
	case CIPHER_ALG_3DES:
		switch (mode) {
		case CIPHER_MODE_CBC:
			return "cbc(des3_ede)";
		case CIPHER_MODE_ECB:
			return "ecb(des3_ede)";
		case CIPHER_MODE_CTR:
			return "ctr(des3_ede)";
		default:
			return "3des";
		}
		break;
	default:
		return "other";
	}
}

static ssize_t spu_debugfs_read(struct file *filp, char __user *ubuf,
				size_t count, loff_t *offp)
{
	struct device_private *ipriv;
	char *buf;
	ssize_t ret, out_offset, out_count;
	int i;
	u32 fifo_len;
	u32 spu_ofifo_ctrl;
	u32 alg;
	u32 mode;
	u32 op_cnt;

	out_count = 2048;

	buf = kmalloc(out_count, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ipriv = filp->private_data;
	out_offset = 0;
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"Number of SPUs.........%u\n",
			ipriv->spu.num_spu);
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"Current sessions.......%u\n",
			atomic_read(&ipriv->session_count));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"Session count..........%u\n",
			atomic_read(&ipriv->stream_count));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"Cipher setkey..........%u\n",
			atomic_read(&ipriv->setkey_cnt[SPU_OP_CIPHER]));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"Cipher Ops.............%u\n",
			atomic_read(&ipriv->op_counts[SPU_OP_CIPHER]));
	for (alg = 0; alg < CIPHER_ALG_LAST; alg++) {
		for (mode = 0; mode < CIPHER_MODE_LAST; mode++) {
			op_cnt = atomic_read(&ipriv->cipher_cnt[alg][mode]);
			if (op_cnt) {
				out_offset += snprintf(buf + out_offset,
						out_count - out_offset,
						" %-13s%11u\n",
						spu_alg_name(alg, mode), op_cnt);
			}
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"Hash Ops...............%u\n",
			atomic_read(&ipriv->op_counts[SPU_OP_HASH]));
	for (alg = 0; alg < HASH_ALG_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->hash_cnt[alg]);
		if (op_cnt) {
			out_offset += snprintf(buf + out_offset,
					out_count - out_offset,
					" %-13s%11u\n",
					hash_alg_name[alg], op_cnt);
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"HMAC setkey............%u\n",
			atomic_read(&ipriv->setkey_cnt[SPU_OP_HMAC]));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"HMAC Ops...............%u\n",
			atomic_read(&ipriv->op_counts[SPU_OP_HMAC]));
	for (alg = 0; alg < HASH_ALG_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->hmac_cnt[alg]);
		if (op_cnt) {
			out_offset += snprintf(buf + out_offset,
					out_count - out_offset,
					" %-13s%11u\n",
					hash_alg_name[alg], op_cnt);
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"AEAD setkey............%u\n",
			atomic_read(&ipriv->setkey_cnt[SPU_OP_AEAD]));

	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"AEAD Ops...............%u\n",
			atomic_read(&ipriv->op_counts[SPU_OP_AEAD]));
	for (alg = 0; alg < AEAD_TYPE_LAST; alg++) {
		op_cnt = atomic_read(&ipriv->aead_cnt[alg]);
		if (op_cnt) {
			out_offset += snprintf(buf + out_offset,
					out_count - out_offset,
					" %-13s%11u\n",
					aead_alg_name[alg], op_cnt);
		}
	}
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"Bytes of req data......%llu\n",
			(u64)atomic64_read(&ipriv->bytes_out));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"Bytes of resp data.....%llu\n",
			(u64)atomic64_read(&ipriv->bytes_in));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"Mailbox full...........%u\n",
			atomic_read(&ipriv->mb_no_spc));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"Mailbox send failures..%u\n",
			atomic_read(&ipriv->mb_send_fail));
	out_offset += snprintf(buf + out_offset, out_count - out_offset,
			"Check ICV errors.......%u\n",
			atomic_read(&ipriv->bad_icv));
	if (ipriv->spu.spu_type == SPU_TYPE_SPUM)
		for (i = 0; i < ipriv->spu.num_spu; i++) {
			spu_ofifo_ctrl = ioread32(ipriv->spu.reg_vbase[i] +
						  SPU_OFIFO_CTRL);
			fifo_len = spu_ofifo_ctrl & SPU_FIFO_WATERMARK;
			out_offset += snprintf(buf + out_offset,
					out_count - out_offset,
					"SPU %d output FIFO high water.....%u\n",
					i, fifo_len);
		}

	if (out_offset > out_count)
		out_offset = out_count;

	ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset);
	kfree(buf);
	return ret;
}

static const struct file_operations spu_debugfs_stats = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = spu_debugfs_read,
};

/*
 * Create the debug FS directories. If the top-level directory has not yet
 * been created, create it now. Create a stats file in this directory for
 * a SPU.
 */
void spu_setup_debugfs(void)
{
	if (!debugfs_initialized())
		return;

	if (!iproc_priv.debugfs_dir)
		iproc_priv.debugfs_dir = debugfs_create_dir(KBUILD_MODNAME,
							    NULL);

	if (!iproc_priv.debugfs_stats)
		/* Create file with permissions S_IRUSR */
		debugfs_create_file("stats", 0400, iproc_priv.debugfs_dir,
				    &iproc_priv, &spu_debugfs_stats);
}

void spu_free_debugfs(void)
{
	debugfs_remove_recursive(iproc_priv.debugfs_dir);
	iproc_priv.debugfs_dir = NULL;
}

/**
 * format_value_ccm() - Format a value into a buffer, using a specified number
 *	of bytes (i.e. maybe writing value X into a 4 byte buffer, or maybe
 *	into a 12 byte buffer), as per the SPU CCM spec.
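 *
 *	For example (illustrative values only): val 0x0102 with len 4 would
 *	produce the bytes 00 00 01 02; with len 12 the first eight bytes stay
 *	zero and the last four hold the value.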
 *
 * @val: value to write (up to max of unsigned int)
 * @buf: (pointer to) buffer to write the value
 * @len: number of bytes to use (0 to 255)
 *
 */
void format_value_ccm(unsigned int val, u8 *buf, u8 len)
{
	int i;

	/* First clear full output buffer */
	memset(buf, 0, len);

	/* Then, starting from right side, fill in with data */
	for (i = 0; i < len; i++) {
		buf[len - i - 1] = (val >> (8 * i)) & 0xff;
		if (i >= 3)
			break;	/* Only handle up to 32 bits of 'val' */
	}
}