// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2004-2006, Advanced Micro Devices, Inc.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/crypto.h>
#include <linux/spinlock.h>
#include <crypto/algapi.h>
#include <crypto/aes.h>
#include <crypto/internal/cipher.h>
#include <crypto/internal/skcipher.h>

#include <linux/io.h>
#include <linux/delay.h>

#include "geode-aes.h"

/* Static structures */

static void __iomem *_iobase;
static spinlock_t lock;

/* Write a 128-bit field (either a writable key or IV) */
static inline void
_writefield(u32 offset, const void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		iowrite32(((const u32 *) value)[i], _iobase + offset + (i * 4));
}

/* Read a 128-bit field (either a writable key or IV) */
static inline void
_readfield(u32 offset, void *value)
{
	int i;

	for (i = 0; i < 4; i++)
		((u32 *) value)[i] = ioread32(_iobase + offset + (i * 4));
}

static int
do_crypt(const void *src, void *dst, u32 len, u32 flags)
{
	u32 status;
	u32 counter = AES_OP_TIMEOUT;

	iowrite32(virt_to_phys((void *)src), _iobase + AES_SOURCEA_REG);
	iowrite32(virt_to_phys(dst), _iobase + AES_DSTA_REG);
	iowrite32(len, _iobase + AES_LENA_REG);

	/* Start the operation */
	iowrite32(AES_CTRL_START | flags, _iobase + AES_CTRLA_REG);

	do {
		status = ioread32(_iobase + AES_INTR_REG);
		cpu_relax();
	} while (!(status & AES_INTRA_PENDING) && --counter);

	/* Clear the event */
	iowrite32((status & 0xFF) | AES_INTRA_PENDING, _iobase + AES_INTR_REG);
	return counter ? 0 : 1;
}

static void
geode_aes_crypt(const struct geode_aes_tfm_ctx *tctx, const void *src,
		void *dst, u32 len, u8 *iv, int mode, int dir)
{
	u32 flags = 0;
	unsigned long iflags;
	int ret;

	/* If the source and destination are the same, then
	 * we need to turn on the coherent flags, otherwise
	 * we don't need to worry
	 */

	flags |= (AES_CTRL_DCA | AES_CTRL_SCA);

	if (dir == AES_DIR_ENCRYPT)
		flags |= AES_CTRL_ENCRYPT;

	/* Start the critical section */

	spin_lock_irqsave(&lock, iflags);

	if (mode == AES_MODE_CBC) {
		flags |= AES_CTRL_CBC;
		_writefield(AES_WRITEIV0_REG, iv);
	}

	flags |= AES_CTRL_WRKEY;
	_writefield(AES_WRITEKEY0_REG, tctx->key);

	ret = do_crypt(src, dst, len, flags);
	BUG_ON(ret);

	if (mode == AES_MODE_CBC)
		_readfield(AES_WRITEIV0_REG, iv);

	spin_unlock_irqrestore(&lock, iflags);
}

/* CRYPTO-API Functions */

static int geode_setkey_cip(struct crypto_tfm *tfm, const u8 *key,
			    unsigned int len)
{
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	tctx->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(tctx->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
		/* not supported at all */
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW; fall back to
	 * the software cipher.
	 */
	tctx->fallback.cip->base.crt_flags &= ~CRYPTO_TFM_REQ_MASK;
	tctx->fallback.cip->base.crt_flags |=
		(tfm->crt_flags & CRYPTO_TFM_REQ_MASK);

	return crypto_cipher_setkey(tctx->fallback.cip, key, len);
}
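
/*
 * Note on the fallback design: the LX engine only takes a single 128-bit
 * key (tctx->key), so both setkey paths, geode_setkey_cip() above and
 * geode_setkey_skcipher() below, keep AES-128 keys for the hardware and
 * hand 192/256-bit keys to the software fallback allocated at tfm init
 * time. The CRYPTO_TFM_REQ_* flags are mirrored onto the fallback so it
 * observes the same constraints the caller set on this tfm.
 */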

static int geode_setkey_skcipher(struct crypto_skcipher *tfm, const u8 *key,
				 unsigned int len)
{
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	tctx->keylen = len;

	if (len == AES_KEYSIZE_128) {
		memcpy(tctx->key, key, len);
		return 0;
	}

	if (len != AES_KEYSIZE_192 && len != AES_KEYSIZE_256)
		/* not supported at all */
		return -EINVAL;

	/*
	 * The requested key size is not supported by HW; fall back to
	 * the software skcipher.
	 */
	crypto_skcipher_clear_flags(tctx->fallback.skcipher,
				    CRYPTO_TFM_REQ_MASK);
	crypto_skcipher_set_flags(tctx->fallback.skcipher,
				  crypto_skcipher_get_flags(tfm) &
				  CRYPTO_TFM_REQ_MASK);
	return crypto_skcipher_setkey(tctx->fallback.skcipher, key, len);
}

static void
geode_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_encrypt_one(tctx->fallback.cip, out, in);
		return;
	}

	geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
			AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static void
geode_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
{
	const struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		crypto_cipher_decrypt_one(tctx->fallback.cip, out, in);
		return;
	}

	geode_aes_crypt(tctx, in, out, AES_BLOCK_SIZE, NULL,
			AES_MODE_ECB, AES_DIR_DECRYPT);
}

static int fallback_init_cip(struct crypto_tfm *tfm)
{
	const char *name = crypto_tfm_alg_name(tfm);
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	tctx->fallback.cip = crypto_alloc_cipher(name, 0,
						 CRYPTO_ALG_NEED_FALLBACK);

	if (IS_ERR(tctx->fallback.cip)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(tctx->fallback.cip);
	}

	return 0;
}

static void fallback_exit_cip(struct crypto_tfm *tfm)
{
	struct geode_aes_tfm_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_cipher(tctx->fallback.cip);
}

static struct crypto_alg geode_alg = {
	.cra_name		= "aes",
	.cra_driver_name	= "geode-aes",
	.cra_priority		= 300,
	.cra_alignmask		= 15,
	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER |
				  CRYPTO_ALG_NEED_FALLBACK,
	.cra_init		= fallback_init_cip,
	.cra_exit		= fallback_exit_cip,
	.cra_blocksize		= AES_BLOCK_SIZE,
	.cra_ctxsize		= sizeof(struct geode_aes_tfm_ctx),
	.cra_module		= THIS_MODULE,
	.cra_u			= {
		.cipher	= {
			.cia_min_keysize	= AES_MIN_KEY_SIZE,
			.cia_max_keysize	= AES_MAX_KEY_SIZE,
			.cia_setkey		= geode_setkey_cip,
			.cia_encrypt		= geode_encrypt,
			.cia_decrypt		= geode_decrypt
		}
	}
};

static int geode_init_skcipher(struct crypto_skcipher *tfm)
{
	const char *name = crypto_tfm_alg_name(&tfm->base);
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	tctx->fallback.skcipher =
		crypto_alloc_skcipher(name, 0, CRYPTO_ALG_NEED_FALLBACK |
				      CRYPTO_ALG_ASYNC);
	if (IS_ERR(tctx->fallback.skcipher)) {
		printk(KERN_ERR "Error allocating fallback algo %s\n", name);
		return PTR_ERR(tctx->fallback.skcipher);
	}

	crypto_skcipher_set_reqsize(tfm, sizeof(struct skcipher_request) +
				    crypto_skcipher_reqsize(tctx->fallback.skcipher));
	return 0;
}

static void geode_exit_skcipher(struct crypto_skcipher *tfm)
{
	struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);

	crypto_free_skcipher(tctx->fallback.skcipher);
}
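
/*
 * Bulk path: the request is walked with the generic skcipher walker and
 * full AES blocks are fed to the engine on each iteration; any partial
 * trailing bytes are returned to the walk via skcipher_walk_done(). For
 * CBC, geode_aes_crypt() loads walk.iv into the hardware IV registers
 * before each chunk and reads the chained IV back afterwards, so
 * consecutive chunks stay correctly chained. Keys the hardware cannot
 * handle are redirected to the fallback skcipher through a subrequest
 * stored in the request context (sized in geode_init_skcipher()).
 */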

static int geode_skcipher_crypt(struct skcipher_request *req, int mode,
				int dir)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	const struct geode_aes_tfm_ctx *tctx = crypto_skcipher_ctx(tfm);
	struct skcipher_walk walk;
	unsigned int nbytes;
	int err;

	if (unlikely(tctx->keylen != AES_KEYSIZE_128)) {
		struct skcipher_request *subreq = skcipher_request_ctx(req);

		*subreq = *req;
		skcipher_request_set_tfm(subreq, tctx->fallback.skcipher);
		if (dir == AES_DIR_DECRYPT)
			return crypto_skcipher_decrypt(subreq);
		else
			return crypto_skcipher_encrypt(subreq);
	}

	err = skcipher_walk_virt(&walk, req, false);

	while ((nbytes = walk.nbytes) != 0) {
		geode_aes_crypt(tctx, walk.src.virt.addr, walk.dst.virt.addr,
				round_down(nbytes, AES_BLOCK_SIZE),
				walk.iv, mode, dir);
		err = skcipher_walk_done(&walk, nbytes % AES_BLOCK_SIZE);
	}

	return err;
}

static int geode_cbc_encrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_ENCRYPT);
}

static int geode_cbc_decrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_CBC, AES_DIR_DECRYPT);
}

static int geode_ecb_encrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_ENCRYPT);
}

static int geode_ecb_decrypt(struct skcipher_request *req)
{
	return geode_skcipher_crypt(req, AES_MODE_ECB, AES_DIR_DECRYPT);
}

static struct skcipher_alg geode_skcipher_algs[] = {
	{
		.base.cra_name		= "cbc(aes)",
		.base.cra_driver_name	= "cbc-aes-geode",
		.base.cra_priority	= 400,
		.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct geode_aes_tfm_ctx),
		.base.cra_alignmask	= 15,
		.base.cra_module	= THIS_MODULE,
		.init			= geode_init_skcipher,
		.exit			= geode_exit_skcipher,
		.setkey			= geode_setkey_skcipher,
		.encrypt		= geode_cbc_encrypt,
		.decrypt		= geode_cbc_decrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
		.ivsize			= AES_BLOCK_SIZE,
	}, {
		.base.cra_name		= "ecb(aes)",
		.base.cra_driver_name	= "ecb-aes-geode",
		.base.cra_priority	= 400,
		.base.cra_flags		= CRYPTO_ALG_KERN_DRIVER_ONLY |
					  CRYPTO_ALG_NEED_FALLBACK,
		.base.cra_blocksize	= AES_BLOCK_SIZE,
		.base.cra_ctxsize	= sizeof(struct geode_aes_tfm_ctx),
		.base.cra_alignmask	= 15,
		.base.cra_module	= THIS_MODULE,
		.init			= geode_init_skcipher,
		.exit			= geode_exit_skcipher,
		.setkey			= geode_setkey_skcipher,
		.encrypt		= geode_ecb_encrypt,
		.decrypt		= geode_ecb_decrypt,
		.min_keysize		= AES_MIN_KEY_SIZE,
		.max_keysize		= AES_MAX_KEY_SIZE,
	},
};

static void geode_aes_remove(struct pci_dev *dev)
{
	crypto_unregister_alg(&geode_alg);
	crypto_unregister_skciphers(geode_skcipher_algs,
				    ARRAY_SIZE(geode_skcipher_algs));

	pci_iounmap(dev, _iobase);
	_iobase = NULL;

	pci_release_regions(dev);
	pci_disable_device(dev);
}
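
/*
 * Algorithm registration is the last step of probe() below, so the algs
 * never become visible before the MMIO mapping is live and pending
 * interrupts are cleared. On failure, the e* labels unwind in reverse
 * order of setup; geode_aes_remove() above performs the same teardown
 * for a fully probed device.
 */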

static int geode_aes_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	int ret;

	ret = pci_enable_device(dev);
	if (ret)
		return ret;

	ret = pci_request_regions(dev, "geode-aes");
	if (ret)
		goto eenable;

	_iobase = pci_iomap(dev, 0, 0);

	if (_iobase == NULL) {
		ret = -ENOMEM;
		goto erequest;
	}

	spin_lock_init(&lock);

	/* Clear any pending activity */
	iowrite32(AES_INTR_PENDING | AES_INTR_MASK, _iobase + AES_INTR_REG);

	ret = crypto_register_alg(&geode_alg);
	if (ret)
		goto eiomap;

	ret = crypto_register_skciphers(geode_skcipher_algs,
					ARRAY_SIZE(geode_skcipher_algs));
	if (ret)
		goto ealg;

	dev_notice(&dev->dev, "GEODE AES engine enabled.\n");
	return 0;

 ealg:
	crypto_unregister_alg(&geode_alg);

 eiomap:
	pci_iounmap(dev, _iobase);

 erequest:
	pci_release_regions(dev);

 eenable:
	pci_disable_device(dev);

	dev_err(&dev->dev, "GEODE AES initialization failed.\n");
	return ret;
}

static struct pci_device_id geode_aes_tbl[] = {
	{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), },
	{ 0, }
};

MODULE_DEVICE_TABLE(pci, geode_aes_tbl);

static struct pci_driver geode_aes_driver = {
	.name = "Geode LX AES",
	.id_table = geode_aes_tbl,
	.probe = geode_aes_probe,
	.remove = geode_aes_remove,
};

module_pci_driver(geode_aes_driver);

MODULE_AUTHOR("Advanced Micro Devices, Inc.");
MODULE_DESCRIPTION("Geode LX Hardware AES driver");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(CRYPTO_INTERNAL);
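
/*
 * Illustrative only, not part of the driver: a minimal sketch of how a
 * kernel-side user might reach the "cbc-aes-geode" implementation through
 * the generic skcipher API once this module is loaded. Names such as
 * example_key, example_iv, example_buf and example_len are hypothetical;
 * for "cbc(aes)" the length must be a multiple of AES_BLOCK_SIZE.
 *
 *	struct crypto_skcipher *tfm;
 *	struct skcipher_request *req;
 *	struct scatterlist sg;
 *	DECLARE_CRYPTO_WAIT(wait);
 *	int err;
 *
 *	tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	if (IS_ERR(tfm))
 *		return PTR_ERR(tfm);
 *
 *	err = crypto_skcipher_setkey(tfm, example_key, AES_KEYSIZE_128);
 *
 *	req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *	sg_init_one(&sg, example_buf, example_len);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
 *				      crypto_req_done, &wait);
 *	skcipher_request_set_crypt(req, &sg, &sg, example_len, example_iv);
 *
 *	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
 *
 *	skcipher_request_free(req);
 *	crypto_free_skcipher(tfm);
 */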