// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (c) 2021 Aspeed Technology Inc.
 */

#include "aspeed-hace.h"
#include <crypto/engine.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#ifdef CONFIG_CRYPTO_DEV_ASPEED_DEBUG
#define HACE_DBG(d, fmt, ...)	\
	dev_info((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#else
#define HACE_DBG(d, fmt, ...)	\
	dev_dbg((d)->dev, "%s() " fmt, __func__, ##__VA_ARGS__)
#endif

/* HACE interrupt service routine */
static irqreturn_t aspeed_hace_irq(int irq, void *dev)
{
	struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)dev;
	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;
	u32 sts;

	sts = ast_hace_read(hace_dev, ASPEED_HACE_STS);
	ast_hace_write(hace_dev, sts, ASPEED_HACE_STS);

	HACE_DBG(hace_dev, "irq status: 0x%x\n", sts);

	if (sts & HACE_HASH_ISR) {
		if (hash_engine->flags & CRYPTO_FLAGS_BUSY)
			tasklet_schedule(&hash_engine->done_task);
		else
			dev_warn(hace_dev->dev, "HASH no active requests.\n");
	}

	if (sts & HACE_CRYPTO_ISR) {
		if (crypto_engine->flags & CRYPTO_FLAGS_BUSY)
			tasklet_schedule(&crypto_engine->done_task);
		else
			dev_warn(hace_dev->dev, "CRYPTO no active requests.\n");
	}

	return IRQ_HANDLED;
}

static void aspeed_hace_crypto_done_task(unsigned long data)
{
	struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;

	crypto_engine->resume(hace_dev);
}

static void aspeed_hace_hash_done_task(unsigned long data)
{
	struct aspeed_hace_dev *hace_dev = (struct aspeed_hace_dev *)data;
	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;

	hash_engine->resume(hace_dev);
}

static void aspeed_hace_register(struct aspeed_hace_dev *hace_dev)
{
#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
	aspeed_register_hace_hash_algs(hace_dev);
#endif
#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
	aspeed_register_hace_crypto_algs(hace_dev);
#endif
}

static void aspeed_hace_unregister(struct aspeed_hace_dev *hace_dev)
{
#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_HASH
	aspeed_unregister_hace_hash_algs(hace_dev);
#endif
#ifdef CONFIG_CRYPTO_DEV_ASPEED_HACE_CRYPTO
	aspeed_unregister_hace_crypto_algs(hace_dev);
#endif
}

static const struct of_device_id aspeed_hace_of_matches[] = {
	{ .compatible = "aspeed,ast2500-hace", .data = (void *)5, },
	{ .compatible = "aspeed,ast2600-hace", .data = (void *)6, },
	{},
};

static int aspeed_hace_probe(struct platform_device *pdev)
{
	struct aspeed_engine_crypto *crypto_engine;
	const struct of_device_id *hace_dev_id;
	struct aspeed_engine_hash *hash_engine;
	struct aspeed_hace_dev *hace_dev;
	int rc;

	hace_dev = devm_kzalloc(&pdev->dev, sizeof(struct aspeed_hace_dev),
				GFP_KERNEL);
	if (!hace_dev)
		return -ENOMEM;

	hace_dev_id = of_match_device(aspeed_hace_of_matches, &pdev->dev);
	if (!hace_dev_id) {
		dev_err(&pdev->dev, "Failed to match hace dev id\n");
		return -EINVAL;
	}
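
	/* Record the SoC generation carried in the match data (5 = AST2500, 6 = AST2600) */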
	hace_dev->dev = &pdev->dev;
	hace_dev->version = (unsigned long)hace_dev_id->data;
	hash_engine = &hace_dev->hash_engine;
	crypto_engine = &hace_dev->crypto_engine;

	platform_set_drvdata(pdev, hace_dev);

	hace_dev->regs = devm_platform_get_and_ioremap_resource(pdev, 0, NULL);
	if (IS_ERR(hace_dev->regs))
		return PTR_ERR(hace_dev->regs);

	/* Get irq number and register it */
	hace_dev->irq = platform_get_irq(pdev, 0);
	if (hace_dev->irq < 0)
		return hace_dev->irq;

	rc = devm_request_irq(&pdev->dev, hace_dev->irq, aspeed_hace_irq, 0,
			      dev_name(&pdev->dev), hace_dev);
	if (rc) {
		dev_err(&pdev->dev, "Failed to request interrupt\n");
		return rc;
	}

	/* Get clock and enable it */
	hace_dev->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(hace_dev->clk)) {
		dev_err(&pdev->dev, "Failed to get clk\n");
		return PTR_ERR(hace_dev->clk);
	}

	rc = clk_prepare_enable(hace_dev->clk);
	if (rc) {
		dev_err(&pdev->dev, "Failed to enable clock: %d\n", rc);
		return rc;
	}

	/* Initialize crypto hardware engine structure for hash */
	hace_dev->crypt_engine_hash = crypto_engine_alloc_init(hace_dev->dev,
							       true);
	if (!hace_dev->crypt_engine_hash) {
		rc = -ENOMEM;
		goto clk_exit;
	}

	rc = crypto_engine_start(hace_dev->crypt_engine_hash);
	if (rc)
		goto err_engine_hash_start;

	tasklet_init(&hash_engine->done_task, aspeed_hace_hash_done_task,
		     (unsigned long)hace_dev);

	/* Initialize crypto hardware engine structure for crypto */
	hace_dev->crypt_engine_crypto = crypto_engine_alloc_init(hace_dev->dev,
								 true);
	if (!hace_dev->crypt_engine_crypto) {
		rc = -ENOMEM;
		goto err_engine_hash_start;
	}

	rc = crypto_engine_start(hace_dev->crypt_engine_crypto);
	if (rc)
		goto err_engine_crypto_start;

	tasklet_init(&crypto_engine->done_task, aspeed_hace_crypto_done_task,
		     (unsigned long)hace_dev);

	/* Allocate DMA buffer for hash engine input */
	hash_engine->ahash_src_addr =
		dmam_alloc_coherent(&pdev->dev,
				    ASPEED_HASH_SRC_DMA_BUF_LEN,
				    &hash_engine->ahash_src_dma_addr,
				    GFP_KERNEL);
	if (!hash_engine->ahash_src_addr) {
		dev_err(&pdev->dev, "Failed to allocate dma buffer\n");
		rc = -ENOMEM;
		goto err_engine_crypto_start;
	}

	/* Allocate DMA buffer for crypto engine context */
	crypto_engine->cipher_ctx =
		dmam_alloc_coherent(&pdev->dev,
				    PAGE_SIZE,
				    &crypto_engine->cipher_ctx_dma,
				    GFP_KERNEL);
	if (!crypto_engine->cipher_ctx) {
		dev_err(&pdev->dev, "Failed to allocate cipher ctx dma\n");
		rc = -ENOMEM;
		goto err_engine_crypto_start;
	}

	/* Allocate DMA buffer for crypto engine input */
	crypto_engine->cipher_addr =
		dmam_alloc_coherent(&pdev->dev,
				    ASPEED_CRYPTO_SRC_DMA_BUF_LEN,
				    &crypto_engine->cipher_dma_addr,
				    GFP_KERNEL);
	if (!crypto_engine->cipher_addr) {
		dev_err(&pdev->dev, "Failed to allocate cipher addr dma\n");
		rc = -ENOMEM;
		goto err_engine_crypto_start;
	}

	/* Allocate DMA buffer for crypto engine output */
	if (hace_dev->version == AST2600_VERSION) {
		crypto_engine->dst_sg_addr =
			dmam_alloc_coherent(&pdev->dev,
					    ASPEED_CRYPTO_DST_DMA_BUF_LEN,
					    &crypto_engine->dst_sg_dma_addr,
					    GFP_KERNEL);
		if (!crypto_engine->dst_sg_addr) {
			dev_err(&pdev->dev, "Failed to allocate dst_sg dma\n");
			rc = -ENOMEM;
			goto err_engine_crypto_start;
		}
	}

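	/* Engines and DMA buffers are ready; register the supported algorithms */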
	aspeed_hace_register(hace_dev);

	dev_info(&pdev->dev, "Aspeed Crypto Accelerator successfully registered\n");

	return 0;

err_engine_crypto_start:
	crypto_engine_exit(hace_dev->crypt_engine_crypto);
err_engine_hash_start:
	crypto_engine_exit(hace_dev->crypt_engine_hash);
clk_exit:
	clk_disable_unprepare(hace_dev->clk);

	return rc;
}

static int aspeed_hace_remove(struct platform_device *pdev)
{
	struct aspeed_hace_dev *hace_dev = platform_get_drvdata(pdev);
	struct aspeed_engine_crypto *crypto_engine = &hace_dev->crypto_engine;
	struct aspeed_engine_hash *hash_engine = &hace_dev->hash_engine;

	aspeed_hace_unregister(hace_dev);

	crypto_engine_exit(hace_dev->crypt_engine_hash);
	crypto_engine_exit(hace_dev->crypt_engine_crypto);

	tasklet_kill(&hash_engine->done_task);
	tasklet_kill(&crypto_engine->done_task);

	clk_disable_unprepare(hace_dev->clk);

	return 0;
}

MODULE_DEVICE_TABLE(of, aspeed_hace_of_matches);

static struct platform_driver aspeed_hace_driver = {
	.probe = aspeed_hace_probe,
	.remove = aspeed_hace_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = aspeed_hace_of_matches,
	},
};

module_platform_driver(aspeed_hace_driver);

MODULE_AUTHOR("Neal Liu <neal_liu@aspeedtech.com>");
MODULE_DESCRIPTION("Aspeed HACE Crypto Accelerator driver");
MODULE_LICENSE("GPL");