// SPDX-License-Identifier: GPL-2.0-only
/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */

#include "rk3288_crypto.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/reset.h>

static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
	int err;

	err = clk_prepare_enable(dev->sclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
			__func__, __LINE__);
		goto err_return;
	}
	err = clk_prepare_enable(dev->aclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
			__func__, __LINE__);
		goto err_aclk;
	}
	err = clk_prepare_enable(dev->hclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
			__func__, __LINE__);
		goto err_hclk;
	}
	err = clk_prepare_enable(dev->dmaclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
			__func__, __LINE__);
		goto err_dmaclk;
	}
	return err;

err_dmaclk:
	clk_disable_unprepare(dev->hclk);
err_hclk:
	clk_disable_unprepare(dev->aclk);
err_aclk:
	clk_disable_unprepare(dev->sclk);
err_return:
	return err;
}

static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
	clk_disable_unprepare(dev->dmaclk);
	clk_disable_unprepare(dev->hclk);
	clk_disable_unprepare(dev->aclk);
	clk_disable_unprepare(dev->sclk);
}

static int check_alignment(struct scatterlist *sg_src,
			   struct scatterlist *sg_dst,
			   int align_mask)
{
	int in, out, align;

	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
	if (!sg_dst)
		return in;
	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
	align = in && out;

	return (align && (sg_src->length == sg_dst->length));
}

static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	dev->aligned = dev->aligned ?
		check_alignment(sg_src, sg_dst, dev->align_size) :
		dev->aligned;
	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		if (!sg_pcopy_to_buffer(dev->first, dev->src_nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}

static void rk_unload_data(struct rk_crypto_info *dev)
{
	struct scatterlist *sg_in, *sg_out;

	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);

	if (dev->sg_dst) {
		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
	}
}

static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;

	spin_lock(&dev->lock);
	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);

	if (interrupt_status & 0x0a) {
		dev_warn(dev->dev, "DMA Error\n");
		dev->err = -EFAULT;
	}
	tasklet_schedule(&dev->done_task);

	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}

static int rk_crypto_enqueue(struct rk_crypto_info *dev,
			     struct crypto_async_request *async_req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->lock, flags);
	ret = crypto_enqueue_request(&dev->queue, async_req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return ret;
	}
	dev->busy = true;
	spin_unlock_irqrestore(&dev->lock, flags);
	tasklet_schedule(&dev->queue_task);

	return ret;
}

static void rk_crypto_queue_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	dev->err = 0;
	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	dev->async_req = async_req;
	err = dev->start(dev);
	if (err)
		dev->complete(dev->async_req, err);
}

static void rk_crypto_done_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;

	if (dev->err) {
		dev->complete(dev->async_req, dev->err);
		return;
	}

	dev->err = dev->update(dev);
	if (dev->err)
		dev->complete(dev->async_req, dev->err);
}

static struct rk_crypto_tmp *rk_cipher_algs[] = {
	&rk_ecb_aes_alg,
	&rk_cbc_aes_alg,
	&rk_ecb_des_alg,
	&rk_cbc_des_alg,
	&rk_ecb_des3_ede_alg,
	&rk_cbc_des3_ede_alg,
	&rk_ahash_sha1,
	&rk_ahash_sha256,
	&rk_ahash_md5,
};

static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
	unsigned int i, k;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		rk_cipher_algs[i]->dev = crypto_info;
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			err = crypto_register_alg(
					&rk_cipher_algs[i]->alg.crypto);
		else
			err = crypto_register_ahash(
					&rk_cipher_algs[i]->alg.hash);
		if (err)
			goto err_cipher_algs;
	}
	return 0;

err_cipher_algs:
	for (k = 0; k < i; k++) {
		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
	}
	return err;
}

static void rk_crypto_unregister(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
	}
}

static void rk_crypto_action(void *data)
{
	struct rk_crypto_info *crypto_info = data;

	reset_control_assert(crypto_info->rst);
}

static const struct of_device_id crypto_of_id_table[] = {
	{ .compatible = "rockchip,rk3288-crypto" },
	{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);

static int rk_crypto_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct rk_crypto_info *crypto_info;
	int err = 0;

	crypto_info = devm_kzalloc(&pdev->dev,
				   sizeof(*crypto_info), GFP_KERNEL);
	if (!crypto_info) {
		err = -ENOMEM;
		goto err_crypto;
	}

	crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
	if (IS_ERR(crypto_info->rst)) {
		err = PTR_ERR(crypto_info->rst);
		goto err_crypto;
	}

	reset_control_assert(crypto_info->rst);
	usleep_range(10, 20);
	reset_control_deassert(crypto_info->rst);

	err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
	if (err)
		goto err_crypto;

	spin_lock_init(&crypto_info->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(crypto_info->reg)) {
		err = PTR_ERR(crypto_info->reg);
		goto err_crypto;
	}

	crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
	if (IS_ERR(crypto_info->aclk)) {
		err = PTR_ERR(crypto_info->aclk);
		goto err_crypto;
	}

	crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(crypto_info->hclk)) {
		err = PTR_ERR(crypto_info->hclk);
		goto err_crypto;
	}

	crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
	if (IS_ERR(crypto_info->sclk)) {
		err = PTR_ERR(crypto_info->sclk);
		goto err_crypto;
	}

	crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(crypto_info->dmaclk)) {
		err = PTR_ERR(crypto_info->dmaclk);
		goto err_crypto;
	}

	crypto_info->irq = platform_get_irq(pdev, 0);
	if (crypto_info->irq < 0) {
		dev_warn(dev, "control Interrupt is not available.\n");
		err = crypto_info->irq;
		goto err_crypto;
	}

	err = devm_request_irq(&pdev->dev, crypto_info->irq,
			       rk_crypto_irq_handle, IRQF_SHARED,
			       "rk-crypto",
			       pdev);

	if (err) {
		dev_err(dev, "irq request failed.\n");
		goto err_crypto;
	}

	crypto_info->dev = &pdev->dev;
	platform_set_drvdata(pdev, crypto_info);

	tasklet_init(&crypto_info->queue_task,
		     rk_crypto_queue_task_cb, (unsigned long)crypto_info);
	tasklet_init(&crypto_info->done_task,
		     rk_crypto_done_task_cb, (unsigned long)crypto_info);
	crypto_init_queue(&crypto_info->queue, 50);

	crypto_info->enable_clk = rk_crypto_enable_clk;
	crypto_info->disable_clk = rk_crypto_disable_clk;
	crypto_info->load_data = rk_load_data;
	crypto_info->unload_data = rk_unload_data;
	crypto_info->enqueue = rk_crypto_enqueue;
	crypto_info->busy = false;

	err = rk_crypto_register(crypto_info);
	if (err) {
		dev_err(dev, "err in register alg\n");
		goto err_register_alg;
	}

	dev_info(dev, "Crypto Accelerator successfully registered\n");
	return 0;

err_register_alg:
	tasklet_kill(&crypto_info->queue_task);
	tasklet_kill(&crypto_info->done_task);
err_crypto:
	return err;
}

static int rk_crypto_remove(struct platform_device *pdev)
{
	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);

	rk_crypto_unregister();
	tasklet_kill(&crypto_tmp->done_task);
	tasklet_kill(&crypto_tmp->queue_task);
	return 0;
}

static struct platform_driver crypto_driver = {
	.probe		= rk_crypto_probe,
	.remove		= rk_crypto_remove,
	.driver		= {
		.name	= "rk3288-crypto",
		.of_match_table	= crypto_of_id_table,
	},
};

module_platform_driver(crypto_driver);

MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");