/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Some ideas are from marvell-cesa.c and s5p-sss.c driver.
 */

#include "rk3288_crypto.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/reset.h>

static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
	int err;

	err = clk_prepare_enable(dev->sclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
			__func__, __LINE__);
		goto err_return;
	}
	err = clk_prepare_enable(dev->aclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
			__func__, __LINE__);
		goto err_aclk;
	}
	err = clk_prepare_enable(dev->hclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
			__func__, __LINE__);
		goto err_hclk;
	}
	err = clk_prepare_enable(dev->dmaclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
			__func__, __LINE__);
		goto err_dmaclk;
	}
	return err;
err_dmaclk:
	clk_disable_unprepare(dev->hclk);
err_hclk:
	clk_disable_unprepare(dev->aclk);
err_aclk:
	clk_disable_unprepare(dev->sclk);
err_return:
	return err;
}

static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
	clk_disable_unprepare(dev->dmaclk);
	clk_disable_unprepare(dev->hclk);
	clk_disable_unprepare(dev->aclk);
	clk_disable_unprepare(dev->sclk);
}

static int check_alignment(struct scatterlist *sg_src,
			   struct scatterlist *sg_dst,
			   int align_mask)
{
	int in, out, align;

	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
	if (!sg_dst)
		return in;
	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
	align = in && out;

	return (align && (sg_src->length == sg_dst->length));
}

static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	dev->aligned = dev->aligned ?
		check_alignment(sg_src, sg_dst, dev->align_size) :
		dev->aligned;
	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1, DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;
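		/*
		 * Unaligned case: stage the data through the driver's
		 * bounce buffer (addr_vir), at most one page per pass,
		 * and DMA-map that buffer instead of the caller's
		 * scatterlist.
		 */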
		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}

static void rk_unload_data(struct rk_crypto_info *dev)
{
	struct scatterlist *sg_in, *sg_out;

	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);

	if (dev->sg_dst) {
		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
	}
}

static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;

	spin_lock(&dev->lock);
	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);

	if (interrupt_status & 0x0a) {
		dev_warn(dev->dev, "DMA Error\n");
		dev->err = -EFAULT;
	}
	tasklet_schedule(&dev->done_task);

	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}

static int rk_crypto_enqueue(struct rk_crypto_info *dev,
			     struct crypto_async_request *async_req)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&dev->lock, flags);
	ret = crypto_enqueue_request(&dev->queue, async_req);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return ret;
	}
	dev->busy = true;
	spin_unlock_irqrestore(&dev->lock, flags);
	tasklet_schedule(&dev->queue_task);

	return ret;
}

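/*
 * Queue tasklet: pull the next request off the queue and hand it to the
 * per-request start() hook; the done tasklet below, scheduled from the
 * interrupt handler, then updates or completes the request.
 */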
static void rk_crypto_queue_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	dev->err = 0;
	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	dev->async_req = async_req;
	err = dev->start(dev);
	if (err)
		dev->complete(dev->async_req, err);
}

static void rk_crypto_done_task_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;

	if (dev->err) {
		dev->complete(dev->async_req, dev->err);
		return;
	}

	dev->err = dev->update(dev);
	if (dev->err)
		dev->complete(dev->async_req, dev->err);
}

static struct rk_crypto_tmp *rk_cipher_algs[] = {
	&rk_ecb_aes_alg,
	&rk_cbc_aes_alg,
	&rk_ecb_des_alg,
	&rk_cbc_des_alg,
	&rk_ecb_des3_ede_alg,
	&rk_cbc_des3_ede_alg,
	&rk_ahash_sha1,
	&rk_ahash_sha256,
	&rk_ahash_md5,
};

static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
	unsigned int i, k;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		rk_cipher_algs[i]->dev = crypto_info;
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			err = crypto_register_alg(
					&rk_cipher_algs[i]->alg.crypto);
		else
			err = crypto_register_ahash(
					&rk_cipher_algs[i]->alg.hash);
		if (err)
			goto err_cipher_algs;
	}
	return 0;

err_cipher_algs:
	/* Unregister only the algorithms that were registered above. */
	for (k = 0; k < i; k++) {
		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
	}
	return err;
}

static void rk_crypto_unregister(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
	}
}

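/*
 * devm-managed action registered in probe(): put the engine back into
 * reset when the device is unbound or probe fails.
 */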
static void rk_crypto_action(void *data)
{
	struct rk_crypto_info *crypto_info = data;

	reset_control_assert(crypto_info->rst);
}

static const struct of_device_id crypto_of_id_table[] = {
	{ .compatible = "rockchip,rk3288-crypto" },
	{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);

static int rk_crypto_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct rk_crypto_info *crypto_info;
	int err = 0;

	crypto_info = devm_kzalloc(&pdev->dev,
				   sizeof(*crypto_info), GFP_KERNEL);
	if (!crypto_info) {
		err = -ENOMEM;
		goto err_crypto;
	}

	crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
	if (IS_ERR(crypto_info->rst)) {
		err = PTR_ERR(crypto_info->rst);
		goto err_crypto;
	}

	reset_control_assert(crypto_info->rst);
	usleep_range(10, 20);
	reset_control_deassert(crypto_info->rst);

	err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
	if (err)
		goto err_crypto;

	spin_lock_init(&crypto_info->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(crypto_info->reg)) {
		err = PTR_ERR(crypto_info->reg);
		goto err_crypto;
	}

	crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
	if (IS_ERR(crypto_info->aclk)) {
		err = PTR_ERR(crypto_info->aclk);
		goto err_crypto;
	}

	crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(crypto_info->hclk)) {
		err = PTR_ERR(crypto_info->hclk);
		goto err_crypto;
	}

	crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
	if (IS_ERR(crypto_info->sclk)) {
		err = PTR_ERR(crypto_info->sclk);
		goto err_crypto;
	}

	crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(crypto_info->dmaclk)) {
		err = PTR_ERR(crypto_info->dmaclk);
		goto err_crypto;
	}

	crypto_info->irq = platform_get_irq(pdev, 0);
	if (crypto_info->irq < 0) {
		/* crypto_info->dev is not set yet, use &pdev->dev here. */
		dev_warn(dev, "control Interrupt is not available.\n");
		err = crypto_info->irq;
		goto err_crypto;
	}

	err = devm_request_irq(&pdev->dev, crypto_info->irq,
			       rk_crypto_irq_handle, IRQF_SHARED,
			       "rk-crypto", pdev);
	if (err) {
		dev_err(dev, "irq request failed.\n");
		goto err_crypto;
	}

	crypto_info->dev = &pdev->dev;
	platform_set_drvdata(pdev, crypto_info);

	tasklet_init(&crypto_info->queue_task,
		     rk_crypto_queue_task_cb, (unsigned long)crypto_info);
	tasklet_init(&crypto_info->done_task,
		     rk_crypto_done_task_cb, (unsigned long)crypto_info);
	crypto_init_queue(&crypto_info->queue, 50);

	crypto_info->enable_clk = rk_crypto_enable_clk;
	crypto_info->disable_clk = rk_crypto_disable_clk;
	crypto_info->load_data = rk_load_data;
	crypto_info->unload_data = rk_unload_data;
	crypto_info->enqueue = rk_crypto_enqueue;
	crypto_info->busy = false;

	err = rk_crypto_register(crypto_info);
	if (err) {
		dev_err(dev, "err in register alg\n");
		goto err_register_alg;
	}

	dev_info(dev, "Crypto Accelerator successfully registered\n");
	return 0;

err_register_alg:
	tasklet_kill(&crypto_info->queue_task);
	tasklet_kill(&crypto_info->done_task);
err_crypto:
	return err;
}

static int rk_crypto_remove(struct platform_device *pdev)
{
	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);

	rk_crypto_unregister();
	tasklet_kill(&crypto_tmp->done_task);
	tasklet_kill(&crypto_tmp->queue_task);
	return 0;
}

static struct platform_driver crypto_driver = {
	.probe		= rk_crypto_probe,
	.remove		= rk_crypto_remove,
	.driver		= {
		.name	= "rk3288-crypto",
		.of_match_table	= crypto_of_id_table,
	},
};

module_platform_driver(crypto_driver);

MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");