/*
 * Crypto acceleration support for Rockchip RK3288
 *
 * Copyright (c) 2015, Fuzhou Rockchip Electronics Co., Ltd
 *
 * Author: Zain Wang <zain.wang@rock-chips.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * Some ideas are from the marvell-cesa.c and s5p-sss.c drivers.
 */

#include "rk3288_crypto.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/delay.h>	/* usleep_range() */
#include <linux/reset.h>

static int rk_crypto_enable_clk(struct rk_crypto_info *dev)
{
	int err;

	err = clk_prepare_enable(dev->sclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock sclk\n",
			__func__, __LINE__);
		goto err_return;
	}
	err = clk_prepare_enable(dev->aclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock aclk\n",
			__func__, __LINE__);
		goto err_aclk;
	}
	err = clk_prepare_enable(dev->hclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock hclk\n",
			__func__, __LINE__);
		goto err_hclk;
	}
	err = clk_prepare_enable(dev->dmaclk);
	if (err) {
		dev_err(dev->dev, "[%s:%d], Couldn't enable clock dmaclk\n",
			__func__, __LINE__);
		goto err_dmaclk;
	}
	return err;

err_dmaclk:
	clk_disable_unprepare(dev->hclk);
err_hclk:
	clk_disable_unprepare(dev->aclk);
err_aclk:
	clk_disable_unprepare(dev->sclk);
err_return:
	return err;
}

static void rk_crypto_disable_clk(struct rk_crypto_info *dev)
{
	clk_disable_unprepare(dev->dmaclk);
	clk_disable_unprepare(dev->hclk);
	clk_disable_unprepare(dev->aclk);
	clk_disable_unprepare(dev->sclk);
}

/*
 * Check whether one src/dst scatterlist pair can be fed to the engine
 * directly: both entries must start 4-byte aligned, both lengths must be
 * a multiple of align_mask, and the two lengths must match.
 */
static int check_alignment(struct scatterlist *sg_src,
			   struct scatterlist *sg_dst,
			   int align_mask)
{
	int in, out, align;

	in = IS_ALIGNED((uint32_t)sg_src->offset, 4) &&
	     IS_ALIGNED((uint32_t)sg_src->length, align_mask);
	if (!sg_dst)
		return in;
	out = IS_ALIGNED((uint32_t)sg_dst->offset, 4) &&
	      IS_ALIGNED((uint32_t)sg_dst->length, align_mask);
	align = in && out;

	return (align && (sg_src->length == sg_dst->length));
}
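/*
 * rk_load_data - DMA-map the next chunk of the current request.
 *
 * Fast path: while every chunk seen so far has passed the alignment
 * check, the caller's scatterlist entries are mapped directly. Slow
 * path: the chunk is copied into the one-page bounce buffer at
 * ->addr_vir, which is then mapped in place of the caller's buffers
 * (twice when a destination exists, once per DMA direction).
 * rk_unload_data() undoes whichever mapping was made.
 */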
static int rk_load_data(struct rk_crypto_info *dev,
			struct scatterlist *sg_src,
			struct scatterlist *sg_dst)
{
	unsigned int count;

	/* Once one chunk fails the alignment check, stay on the copy path. */
	if (dev->aligned)
		dev->aligned = check_alignment(sg_src, sg_dst,
					       dev->align_size);

	if (dev->aligned) {
		count = min(dev->left_bytes, sg_src->length);
		dev->left_bytes -= count;

		if (!dma_map_sg(dev->dev, sg_src, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(src) error\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->addr_in = sg_dma_address(sg_src);

		if (sg_dst) {
			if (!dma_map_sg(dev->dev, sg_dst, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(dst) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, sg_src, 1,
					     DMA_TO_DEVICE);
				return -EINVAL;
			}
			dev->addr_out = sg_dma_address(sg_dst);
		}
	} else {
		count = (dev->left_bytes > PAGE_SIZE) ?
			PAGE_SIZE : dev->left_bytes;

		if (!sg_pcopy_to_buffer(dev->first, dev->nents,
					dev->addr_vir, count,
					dev->total - dev->left_bytes)) {
			dev_err(dev->dev, "[%s:%d] pcopy err\n",
				__func__, __LINE__);
			return -EINVAL;
		}
		dev->left_bytes -= count;
		sg_init_one(&dev->sg_tmp, dev->addr_vir, count);
		if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1, DMA_TO_DEVICE)) {
			dev_err(dev->dev, "[%s:%d] dma_map_sg(sg_tmp) error\n",
				__func__, __LINE__);
			return -ENOMEM;
		}
		dev->addr_in = sg_dma_address(&dev->sg_tmp);

		if (sg_dst) {
			/* The bounce buffer doubles as the destination. */
			if (!dma_map_sg(dev->dev, &dev->sg_tmp, 1,
					DMA_FROM_DEVICE)) {
				dev_err(dev->dev,
					"[%s:%d] dma_map_sg(sg_tmp) error\n",
					__func__, __LINE__);
				dma_unmap_sg(dev->dev, &dev->sg_tmp, 1,
					     DMA_TO_DEVICE);
				return -ENOMEM;
			}
			dev->addr_out = sg_dma_address(&dev->sg_tmp);
		}
	}
	dev->count = count;
	return 0;
}

static void rk_unload_data(struct rk_crypto_info *dev)
{
	struct scatterlist *sg_in, *sg_out;

	sg_in = dev->aligned ? dev->sg_src : &dev->sg_tmp;
	dma_unmap_sg(dev->dev, sg_in, 1, DMA_TO_DEVICE);

	if (dev->sg_dst) {
		sg_out = dev->aligned ? dev->sg_dst : &dev->sg_tmp;
		dma_unmap_sg(dev->dev, sg_out, 1, DMA_FROM_DEVICE);
	}
}

static irqreturn_t rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_info *dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;
	int err = 0;

	spin_lock(&dev->lock);
	interrupt_status = CRYPTO_READ(dev, RK_CRYPTO_INTSTS);
	/* Ack everything we have seen before handling it. */
	CRYPTO_WRITE(dev, RK_CRYPTO_INTSTS, interrupt_status);
	if (interrupt_status & 0x0a) {
		/* 0x0a: the hash/block-cipher DMA error interrupt bits */
		dev_warn(dev->dev, "DMA Error\n");
		err = -EFAULT;
	} else if (interrupt_status & 0x05) {
		/* 0x05: the corresponding DMA done interrupt bits */
		err = dev->update(dev);
	}
	if (err)
		dev->complete(dev, err);
	spin_unlock(&dev->lock);
	return IRQ_HANDLED;
}
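/*
 * rk_crypto_tasklet_cb - pull the next request off the queue and start it.
 *
 * Runs in softirq context. A backlogged request, if any, is notified
 * with -EINPROGRESS first; the dequeued request is then handed to the
 * algorithm-specific ->start() hook, which programs the hardware.
 */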
static void rk_crypto_tasklet_cb(unsigned long data)
{
	struct rk_crypto_info *dev = (struct rk_crypto_info *)data;
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);
	spin_unlock_irqrestore(&dev->lock, flags);
	if (!async_req) {
		dev_err(dev->dev, "async_req is NULL\n");
		return;
	}
	if (backlog) {
		backlog->complete(backlog, -EINPROGRESS);
		backlog = NULL;
	}

	if (crypto_tfm_alg_type(async_req->tfm) == CRYPTO_ALG_TYPE_ABLKCIPHER)
		dev->ablk_req = ablkcipher_request_cast(async_req);
	else
		dev->ahash_req = ahash_request_cast(async_req);
	err = dev->start(dev);
	if (err)
		dev->complete(dev, err);
}

static struct rk_crypto_tmp *rk_cipher_algs[] = {
	&rk_ecb_aes_alg,
	&rk_cbc_aes_alg,
	&rk_ecb_des_alg,
	&rk_cbc_des_alg,
	&rk_ecb_des3_ede_alg,
	&rk_cbc_des3_ede_alg,
	&rk_ahash_sha1,
	&rk_ahash_sha256,
	&rk_ahash_md5,
};

static int rk_crypto_register(struct rk_crypto_info *crypto_info)
{
	unsigned int i, k;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		rk_cipher_algs[i]->dev = crypto_info;
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			err = crypto_register_alg(
					&rk_cipher_algs[i]->alg.crypto);
		else
			err = crypto_register_ahash(
					&rk_cipher_algs[i]->alg.hash);
		if (err)
			goto err_cipher_algs;
	}
	return 0;

err_cipher_algs:
	/* Unwind every algorithm registered before the failing one. */
	for (k = 0; k < i; k++) {
		if (rk_cipher_algs[k]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[k]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[k]->alg.hash);
	}
	return err;
}

static void rk_crypto_unregister(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(rk_cipher_algs); i++) {
		if (rk_cipher_algs[i]->type == ALG_TYPE_CIPHER)
			crypto_unregister_alg(&rk_cipher_algs[i]->alg.crypto);
		else
			crypto_unregister_ahash(&rk_cipher_algs[i]->alg.hash);
	}
}

static void rk_crypto_action(void *data)
{
	struct rk_crypto_info *crypto_info = data;

	reset_control_assert(crypto_info->rst);
}
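/*
 * For reference, a matching device-tree node looks roughly like the
 * sketch below. The compatible string, clock-names and reset-names are
 * exactly what this driver requests; the reg, interrupt and clock
 * specifiers are illustrative assumptions for an RK3288 board:
 *
 *	crypto@ff8a0000 {
 *		compatible = "rockchip,rk3288-crypto";
 *		reg = <0xff8a0000 0x4000>;
 *		interrupts = <GIC_SPI 48 IRQ_TYPE_LEVEL_HIGH>;
 *		clocks = <&cru ACLK_CRYPTO>, <&cru HCLK_CRYPTO>,
 *			 <&cru SCLK_CRYPTO>, <&cru ACLK_DMAC1>;
 *		clock-names = "aclk", "hclk", "sclk", "apb_pclk";
 *		resets = <&cru SRST_CRYPTO>;
 *		reset-names = "crypto-rst";
 *	};
 */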
static const struct of_device_id crypto_of_id_table[] = {
	{ .compatible = "rockchip,rk3288-crypto" },
	{}
};
MODULE_DEVICE_TABLE(of, crypto_of_id_table);

static int rk_crypto_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct device *dev = &pdev->dev;
	struct rk_crypto_info *crypto_info;
	int err = 0;

	crypto_info = devm_kzalloc(&pdev->dev,
				   sizeof(*crypto_info), GFP_KERNEL);
	if (!crypto_info) {
		err = -ENOMEM;
		goto err_crypto;
	}
	/* Set ->dev before the first dev_warn/dev_err that dereferences it. */
	crypto_info->dev = &pdev->dev;

	crypto_info->rst = devm_reset_control_get(dev, "crypto-rst");
	if (IS_ERR(crypto_info->rst)) {
		err = PTR_ERR(crypto_info->rst);
		goto err_crypto;
	}

	reset_control_assert(crypto_info->rst);
	usleep_range(10, 20);
	reset_control_deassert(crypto_info->rst);

	err = devm_add_action_or_reset(dev, rk_crypto_action, crypto_info);
	if (err)
		goto err_crypto;

	spin_lock_init(&crypto_info->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	crypto_info->reg = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(crypto_info->reg)) {
		err = PTR_ERR(crypto_info->reg);
		goto err_crypto;
	}

	crypto_info->aclk = devm_clk_get(&pdev->dev, "aclk");
	if (IS_ERR(crypto_info->aclk)) {
		err = PTR_ERR(crypto_info->aclk);
		goto err_crypto;
	}

	crypto_info->hclk = devm_clk_get(&pdev->dev, "hclk");
	if (IS_ERR(crypto_info->hclk)) {
		err = PTR_ERR(crypto_info->hclk);
		goto err_crypto;
	}

	crypto_info->sclk = devm_clk_get(&pdev->dev, "sclk");
	if (IS_ERR(crypto_info->sclk)) {
		err = PTR_ERR(crypto_info->sclk);
		goto err_crypto;
	}

	crypto_info->dmaclk = devm_clk_get(&pdev->dev, "apb_pclk");
	if (IS_ERR(crypto_info->dmaclk)) {
		err = PTR_ERR(crypto_info->dmaclk);
		goto err_crypto;
	}

	crypto_info->irq = platform_get_irq(pdev, 0);
	if (crypto_info->irq < 0) {
		dev_warn(crypto_info->dev,
			 "control interrupt is not available.\n");
		err = crypto_info->irq;
		goto err_crypto;
	}

	/* Set drvdata before the shared IRQ handler can possibly fire. */
	platform_set_drvdata(pdev, crypto_info);

	err = devm_request_irq(&pdev->dev, crypto_info->irq,
			       rk_crypto_irq_handle, IRQF_SHARED,
			       "rk-crypto", pdev);
	if (err) {
		dev_err(crypto_info->dev, "irq request failed.\n");
		goto err_crypto;
	}

	tasklet_init(&crypto_info->crypto_tasklet,
		     rk_crypto_tasklet_cb, (unsigned long)crypto_info);
	crypto_init_queue(&crypto_info->queue, 50);

	crypto_info->enable_clk = rk_crypto_enable_clk;
	crypto_info->disable_clk = rk_crypto_disable_clk;
	crypto_info->load_data = rk_load_data;
	crypto_info->unload_data = rk_unload_data;

	err = rk_crypto_register(crypto_info);
	if (err) {
		dev_err(dev, "failed to register crypto algorithms\n");
		goto err_register_alg;
	}

	dev_info(dev, "Crypto Accelerator successfully registered\n");
	return 0;

err_register_alg:
	tasklet_kill(&crypto_info->crypto_tasklet);
err_crypto:
	return err;
}

static int rk_crypto_remove(struct platform_device *pdev)
{
	struct rk_crypto_info *crypto_tmp = platform_get_drvdata(pdev);

	rk_crypto_unregister();
	tasklet_kill(&crypto_tmp->crypto_tasklet);
	return 0;
}

static struct platform_driver crypto_driver = {
	.probe		= rk_crypto_probe,
	.remove		= rk_crypto_remove,
	.driver		= {
		.name		= "rk3288-crypto",
		.of_match_table	= crypto_of_id_table,
	},
};

module_platform_driver(crypto_driver);

MODULE_AUTHOR("Zain Wang <zain.wang@rock-chips.com>");
MODULE_DESCRIPTION("Support for Rockchip's cryptographic engine");
MODULE_LICENSE("GPL");