// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

static const struct qce_algo_ops *qce_ops[] = {
#ifdef CONFIG_CRYPTO_DEV_QCE_SKCIPHER
	&skcipher_ops,
#endif
#ifdef CONFIG_CRYPTO_DEV_QCE_SHA
	&ahash_ops,
#endif
};

static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}

/* Dispatch a dequeued request to the algo ops matching its tfm type. */
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

/*
 * Enqueue a new request (if any) and, unless the engine is already
 * busy with a request, pull the next one off the queue and start it.
 */
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

/* Completion tasklet: report the result and kick off the next request. */
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}
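
/*
 * A minimal sketch of how the two hooks above, installed as
 * qce->async_req_enqueue and qce->async_req_done in qce_crypto_probe()
 * below, are meant to be driven by the algorithm glue in cipher.c and
 * sha.c. The example_* names are illustrative assumptions, not part of
 * this driver:
 *
 *	static int example_crypt(struct qce_device *qce,
 *				 struct crypto_async_request *base)
 *	{
 *		// queue the request; it runs now or once the engine idles
 *		return qce->async_req_enqueue(qce, base);
 *	}
 *
 *	static void example_dma_done(void *data)	// BAM DMA callback
 *	{
 *		struct qce_device *qce = data;
 *
 *		// schedules qce_tasklet_req_done() to complete the request
 *		qce->async_req_done(qce, 0);
 *	}
 */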

static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * The driver does not support v5 with minor 0 because that
	 * revision has special alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;

	/*
	 * Rx and tx pipes are treated as a pair inside CE.
	 * Pipe pair number depends on the actual BAM dma pipe
	 * that is used for transfers. The BAM dma pipes are passed
	 * from the device tree and used to derive the pipe pair
	 * id in the CE driver as follows.
	 *	BAM dma pipes(rx, tx)		CE pipe pair id
	 *		0,1			0
	 *		2,3			1
	 *		4,5			2
	 *		6,7			3
	 *		...
	 */
	qce->pipe_pair_id = qce->dma.rxchan->chan_id >> 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	qce->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	/*
	 * The version check needs the DMA channels (for the pipe pair
	 * id), so it runs after qce_dma_request(); on failure the
	 * channels must be released again, hence err_dma, not err_clks.
	 */
	ret = qce_check_version(qce);
	if (ret)
		goto err_dma;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}

static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{ .compatible = "qcom,crypto-v5.4", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);
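
/*
 * A hedged sketch of a devicetree node this driver binds to. Only the
 * compatible string, the "core"/"iface"/"bus" clock names and the
 * rx/tx BAM channel pairing are implied by the code above; the unit
 * address, register size and clock/dma specifiers are illustrative
 * assumptions:
 *
 *	crypto@fd45a000 {
 *		compatible = "qcom,crypto-v5.1";
 *		reg = <0xfd45a000 0x6000>;
 *		clocks = <&gcc GCC_CE2_CLK>,
 *			 <&gcc GCC_CE2_AHB_CLK>,
 *			 <&gcc GCC_CE2_AXI_CLK>;
 *		clock-names = "core", "iface", "bus";
 *		dmas = <&cryptobam 2>, <&cryptobam 3>;
 *		dma-names = "rx", "tx";
 *	};
 */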
MODULE_ALIAS("platform:" KBUILD_MODNAME); 295 MODULE_AUTHOR("The Linux Foundation"); 296