// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

static const struct qce_algo_ops *qce_ops[] = {
	&ablkcipher_ops,
	&ahash_ops,
};

static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}

static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}
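
/*
 * Only one request is in flight at a time: qce->req marks the request
 * currently owned by the hardware, and qce_handle_queue() does not
 * dequeue another until it is cleared. A backlogged request is notified
 * with -EINPROGRESS before the dequeued one is started, and completion
 * runs from done_tasklet, which clears qce->req and re-arms the queue.
 */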
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}

static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * the driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;
	qce->pipe_pair_id = 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	struct resource *res;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	qce->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_clks;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}

static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");
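
/*
 * Sketch of a device tree node this driver could bind against. The unit
 * address, register size, clock specifiers and DMA channel numbers below
 * are illustrative assumptions, not taken from this file; only the
 * "qcom,crypto-v5.1" compatible and the "core"/"iface"/"bus" clock names
 * match the of_match table and devm_clk_get() calls above.
 *
 *	crypto@fd45a000 {
 *		compatible = "qcom,crypto-v5.1";
 *		reg = <0xfd45a000 0x6000>;
 *		clocks = <&gcc GCC_CE2_AHB_CLK>, <&gcc GCC_CE2_AXI_CLK>,
 *			 <&gcc GCC_CE2_CLK>;
 *		clock-names = "iface", "bus", "core";
 *		dmas = <&cryptobam 2>, <&cryptobam 3>;
 *		dma-names = "rx", "tx";
 *	};
 */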