/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <crypto/algapi.h>
#include <crypto/internal/hash.h>
#include <crypto/sha.h>

#include "core.h"
#include "cipher.h"
#include "sha.h"

#define QCE_MAJOR_VERSION5	0x05
#define QCE_QUEUE_LENGTH	1

static const struct qce_algo_ops *qce_ops[] = {
	&ablkcipher_ops,
	&ahash_ops,
};

static void qce_unregister_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ops->unregister_algs(qce);
	}
}

static int qce_register_algs(struct qce_device *qce)
{
	const struct qce_algo_ops *ops;
	int i, ret = -ENODEV;

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		ret = ops->register_algs(qce);
		if (ret)
			break;
	}

	return ret;
}

/* Dispatch an async request to the algorithm ops matching its crypto type. */
static int qce_handle_request(struct crypto_async_request *async_req)
{
	int ret = -EINVAL, i;
	const struct qce_algo_ops *ops;
	u32 type = crypto_tfm_alg_type(async_req->tfm);

	for (i = 0; i < ARRAY_SIZE(qce_ops); i++) {
		ops = qce_ops[i];
		if (type != ops->type)
			continue;
		ret = ops->async_req_handle(async_req);
		break;
	}

	return ret;
}

/*
 * Enqueue a new request (if any) and, unless the engine is already busy,
 * dequeue and start the next request from the crypto queue.
 */
static int qce_handle_queue(struct qce_device *qce,
			    struct crypto_async_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	unsigned long flags;
	int ret = 0, err;

	spin_lock_irqsave(&qce->lock, flags);

	if (req)
		ret = crypto_enqueue_request(&qce->queue, req);

	/* busy, do not dequeue request */
	if (qce->req) {
		spin_unlock_irqrestore(&qce->lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&qce->queue);
	async_req = crypto_dequeue_request(&qce->queue);
	if (async_req)
		qce->req = async_req;

	spin_unlock_irqrestore(&qce->lock, flags);

	if (!async_req)
		return ret;

	if (backlog) {
		spin_lock_bh(&qce->lock);
		backlog->complete(backlog, -EINPROGRESS);
		spin_unlock_bh(&qce->lock);
	}

	err = qce_handle_request(async_req);
	if (err) {
		qce->result = err;
		tasklet_schedule(&qce->done_tasklet);
	}

	return ret;
}

/* Done tasklet: complete the active request and kick the queue again. */
static void qce_tasklet_req_done(unsigned long data)
{
	struct qce_device *qce = (struct qce_device *)data;
	struct crypto_async_request *req;
	unsigned long flags;

	spin_lock_irqsave(&qce->lock, flags);
	req = qce->req;
	qce->req = NULL;
	spin_unlock_irqrestore(&qce->lock, flags);

	if (req)
		req->complete(req, qce->result);

	qce_handle_queue(qce, NULL);
}

static int qce_async_request_enqueue(struct qce_device *qce,
				     struct crypto_async_request *req)
{
	return qce_handle_queue(qce, req);
}

static void qce_async_request_done(struct qce_device *qce, int ret)
{
	qce->result = ret;
	tasklet_schedule(&qce->done_tasklet);
}

static int qce_check_version(struct qce_device *qce)
{
	u32 major, minor, step;

	qce_get_version(qce, &major, &minor, &step);

	/*
	 * The driver does not support v5 with minor 0 because it has special
	 * alignment requirements.
	 */
	if (major != QCE_MAJOR_VERSION5 || minor == 0)
		return -ENODEV;

	qce->burst_size = QCE_BAM_BURST_SIZE;
	qce->pipe_pair_id = 1;

	dev_dbg(qce->dev, "Crypto device found, version %d.%d.%d\n",
		major, minor, step);

	return 0;
}

static int qce_crypto_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct qce_device *qce;
	struct resource *res;
	int ret;

	qce = devm_kzalloc(dev, sizeof(*qce), GFP_KERNEL);
	if (!qce)
		return -ENOMEM;

	qce->dev = dev;
	platform_set_drvdata(pdev, qce);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	qce->base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(qce->base))
		return PTR_ERR(qce->base);

	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (ret < 0)
		return ret;

	qce->core = devm_clk_get(qce->dev, "core");
	if (IS_ERR(qce->core))
		return PTR_ERR(qce->core);

	qce->iface = devm_clk_get(qce->dev, "iface");
	if (IS_ERR(qce->iface))
		return PTR_ERR(qce->iface);

	qce->bus = devm_clk_get(qce->dev, "bus");
	if (IS_ERR(qce->bus))
		return PTR_ERR(qce->bus);

	ret = clk_prepare_enable(qce->core);
	if (ret)
		return ret;

	ret = clk_prepare_enable(qce->iface);
	if (ret)
		goto err_clks_core;

	ret = clk_prepare_enable(qce->bus);
	if (ret)
		goto err_clks_iface;

	ret = qce_dma_request(qce->dev, &qce->dma);
	if (ret)
		goto err_clks;

	ret = qce_check_version(qce);
	if (ret)
		goto err_clks;

	spin_lock_init(&qce->lock);
	tasklet_init(&qce->done_tasklet, qce_tasklet_req_done,
		     (unsigned long)qce);
	crypto_init_queue(&qce->queue, QCE_QUEUE_LENGTH);

	qce->async_req_enqueue = qce_async_request_enqueue;
	qce->async_req_done = qce_async_request_done;

	ret = qce_register_algs(qce);
	if (ret)
		goto err_dma;

	return 0;

err_dma:
	qce_dma_release(&qce->dma);
err_clks:
	clk_disable_unprepare(qce->bus);
err_clks_iface:
	clk_disable_unprepare(qce->iface);
err_clks_core:
	clk_disable_unprepare(qce->core);
	return ret;
}

static int qce_crypto_remove(struct platform_device *pdev)
{
	struct qce_device *qce = platform_get_drvdata(pdev);

	tasklet_kill(&qce->done_tasklet);
	qce_unregister_algs(qce);
	qce_dma_release(&qce->dma);
	clk_disable_unprepare(qce->bus);
	clk_disable_unprepare(qce->iface);
	clk_disable_unprepare(qce->core);
	return 0;
}

static const struct of_device_id qce_crypto_of_match[] = {
	{ .compatible = "qcom,crypto-v5.1", },
	{}
};
MODULE_DEVICE_TABLE(of, qce_crypto_of_match);

static struct platform_driver qce_crypto_driver = {
	.probe = qce_crypto_probe,
	.remove = qce_crypto_remove,
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = qce_crypto_of_match,
	},
};
module_platform_driver(qce_crypto_driver);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Qualcomm crypto engine driver");
MODULE_ALIAS("platform:" KBUILD_MODNAME);
MODULE_AUTHOR("The Linux Foundation");