// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) STMicroelectronics SA 2017
 * Author: Fabien Dessenne <fabien.dessenne@st.com>
 */

#include <linux/bitrev.h>
#include <linux/clk.h>
#include <linux/crc32poly.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include <crypto/internal/hash.h>

#include <asm/unaligned.h>

#define DRIVER_NAME		"stm32-crc32"
#define CHKSUM_DIGEST_SIZE	4
#define CHKSUM_BLOCK_SIZE	1

/* Registers */
#define CRC_DR			0x00000000
#define CRC_CR			0x00000008
#define CRC_INIT		0x00000010
#define CRC_POL			0x00000014

/* Registers values */
#define CRC_CR_RESET		BIT(0)
#define CRC_CR_REVERSE		(BIT(7) | BIT(6) | BIT(5))
#define CRC_INIT_DEFAULT	0xFFFFFFFF

#define CRC_AUTOSUSPEND_DELAY	50

struct stm32_crc {
	struct list_head list;
	struct device *dev;
	void __iomem *regs;
	struct clk *clk;
	u8 pending_data[sizeof(u32)];
	size_t nb_pending_bytes;
};

struct stm32_crc_list {
	struct list_head dev_list;
	spinlock_t lock; /* protect dev_list */
};

static struct stm32_crc_list crc_list = {
	.dev_list = LIST_HEAD_INIT(crc_list.dev_list),
	.lock = __SPIN_LOCK_UNLOCKED(crc_list.lock),
};

struct stm32_crc_ctx {
	u32 key;
	u32 poly;
};

struct stm32_crc_desc_ctx {
	u32 partial; /* crc32c: partial in first 4 bytes of that struct */
	struct stm32_crc *crc;
};

static int stm32_crc32_cra_init(struct crypto_tfm *tfm)
{
	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);

	mctx->key = CRC_INIT_DEFAULT;
	mctx->poly = CRC32_POLY_LE;
	return 0;
}

static int stm32_crc32c_cra_init(struct crypto_tfm *tfm)
{
	struct stm32_crc_ctx *mctx = crypto_tfm_ctx(tfm);

	mctx->key = CRC_INIT_DEFAULT;
	mctx->poly = CRC32C_POLY_LE;
	return 0;
}

static int stm32_crc_setkey(struct crypto_shash *tfm, const u8 *key,
			    unsigned int keylen)
{
	struct stm32_crc_ctx *mctx = crypto_shash_ctx(tfm);

	if (keylen != sizeof(u32)) {
		crypto_shash_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}

	mctx->key = get_unaligned_le32(key);
	return 0;
}

static int stm32_crc_init(struct shash_desc *desc)
{
	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);
	struct stm32_crc *crc;

	spin_lock_bh(&crc_list.lock);
	list_for_each_entry(crc, &crc_list.dev_list, list) {
		ctx->crc = crc;
		break;
	}
	spin_unlock_bh(&crc_list.lock);

	pm_runtime_get_sync(ctx->crc->dev);

	/* Reset, set key, poly and configure in bit reverse mode */
	writel_relaxed(bitrev32(mctx->key), ctx->crc->regs + CRC_INIT);
	writel_relaxed(bitrev32(mctx->poly), ctx->crc->regs + CRC_POL);
	writel_relaxed(CRC_CR_RESET | CRC_CR_REVERSE, ctx->crc->regs + CRC_CR);

	/* Store partial result */
	ctx->partial = readl_relaxed(ctx->crc->regs + CRC_DR);
	ctx->crc->nb_pending_bytes = 0;

	pm_runtime_mark_last_busy(ctx->crc->dev);
	pm_runtime_put_autosuspend(ctx->crc->dev);

	return 0;
}

static int stm32_crc_update(struct shash_desc *desc, const u8 *d8,
			    unsigned int length)
{
	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
	struct stm32_crc *crc = ctx->crc;
	u32 *d32;
	unsigned int i;

	pm_runtime_get_sync(crc->dev);

	if (unlikely(crc->nb_pending_bytes)) {
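		/*
		 * The CRC data register is fed 32-bit words: first complete
		 * the word left partially filled by a previous update() call
		 * before pushing the new buffer.
		 */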
		while (crc->nb_pending_bytes != sizeof(u32) && length) {
			/* Fill in pending data */
			crc->pending_data[crc->nb_pending_bytes++] = *(d8++);
			length--;
		}

		if (crc->nb_pending_bytes == sizeof(u32)) {
			/* Process completed pending data */
			writel_relaxed(*(u32 *)crc->pending_data,
				       crc->regs + CRC_DR);
			crc->nb_pending_bytes = 0;
		}
	}

	d32 = (u32 *)d8;
	for (i = 0; i < length >> 2; i++)
		/* Process 32 bits data */
		writel_relaxed(*(d32++), crc->regs + CRC_DR);

	/* Store partial result */
	ctx->partial = readl_relaxed(crc->regs + CRC_DR);

	pm_runtime_mark_last_busy(crc->dev);
	pm_runtime_put_autosuspend(crc->dev);

	/* Check for pending data (non 32 bits) */
	length &= 3;
	if (likely(!length))
		return 0;

	if ((crc->nb_pending_bytes + length) >= sizeof(u32)) {
		/* Shall not happen */
		dev_err(crc->dev, "Pending data overflow\n");
		return -EINVAL;
	}

	d8 = (const u8 *)d32;
	for (i = 0; i < length; i++)
		/* Store pending data */
		crc->pending_data[crc->nb_pending_bytes++] = *(d8++);

	return 0;
}

static int stm32_crc_final(struct shash_desc *desc, u8 *out)
{
	struct stm32_crc_desc_ctx *ctx = shash_desc_ctx(desc);
	struct stm32_crc_ctx *mctx = crypto_shash_ctx(desc->tfm);

	/* Send computed CRC */
	put_unaligned_le32(mctx->poly == CRC32C_POLY_LE ?
			   ~ctx->partial : ctx->partial, out);

	return 0;
}

static int stm32_crc_finup(struct shash_desc *desc, const u8 *data,
			   unsigned int length, u8 *out)
{
	return stm32_crc_update(desc, data, length) ?:
	       stm32_crc_final(desc, out);
}

static int stm32_crc_digest(struct shash_desc *desc, const u8 *data,
			    unsigned int length, u8 *out)
{
	return stm32_crc_init(desc) ?: stm32_crc_finup(desc, data, length, out);
}

static struct shash_alg algs[] = {
	/* CRC-32 */
	{
		.setkey = stm32_crc_setkey,
		.init = stm32_crc_init,
		.update = stm32_crc_update,
		.final = stm32_crc_final,
		.finup = stm32_crc_finup,
		.digest = stm32_crc_digest,
		.descsize = sizeof(struct stm32_crc_desc_ctx),
		.digestsize = CHKSUM_DIGEST_SIZE,
		.base = {
			.cra_name = "crc32",
			.cra_driver_name = DRIVER_NAME,
			.cra_priority = 200,
			.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
			.cra_blocksize = CHKSUM_BLOCK_SIZE,
			.cra_alignmask = 3,
			.cra_ctxsize = sizeof(struct stm32_crc_ctx),
			.cra_module = THIS_MODULE,
			.cra_init = stm32_crc32_cra_init,
		}
	},
	/* CRC-32 Castagnoli */
	{
		.setkey = stm32_crc_setkey,
		.init = stm32_crc_init,
		.update = stm32_crc_update,
		.final = stm32_crc_final,
		.finup = stm32_crc_finup,
		.digest = stm32_crc_digest,
		.descsize = sizeof(struct stm32_crc_desc_ctx),
		.digestsize = CHKSUM_DIGEST_SIZE,
		.base = {
			.cra_name = "crc32c",
			.cra_driver_name = DRIVER_NAME,
			.cra_priority = 200,
			.cra_flags = CRYPTO_ALG_OPTIONAL_KEY,
			.cra_blocksize = CHKSUM_BLOCK_SIZE,
			.cra_alignmask = 3,
			.cra_ctxsize = sizeof(struct stm32_crc_ctx),
			.cra_module = THIS_MODULE,
			.cra_init = stm32_crc32c_cra_init,
		}
	}
};

static int stm32_crc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct stm32_crc *crc;
	struct resource *res;
	int ret;

	crc = devm_kzalloc(dev, sizeof(*crc), GFP_KERNEL);
	if (!crc)
		return -ENOMEM;

	crc->dev = dev;

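	/* Map the CRC register bank described by the platform MEM resource */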
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	crc->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(crc->regs)) {
		dev_err(dev, "Cannot map CRC IO\n");
		return PTR_ERR(crc->regs);
	}

	crc->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(crc->clk)) {
		dev_err(dev, "Could not get clock\n");
		return PTR_ERR(crc->clk);
	}

	ret = clk_prepare_enable(crc->clk);
	if (ret) {
		dev_err(crc->dev, "Failed to enable clock\n");
		return ret;
	}

	pm_runtime_set_autosuspend_delay(dev, CRC_AUTOSUSPEND_DELAY);
	pm_runtime_use_autosuspend(dev);

	pm_runtime_get_noresume(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);

	platform_set_drvdata(pdev, crc);

	spin_lock(&crc_list.lock);
	list_add(&crc->list, &crc_list.dev_list);
	spin_unlock(&crc_list.lock);

	ret = crypto_register_shashes(algs, ARRAY_SIZE(algs));
	if (ret) {
		dev_err(dev, "Failed to register\n");
		clk_disable_unprepare(crc->clk);
		return ret;
	}

	dev_info(dev, "Initialized\n");

	pm_runtime_put_sync(dev);

	return 0;
}

static int stm32_crc_remove(struct platform_device *pdev)
{
	struct stm32_crc *crc = platform_get_drvdata(pdev);
	int ret = pm_runtime_get_sync(crc->dev);

	if (ret < 0)
		return ret;

	spin_lock(&crc_list.lock);
	list_del(&crc->list);
	spin_unlock(&crc_list.lock);

	crypto_unregister_shashes(algs, ARRAY_SIZE(algs));

	pm_runtime_disable(crc->dev);
	pm_runtime_put_noidle(crc->dev);

	clk_disable_unprepare(crc->clk);

	return 0;
}

#ifdef CONFIG_PM
static int stm32_crc_runtime_suspend(struct device *dev)
{
	struct stm32_crc *crc = dev_get_drvdata(dev);

	clk_disable_unprepare(crc->clk);

	return 0;
}

static int stm32_crc_runtime_resume(struct device *dev)
{
	struct stm32_crc *crc = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(crc->clk);
	if (ret) {
		dev_err(crc->dev, "Failed to prepare_enable clock\n");
		return ret;
	}

	return 0;
}
#endif

static const struct dev_pm_ops stm32_crc_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(stm32_crc_runtime_suspend,
			   stm32_crc_runtime_resume, NULL)
};

static const struct of_device_id stm32_dt_ids[] = {
	{ .compatible = "st,stm32f7-crc", },
	{},
};
MODULE_DEVICE_TABLE(of, stm32_dt_ids);

static struct platform_driver stm32_crc_driver = {
	.probe = stm32_crc_probe,
	.remove = stm32_crc_remove,
	.driver = {
		.name = DRIVER_NAME,
		.pm = &stm32_crc_pm_ops,
		.of_match_table = stm32_dt_ids,
	},
};

module_platform_driver(stm32_crc_driver);

MODULE_AUTHOR("Fabien Dessenne <fabien.dessenne@st.com>");
MODULE_DESCRIPTION("STMicroelectronics STM32 CRC32 hardware driver");
MODULE_LICENSE("GPL");