// SPDX-License-Identifier: GPL-2.0+
/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 *
 * Based on caamalg.c crypto API driver.
 *
 */

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/kfifo.h>

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

/* Size in bytes of one RNG FIFO STORE, and of every read issued by this file */
#define CAAM_RNG_MAX_FIFO_STORE_SIZE	16

/*
 * Length of used descriptors, see caam_init_desc()
 */
#define CAAM_RNG_DESC_LEN (CAAM_CMD_SZ +			\
			   CAAM_CMD_SZ +			\
			   CAAM_CMD_SZ + CAAM_PTR_SZ_MAX)

/*
 * rng per-device context
 * @rng:        hwrng instance registered with the hw_random core
 * @jrdev:      job ring device used to enqueue RNG descriptors
 * @ctrldev:    CAAM controller device; owns the devres-managed allocations
 * @desc_async: descriptor buffer used by the background kfifo refill path
 * @desc_sync:  descriptor buffer used by the synchronous (wait=true) path
 * @worker:     work item that refills @fifo when it runs empty
 * @fifo:       buffered random bytes served to non-blocking readers
 */
struct caam_rng_ctx {
	struct hwrng rng;
	struct device *jrdev;
	struct device *ctrldev;
	void *desc_async;
	void *desc_sync;
	struct work_struct worker;
	struct kfifo fifo;
};

/* Per-job state shared between the submitter and the completion callback */
struct caam_rng_job_ctx {
	struct completion *done;
	int *err;
};

static struct caam_rng_ctx *to_caam_rng_ctx(struct hwrng *r)
{
	/* rng.priv is set to the ctx pointer in caam_rng_init() */
	return (struct caam_rng_ctx *)r->priv;
}

/*
 * Job ring completion callback: decode a non-zero CAAM status into an
 * errno for the submitter, then wake the thread sleeping in
 * caam_rng_read_one().
 */
static void caam_rng_done(struct device *jrdev, u32 *desc, u32 err,
			  void *context)
{
	struct caam_rng_job_ctx *jctx = context;

	if (err)
		*jctx->err = caam_jr_strstatus(jrdev, err);

	complete(jctx->done);
}

/*
 * Build a job descriptor that generates random bytes (with prediction
 * resistance on) and stores CAAM_RNG_MAX_FIFO_STORE_SIZE of them to
 * @dst_dma.  Total length must stay within CAAM_RNG_DESC_LEN.
 */
static u32 *caam_init_desc(u32 *desc, dma_addr_t dst_dma)
{
	init_job_desc(desc, 0);	/* + 1 cmd_sz */
	/* Generate random bytes: + 1 cmd_sz */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG |
			 OP_ALG_PR_ON);
	/* Store bytes: + 1 cmd_sz + caam_ptr_sz */
	append_fifo_store(desc, dst_dma,
			  CAAM_RNG_MAX_FIFO_STORE_SIZE, FIFOST_TYPE_RNGSTORE);

	print_hex_dump_debug("rng job desc@: ", DUMP_PREFIX_ADDRESS,
			     16, 4, desc, desc_bytes(desc), 1);

	return desc;
}

/*
 * Run one RNG job synchronously: DMA-map @dst, enqueue the descriptor on
 * @jrdev and sleep on @done until caam_rng_done() fires.
 *
 * Note: the caller-supplied @len is deliberately overridden — every job
 * produces exactly CAAM_RNG_MAX_FIFO_STORE_SIZE bytes, so @dst must be at
 * least that big (the hw_random core guarantees this for the sync path).
 *
 * Returns the number of bytes written on success, or a negative errno
 * from the enqueue or from the decoded job status.
 */
static int caam_rng_read_one(struct device *jrdev,
			     void *dst, int len,
			     void *desc,
			     struct completion *done)
{
	dma_addr_t dst_dma;
	int err, ret = 0;
	struct caam_rng_job_ctx jctx = {
		.done = done,
		.err = &ret,
	};

	len = CAAM_RNG_MAX_FIFO_STORE_SIZE;

	dst_dma = dma_map_single(jrdev, dst, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, dst_dma)) {
		dev_err(jrdev, "unable to map destination memory\n");
		return -ENOMEM;
	}

	init_completion(done);
	err = caam_jr_enqueue(jrdev,
			      caam_init_desc(desc, dst_dma),
			      caam_rng_done, &jctx);
	if (err == -EINPROGRESS) {
		/* Job accepted; wait for the completion callback */
		wait_for_completion(done);
		err = 0;
	}

	dma_unmap_single(jrdev, dst_dma, len, DMA_FROM_DEVICE);

	/* Enqueue error first, then job status, else byte count */
	return err ?: (ret ?: len);
}

/*
 * Refill the kfifo with one job's worth of random bytes, writing directly
 * into the fifo's buffer via the kfifo DMA helpers.  On any error the
 * prepared region is simply abandoned (no _finish), leaving the fifo
 * unchanged.
 */
static void caam_rng_fill_async(struct caam_rng_ctx *ctx)
{
	struct scatterlist sg[1];
	struct completion done;
	int len, nents;

	sg_init_table(sg, ARRAY_SIZE(sg));
	nents = kfifo_dma_in_prepare(&ctx->fifo, sg, ARRAY_SIZE(sg),
				     CAAM_RNG_MAX_FIFO_STORE_SIZE);
	if (!nents)
		return;	/* fifo already full */

	len = caam_rng_read_one(ctx->jrdev, sg_virt(&sg[0]),
				sg[0].length,
				ctx->desc_async,
				&done);
	if (len < 0)
		return;

	kfifo_dma_in_finish(&ctx->fifo, len);
}

/* Workqueue wrapper so the fifo can be refilled outside the read path */
static void caam_rng_worker(struct work_struct *work)
{
	struct caam_rng_ctx *ctx = container_of(work, struct caam_rng_ctx,
						worker);
	caam_rng_fill_async(ctx);
}

/*
 * hwrng .read hook.
 *
 * wait=true: run a job synchronously and return its bytes/error directly.
 * wait=false: serve whatever the fifo has buffered (possibly 0 bytes) and
 * kick the worker to refill it once drained.
 */
static int caam_read(struct hwrng *rng, void *dst, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int out;

	if (wait) {
		struct completion done;

		return caam_rng_read_one(ctx->jrdev, dst, max,
					 ctx->desc_sync, &done);
	}

	out = kfifo_out(&ctx->fifo, dst, max);
	if (kfifo_is_empty(&ctx->fifo))
		schedule_work(&ctx->worker);

	return out;
}

/*
 * hwrng .cleanup hook: stop the refill worker before releasing the job
 * ring and fifo it uses.  The desc_* buffers are devres-managed and freed
 * with the controller device.
 */
static void caam_cleanup(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);

	flush_work(&ctx->worker);
	caam_jr_free(ctx->jrdev);
	kfifo_free(&ctx->fifo);
}

/*
 * hwrng .init hook: allocate the two descriptor buffers and the fifo,
 * grab a job ring, then prime the fifo so early non-blocking reads have
 * data available.  devm allocations are intentionally not freed on the
 * later error path — devres on ctrldev reclaims them.
 */
static int caam_init(struct hwrng *rng)
{
	struct caam_rng_ctx *ctx = to_caam_rng_ctx(rng);
	int err;

	ctx->desc_sync = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				      GFP_KERNEL);
	if (!ctx->desc_sync)
		return -ENOMEM;

	ctx->desc_async = devm_kzalloc(ctx->ctrldev, CAAM_RNG_DESC_LEN,
				       GFP_KERNEL);
	if (!ctx->desc_async)
		return -ENOMEM;

	/* Round the fifo buffer up so DMA into it stays cache-line aligned */
	if (kfifo_alloc(&ctx->fifo, ALIGN(CAAM_RNG_MAX_FIFO_STORE_SIZE,
					  dma_get_cache_alignment()),
			GFP_KERNEL))
		return -ENOMEM;

	INIT_WORK(&ctx->worker, caam_rng_worker);

	ctx->jrdev = caam_jr_alloc();
	err = PTR_ERR_OR_ZERO(ctx->jrdev);
	if (err) {
		kfifo_free(&ctx->fifo);
		pr_err("Job Ring Device allocation for transform failed\n");
		return err;
	}

	/*
	 * Fill async buffer to have early randomness data for
	 * hw_random
	 */
	caam_rng_fill_async(ctx);

	return 0;
}

/* Forward declaration: caam_rng_init's address is the devres group id */
int caam_rng_init(struct device *ctrldev);

void caam_rng_exit(struct device *ctrldev)
{
	/* Releases everything allocated inside the group opened below */
	devres_release_group(ctrldev, caam_rng_init);
}

/*
 * Probe-time entry point: register the hwrng backend if (and only if) the
 * hardware reports an instantiated RNG block.  All allocations are placed
 * in a dedicated devres group keyed on this function, so caam_rng_exit()
 * can tear them down as a unit.
 *
 * Returns 0 on success or when no RNG is present; negative errno otherwise.
 */
int caam_rng_init(struct device *ctrldev)
{
	struct caam_rng_ctx *ctx;
	u32 rng_inst;
	struct caam_drv_private *priv = dev_get_drvdata(ctrldev);
	int ret;

	/* Check for an instantiated RNG before registration */
	if (priv->era < 10)
		rng_inst = (rd_reg32(&priv->jr[0]->perfmon.cha_num_ls) &
			    CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	else
		rng_inst = rd_reg32(&priv->jr[0]->vreg.rng) & CHA_VER_NUM_MASK;

	if (!rng_inst)
		return 0;

	if (!devres_open_group(ctrldev, caam_rng_init, GFP_KERNEL))
		return -ENOMEM;

	ctx = devm_kzalloc(ctrldev, sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->ctrldev = ctrldev;

	ctx->rng.name = "rng-caam";
	ctx->rng.init = caam_init;
	ctx->rng.cleanup = caam_cleanup;
	ctx->rng.read = caam_read;
	ctx->rng.priv = (unsigned long)ctx;

	dev_info(ctrldev, "registering rng-caam\n");

	ret = devm_hwrng_register(ctrldev, &ctx->rng);
	if (ret) {
		caam_rng_exit(ctrldev);
		return ret;
	}

	devres_close_group(ctrldev, caam_rng_init);
	return 0;
}