/*
 * caam - Freescale FSL CAAM support for hw_random
 *
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Based on caamalg.c crypto API driver.
 *
 * relationship between job descriptors and shared descriptors:
 *
 * ---------------                 --------------
 * | JobDesc #0  |---------------->| ShareDesc  |
 * | *(buffer 0) |       |-------->| (generate) |
 * ---------------       |         | (move)     |
 *                       |         | (store)    |
 * ---------------       |         --------------
 * | JobDesc #1  |-------|
 * | *(buffer 1) |
 * ---------------
 *
 * A job desc looks like this:
 *
 * ---------------------
 * | Header            |
 * | ShareDesc Pointer |
 * | SEQ_OUT_PTR       |
 * | (output buffer)   |
 * ---------------------
 *
 * The SharedDesc never changes, and each job descriptor points to one of two
 * buffers for each device, from which the data will be copied into the
 * requested destination.
 */

#include <linux/hw_random.h>
#include <linux/completion.h>
#include <linux/atomic.h>

#include "compat.h"

#include "regs.h"
#include "intern.h"
#include "desc_constr.h"
#include "jr.h"
#include "error.h"

/*
 * Maximum buffer size: maximum number of random, cache-aligned bytes that
 * will be generated and moved to seq out ptr (extlen not allowed)
 */
#define RN_BUF_SIZE			(0xffff / L1_CACHE_BYTES * \
					 L1_CACHE_BYTES)
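
/*
 * Worked example (an illustration, assuming L1_CACHE_BYTES == 64): 0xffff is
 * the largest length the descriptor's 16-bit sequence length field can carry,
 * so RN_BUF_SIZE = (65535 / 64) * 64 = 65472 bytes, the biggest cache-aligned
 * buffer a single SEQ FIFO STORE can fill.
 */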

/* length of descriptors */
#define DESC_JOB_O_LEN			(CAAM_CMD_SZ * 2 + CAAM_PTR_SZ * 2)
#define DESC_RNG_LEN			(4 * CAAM_CMD_SZ)

/* Buffer, its dma address and the state of the job that (re)fills it */
struct buf_data {
	u8 buf[RN_BUF_SIZE];
	dma_addr_t addr;
	struct completion filled;
	u32 hw_desc[DESC_JOB_O_LEN];
#define BUF_NOT_EMPTY 0
#define BUF_EMPTY 1
#define BUF_PENDING 2	/* Empty, but with job pending; don't submit another */
	atomic_t empty;
};

/* rng per-device context */
struct caam_rng_ctx {
	struct device *jrdev;
	dma_addr_t sh_desc_dma;
	u32 sh_desc[DESC_RNG_LEN];
	unsigned int cur_buf_idx;
	int current_buf;
	struct buf_data bufs[2];
};

static struct caam_rng_ctx *rng_ctx;

static inline void rng_unmap_buf(struct device *jrdev, struct buf_data *bd)
{
	if (bd->addr)
		dma_unmap_single(jrdev, bd->addr, RN_BUF_SIZE,
				 DMA_FROM_DEVICE);
}

static inline void rng_unmap_ctx(struct caam_rng_ctx *ctx)
{
	struct device *jrdev = ctx->jrdev;

	if (ctx->sh_desc_dma)
		dma_unmap_single(jrdev, ctx->sh_desc_dma,
				 desc_bytes(ctx->sh_desc), DMA_TO_DEVICE);
	rng_unmap_buf(jrdev, &ctx->bufs[0]);
	rng_unmap_buf(jrdev, &ctx->bufs[1]);
}

static void rng_done(struct device *jrdev, u32 *desc, u32 err, void *context)
{
	struct buf_data *bd;

	bd = (struct buf_data *)((char *)desc -
	      offsetof(struct buf_data, hw_desc));

	if (err)
		caam_jr_strstatus(jrdev, err);

	atomic_set(&bd->empty, BUF_NOT_EMPTY);
	complete(&bd->filled);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rng refreshed buf@: ",
		       DUMP_PREFIX_ADDRESS, 16, 4, bd->buf, RN_BUF_SIZE, 1);
#endif
}

static inline int submit_job(struct caam_rng_ctx *ctx, int to_current)
{
	struct buf_data *bd = &ctx->bufs[!(to_current ^ ctx->current_buf)];
	struct device *jrdev = ctx->jrdev;
	u32 *desc = bd->hw_desc;
	int err;

	dev_dbg(jrdev, "submitting job %d\n", !(to_current ^ ctx->current_buf));
	init_completion(&bd->filled);
	err = caam_jr_enqueue(jrdev, desc, rng_done, ctx);
	if (err)
		complete(&bd->filled); /* don't wait on failed job */
	else
		atomic_inc(&bd->empty); /* note if pending: BUF_EMPTY -> BUF_PENDING */

	return err;
}
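
/*
 * A note on the index math above (an illustration, not new behavior): with
 * b == ctx->current_buf, submit_job(ctx, 1) picks !(1 ^ b) == b, i.e. refills
 * the buffer currently being read, while submit_job(ctx, 0) picks
 * !(0 ^ b) == !b, i.e. refills the other half of the ping-pong pair.
 */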

static int caam_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct caam_rng_ctx *ctx = rng_ctx;
	struct buf_data *bd = &ctx->bufs[ctx->current_buf];
	int next_buf_idx, copied_idx;
	int err;

	if (atomic_read(&bd->empty)) {
		/* try to submit job if there wasn't one */
		if (atomic_read(&bd->empty) == BUF_EMPTY) {
			err = submit_job(ctx, 1);
			/* if can't submit job, can't even wait */
			if (err)
				return 0;
		}
		/* no immediate data, so exit if not waiting */
		if (!wait)
			return 0;

		/* waiting for pending job */
		if (atomic_read(&bd->empty))
			wait_for_completion(&bd->filled);
	}

	next_buf_idx = ctx->cur_buf_idx + max;
	dev_dbg(ctx->jrdev, "%s: start reading at buffer %d, idx %d\n",
		__func__, ctx->current_buf, ctx->cur_buf_idx);

	/* if enough data in current buffer */
	if (next_buf_idx < RN_BUF_SIZE) {
		memcpy(data, bd->buf + ctx->cur_buf_idx, max);
		ctx->cur_buf_idx = next_buf_idx;
		return max;
	}

	/* else, copy what's left... */
	copied_idx = RN_BUF_SIZE - ctx->cur_buf_idx;
	memcpy(data, bd->buf + ctx->cur_buf_idx, copied_idx);
	ctx->cur_buf_idx = 0;
	atomic_set(&bd->empty, BUF_EMPTY);

	/* ...refill... */
	submit_job(ctx, 1);

	/* and use next buffer */
	ctx->current_buf = !ctx->current_buf;
	dev_dbg(ctx->jrdev, "switched to buffer %d\n", ctx->current_buf);

	/* since there already is some data read, don't wait */
	return copied_idx + caam_read(rng, data + copied_idx,
				      max - copied_idx, false);
}

static inline int rng_create_sh_desc(struct caam_rng_ctx *ctx)
{
	struct device *jrdev = ctx->jrdev;
	u32 *desc = ctx->sh_desc;

	init_sh_desc(desc, HDR_SHARE_SERIAL);

	/* Propagate errors from shared to job descriptor */
	append_cmd(desc, SET_OK_NO_PROP_ERRORS | CMD_LOAD);

	/* Generate random bytes */
	append_operation(desc, OP_ALG_ALGSEL_RNG | OP_TYPE_CLASS1_ALG);

	/* Store bytes */
	append_seq_fifo_store(desc, RN_BUF_SIZE, FIFOST_TYPE_RNGSTORE);

	ctx->sh_desc_dma = dma_map_single(jrdev, desc, desc_bytes(desc),
					  DMA_TO_DEVICE);
	if (dma_mapping_error(jrdev, ctx->sh_desc_dma)) {
		dev_err(jrdev, "unable to map shared descriptor\n");
		return -ENOMEM;
	}
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rng shdesc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       desc, desc_bytes(desc), 1);
#endif
	return 0;
}

static inline int rng_create_job_desc(struct caam_rng_ctx *ctx, int buf_id)
{
	struct device *jrdev = ctx->jrdev;
	struct buf_data *bd = &ctx->bufs[buf_id];
	u32 *desc = bd->hw_desc;
	int sh_len = desc_len(ctx->sh_desc);

	init_job_desc_shared(desc, ctx->sh_desc_dma, sh_len, HDR_SHARE_DEFER |
			     HDR_REVERSE);

	bd->addr = dma_map_single(jrdev, bd->buf, RN_BUF_SIZE, DMA_FROM_DEVICE);
	if (dma_mapping_error(jrdev, bd->addr)) {
		dev_err(jrdev, "unable to map dst\n");
		return -ENOMEM;
	}

	append_seq_out_ptr_intlen(desc, bd->addr, RN_BUF_SIZE, 0);
#ifdef DEBUG
	print_hex_dump(KERN_ERR, "rng job desc@: ", DUMP_PREFIX_ADDRESS, 16, 4,
		       desc, desc_bytes(desc), 1);
#endif
	return 0;
}

static void caam_cleanup(struct hwrng *rng)
{
	int i;
	struct buf_data *bd;

	for (i = 0; i < 2; i++) {
		bd = &rng_ctx->bufs[i];
		if (atomic_read(&bd->empty) == BUF_PENDING)
			wait_for_completion(&bd->filled);
	}

	rng_unmap_ctx(rng_ctx);
}

static int caam_init_buf(struct caam_rng_ctx *ctx, int buf_id)
{
	struct buf_data *bd = &ctx->bufs[buf_id];
	int err;

	err = rng_create_job_desc(ctx, buf_id);
	if (err)
		return err;

	atomic_set(&bd->empty, BUF_EMPTY);
	submit_job(ctx, buf_id == ctx->current_buf);
	wait_for_completion(&bd->filled);

	return 0;
}

static int caam_init_rng(struct caam_rng_ctx *ctx, struct device *jrdev)
{
	int err;

	ctx->jrdev = jrdev;

	err = rng_create_sh_desc(ctx);
	if (err)
		return err;

	ctx->current_buf = 0;
	ctx->cur_buf_idx = 0;

	err = caam_init_buf(ctx, 0);
	if (err)
		return err;

	err = caam_init_buf(ctx, 1);
	if (err)
		return err;

	return 0;
}

static struct hwrng caam_rng = {
	.name		= "rng-caam",
	.cleanup	= caam_cleanup,
	.read		= caam_read,
};

static void __exit caam_rng_exit(void)
{
	/* unregister from the hw_random core before freeing the job ring */
	hwrng_unregister(&caam_rng);
	caam_jr_free(rng_ctx->jrdev);
	kfree(rng_ctx);
}

static int __init caam_rng_init(void)
{
	struct device *dev;
	struct device_node *dev_node;
	struct platform_device *pdev;
	struct device *ctrldev;
	void *priv;
	int err;

	dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	if (!dev_node) {
		dev_node = of_find_compatible_node(NULL, NULL, "fsl,sec4.0");
		if (!dev_node)
			return -ENODEV;
	}

	pdev = of_find_device_by_node(dev_node);
	if (!pdev) {
		of_node_put(dev_node);
		return -ENODEV;
	}

	ctrldev = &pdev->dev;
	priv = dev_get_drvdata(ctrldev);
	of_node_put(dev_node);

	/*
	 * If priv is NULL, it's probably because the caam driver wasn't
	 * properly initialized (e.g. RNG4 init failed). Thus, bail out here.
	 */
	if (!priv)
		return -ENODEV;

	dev = caam_jr_alloc();
	if (IS_ERR(dev)) {
		pr_err("Job Ring Device allocation for transform failed\n");
		return PTR_ERR(dev);
	}

	rng_ctx = kmalloc(sizeof(*rng_ctx), GFP_DMA | GFP_KERNEL);
	if (!rng_ctx) {
		err = -ENOMEM;
		goto free_caam_alloc;
	}

	err = caam_init_rng(rng_ctx, dev);
	if (err)
		goto free_rng_ctx;

	dev_info(dev, "registering rng-caam\n");
	return hwrng_register(&caam_rng);

free_rng_ctx:
	kfree(rng_ctx);
free_caam_alloc:
	caam_jr_free(dev);
	return err;
}

module_init(caam_rng_init);
module_exit(caam_rng_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM support for hw_random API");
MODULE_AUTHOR("Freescale Semiconductor - NMG");
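
/*
 * Usage sketch (an illustration, not part of the driver): once this module
 * is loaded, the hw_random core exposes the device through /dev/hwrng. On a
 * typical system it can be selected and sampled from the shell, e.g.:
 *
 *   echo rng-caam > /sys/class/misc/hw_random/rng_current
 *   dd if=/dev/hwrng of=/dev/null bs=64 count=1
 */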