/*
 * CAAM/SEC 4.x transport/backend driver
 * JobR backend functionality
 *
 * Copyright 2008-2011 Freescale Semiconductor, Inc.
 */

#include "compat.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
#include "intern.h"

/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses; kick
	 * the tasklet if jobs are done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * If the JobR signals an error, there is more development work
	 * to do. Flag a bug for now, but we really need to shut down and
	 * restart the queue (and fix the code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* Have a valid interrupt at this point; just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask[smp_processor_id()]);
	preempt_enable();

	return IRQ_HANDLED;
}
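
/*
 * A worked aside (illustrative, not driver code): the ring bookkeeping
 * below uses the CIRC_CNT()/CIRC_SPACE() macros from linux/circ_buf.h,
 * which rely on JOBR_DEPTH being a power of two. For a hypothetical
 * depth of 8:
 *
 *	CIRC_CNT(head, tail, 8)   == (head - tail) & 7      filled entries
 *	CIRC_SPACE(head, tail, 8) == (tail - head - 1) & 7  free entries
 *
 * e.g. head == 1, tail == 6 (head has wrapped past the end) gives
 * CIRC_CNT == 3 and CIRC_SPACE == 4: indices wrap naturally with no
 * modulo and no separate "ring full" flag, at the cost of one
 * always-empty slot.
 */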

/* Deferred service handler, run as an interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;
	unsigned long flags;

	spin_lock_irqsave(&jrp->outlock, flags);

	head = ACCESS_ONCE(jrp->head);
	sw_idx = tail = jrp->tail;

	while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
	       rd_reg32(&jrp->rregs->outring_used)) {

		hw_idx = jrp->out_ring_read_index;
		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			smp_read_barrier_depends();

			if (jrp->outring[hw_idx].desc ==
			    jrp->entinfo[sw_idx].desc_addr_dma)
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev, jrp->outring[hw_idx].desc,
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params for use outside of the lock */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = jrp->outring[hw_idx].jrstatus;

		smp_mb();

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * If this job completed out-of-order, do not increment
		 * the tail. Otherwise, increment the tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order.
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
				smp_read_barrier_depends();
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		spin_unlock_irqrestore(&jrp->outlock, flags);

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);

		spin_lock_irqsave(&jrp->outlock, flags);

		head = ACCESS_ONCE(jrp->head);
		sw_idx = tail = jrp->tail;
	}

	spin_unlock_irqrestore(&jrp->outlock, flags);

	/* reenable / unmask IRQs */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);
}

/**
 * caam_jr_register() - Allocate a ring for the caller's use. Returns the
 * ordinal of the ring allocated, or -ENODEV if no rings are available.
 * @ctrldev: points to the controller-level dev (parent) that
 *           owns the rings available for use.
 * @rdev:    points to where a pointer to the newly allocated ring's
 *           dev will be written on success.
 **/
int caam_jr_register(struct device *ctrldev, struct device **rdev)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_drv_private_jr *jrpriv = NULL;
	unsigned long flags;
	int ring;

	/* Lock; if a ring is free, assign it, then unlock */
	spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
	for (ring = 0; ring < ctrlpriv->total_jobrs; ring++) {
		jrpriv = dev_get_drvdata(ctrlpriv->jrdev[ring]);
		if (jrpriv->assign == JOBR_UNASSIGNED) {
			jrpriv->assign = JOBR_ASSIGNED;
			*rdev = ctrlpriv->jrdev[ring];
			spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock,
					       flags);
			return ring;
		}
	}

	/* No free ring found; tell the caller */
	spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);
	*rdev = NULL;

	return -ENODEV;
}
EXPORT_SYMBOL(caam_jr_register);

/**
 * caam_jr_deregister() - Deregister an API and release the ring.
 * Returns 0 if OK, or -EBUSY if the ring still contains pending entries
 * or unprocessed results at the time of the call.
 * @rdev: points to the dev that identifies the ring to be released.
 **/
int caam_jr_deregister(struct device *rdev)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);
	struct caam_drv_private *ctrlpriv;
	unsigned long flags;

	/* Get the owning controller's private space */
	ctrlpriv = dev_get_drvdata(jrpriv->parentdev);

	/* Make sure the ring is empty before release */
	if (rd_reg32(&jrpriv->rregs->outring_used) ||
	    (rd_reg32(&jrpriv->rregs->inpring_avail) != JOBR_DEPTH))
		return -EBUSY;

	/* Release ring */
	spin_lock_irqsave(&ctrlpriv->jr_alloc_lock, flags);
	jrpriv->assign = JOBR_UNASSIGNED;
	spin_unlock_irqrestore(&ctrlpriv->jr_alloc_lock, flags);

	return 0;
}
EXPORT_SYMBOL(caam_jr_deregister);
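
/*
 * A minimal usage sketch (illustrative only, not part of the driver):
 * a client acquires a ring from the controller, keeps the ring's
 * struct device for later enqueues, and gives the ring back when done.
 * "struct my_ctx" and the function names are hypothetical.
 */
#if 0
static int my_ctx_init(struct my_ctx *ctx, struct device *ctrldev)
{
	int ring;

	/* returns the ring ordinal on success, -ENODEV if none are free */
	ring = caam_jr_register(ctrldev, &ctx->jrdev);
	if (ring < 0)
		return ring;

	/* ... submit jobs against ctx->jrdev via caam_jr_enqueue() ... */
	return 0;
}

static void my_ctx_exit(struct my_ctx *ctx)
{
	/* may return -EBUSY if jobs are still in flight on this ring */
	if (caam_jr_deregister(ctx->jrdev))
		dev_warn(ctx->jrdev, "ring still busy at deregister\n");
}
#endif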

/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns 0 if OK,
 * -EBUSY if the ring is full, or -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  device of the job ring to be used. This device should have
 *        been assigned prior by caam_jr_register().
 * @desc: points to a job descriptor that executes our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 status, void *areq)
 *        where:
 *        @dev:    contains the job ring device that processed this
 *                 response.
 *        @desc:   descriptor that initiated the request, same as the
 *                 "desc" passed to caam_jr_enqueue().
 *        @status: untranslated status received from CAAM. See the
 *                 reference manual for a detailed description of
 *                 error meaning, or see the JRSTA definitions in the
 *                 register header file.
 *        @areq:   optional pointer to the argument passed with the
 *                 original request.
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	unsigned long flags;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	/* descriptor length (in 32-bit words) lives in the job header */
	desc_size = (*desc & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_irqsave(&jrp->inplock, flags);

	head = jrp->head;
	tail = ACCESS_ONCE(jrp->tail);

	if (!rd_reg32(&jrp->rregs->inpring_avail) ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_irqrestore(&jrp->inplock, flags);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -EBUSY;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jrp->inpring[jrp->inp_ring_write_index] = desc_dma;

	smp_wmb();

	jrp->inp_ring_write_index = (jrp->inp_ring_write_index + 1) &
				    (JOBR_DEPTH - 1);
	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	wmb();

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	spin_unlock_irqrestore(&jrp->inplock, flags);

	return 0;
}
EXPORT_SYMBOL(caam_jr_enqueue);
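
/*
 * A minimal synchronous-use sketch (illustrative only): since
 * caam_jr_enqueue() is callback-driven, a caller that wants to block
 * can signal a completion from the callback. "struct my_result" and the
 * function names are hypothetical; status decoding is elided.
 */
#if 0
struct my_result {
	struct completion comp;
	u32 status;
};

static void my_done(struct device *dev, u32 *desc, u32 status, void *areq)
{
	struct my_result *res = areq;

	res->status = status;	/* raw CAAM status word */
	complete(&res->comp);
}

static int my_run_desc_sync(struct device *jrdev, u32 *desc)
{
	struct my_result res;
	int ret;

	init_completion(&res.comp);
	ret = caam_jr_enqueue(jrdev, desc, my_done, &res);
	if (ret)
		return ret;	/* -EBUSY: ring full; -EIO: map failed */

	wait_for_completion(&res.comp);
	return res.status ? -EIO : 0;
}
#endif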

static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/*
	 * Mask interrupts, since we are going to poll
	 * for reset-completion status.
	 */
	setbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	/* initiate flush (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* initiate reset */
	timeout = 100000;
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrbits32(&jrp->rregs->rconfig_lo, JRCFG_IMSK);

	return 0;
}

/*
 * Init a JobR independent of platform property detection
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	/* Connect job ring interrupt handler. */
	for_each_possible_cpu(i)
		tasklet_init(&jrp->irqtask[i], caam_jr_dequeue,
			     (unsigned long)dev);

	error = request_irq(jrp->irq, caam_jr_interrupt, IRQF_SHARED,
			    "caam-jobr", dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		irq_dispose_mapping(jrp->irq);
		jrp->irq = 0;
		return -EINVAL;
	}

	error = caam_reset_hw_jr(dev);
	if (error) {
		free_irq(jrp->irq, dev);
		return error;
	}

	jrp->inpring = kzalloc(sizeof(dma_addr_t) * JOBR_DEPTH,
			       GFP_KERNEL | GFP_DMA);
	jrp->outring = kzalloc(sizeof(struct jr_outentry) * JOBR_DEPTH,
			       GFP_KERNEL | GFP_DMA);

	jrp->entinfo = kzalloc(sizeof(struct caam_jrentry_info) * JOBR_DEPTH,
			       GFP_KERNEL);

	if ((jrp->inpring == NULL) || (jrp->outring == NULL) ||
	    (jrp->entinfo == NULL)) {
		dev_err(dev, "can't allocate job rings for %d\n",
			jrp->ridx);
		kfree(jrp->inpring);	/* kfree(NULL) is a no-op */
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -ENOMEM;
	}

	/* poison DMA addresses so a fresh ring can't match a stale job */
	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Set up rings */
	inpbusaddr = dma_map_single(dev, jrp->inpring,
				    sizeof(u32 *) * JOBR_DEPTH,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, inpbusaddr)) {
		dev_err(dev, "caam_jr_init(): can't map input ring\n");
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -EIO;
	}

	outbusaddr = dma_map_single(dev, jrp->outring,
				    sizeof(struct jr_outentry) * JOBR_DEPTH,
				    DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, outbusaddr)) {
		dev_err(dev, "caam_jr_init(): can't map output ring\n");
		dma_unmap_single(dev, inpbusaddr,
				 sizeof(u32 *) * JOBR_DEPTH,
				 DMA_BIDIRECTIONAL);
		kfree(jrp->inpring);
		kfree(jrp->outring);
		kfree(jrp->entinfo);
		return -EIO;
	}

	jrp->inp_ring_write_index = 0;
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->ringsize = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);
	spin_lock_init(&jrp->outlock);

	/* Select interrupt coalescing parameters */
	setbits32(&jrp->rregs->rconfig_lo, JOBR_INTC |
		  (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		  (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	jrp->assign = JOBR_UNASSIGNED;
	return 0;
}
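
/*
 * For orientation (illustrative, not new driver code): the two rings set
 * up above are plain arrays in kernel memory that the CAAM DMAs against.
 * The input ring is an array of bus addresses of job descriptors; each
 * output ring entry pairs a completed descriptor's bus address with its
 * raw 32-bit status, which is how caam_jr_dequeue() matches hardware
 * completions back to entinfo[] bookkeeping entries. A sketch of the
 * shapes, assuming the jr_outentry layout declared in regs.h:
 */
#if 0
	dma_addr_t inpring[JOBR_DEPTH];		/* in:  descriptor pointers */
	struct jr_outentry {			/* out: completion records  */
		dma_addr_t desc;		/* matches an inpring entry */
		u32 jrstatus;			/* raw CAAM status word     */
	} outring[JOBR_DEPTH];
#endif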

/*
 * Shut down a JobR independent of platform property code
 */
int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	dma_addr_t inpbusaddr, outbusaddr;
	int ret, i;

	ret = caam_reset_hw_jr(dev);

	for_each_possible_cpu(i)
		tasklet_kill(&jrp->irqtask[i]);

	/* Release interrupt */
	free_irq(jrp->irq, dev);

	/* Free rings */
	inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	outbusaddr = rd_reg64(&jrp->rregs->outring_base);
	dma_unmap_single(dev, outbusaddr,
			 sizeof(struct jr_outentry) * JOBR_DEPTH,
			 DMA_BIDIRECTIONAL);
	dma_unmap_single(dev, inpbusaddr, sizeof(u32 *) * JOBR_DEPTH,
			 DMA_BIDIRECTIONAL);
	kfree(jrp->outring);
	kfree(jrp->inpring);
	kfree(jrp->entinfo);

	return ret;
}

/*
 * Probe routine for each detected JobR subsystem. It assumes that
 * property detection was picked up externally.
 */
int caam_jr_probe(struct platform_device *pdev, struct device_node *np,
		  int ring)
{
	struct device *ctrldev, *jrdev;
	struct platform_device *jr_pdev;
	struct caam_drv_private *ctrlpriv;
	struct caam_drv_private_jr *jrpriv;
	u32 *jroffset;
	int error;

	ctrldev = &pdev->dev;
	ctrlpriv = dev_get_drvdata(ctrldev);

	jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), GFP_KERNEL);
	if (jrpriv == NULL) {
		dev_err(ctrldev, "can't alloc private mem for job ring %d\n",
			ring);
		return -ENOMEM;
	}
	jrpriv->parentdev = ctrldev; /* point back to parent */
	jrpriv->ridx = ring; /* save ring identity relative to detection */

	/*
	 * Derive a pointer to the detected JobR's registers.
	 * The driver has already iomapped the entire space; we just
	 * need to add in the offset to this JobR. Not sure this is
	 * the right long-term approach, but it'll run.
	 */
	jroffset = (u32 *)of_get_property(np, "reg", NULL);
	jrpriv->rregs = (struct caam_job_ring __iomem *)((void *)ctrlpriv->ctrl
							 + *jroffset);

	/* Build a local dev for each detected ring */
	jr_pdev = of_platform_device_create(np, NULL, ctrldev);
	if (jr_pdev == NULL) {
		kfree(jrpriv);
		return -EINVAL;
	}
	jrdev = &jr_pdev->dev;
	dev_set_drvdata(jrdev, jrpriv);
	ctrlpriv->jrdev[ring] = jrdev;

	/* Identify the interrupt */
	jrpriv->irq = of_irq_to_resource(np, 0, NULL);

	/* Now do the platform-independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error) {
		kfree(jrpriv);
		return error;
	}

	return error;
}
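
/*
 * For reference, a device-tree fragment of the sort caam_jr_probe()
 * consumes (illustrative; the unit address, "reg" offset, and interrupt
 * specifier below are placeholders that vary per SoC). The "reg" value
 * is what the probe above reads as the ring's register offset within
 * the already-iomapped CAAM block:
 *
 *	jr0: jr@1000 {
 *		compatible = "fsl,sec-v4.0-job-ring";
 *		reg = <0x1000 0x1000>;
 *		interrupts = <88 2>;
 *	};
 */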