/* xref: /openbmc/linux/drivers/crypto/caam/jr.c (revision 31d49ba0) */
// SPDX-License-Identifier: GPL-2.0+
/*
 * CAAM/SEC 4.x transport/backend driver
 * JobR backend functionality
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2019, 2023 NXP
 */

#include <linux/of_irq.h>
#include <linux/of_address.h>

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
#include "intern.h"

struct jr_driver_data {
	/* List of physical JobRs registered with the driver */
	struct list_head	jr_list;
	spinlock_t		jr_alloc_lock;	/* jr_list lock */
} ____cacheline_aligned;

static struct jr_driver_data driver_data;
static DEFINE_MUTEX(algs_lock);
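/*
 * Number of job rings probed so far; algorithms are registered with the
 * first ring and unregistered with the last, under algs_lock.
 */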
static unsigned int active_devs;

static void register_algs(struct caam_drv_private_jr *jrpriv,
			  struct device *dev)
{
	mutex_lock(&algs_lock);

	if (++active_devs != 1)
		goto algs_unlock;

	caam_algapi_init(dev);
	caam_algapi_hash_init(dev);
	caam_pkc_init(dev);
	jrpriv->hwrng = !caam_rng_init(dev);
	caam_prng_register(dev);
	caam_qi_algapi_init(dev);

algs_unlock:
	mutex_unlock(&algs_lock);
}

static void unregister_algs(void)
{
	mutex_lock(&algs_lock);

	if (--active_devs != 0)
		goto algs_unlock;

	caam_qi_algapi_exit();
	caam_prng_unregister(NULL);
	caam_pkc_exit();
	caam_algapi_hash_exit();
	caam_algapi_exit();

algs_unlock:
	mutex_unlock(&algs_lock);
}

static void caam_jr_crypto_engine_exit(void *data)
{
	struct device *jrdev = data;
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);

	/* Free the resources of crypto-engine */
	crypto_engine_exit(jrpriv->engine);
}

/*
 * Put the CAAM in quiesce, i.e. stop processing jobs
 *
 * Must be called with the job ring interrupt (itr) masked
 */
static int caam_jr_stop_processing(struct device *dev, u32 jrcr_bits)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/* Check the current status */
	if (rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_INPROGRESS)
		goto wait_quiesce_completion;

	/* Reset the field */
	clrsetbits_32(&jrp->rregs->jrintstatus, JRINT_ERR_HALT_MASK, 0);

	/* initiate flush / park (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, jrcr_bits);

wait_quiesce_completion:
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	return 0;
}

/*
 * Flush the job ring, so that running jobs are stopped, queued jobs are
 * invalidated and the CAAM no longer fetches from the input ring.
 *
 * Must be called with the job ring interrupt (itr) masked
 */
static int caam_jr_flush(struct device *dev)
{
	return caam_jr_stop_processing(dev, JRCR_RESET);
}

static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;
	int err;
	/*
	 * mask interrupts since we are going to poll
	 * for reset completion status
	 */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);
	err = caam_jr_flush(dev);
	if (err)
		return err;

	/* initiate reset */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);

	return 0;
}

/*
 * Shutdown JobR independent of platform property code
 */
static int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	int ret;

	ret = caam_reset_hw_jr(dev);

	tasklet_kill(&jrp->irqtask);

	return ret;
}

static int caam_jr_remove(struct platform_device *pdev)
{
	int ret;
	struct device *jrdev;
	struct caam_drv_private_jr *jrpriv;

	jrdev = &pdev->dev;
	jrpriv = dev_get_drvdata(jrdev);

	if (jrpriv->hwrng)
		caam_rng_exit(jrdev->parent);

	/* Return -EBUSY if the job ring is still allocated to a user */
	if (atomic_read(&jrpriv->tfm_count)) {
		dev_err(jrdev, "Device is busy\n");
		return -EBUSY;
	}

	/* Unregister JR-based RNG & crypto algorithms */
	unregister_algs();

	/* Remove the node from Physical JobR list maintained by driver */
	spin_lock(&driver_data.jr_alloc_lock);
	list_del(&jrpriv->list_node);
	spin_unlock(&driver_data.jr_alloc_lock);

	/* Release ring */
	ret = caam_jr_shutdown(jrdev);
	if (ret)
		dev_err(jrdev, "Failed to shut down job ring\n");

	return ret;
}

static void caam_jr_platform_shutdown(struct platform_device *pdev)
{
	caam_jr_remove(pdev);
}

/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses, kick
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!irqstate)
		return IRQ_NONE;

	/*
	 * If there is a JobR error, we have more development work to do:
	 * flag a bug now, but we really need to shut down and restart
	 * the queue (and fix the code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask);
	preempt_enable();

	return IRQ_HANDLED;
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct device *dev = (struct device *)devarg;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;
	u32 outring_used = 0;

	while (outring_used ||
	       (outring_used = rd_reg32(&jrp->rregs->outring_used))) {

		head = READ_ONCE(jrp->head);

		sw_idx = tail = jrp->tail;
		hw_idx = jrp->out_ring_read_index;

		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			if (jr_outentry_desc(jrp->outring, hw_idx) ==
			    caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev,
				 caam_dma_to_cpu(jr_outentry_desc(jrp->outring,
								  hw_idx)),
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring,
								hw_idx));

		/*
		 * Make sure all information from the job has been obtained
		 * before telling CAAM that the job has been removed from the
		 * output ring.
		 */
		mb();

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail.  Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);
		outring_used--;
	}

	/* reenable / unmask IRQs */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}
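
/*
 * Worked example of the tail bookkeeping above (illustrative numbers,
 * not from this file): assume JOBR_DEPTH = 4, tail = 0 and head = 3,
 * i.e. jobs sit in slots 0, 1 and 2. If the job in slot 1 completes
 * first, sw_idx (1) != tail (0), so only entinfo[1].desc_addr_dma is
 * zeroed and tail stays at 0. When the job in slot 0 completes next,
 * sw_idx == tail, and the do/while loop advances tail past slot 0 and
 * the already-zeroed slot 1, leaving tail = 2.
 */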

/**
 * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
 *
 * Returns: pointer to the job ring device with the lowest transform
 *	    count if successful, or ERR_PTR(-ENODEV) if no job ring is
 *	    available.
 **/
struct device *caam_jr_alloc(void)
{
	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
	struct device *dev = ERR_PTR(-ENODEV);
	int min_tfm_cnt = INT_MAX;
	int tfm_cnt;

	spin_lock(&driver_data.jr_alloc_lock);

	if (list_empty(&driver_data.jr_list)) {
		spin_unlock(&driver_data.jr_alloc_lock);
		return ERR_PTR(-ENODEV);
	}

	list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
		tfm_cnt = atomic_read(&jrpriv->tfm_count);
		if (tfm_cnt < min_tfm_cnt) {
			min_tfm_cnt = tfm_cnt;
			min_jrpriv = jrpriv;
		}
		if (!min_tfm_cnt)
			break;
	}

	if (min_jrpriv) {
		atomic_inc(&min_jrpriv->tfm_count);
		dev = min_jrpriv->dev;
	}
	spin_unlock(&driver_data.jr_alloc_lock);

	return dev;
}
EXPORT_SYMBOL(caam_jr_alloc);

/**
 * caam_jr_free() - Free the Job Ring
 * @rdev:      points to the dev that identifies the Job ring to
 *             be released.
 **/
void caam_jr_free(struct device *rdev)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);

	atomic_dec(&jrpriv->tfm_count);
}
EXPORT_SYMBOL(caam_jr_free);
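
/*
 * Example use of the two calls above (an illustrative sketch, not code
 * from this driver): a caller typically grabs a ring once, keeps it for
 * the lifetime of a transform, and releases it when done.
 *
 *	struct device *jrdev = caam_jr_alloc();
 *
 *	if (IS_ERR(jrdev))
 *		return PTR_ERR(jrdev);	// no job ring available (-ENODEV)
 *
 *	// ... submit jobs on jrdev with caam_jr_enqueue() ...
 *
 *	caam_jr_free(jrdev);		// drop this user's count on the ring
 */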

/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS
 * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  struct device of the job ring to be used
 * @desc: points to a job descriptor that executes our request. All
 *        descriptors (and all referenced data) must be in a DMAable
 *        region, and all data references must be physical addresses
 *        accessible to CAAM (i.e. within a PAMU window granted
 *        to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *        of this request. This has the form:
 *        callback(struct device *dev, u32 *desc, u32 status, void *areq)
 *        where:
 *        dev:     contains the job ring device that processed this
 *                 response.
 *        desc:    descriptor that initiated the request, the same
 *                 pointer passed as @desc to caam_jr_enqueue().
 *        status:  untranslated status received from CAAM. See the
 *                 reference manual for a detailed description of
 *                 error meaning, or see the JRSTA definitions in the
 *                 register header file.
 *        areq:    optional pointer to an argument passed with the
 *                 original request
 * @areq: optional pointer to a user argument for use at callback
 *        time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
	tail = READ_ONCE(jrp->tail);

	if (!jrp->inpring_avail ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_bh(&jrp->inplock);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -ENOSPC;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma));

	/*
	 * Guarantee that the descriptor's DMA address has been written to
	 * the next slot in the ring before the write index is updated, since
	 * other cores may update this index independently.
	 */
	smp_wmb();

	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	/*
	 * Ensure that all job information has been written before
	 * notifying CAAM that a new job was added to the input ring
	 * using a memory barrier. The wr_reg32() uses the iowrite32()
	 * API to do the register write; iowrite32() issues a memory
	 * barrier before the write operation.
	 */

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	jrp->inpring_avail--;
	if (!jrp->inpring_avail)
		jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);

	spin_unlock_bh(&jrp->inplock);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(caam_jr_enqueue);
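
/*
 * Example of a typical enqueue (an illustrative sketch under assumed
 * names: struct my_result and my_done() are hypothetical, not part of
 * this driver). The descriptor and all data it references must live in
 * DMA-able memory; completion is signalled from caam_jr_dequeue() via
 * the callback.
 *
 *	struct my_result {
 *		struct completion done;
 *		u32 status;
 *	};
 *
 *	static void my_done(struct device *dev, u32 *desc, u32 status,
 *			    void *areq)
 *	{
 *		struct my_result *res = areq;
 *
 *		res->status = status;	// raw CAAM status (see JRSTA_*)
 *		complete(&res->done);
 *	}
 *
 *	...
 *	struct my_result res;
 *	int ret;
 *
 *	init_completion(&res.done);
 *	ret = caam_jr_enqueue(jrdev, desc, my_done, &res);
 *	if (ret == -EINPROGRESS) {
 *		wait_for_completion(&res.done);
 *		ret = res.status ? -EIO : 0;
 *	}
 */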

/*
 * Init JobR independent of platform property detection
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	error = caam_reset_hw_jr(dev);
	if (error)
		return error;

	jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY *
					   JOBR_DEPTH, &inpbusaddr,
					   GFP_KERNEL);
	if (!jrp->inpring)
		return -ENOMEM;

	jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY *
					   JOBR_DEPTH, &outbusaddr,
					   GFP_KERNEL);
	if (!jrp->outring)
		return -ENOMEM;

	jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo),
				    GFP_KERNEL);
	if (!jrp->entinfo)
		return -ENOMEM;

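	/*
	 * Seed every slot with a non-zero DMA address: caam_jr_dequeue()
	 * zeroes desc_addr_dma to mark a slot completed, so a fresh ring
	 * must not contain entries that already look completed.
	 */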
	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	jrp->inpring_avail = JOBR_DEPTH;

	spin_lock_init(&jrp->inplock);

	/* Select interrupt coalescing parameters */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));

	tasklet_init(&jrp->irqtask, caam_jr_dequeue, (unsigned long)dev);

	/* Connect job ring interrupt handler. */
	error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
				 dev_name(dev), dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		tasklet_kill(&jrp->irqtask);
	}

	return error;
}

static void caam_jr_irq_dispose_mapping(void *data)
{
	irq_dispose_mapping((unsigned long)data);
}

/*
 * Probe routine for each detected JobR subsystem.
 */
static int caam_jr_probe(struct platform_device *pdev)
{
	struct device *jrdev;
	struct device_node *nprop;
	struct caam_job_ring __iomem *ctrl;
	struct caam_drv_private_jr *jrpriv;
	static int total_jobrs;
	struct resource *r;
	int error;

	jrdev = &pdev->dev;
	jrpriv = devm_kzalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
	if (!jrpriv)
		return -ENOMEM;

	dev_set_drvdata(jrdev, jrpriv);

	/* save ring identity relative to detection */
	jrpriv->ridx = total_jobrs++;

	nprop = pdev->dev.of_node;
	/* Get configuration properties from device tree */
	/* First, get register page */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(jrdev, "platform_get_resource() failed\n");
		return -ENOMEM;
	}

	ctrl = devm_ioremap(jrdev, r->start, resource_size(r));
	if (!ctrl) {
		dev_err(jrdev, "devm_ioremap() failed\n");
		return -ENOMEM;
	}

	jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;

	error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev));
	if (error) {
		dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
			error);
		return error;
	}

	/* Initialize crypto engine */
	jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL,
							  false,
							  CRYPTO_ENGINE_MAX_QLEN);
	if (!jrpriv->engine) {
		dev_err(jrdev, "Could not init crypto-engine\n");
		return -ENOMEM;
	}

	error = devm_add_action_or_reset(jrdev, caam_jr_crypto_engine_exit,
					 jrdev);
	if (error)
		return error;

	/* Start crypto engine */
	error = crypto_engine_start(jrpriv->engine);
	if (error) {
		dev_err(jrdev, "Could not start crypto-engine\n");
		return error;
	}

	/* Identify the interrupt */
	jrpriv->irq = irq_of_parse_and_map(nprop, 0);
	if (!jrpriv->irq) {
		dev_err(jrdev, "irq_of_parse_and_map failed\n");
		return -EINVAL;
	}

	error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping,
					 (void *)(unsigned long)jrpriv->irq);
	if (error)
		return error;

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error)
		return error;

	jrpriv->dev = jrdev;
	spin_lock(&driver_data.jr_alloc_lock);
	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
	spin_unlock(&driver_data.jr_alloc_lock);

	atomic_set(&jrpriv->tfm_count, 0);

	register_algs(jrpriv, jrdev->parent);

	return 0;
}

static const struct of_device_id caam_jr_match[] = {
	{
		.compatible = "fsl,sec-v4.0-job-ring",
	},
	{
		.compatible = "fsl,sec4.0-job-ring",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_jr_match);

static struct platform_driver caam_jr_driver = {
	.driver = {
		.name = "caam_jr",
		.of_match_table = caam_jr_match,
	},
	.probe       = caam_jr_probe,
	.remove      = caam_jr_remove,
	.shutdown    = caam_jr_platform_shutdown,
};

static int __init jr_driver_init(void)
{
	spin_lock_init(&driver_data.jr_alloc_lock);
	INIT_LIST_HEAD(&driver_data.jr_list);
	return platform_driver_register(&caam_jr_driver);
}

static void __exit jr_driver_exit(void)
{
	platform_driver_unregister(&caam_jr_driver);
}

module_init(jr_driver_init);
module_exit(jr_driver_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM JR request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");