// SPDX-License-Identifier: GPL-2.0+
/*
 * CAAM/SEC 4.x transport/backend driver
 * JobR backend functionality
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2019, 2023 NXP
 */

#include <linux/of_irq.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

#include "compat.h"
#include "ctrl.h"
#include "regs.h"
#include "jr.h"
#include "desc.h"
#include "intern.h"

struct jr_driver_data {
	/* List of Physical JobR's with the Driver */
	struct list_head	jr_list;
	spinlock_t		jr_alloc_lock;	/* jr_list lock */
} ____cacheline_aligned;

static struct jr_driver_data driver_data;
static DEFINE_MUTEX(algs_lock);
static unsigned int active_devs;

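/*
 * Algorithm registration is refcounted across job rings: only the first
 * ring to probe registers the JR-backed crypto algorithms and RNG, and
 * only removal of the last ring unregisters them.
 */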
static void register_algs(struct caam_drv_private_jr *jrpriv,
			  struct device *dev)
{
	mutex_lock(&algs_lock);

	if (++active_devs != 1)
		goto algs_unlock;

	caam_algapi_init(dev);
	caam_algapi_hash_init(dev);
	caam_pkc_init(dev);
	jrpriv->hwrng = !caam_rng_init(dev);
	caam_prng_register(dev);
	caam_qi_algapi_init(dev);

algs_unlock:
	mutex_unlock(&algs_lock);
}

static void unregister_algs(void)
{
	mutex_lock(&algs_lock);

	if (--active_devs != 0)
		goto algs_unlock;

	caam_qi_algapi_exit();
	caam_prng_unregister(NULL);
	caam_pkc_exit();
	caam_algapi_hash_exit();
	caam_algapi_exit();

algs_unlock:
	mutex_unlock(&algs_lock);
}

static void caam_jr_crypto_engine_exit(void *data)
{
	struct device *jrdev = data;
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(jrdev);

	/* Free the resources of crypto-engine */
	crypto_engine_exit(jrpriv->engine);
}

/*
 * Put the CAAM job ring in quiesce, i.e. stop it from processing jobs.
 *
 * Must be called with the job ring interrupt (ITR) masked.
 */
static int caam_jr_stop_processing(struct device *dev, u32 jrcr_bits)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;

	/* Check the current status */
	if (rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_INPROGRESS)
		goto wait_quiesce_completion;

	/* Reset the field */
	clrsetbits_32(&jrp->rregs->jrintstatus, JRINT_ERR_HALT_MASK, 0);

	/* initiate flush / park (required prior to reset) */
	wr_reg32(&jrp->rregs->jrcommand, jrcr_bits);

wait_quiesce_completion:
	while (((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) ==
		JRINT_ERR_HALT_INPROGRESS) && --timeout)
		cpu_relax();

	if ((rd_reg32(&jrp->rregs->jrintstatus) & JRINT_ERR_HALT_MASK) !=
	    JRINT_ERR_HALT_COMPLETE || timeout == 0) {
		dev_err(dev, "failed to flush job ring %d\n", jrp->ridx);
		return -EIO;
	}

	return 0;
}

/*
 * Flush the job ring: running jobs are stopped, queued jobs are invalidated
 * and the CAAM no longer fetches from the input ring.
 *
 * Must be called with the job ring interrupt (ITR) masked.
 */
static int caam_jr_flush(struct device *dev)
{
	return caam_jr_stop_processing(dev, JRCR_RESET);
}

/* The resume can be used after a park or a flush if CAAM has not been reset */
static int caam_jr_restart_processing(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 halt_status = rd_reg32(&jrp->rregs->jrintstatus) &
			  JRINT_ERR_HALT_MASK;

	/* Check that the flush/park is completed */
	if (halt_status != JRINT_ERR_HALT_COMPLETE)
		return -1;

	/* Resume processing of jobs */
	clrsetbits_32(&jrp->rregs->jrintstatus, 0, JRINT_ERR_HALT_COMPLETE);

	return 0;
}

static int caam_reset_hw_jr(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	unsigned int timeout = 100000;
	int err;

	/*
	 * mask interrupts since we are going to poll
	 * for reset completion status
	 */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	err = caam_jr_flush(dev);
	if (err)
		return err;

	/* initiate reset */
	wr_reg32(&jrp->rregs->jrcommand, JRCR_RESET);
	while ((rd_reg32(&jrp->rregs->jrcommand) & JRCR_RESET) && --timeout)
		cpu_relax();

	if (timeout == 0) {
		dev_err(dev, "failed to reset job ring %d\n", jrp->ridx);
		return -EIO;
	}

	/* unmask interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);

	return 0;
}

/*
 * Shutdown JobR independent of platform property code
 */
static int caam_jr_shutdown(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	int ret;

	ret = caam_reset_hw_jr(dev);

	tasklet_kill(&jrp->irqtask);

	return ret;
}

static void caam_jr_remove(struct platform_device *pdev)
{
	int ret;
	struct device *jrdev;
	struct caam_drv_private_jr *jrpriv;

	jrdev = &pdev->dev;
	jrpriv = dev_get_drvdata(jrdev);

	if (jrpriv->hwrng)
		caam_rng_exit(jrdev->parent);

	/*
	 * If a job ring is still allocated there is trouble ahead. Once
	 * caam_jr_remove() returned, jrpriv will be freed and the registers
	 * will get unmapped. So any user of such a job ring will probably
	 * crash.
	 */
	if (atomic_read(&jrpriv->tfm_count)) {
		dev_alert(jrdev, "Device is busy; consumers might start to crash\n");
		return;
	}

	/* Unregister JR-based RNG & crypto algorithms */
	unregister_algs();

	/* Remove the node from Physical JobR list maintained by driver */
	spin_lock(&driver_data.jr_alloc_lock);
	list_del(&jrpriv->list_node);
	spin_unlock(&driver_data.jr_alloc_lock);

	/* Release ring */
	ret = caam_jr_shutdown(jrdev);
	if (ret)
		dev_err(jrdev, "Failed to shut down job ring\n");
}

/* Main per-ring interrupt handler */
static irqreturn_t caam_jr_interrupt(int irq, void *st_dev)
{
	struct device *dev = st_dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	u32 irqstate;

	/*
	 * Check the output ring for ready responses, kick
	 * tasklet if jobs done.
	 */
	irqstate = rd_reg32(&jrp->rregs->jrintstatus);
	if (!(irqstate & JRINT_JR_INT))
		return IRQ_NONE;

	/*
	 * If JobR error, we got more development work to do
	 * Flag a bug now, but we really need to shut down and
	 * restart the queue (and fix code).
	 */
	if (irqstate & JRINT_JR_ERROR) {
		dev_err(dev, "job ring error: irqstate: %08x\n", irqstate);
		BUG();
	}

	/* mask valid interrupts */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JRCFG_IMSK);

	/* Have valid interrupt at this point, just ACK and trigger */
	wr_reg32(&jrp->rregs->jrintstatus, irqstate);

	preempt_disable();
	tasklet_schedule(&jrp->irqtask);
	preempt_enable();

	return IRQ_HANDLED;
}

/* Deferred service handler, run as interrupt-fired tasklet */
static void caam_jr_dequeue(unsigned long devarg)
{
	int hw_idx, sw_idx, i, head, tail;
	struct caam_jr_dequeue_params *params = (void *)devarg;
	struct device *dev = params->dev;
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	void (*usercall)(struct device *dev, u32 *desc, u32 status, void *arg);
	u32 *userdesc, userstatus;
	void *userarg;
	u32 outring_used = 0;

	while (outring_used ||
	       (outring_used = rd_reg32(&jrp->rregs->outring_used))) {

		head = READ_ONCE(jrp->head);

		sw_idx = tail = jrp->tail;
		hw_idx = jrp->out_ring_read_index;

		for (i = 0; CIRC_CNT(head, tail + i, JOBR_DEPTH) >= 1; i++) {
			sw_idx = (tail + i) & (JOBR_DEPTH - 1);

			if (jr_outentry_desc(jrp->outring, hw_idx) ==
			    caam_dma_to_cpu(jrp->entinfo[sw_idx].desc_addr_dma))
				break; /* found */
		}
		/* we should never fail to find a matching descriptor */
		BUG_ON(CIRC_CNT(head, tail + i, JOBR_DEPTH) <= 0);

		/* Unmap just-run descriptor so we can post-process */
		dma_unmap_single(dev,
				 caam_dma_to_cpu(jr_outentry_desc(jrp->outring,
								  hw_idx)),
				 jrp->entinfo[sw_idx].desc_size,
				 DMA_TO_DEVICE);

		/* mark completed, avoid matching on a recycled desc addr */
		jrp->entinfo[sw_idx].desc_addr_dma = 0;

		/* Stash callback params */
		usercall = jrp->entinfo[sw_idx].callbk;
		userarg = jrp->entinfo[sw_idx].cbkarg;
		userdesc = jrp->entinfo[sw_idx].desc_addr_virt;
		userstatus = caam32_to_cpu(jr_outentry_jrstatus(jrp->outring,
								hw_idx));

		/*
		 * Make sure all information from the job has been obtained
		 * before telling CAAM that the job has been removed from the
		 * output ring.
		 */
		mb();

		/* set done */
		wr_reg32(&jrp->rregs->outring_rmvd, 1);

		jrp->out_ring_read_index = (jrp->out_ring_read_index + 1) &
					   (JOBR_DEPTH - 1);

		/*
		 * if this job completed out-of-order, do not increment
		 * the tail. Otherwise, increment tail by 1 plus the
		 * number of subsequent jobs already completed out-of-order
		 */
		if (sw_idx == tail) {
			do {
				tail = (tail + 1) & (JOBR_DEPTH - 1);
			} while (CIRC_CNT(head, tail, JOBR_DEPTH) >= 1 &&
				 jrp->entinfo[tail].desc_addr_dma == 0);

			jrp->tail = tail;
		}

		/* Finally, execute user's callback */
		usercall(dev, userdesc, userstatus, userarg);
		outring_used--;
	}

	if (params->enable_itr)
		/* reenable / unmask IRQs */
		clrsetbits_32(&jrp->rregs->rconfig_lo, JRCFG_IMSK, 0);
}

/**
 * caam_jr_alloc() - Alloc a job ring for someone to use as needed.
 *
 * Returns: pointer to the newly allocated physical JobR device if successful,
 *	    ERR_PTR(-ENODEV) otherwise.
 **/
struct device *caam_jr_alloc(void)
{
	struct caam_drv_private_jr *jrpriv, *min_jrpriv = NULL;
	struct device *dev = ERR_PTR(-ENODEV);
	int min_tfm_cnt = INT_MAX;
	int tfm_cnt;

	spin_lock(&driver_data.jr_alloc_lock);

	if (list_empty(&driver_data.jr_list)) {
		spin_unlock(&driver_data.jr_alloc_lock);
		return ERR_PTR(-ENODEV);
	}

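	/* Pick the least-loaded ring; a ring with no users is taken at once */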
	list_for_each_entry(jrpriv, &driver_data.jr_list, list_node) {
		tfm_cnt = atomic_read(&jrpriv->tfm_count);
		if (tfm_cnt < min_tfm_cnt) {
			min_tfm_cnt = tfm_cnt;
			min_jrpriv = jrpriv;
		}
		if (!min_tfm_cnt)
			break;
	}

	if (min_jrpriv) {
		atomic_inc(&min_jrpriv->tfm_count);
		dev = min_jrpriv->dev;
	}
	spin_unlock(&driver_data.jr_alloc_lock);

	return dev;
}
EXPORT_SYMBOL(caam_jr_alloc);

/**
 * caam_jr_free() - Free the Job Ring
 * @rdev: points to the dev that identifies the Job ring to
 *	  be released.
 **/
void caam_jr_free(struct device *rdev)
{
	struct caam_drv_private_jr *jrpriv = dev_get_drvdata(rdev);

	atomic_dec(&jrpriv->tfm_count);
}
EXPORT_SYMBOL(caam_jr_free);

/**
 * caam_jr_enqueue() - Enqueue a job descriptor head. Returns -EINPROGRESS
 * if OK, -ENOSPC if the queue is full, -EIO if it cannot map the caller's
 * descriptor.
 * @dev:  struct device of the job ring to be used
 * @desc: points to a job descriptor that executes our request. All
 *	  descriptors (and all referenced data) must be in a DMAable
 *	  region, and all data references must be physical addresses
 *	  accessible to CAAM (i.e. within a PAMU window granted
 *	  to it).
 * @cbk:  pointer to a callback function to be invoked upon completion
 *	  of this request. This has the form:
 *	  callback(struct device *dev, u32 *desc, u32 stat, void *arg)
 *	  where:
 *	  dev:    contains the job ring device that processed this
 *	          response.
 *	  desc:   descriptor that initiated the request, same as
 *	          "desc" passed to caam_jr_enqueue().
 *	  status: untranslated status received from CAAM. See the
 *	          reference manual for a detailed description of
 *	          error meaning, or see the JRSTA definitions in the
 *	          register header file.
 *	  areq:   optional pointer to an argument passed with the
 *	          original request.
 * @areq: optional pointer to a user argument for use at callback
 *	  time.
 **/
int caam_jr_enqueue(struct device *dev, u32 *desc,
		    void (*cbk)(struct device *dev, u32 *desc,
				u32 status, void *areq),
		    void *areq)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);
	struct caam_jrentry_info *head_entry;
	int head, tail, desc_size;
	dma_addr_t desc_dma;

	desc_size = (caam32_to_cpu(*desc) & HDR_JD_LENGTH_MASK) * sizeof(u32);
	desc_dma = dma_map_single(dev, desc, desc_size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, desc_dma)) {
		dev_err(dev, "caam_jr_enqueue(): can't map jobdesc\n");
		return -EIO;
	}

	spin_lock_bh(&jrp->inplock);

	head = jrp->head;
	tail = READ_ONCE(jrp->tail);

	if (!jrp->inpring_avail ||
	    CIRC_SPACE(head, tail, JOBR_DEPTH) <= 0) {
		spin_unlock_bh(&jrp->inplock);
		dma_unmap_single(dev, desc_dma, desc_size, DMA_TO_DEVICE);
		return -ENOSPC;
	}

	head_entry = &jrp->entinfo[head];
	head_entry->desc_addr_virt = desc;
	head_entry->desc_size = desc_size;
	head_entry->callbk = (void *)cbk;
	head_entry->cbkarg = areq;
	head_entry->desc_addr_dma = desc_dma;

	jr_inpentry_set(jrp->inpring, head, cpu_to_caam_dma(desc_dma));

	/*
	 * Guarantee that the descriptor's DMA address has been written to
	 * the next slot in the ring before the write index is updated, since
	 * other cores may update this index independently.
	 *
	 * Under heavy DDR load, smp_wmb() or dma_wmb() fail to ensure the
	 * input ring is updated before the CAAM starts reading it. So, CAAM
	 * will process, again, an old descriptor address and will put it in
	 * the output ring. This will make caam_jr_dequeue() fail, since this
	 * old descriptor is not in the software ring.
	 * To fix this, use wmb() which works on the full system instead of
	 * inner/outer shareable domains.
	 */
	wmb();

	jrp->head = (head + 1) & (JOBR_DEPTH - 1);

	/*
	 * Ensure that all job information has been written before
	 * notifying CAAM that a new job was added to the input ring
	 * using a memory barrier. The wr_reg32() uses api iowrite32()
	 * to do the register write. iowrite32() issues a memory barrier
	 * before the write operation.
	 */

	wr_reg32(&jrp->rregs->inpring_jobadd, 1);

	jrp->inpring_avail--;
	if (!jrp->inpring_avail)
		jrp->inpring_avail = rd_reg32(&jrp->rregs->inpring_avail);

	spin_unlock_bh(&jrp->inplock);

	return -EINPROGRESS;
}
EXPORT_SYMBOL(caam_jr_enqueue);
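
/*
 * A minimal usage sketch (illustrative only; the result structure and the
 * job_done() callback below are hypothetical, not part of this driver):
 *
 *	struct jr_job_result {
 *		int err;
 *		struct completion completion;
 *	};
 *
 *	static void job_done(struct device *dev, u32 *desc, u32 status,
 *			     void *context)
 *	{
 *		struct jr_job_result *res = context;
 *
 *		res->err = status;
 *		complete(&res->completion);
 *	}
 *
 *	...
 *	struct device *jrdev = caam_jr_alloc();
 *	struct jr_job_result res;
 *	int ret;
 *
 *	init_completion(&res.completion);
 *	ret = caam_jr_enqueue(jrdev, desc, job_done, &res);
 *	if (ret == -EINPROGRESS) {
 *		wait_for_completion(&res.completion);
 *		ret = res.err;
 *	}
 *	caam_jr_free(jrdev);
 */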

static void caam_jr_init_hw(struct device *dev, dma_addr_t inpbusaddr,
			    dma_addr_t outbusaddr)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);

	wr_reg64(&jrp->rregs->inpring_base, inpbusaddr);
	wr_reg64(&jrp->rregs->outring_base, outbusaddr);
	wr_reg32(&jrp->rregs->inpring_size, JOBR_DEPTH);
	wr_reg32(&jrp->rregs->outring_size, JOBR_DEPTH);

	/* Select interrupt coalescing parameters */
	clrsetbits_32(&jrp->rregs->rconfig_lo, 0, JOBR_INTC |
		      (JOBR_INTC_COUNT_THLD << JRCFG_ICDCT_SHIFT) |
		      (JOBR_INTC_TIME_THLD << JRCFG_ICTT_SHIFT));
}

static void caam_jr_reset_index(struct caam_drv_private_jr *jrp)
{
	jrp->out_ring_read_index = 0;
	jrp->head = 0;
	jrp->tail = 0;
}

/*
 * Init JobR independent of platform property detection
 */
static int caam_jr_init(struct device *dev)
{
	struct caam_drv_private_jr *jrp;
	dma_addr_t inpbusaddr, outbusaddr;
	int i, error;

	jrp = dev_get_drvdata(dev);

	error = caam_reset_hw_jr(dev);
	if (error)
		return error;

	jrp->inpring = dmam_alloc_coherent(dev, SIZEOF_JR_INPENTRY *
					   JOBR_DEPTH, &inpbusaddr,
					   GFP_KERNEL);
	if (!jrp->inpring)
		return -ENOMEM;

	jrp->outring = dmam_alloc_coherent(dev, SIZEOF_JR_OUTENTRY *
					   JOBR_DEPTH, &outbusaddr,
					   GFP_KERNEL);
	if (!jrp->outring)
		return -ENOMEM;

	jrp->entinfo = devm_kcalloc(dev, JOBR_DEPTH, sizeof(*jrp->entinfo),
				    GFP_KERNEL);
	if (!jrp->entinfo)
		return -ENOMEM;

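	/*
	 * Mark every software ring slot as unused: !0 can never equal a real
	 * (word-aligned) descriptor DMA address, so the completion matching
	 * in caam_jr_dequeue() will not hit a slot that was never enqueued.
	 */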
	for (i = 0; i < JOBR_DEPTH; i++)
		jrp->entinfo[i].desc_addr_dma = !0;

	/* Setup rings */
	caam_jr_reset_index(jrp);
	jrp->inpring_avail = JOBR_DEPTH;
	caam_jr_init_hw(dev, inpbusaddr, outbusaddr);

	spin_lock_init(&jrp->inplock);

	jrp->tasklet_params.dev = dev;
	jrp->tasklet_params.enable_itr = 1;
	tasklet_init(&jrp->irqtask, caam_jr_dequeue,
		     (unsigned long)&jrp->tasklet_params);

	/* Connect job ring interrupt handler. */
	error = devm_request_irq(dev, jrp->irq, caam_jr_interrupt, IRQF_SHARED,
				 dev_name(dev), dev);
	if (error) {
		dev_err(dev, "can't connect JobR %d interrupt (%d)\n",
			jrp->ridx, jrp->irq);
		tasklet_kill(&jrp->irqtask);
	}

	return error;
}

static void caam_jr_irq_dispose_mapping(void *data)
{
	irq_dispose_mapping((unsigned long)data);
}

/*
 * Probe routine for each detected JobR subsystem.
 */
static int caam_jr_probe(struct platform_device *pdev)
{
	struct device *jrdev;
	struct device_node *nprop;
	struct caam_job_ring __iomem *ctrl;
	struct caam_drv_private_jr *jrpriv;
	static int total_jobrs;
	struct resource *r;
	int error;

	jrdev = &pdev->dev;
	jrpriv = devm_kzalloc(jrdev, sizeof(*jrpriv), GFP_KERNEL);
	if (!jrpriv)
		return -ENOMEM;

	dev_set_drvdata(jrdev, jrpriv);

	/* save ring identity relative to detection */
	jrpriv->ridx = total_jobrs++;

	nprop = pdev->dev.of_node;
	/* Get configuration properties from device tree */
	/* First, get register page */
	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r) {
		dev_err(jrdev, "platform_get_resource() failed\n");
		return -ENOMEM;
	}

	ctrl = devm_ioremap(jrdev, r->start, resource_size(r));
	if (!ctrl) {
		dev_err(jrdev, "devm_ioremap() failed\n");
		return -ENOMEM;
	}

	jrpriv->rregs = (struct caam_job_ring __iomem __force *)ctrl;

	error = dma_set_mask_and_coherent(jrdev, caam_get_dma_mask(jrdev));
	if (error) {
		dev_err(jrdev, "dma_set_mask_and_coherent failed (%d)\n",
			error);
		return error;
	}

	/* Initialize crypto engine */
	jrpriv->engine = crypto_engine_alloc_init_and_set(jrdev, true, NULL,
							  false,
							  CRYPTO_ENGINE_MAX_QLEN);
	if (!jrpriv->engine) {
		dev_err(jrdev, "Could not init crypto-engine\n");
		return -ENOMEM;
	}

	error = devm_add_action_or_reset(jrdev, caam_jr_crypto_engine_exit,
					 jrdev);
	if (error)
		return error;

	/* Start crypto engine */
	error = crypto_engine_start(jrpriv->engine);
	if (error) {
		dev_err(jrdev, "Could not start crypto-engine\n");
		return error;
	}

	/* Identify the interrupt */
	jrpriv->irq = irq_of_parse_and_map(nprop, 0);
	if (!jrpriv->irq) {
		dev_err(jrdev, "irq_of_parse_and_map failed\n");
		return -EINVAL;
	}

	error = devm_add_action_or_reset(jrdev, caam_jr_irq_dispose_mapping,
					 (void *)(unsigned long)jrpriv->irq);
	if (error)
		return error;

	/* Now do the platform independent part */
	error = caam_jr_init(jrdev); /* now turn on hardware */
	if (error)
		return error;

	jrpriv->dev = jrdev;
	spin_lock(&driver_data.jr_alloc_lock);
	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
	spin_unlock(&driver_data.jr_alloc_lock);

	atomic_set(&jrpriv->tfm_count, 0);

	device_init_wakeup(&pdev->dev, 1);
	device_set_wakeup_enable(&pdev->dev, false);

	register_algs(jrpriv, jrdev->parent);

	return 0;
}

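/*
 * Power management: on suspend the ring is taken off the allocation list and
 * the JR-backed hwrng is torn down. If the CAAM may be powered off during the
 * PM transition, the ring is flushed, already-completed jobs are drained with
 * interrupts masked, and the ring base addresses are saved so that resume can
 * reprogram the (possibly reset) hardware. Otherwise the ring interrupt is
 * armed as a wakeup source when the device may wake the system.
 */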
static void caam_jr_get_hw_state(struct device *dev)
{
	struct caam_drv_private_jr *jrp = dev_get_drvdata(dev);

	jrp->state.inpbusaddr = rd_reg64(&jrp->rregs->inpring_base);
	jrp->state.outbusaddr = rd_reg64(&jrp->rregs->outring_base);
}

static int caam_jr_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);
	struct caam_jr_dequeue_params suspend_params = {
		.dev = dev,
		.enable_itr = 0,
	};

	/* Remove the node from Physical JobR list maintained by driver */
	spin_lock(&driver_data.jr_alloc_lock);
	list_del(&jrpriv->list_node);
	spin_unlock(&driver_data.jr_alloc_lock);

	if (jrpriv->hwrng)
		caam_rng_exit(dev->parent);

	if (ctrlpriv->caam_off_during_pm) {
		int err;

		tasklet_disable(&jrpriv->irqtask);

		/* mask interrupts before flushing */
		clrsetbits_32(&jrpriv->rregs->rconfig_lo, 0, JRCFG_IMSK);

		/* Invalidate jobs in process */
		err = caam_jr_flush(dev);
		if (err) {
			dev_err(dev, "Failed to flush\n");
			return err;
		}

		/* Dequeue the flushed jobs */
		caam_jr_dequeue((unsigned long)&suspend_params);

		/* Save state */
		caam_jr_get_hw_state(dev);
	} else if (device_may_wakeup(&pdev->dev)) {
		enable_irq_wake(jrpriv->irq);
	}

	return 0;
}

static int caam_jr_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct caam_drv_private_jr *jrpriv = platform_get_drvdata(pdev);
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev->parent);

	if (ctrlpriv->caam_off_during_pm) {
		u64 inp_addr;
		int err;

		/*
		 * Check whether the CAAM has been reset by checking the
		 * address of the input ring
		 */
		inp_addr = rd_reg64(&jrpriv->rregs->inpring_base);
		if (inp_addr != 0) {
			/* JR still has some configuration */
			if (inp_addr == jrpriv->state.inpbusaddr) {
				/* JR has not been reset */
				err = caam_jr_restart_processing(dev);
				if (err) {
					dev_err(dev,
						"Restart processing failed\n");
					return err;
				}

				tasklet_enable(&jrpriv->irqtask);

				clrsetbits_32(&jrpriv->rregs->rconfig_lo,
					      JRCFG_IMSK, 0);

				goto add_jr;
			} else if (ctrlpriv->optee_en) {
				/* JR has been used by OP-TEE, reset it */
				err = caam_reset_hw_jr(dev);
				if (err) {
					dev_err(dev, "Failed to reset JR\n");
					return err;
				}
			} else {
				/* No explanation for the state, return an error */
				return -EIO;
			}
		}

		caam_jr_reset_index(jrpriv);
		caam_jr_init_hw(dev, jrpriv->state.inpbusaddr,
				jrpriv->state.outbusaddr);

		tasklet_enable(&jrpriv->irqtask);
	} else if (device_may_wakeup(&pdev->dev)) {
		disable_irq_wake(jrpriv->irq);
	}

add_jr:
	spin_lock(&driver_data.jr_alloc_lock);
	list_add_tail(&jrpriv->list_node, &driver_data.jr_list);
	spin_unlock(&driver_data.jr_alloc_lock);

	if (jrpriv->hwrng)
		jrpriv->hwrng = !caam_rng_init(dev->parent);

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(caam_jr_pm_ops, caam_jr_suspend, caam_jr_resume);

static const struct of_device_id caam_jr_match[] = {
	{
		.compatible = "fsl,sec-v4.0-job-ring",
	},
	{
		.compatible = "fsl,sec4.0-job-ring",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_jr_match);

static struct platform_driver caam_jr_driver = {
	.driver = {
		.name = "caam_jr",
		.of_match_table = caam_jr_match,
		.pm = pm_ptr(&caam_jr_pm_ops),
	},
	.probe = caam_jr_probe,
	.remove = caam_jr_remove,
	.shutdown = caam_jr_remove,
};

static int __init jr_driver_init(void)
{
	spin_lock_init(&driver_data.jr_alloc_lock);
	INIT_LIST_HEAD(&driver_data.jr_list);
	return platform_driver_register(&caam_jr_driver);
}

static void __exit jr_driver_exit(void)
{
	platform_driver_unregister(&caam_jr_driver);
}

module_init(jr_driver_init);
module_exit(jr_driver_exit);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM JR request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");