// SPDX-License-Identifier: GPL-2.0+
/*
 * CAAM control-plane driver backend
 * Controller-level driver, kernel property detection, initialization
 *
 * Copyright 2008-2012 Freescale Semiconductor, Inc.
 * Copyright 2018-2019, 2023 NXP
 */

#include <linux/device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/sys_soc.h>
#include <linux/fsl/mc.h>

#include "compat.h"
#include "debugfs.h"
#include "regs.h"
#include "intern.h"
#include "jr.h"
#include "desc_constr.h"
#include "ctrl.h"

bool caam_dpaa2;
EXPORT_SYMBOL(caam_dpaa2);

#ifdef CONFIG_CAAM_QI
#include "qi.h"
#endif

/*
 * Descriptor to instantiate RNG State Handle 0 in normal mode and
 * load the JDKEK, TDKEK and TDSK registers
 */
static void build_instantiation_desc(u32 *desc, int handle, int do_sk)
{
	u32 *jump_cmd, op_flags;

	init_job_desc(desc, 0);

	op_flags = OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			(handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INIT |
			OP_ALG_PR_ON;

	/* INIT RNG in non-test mode */
	append_operation(desc, op_flags);

	if (!handle && do_sk) {
		/*
		 * For SH0, Secure Keys must be generated as well
		 */

		/* wait for done */
		jump_cmd = append_jump(desc, JUMP_CLASS_CLASS1);
		set_jump_tgt_here(desc, jump_cmd);

		/*
		 * load 1 to clear written reg:
		 * resets the done interrupt and returns the RNG to idle.
		 */
		append_load_imm_u32(desc, 1, LDST_SRCDST_WORD_CLRW);

		/* Initialize State Handle */
		append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
				 OP_ALG_AAI_RNG4_SK);
	}

	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}
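
/*
 * For reference, the instantiation descriptor built above consists of a job
 * header, an ALGORITHM OPERATION that initializes the given state handle with
 * prediction resistance on, and a terminating HALT jump; for SH0 with secure
 * key generation it additionally carries a done-wait JUMP, a LOAD of 1 to the
 * clear-written register, and a second OPERATION with OP_ALG_AAI_RNG4_SK.
 */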

/* Descriptor for deinstantiation of State Handle 0 of the RNG block. */
static void build_deinstantiation_desc(u32 *desc, int handle)
{
	init_job_desc(desc, 0);

	/* Uninstantiate State Handle 0 */
	append_operation(desc, OP_TYPE_CLASS1_ALG | OP_ALG_ALGSEL_RNG |
			 (handle << OP_ALG_AAI_SHIFT) | OP_ALG_AS_INITFINAL);

	append_jump(desc, JUMP_CLASS_CLASS1 | JUMP_TYPE_HALT);
}

static const struct of_device_id imx8m_machine_match[] = {
	{ .compatible = "fsl,imx8mm", },
	{ .compatible = "fsl,imx8mn", },
	{ .compatible = "fsl,imx8mp", },
	{ .compatible = "fsl,imx8mq", },
	{ .compatible = "fsl,imx8ulp", },
	{ }
};
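
/*
 * This machine-level match table serves two purposes in this file: in
 * run_descriptor_deco0() it forces the DECO request-source handshake on the
 * listed i.MX8M-family SoCs even when virtualization is disabled, and in
 * caam_probe() it defers probing on those machines until soc_device_match()
 * can identify the SoC.
 */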

/*
 * run_descriptor_deco0 - runs a descriptor on DECO0, under direct control of
 * the software (no JR/QI used).
 * @ctrldev - pointer to device
 * @status - descriptor status, after being run
 *
 * Return: - 0 if no error occurred
 *	   - -ENODEV if the DECO couldn't be acquired
 *	   - -EAGAIN if an error occurred while executing the descriptor
 */
static inline int run_descriptor_deco0(struct device *ctrldev, u32 *desc,
				       u32 *status)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	struct caam_deco __iomem *deco = ctrlpriv->deco;
	unsigned int timeout = 100000;
	u32 deco_dbg_reg, deco_state, flags;
	int i;

	if (ctrlpriv->virt_en == 1 ||
	    /*
	     * Apparently on i.MX8M{Q,M,N,P} it doesn't matter if virt_en == 1
	     * and the following steps should be performed regardless
	     */
	    of_match_node(imx8m_machine_match, of_root)) {
		clrsetbits_32(&ctrl->deco_rsr, 0, DECORSR_JR0);

		while (!(rd_reg32(&ctrl->deco_rsr) & DECORSR_VALID) &&
		       --timeout)
			cpu_relax();

		timeout = 100000;
	}

	clrsetbits_32(&ctrl->deco_rq, 0, DECORR_RQD0ENABLE);

	while (!(rd_reg32(&ctrl->deco_rq) & DECORR_DEN0) &&
	       --timeout)
		cpu_relax();

	if (!timeout) {
		dev_err(ctrldev, "failed to acquire DECO 0\n");
		clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);
		return -ENODEV;
	}

	for (i = 0; i < desc_len(desc); i++)
		wr_reg32(&deco->descbuf[i], caam32_to_cpu(*(desc + i)));

	flags = DECO_JQCR_WHL;
	/*
	 * If the descriptor length is longer than 4 words, then the
	 * FOUR bit in JRCTRL register must be set.
	 */
	if (desc_len(desc) >= 4)
		flags |= DECO_JQCR_FOUR;

	/* Instruct the DECO to execute it */
	clrsetbits_32(&deco->jr_ctl_hi, 0, flags);

	timeout = 10000000;
	do {
		deco_dbg_reg = rd_reg32(&deco->desc_dbg);

		if (ctrlpriv->era < 10)
			deco_state = (deco_dbg_reg & DESC_DBG_DECO_STAT_MASK) >>
				     DESC_DBG_DECO_STAT_SHIFT;
		else
			deco_state = (rd_reg32(&deco->dbg_exec) &
				      DESC_DER_DECO_STAT_MASK) >>
				     DESC_DER_DECO_STAT_SHIFT;

		/*
		 * If an error occurred in the descriptor, then
		 * the DECO status field will be set to 0x0D
		 */
		if (deco_state == DECO_STAT_HOST_ERR)
			break;

		cpu_relax();
	} while ((deco_dbg_reg & DESC_DBG_DECO_STAT_VALID) && --timeout);

	*status = rd_reg32(&deco->op_status_hi) &
		  DECO_OP_STATUS_HI_ERR_MASK;

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->deco_rsr, DECORSR_JR0, 0);

	/* Mark the DECO as free */
	clrsetbits_32(&ctrl->deco_rq, DECORR_RQD0ENABLE, 0);

	if (!timeout)
		return -EAGAIN;

	return 0;
}
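
/*
 * Typical call sequence, as used by the RNG helpers below: build a descriptor
 * with build_instantiation_desc() or build_deinstantiation_desc(), run it via
 * run_descriptor_deco0(), then treat a non-zero return code or a status other
 * than JRSTA_SSRC_JUMP_HALT_CC (which, judging by the terminating HALT jump
 * appended to those descriptors, is the expected completion status) as
 * failure.
 */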

/*
 * deinstantiate_rng - builds and executes a descriptor on DECO0,
 * which deinitializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *			for the RNG4 state handles which exist in
 *			the RNG4 block: 1 if it's been instantiated
 *
 * Return: - 0 if no error occurred
 *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
 *	   - -ENODEV if DECO0 couldn't be acquired
 *	   - -EAGAIN if an error occurred when executing the descriptor
 */
static int deinstantiate_rng(struct device *ctrldev, int state_handle_mask)
{
	u32 *desc, status;
	int sh_idx, ret = 0;

	desc = kmalloc(CAAM_CMD_SZ * 3, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		/*
		 * If the corresponding bit is set, then it means the state
		 * handle was initialized by us, and thus it needs to be
		 * deinitialized as well
		 */
		if ((1 << sh_idx) & state_handle_mask) {
			/*
			 * Create the descriptor for deinstantiating this state
			 * handle
			 */
			build_deinstantiation_desc(desc, sh_idx);

			/* Try to run it through DECO0 */
			ret = run_descriptor_deco0(ctrldev, desc, &status);

			if (ret ||
			    (status && status != JRSTA_SSRC_JUMP_HALT_CC)) {
				dev_err(ctrldev,
					"Failed to deinstantiate RNG4 SH%d\n",
					sh_idx);
				break;
			}
			dev_info(ctrldev, "Deinstantiated RNG4 SH%d\n", sh_idx);
		}
	}

	kfree(desc);

	return ret;
}

static void devm_deinstantiate_rng(void *data)
{
	struct device *ctrldev = data;
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);

	/*
	 * De-initialize RNG state handles initialized by this driver.
	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
	 */
	if (ctrlpriv->rng4_sh_init)
		deinstantiate_rng(ctrldev, ctrlpriv->rng4_sh_init);
}

/*
 * instantiate_rng - builds and executes a descriptor on DECO0,
 * which initializes the RNG block.
 * @ctrldev - pointer to device
 * @state_handle_mask - bitmask containing the instantiation status
 *			for the RNG4 state handles which exist in
 *			the RNG4 block: 1 if it's been instantiated
 *			by an external entry, 0 otherwise.
 * @gen_sk - generate data to be loaded into the JDKEK, TDKEK and TDSK;
 *	     Caution: this can be done only once; if the keys need to be
 *	     regenerated, a POR is required
 *
 * Return: - 0 if no error occurred
 *	   - -ENOMEM if there isn't enough memory to allocate the descriptor
 *	   - -ENODEV if DECO0 couldn't be acquired
 *	   - -EAGAIN if an error occurred when executing the descriptor
 *	     f.i. there was a RNG hardware error due to not "good enough"
 *	     entropy being acquired.
 */
static int instantiate_rng(struct device *ctrldev, int state_handle_mask,
			   int gen_sk)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(ctrldev);
	struct caam_ctrl __iomem *ctrl;
	u32 *desc, status = 0, rdsta_val;
	int ret = 0, sh_idx;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	desc = kmalloc(CAAM_CMD_SZ * 7, GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	for (sh_idx = 0; sh_idx < RNG4_MAX_HANDLES; sh_idx++) {
		const u32 rdsta_if = RDSTA_IF0 << sh_idx;
		const u32 rdsta_pr = RDSTA_PR0 << sh_idx;
		const u32 rdsta_mask = rdsta_if | rdsta_pr;

		/* Clear the contents before using the descriptor */
		memset(desc, 0x00, CAAM_CMD_SZ * 7);

		/*
		 * If the corresponding bit is set, this state handle
		 * was initialized by somebody else, so it's left alone.
		 */
		if (rdsta_if & state_handle_mask) {
			if (rdsta_pr & state_handle_mask)
				continue;

			dev_info(ctrldev,
				 "RNG4 SH%d was previously instantiated without prediction resistance. Tearing it down\n",
				 sh_idx);

			ret = deinstantiate_rng(ctrldev, rdsta_if);
			if (ret)
				break;
		}

		/* Create the descriptor for instantiating RNG State Handle */
		build_instantiation_desc(desc, sh_idx, gen_sk);

		/* Try to run it through DECO0 */
		ret = run_descriptor_deco0(ctrldev, desc, &status);

		/*
		 * If ret is not 0, or descriptor status is not 0, then
		 * something went wrong. No need to try the next state
		 * handle (if available), bail out here.
		 * Also, if for some reason, the State Handle didn't get
		 * instantiated although the descriptor has finished
		 * without any error (HW optimizations for later
		 * CAAM eras), then try again.
		 */
		if (ret)
			break;

		rdsta_val = rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
		if ((status && status != JRSTA_SSRC_JUMP_HALT_CC) ||
		    (rdsta_val & rdsta_mask) != rdsta_mask) {
			ret = -EAGAIN;
			break;
		}

		dev_info(ctrldev, "Instantiated RNG4 SH%d\n", sh_idx);
	}

	kfree(desc);

	if (ret)
		return ret;

	return devm_add_action_or_reset(ctrldev, devm_deinstantiate_rng, ctrldev);
}
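
/*
 * Design note: instantiate_rng() registers devm_deinstantiate_rng() as a devm
 * action, so handles instantiated by this driver are torn down automatically
 * on unbind or probe failure; caam_ctrl_resume() removes the action before
 * re-initializing, since the hardware (and with it the RNG state) is reset
 * across the power transition.
 */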

/*
 * kick_trng - sets the various parameters for enabling the initialization
 * of the RNG4 block in CAAM
 * @dev - pointer to the controller device
 * @ent_delay - Defines the length (in system clocks) of each entropy sample.
 */
static void kick_trng(struct device *dev, int ent_delay)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
	struct caam_ctrl __iomem *ctrl;
	struct rng4tst __iomem *r4tst;
	u32 val, rtsdctl;

	ctrl = (struct caam_ctrl __iomem *)ctrlpriv->ctrl;
	r4tst = &ctrl->r4tst[0];

	/*
	 * Setting both RTMCTL:PRGM and RTMCTL:TRNG_ACC causes TRNG to
	 * properly invalidate the entropy in the entropy register and
	 * force re-generation.
	 */
	clrsetbits_32(&r4tst->rtmctl, 0, RTMCTL_PRGM | RTMCTL_ACC);

	/*
	 * Performance-wise, it does not make sense to
	 * set the delay to a value that is lower
	 * than the last one that worked (i.e. the state handles
	 * were instantiated properly).
	 */
	rtsdctl = rd_reg32(&r4tst->rtsdctl);
	val = (rtsdctl & RTSDCTL_ENT_DLY_MASK) >> RTSDCTL_ENT_DLY_SHIFT;
	if (ent_delay > val) {
		val = ent_delay;
		/* min. freq. count, equal to 1/4 of the entropy sample length */
		wr_reg32(&r4tst->rtfrqmin, val >> 2);
		/* disable maximum frequency count */
		wr_reg32(&r4tst->rtfrqmax, RTFRQMAX_DISABLE);
	}

	wr_reg32(&r4tst->rtsdctl, (val << RTSDCTL_ENT_DLY_SHIFT) |
		 RTSDCTL_SAMP_SIZE_VAL);

	/*
	 * To avoid reprogramming the self-test parameters over and over again,
	 * use RTSDCTL[SAMP_SIZE] as an indicator.
	 */
	if ((rtsdctl & RTSDCTL_SAMP_SIZE_MASK) != RTSDCTL_SAMP_SIZE_VAL) {
		wr_reg32(&r4tst->rtscmisc, (2 << 16) | 32);
		wr_reg32(&r4tst->rtpkrrng, 570);
		wr_reg32(&r4tst->rtpkrmax, 1600);
		wr_reg32(&r4tst->rtscml, (122 << 16) | 317);
		wr_reg32(&r4tst->rtscrl[0], (80 << 16) | 107);
		wr_reg32(&r4tst->rtscrl[1], (57 << 16) | 62);
		wr_reg32(&r4tst->rtscrl[2], (39 << 16) | 39);
		wr_reg32(&r4tst->rtscrl[3], (27 << 16) | 26);
		wr_reg32(&r4tst->rtscrl[4], (19 << 16) | 18);
		wr_reg32(&r4tst->rtscrl[5], (18 << 16) | 17);
	}

	/*
	 * select raw sampling in both entropy shifter
	 * and statistical checker; put RNG4 into run mode
	 */
	clrsetbits_32(&r4tst->rtmctl, RTMCTL_PRGM | RTMCTL_ACC,
		      RTMCTL_SAMP_MODE_RAW_ES_SC);
}
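
/*
 * Worked example for the delay programming above, with hypothetical values:
 * if ent_delay = 3200 system clocks and RTSDCTL[ENT_DLY] currently holds a
 * smaller value, the code writes 3200 to the delay field and 3200 >> 2 = 800
 * to RTFRQMIN, i.e. the minimum frequency count is a quarter of the entropy
 * sample length, while the maximum frequency count stays disabled.
 */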

static int caam_get_era_from_hw(struct caam_perfmon __iomem *perfmon)
{
	static const struct {
		u16 ip_id;
		u8 maj_rev;
		u8 era;
	} id[] = {
		{0x0A10, 1, 1},
		{0x0A10, 2, 2},
		{0x0A12, 1, 3},
		{0x0A14, 1, 3},
		{0x0A14, 2, 4},
		{0x0A16, 1, 4},
		{0x0A10, 3, 4},
		{0x0A11, 1, 4},
		{0x0A18, 1, 4},
		{0x0A11, 2, 5},
		{0x0A12, 2, 5},
		{0x0A13, 1, 5},
		{0x0A1C, 1, 5}
	};
	u32 ccbvid, id_ms;
	u8 maj_rev, era;
	u16 ip_id;
	int i;

	ccbvid = rd_reg32(&perfmon->ccb_id);
	era = (ccbvid & CCBVID_ERA_MASK) >> CCBVID_ERA_SHIFT;
	if (era)	/* This is '0' prior to CAAM ERA-6 */
		return era;

	id_ms = rd_reg32(&perfmon->caam_id_ms);
	ip_id = (id_ms & SECVID_MS_IPID_MASK) >> SECVID_MS_IPID_SHIFT;
	maj_rev = (id_ms & SECVID_MS_MAJ_REV_MASK) >> SECVID_MS_MAJ_REV_SHIFT;

	for (i = 0; i < ARRAY_SIZE(id); i++)
		if (id[i].ip_id == ip_id && id[i].maj_rev == maj_rev)
			return id[i].era;

	return -ENOTSUPP;
}

/**
 * caam_get_era() - Return the ERA of the SEC on the SoC, based on the
 * optional "fsl,sec-era" property in the DTS. This property is updated
 * by u-boot.
 * If the property is not present, an attempt is made to retrieve the
 * CAAM era via register reads instead.
 *
 * @perfmon: Performance Monitor Registers
 */
static int caam_get_era(struct caam_perfmon __iomem *perfmon)
{
	struct device_node *caam_node;
	int ret;
	u32 prop;

	caam_node = of_find_compatible_node(NULL, NULL, "fsl,sec-v4.0");
	ret = of_property_read_u32(caam_node, "fsl,sec-era", &prop);
	of_node_put(caam_node);

	if (!ret)
		return prop;
	else
		return caam_get_era_from_hw(perfmon);
}
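
/*
 * A minimal sketch of the DT fragment this relies on (node name and unit
 * address are illustrative, not taken from a real board file):
 *
 *	crypto@1700000 {
 *		compatible = "fsl,sec-v4.0";
 *		fsl,sec-era = <2>;
 *	};
 */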

/*
 * ERRATA: imx6 devices (imx6D, imx6Q, imx6DL, imx6S, imx6DP and imx6QP)
 * have an issue wherein AXI bus transactions may not occur in the correct
 * order. This isn't a problem running single descriptors, but can be if
 * running multiple concurrent descriptors. Reworking the driver to throttle
 * to single requests is impractical, thus the workaround is to limit the AXI
 * pipeline to a depth of 1 (from its default of 4) to preclude this situation
 * from occurring.
 */
static void handle_imx6_err005766(u32 __iomem *mcr)
{
	if (of_machine_is_compatible("fsl,imx6q") ||
	    of_machine_is_compatible("fsl,imx6dl") ||
	    of_machine_is_compatible("fsl,imx6qp"))
		clrsetbits_32(mcr, MCFGR_AXIPIPE_MASK,
			      1 << MCFGR_AXIPIPE_SHIFT);
}

static const struct of_device_id caam_match[] = {
	{
		.compatible = "fsl,sec-v4.0",
	},
	{
		.compatible = "fsl,sec4.0",
	},
	{},
};
MODULE_DEVICE_TABLE(of, caam_match);

struct caam_imx_data {
	const struct clk_bulk_data *clks;
	int num_clks;
};

static const struct clk_bulk_data caam_imx6_clks[] = {
	{ .id = "ipg" },
	{ .id = "mem" },
	{ .id = "aclk" },
	{ .id = "emi_slow" },
};

static const struct caam_imx_data caam_imx6_data = {
	.clks = caam_imx6_clks,
	.num_clks = ARRAY_SIZE(caam_imx6_clks),
};

static const struct clk_bulk_data caam_imx7_clks[] = {
	{ .id = "ipg" },
	{ .id = "aclk" },
};

static const struct caam_imx_data caam_imx7_data = {
	.clks = caam_imx7_clks,
	.num_clks = ARRAY_SIZE(caam_imx7_clks),
};

static const struct clk_bulk_data caam_imx6ul_clks[] = {
	{ .id = "ipg" },
	{ .id = "mem" },
	{ .id = "aclk" },
};

static const struct caam_imx_data caam_imx6ul_data = {
	.clks = caam_imx6ul_clks,
	.num_clks = ARRAY_SIZE(caam_imx6ul_clks),
};

static const struct clk_bulk_data caam_vf610_clks[] = {
	{ .id = "ipg" },
};

static const struct caam_imx_data caam_vf610_data = {
	.clks = caam_vf610_clks,
	.num_clks = ARRAY_SIZE(caam_vf610_clks),
};

static const struct soc_device_attribute caam_imx_soc_table[] = {
	{ .soc_id = "i.MX6UL", .data = &caam_imx6ul_data },
	{ .soc_id = "i.MX6*",  .data = &caam_imx6_data },
	{ .soc_id = "i.MX7*",  .data = &caam_imx7_data },
	{ .soc_id = "i.MX8M*", .data = &caam_imx7_data },
	{ .soc_id = "VF*",     .data = &caam_vf610_data },
	{ .family = "Freescale i.MX" },
	{ /* sentinel */ }
};
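
/*
 * Note on ordering: soc_device_match() returns the first matching entry, so
 * the exact "i.MX6UL" entry must precede the "i.MX6*" glob, and the entry
 * carrying only .family acts as a catch-all for i.MX parts without dedicated
 * clock data.
 */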

static void disable_clocks(void *data)
{
	struct caam_drv_private *ctrlpriv = data;

	clk_bulk_disable_unprepare(ctrlpriv->num_clks, ctrlpriv->clks);
}

static int init_clocks(struct device *dev, const struct caam_imx_data *data)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
	int ret;

	ctrlpriv->num_clks = data->num_clks;
	ctrlpriv->clks = devm_kmemdup(dev, data->clks,
				      data->num_clks * sizeof(data->clks[0]),
				      GFP_KERNEL);
	if (!ctrlpriv->clks)
		return -ENOMEM;

	ret = devm_clk_bulk_get(dev, ctrlpriv->num_clks, ctrlpriv->clks);
	if (ret) {
		dev_err(dev,
			"Failed to request all necessary clocks\n");
		return ret;
	}

	ret = clk_bulk_prepare_enable(ctrlpriv->num_clks, ctrlpriv->clks);
	if (ret) {
		dev_err(dev,
			"Failed to prepare/enable all necessary clocks\n");
		return ret;
	}

	return devm_add_action_or_reset(dev, disable_clocks, ctrlpriv);
}

static void caam_remove_debugfs(void *root)
{
	debugfs_remove_recursive(root);
}

#ifdef CONFIG_FSL_MC_BUS
static bool check_version(struct fsl_mc_version *mc_version, u32 major,
			  u32 minor, u32 revision)
{
	if (mc_version->major > major)
		return true;

	if (mc_version->major == major) {
		if (mc_version->minor > minor)
			return true;

		if (mc_version->minor == minor &&
		    mc_version->revision > revision)
			return true;
	}

	return false;
}
#endif
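
/*
 * For example, with the 10.20.0 threshold used in caam_probe() below,
 * check_version() returns true for MC firmware 10.21.0 or 11.0.0 but false
 * for 10.20.0 itself, since equality on all three fields does not satisfy the
 * strictly-greater revision test.
 */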

static bool needs_entropy_delay_adjustment(void)
{
	if (of_machine_is_compatible("fsl,imx6sx"))
		return true;
	return false;
}

static int caam_ctrl_rng_init(struct device *dev)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	int ret, gen_sk, ent_delay = RTSDCTL_ENT_DLY_MIN;
	u8 rng_vid;

	if (ctrlpriv->era < 10) {
		struct caam_perfmon __iomem *perfmon;

		perfmon = ctrlpriv->total_jobrs ?
			  (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon :
			  (struct caam_perfmon __iomem *)&ctrl->perfmon;

		rng_vid = (rd_reg32(&perfmon->cha_id_ls) &
			   CHA_ID_LS_RNG_MASK) >> CHA_ID_LS_RNG_SHIFT;
	} else {
		struct version_regs __iomem *vreg;

		vreg = ctrlpriv->total_jobrs ?
		       (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
		       (struct version_regs __iomem *)&ctrl->vreg;

		rng_vid = (rd_reg32(&vreg->rng) & CHA_VER_VID_MASK) >>
			  CHA_VER_VID_SHIFT;
	}

	/*
	 * If SEC has RNG version >= 4 and RNG state handle has not been
	 * already instantiated, do RNG instantiation
	 * In case of SoCs with Management Complex, RNG is managed by MC f/w.
	 */
	if (!(ctrlpriv->mc_en && ctrlpriv->pr_support) && rng_vid >= 4) {
		ctrlpriv->rng4_sh_init =
			rd_reg32(&ctrl->r4tst[0].rdsta);
		/*
		 * If the secure keys (TDKEK, JDKEK, TDSK) were already
		 * generated, signal this to the function that is instantiating
		 * the state handles. An error would occur if RNG4 attempts
		 * to regenerate these keys before the next POR.
		 */
		gen_sk = ctrlpriv->rng4_sh_init & RDSTA_SKVN ? 0 : 1;
		ctrlpriv->rng4_sh_init &= RDSTA_MASK;
		do {
			int inst_handles =
				rd_reg32(&ctrl->r4tst[0].rdsta) & RDSTA_MASK;
			/*
			 * If either SH were instantiated by somebody else
			 * (e.g. u-boot) then it is assumed that the entropy
			 * parameters are properly set and thus the function
			 * setting these (kick_trng(...)) is skipped.
			 * Also, if a handle was instantiated, do not change
			 * the TRNG parameters.
			 */
			if (needs_entropy_delay_adjustment())
				ent_delay = 12000;
			if (!(ctrlpriv->rng4_sh_init || inst_handles)) {
				dev_info(dev,
					 "Entropy delay = %u\n",
					 ent_delay);
				kick_trng(dev, ent_delay);
				ent_delay += 400;
			}
			/*
			 * if instantiate_rng(...) fails, the loop will rerun
			 * and the kick_trng(...) function will modify the
			 * upper and lower limits of the entropy sampling
			 * interval, leading to a successful initialization of
			 * the RNG.
			 */
			ret = instantiate_rng(dev, inst_handles,
					      gen_sk);
			/*
			 * Entropy delay is determined via TRNG characterization.
			 * TRNG characterization is run across different voltages
			 * and temperatures.
			 * If worst case value for ent_dly is identified,
			 * the loop can be skipped for that platform.
			 */
			if (needs_entropy_delay_adjustment())
				break;
			if (ret == -EAGAIN)
				/*
				 * if here, the loop will rerun,
				 * so don't hog the CPU
				 */
				cpu_relax();
		} while ((ret == -EAGAIN) && (ent_delay < RTSDCTL_ENT_DLY_MAX));
		if (ret) {
			dev_err(dev, "failed to instantiate RNG");
			return ret;
		}
		/*
		 * Set handles initialized by this module as the complement of
		 * the already initialized ones
		 */
		ctrlpriv->rng4_sh_init = ~ctrlpriv->rng4_sh_init & RDSTA_MASK;

		/* Enable RDB bit so that RNG works faster */
		clrsetbits_32(&ctrl->scfgr, 0, SCFGR_RDBENABLE);
	}

	return 0;
}
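
/*
 * To make the retry policy above concrete: ent_delay starts at
 * RTSDCTL_ENT_DLY_MIN and grows by 400 system clocks each time the TRNG is
 * re-kicked, so instantiation is retried with progressively longer entropy
 * samples until it succeeds or RTSDCTL_ENT_DLY_MAX is reached; i.MX6SX uses
 * the characterized worst-case value of 12000 instead and skips the loop.
 */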

/* Indicate if the internal state of the CAAM is lost during PM */
static int caam_off_during_pm(void)
{
	bool not_off_during_pm = of_machine_is_compatible("fsl,imx6q") ||
				 of_machine_is_compatible("fsl,imx6qp") ||
				 of_machine_is_compatible("fsl,imx6dl");

	return not_off_during_pm ? 0 : 1;
}

static void caam_state_save(struct device *dev)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
	struct caam_ctl_state *state = &ctrlpriv->state;
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	u32 deco_inst, jr_inst;
	int i;

	state->mcr = rd_reg32(&ctrl->mcr);
	state->scfgr = rd_reg32(&ctrl->scfgr);

	deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
		     CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
	for (i = 0; i < deco_inst; i++) {
		state->deco_mid[i].liodn_ms =
			rd_reg32(&ctrl->deco_mid[i].liodn_ms);
		state->deco_mid[i].liodn_ls =
			rd_reg32(&ctrl->deco_mid[i].liodn_ls);
	}

	jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
		   CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
	for (i = 0; i < jr_inst; i++) {
		state->jr_mid[i].liodn_ms =
			rd_reg32(&ctrl->jr_mid[i].liodn_ms);
		state->jr_mid[i].liodn_ls =
			rd_reg32(&ctrl->jr_mid[i].liodn_ls);
	}
}

static void caam_state_restore(const struct device *dev)
{
	const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
	const struct caam_ctl_state *state = &ctrlpriv->state;
	struct caam_ctrl __iomem *ctrl = ctrlpriv->ctrl;
	u32 deco_inst, jr_inst;
	int i;

	wr_reg32(&ctrl->mcr, state->mcr);
	wr_reg32(&ctrl->scfgr, state->scfgr);

	deco_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
		     CHA_ID_MS_DECO_MASK) >> CHA_ID_MS_DECO_SHIFT;
	for (i = 0; i < deco_inst; i++) {
		wr_reg32(&ctrl->deco_mid[i].liodn_ms,
			 state->deco_mid[i].liodn_ms);
		wr_reg32(&ctrl->deco_mid[i].liodn_ls,
			 state->deco_mid[i].liodn_ls);
	}

	jr_inst = (rd_reg32(&ctrl->perfmon.cha_num_ms) &
		   CHA_ID_MS_JR_MASK) >> CHA_ID_MS_JR_SHIFT;
	for (i = 0; i < jr_inst; i++) {
		wr_reg32(&ctrl->jr_mid[i].liodn_ms,
			 state->jr_mid[i].liodn_ms);
		wr_reg32(&ctrl->jr_mid[i].liodn_ls,
			 state->jr_mid[i].liodn_ls);
	}

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
			      JRSTART_JR1_START | JRSTART_JR2_START |
			      JRSTART_JR3_START);
}

static int caam_ctrl_suspend(struct device *dev)
{
	const struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);

	if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en)
		caam_state_save(dev);

	return 0;
}

static int caam_ctrl_resume(struct device *dev)
{
	struct caam_drv_private *ctrlpriv = dev_get_drvdata(dev);
	int ret = 0;

	if (ctrlpriv->caam_off_during_pm && !ctrlpriv->optee_en) {
		caam_state_restore(dev);

		/* HW and rng will be reset so deinstantiation can be removed */
		devm_remove_action(dev, devm_deinstantiate_rng, dev);
		ret = caam_ctrl_rng_init(dev);
	}

	return ret;
}

static DEFINE_SIMPLE_DEV_PM_OPS(caam_ctrl_pm_ops, caam_ctrl_suspend, caam_ctrl_resume);

/* Probe routine for CAAM top (controller) level */
static int caam_probe(struct platform_device *pdev)
{
	int ret, ring;
	u64 caam_id;
	const struct soc_device_attribute *imx_soc_match;
	struct device *dev;
	struct device_node *nprop, *np;
	struct caam_ctrl __iomem *ctrl;
	struct caam_drv_private *ctrlpriv;
	struct caam_perfmon __iomem *perfmon;
	struct dentry *dfs_root;
	u32 scfgr, comp_params;
	int pg_size;
	int BLOCK_OFFSET = 0;
	bool reg_access = true;

	ctrlpriv = devm_kzalloc(&pdev->dev, sizeof(*ctrlpriv), GFP_KERNEL);
	if (!ctrlpriv)
		return -ENOMEM;

	dev = &pdev->dev;
	dev_set_drvdata(dev, ctrlpriv);
	nprop = pdev->dev.of_node;

	imx_soc_match = soc_device_match(caam_imx_soc_table);
	if (!imx_soc_match && of_match_node(imx8m_machine_match, of_root))
		return -EPROBE_DEFER;

	caam_imx = (bool)imx_soc_match;

	ctrlpriv->caam_off_during_pm = caam_imx && caam_off_during_pm();

	if (imx_soc_match) {
		/*
		 * Until Layerscape and i.MX OP-TEE get in sync,
		 * only i.MX OP-TEE use cases disallow access to
		 * caam page 0 (controller) registers.
		 */
		np = of_find_compatible_node(NULL, NULL, "linaro,optee-tz");
		ctrlpriv->optee_en = !!np;
		of_node_put(np);

		reg_access = !ctrlpriv->optee_en;

		if (!imx_soc_match->data) {
			dev_err(dev, "No clock data provided for i.MX SoC");
			return -EINVAL;
		}

		ret = init_clocks(dev, imx_soc_match->data);
		if (ret)
			return ret;
	}

	/* Get configuration properties from device tree */
	/* First, get register page */
	ctrl = devm_of_iomap(dev, nprop, 0, NULL);
	ret = PTR_ERR_OR_ZERO(ctrl);
	if (ret) {
		dev_err(dev, "caam: of_iomap() failed\n");
		return ret;
	}

	ring = 0;
	for_each_available_child_of_node(nprop, np)
		if (of_device_is_compatible(np, "fsl,sec-v4.0-job-ring") ||
		    of_device_is_compatible(np, "fsl,sec4.0-job-ring")) {
			u32 reg;

			if (of_property_read_u32_index(np, "reg", 0, &reg)) {
				dev_err(dev, "%s read reg property error\n",
					np->full_name);
				continue;
			}

			ctrlpriv->jr[ring] = (struct caam_job_ring __iomem __force *)
					     ((__force uint8_t *)ctrl + reg);

			ctrlpriv->total_jobrs++;
			ring++;
		}

	/*
	 * Wherever possible, instead of accessing registers from the global page,
	 * use the alias registers in the first (cf. DT nodes order)
	 * job ring's page.
	 */
	perfmon = ring ? (struct caam_perfmon __iomem *)&ctrlpriv->jr[0]->perfmon :
			 (struct caam_perfmon __iomem *)&ctrl->perfmon;

	caam_little_end = !(bool)(rd_reg32(&perfmon->status) &
				  (CSTA_PLEND | CSTA_ALT_PLEND));
	comp_params = rd_reg32(&perfmon->comp_parms_ms);
	if (reg_access && comp_params & CTPR_MS_PS &&
	    rd_reg32(&ctrl->mcr) & MCFGR_LONG_PTR)
		caam_ptr_sz = sizeof(u64);
	else
		caam_ptr_sz = sizeof(u32);
	caam_dpaa2 = !!(comp_params & CTPR_MS_DPAA2);
	ctrlpriv->qi_present = !!(comp_params & CTPR_MS_QI_MASK);

#ifdef CONFIG_CAAM_QI
	/* If (DPAA 1.x) QI present, check whether dependencies are available */
	if (ctrlpriv->qi_present && !caam_dpaa2) {
		ret = qman_is_probed();
		if (!ret) {
			return -EPROBE_DEFER;
		} else if (ret < 0) {
			dev_err(dev, "failing probe due to qman probe error\n");
			return -ENODEV;
		}

		ret = qman_portals_probed();
		if (!ret) {
			return -EPROBE_DEFER;
		} else if (ret < 0) {
			dev_err(dev, "failing probe due to qman portals probe error\n");
			return -ENODEV;
		}
	}
#endif

	/* Allocating the BLOCK_OFFSET based on the supported page size on
	 * the platform
	 */
	pg_size = (comp_params & CTPR_MS_PG_SZ_MASK) >> CTPR_MS_PG_SZ_SHIFT;
	if (pg_size == 0)
		BLOCK_OFFSET = PG_SIZE_4K;
	else
		BLOCK_OFFSET = PG_SIZE_64K;

	ctrlpriv->ctrl = (struct caam_ctrl __iomem __force *)ctrl;
	ctrlpriv->assure = (struct caam_assurance __iomem __force *)
			   ((__force uint8_t *)ctrl +
			    BLOCK_OFFSET * ASSURE_BLOCK_NUMBER
			   );
	ctrlpriv->deco = (struct caam_deco __iomem __force *)
			 ((__force uint8_t *)ctrl +
			  BLOCK_OFFSET * DECO_BLOCK_NUMBER
			 );

	/* Get the IRQ of the controller (for security violations only) */
	ctrlpriv->secvio_irq = irq_of_parse_and_map(nprop, 0);
	np = of_find_compatible_node(NULL, NULL, "fsl,qoriq-mc");
	ctrlpriv->mc_en = !!np;
	of_node_put(np);

#ifdef CONFIG_FSL_MC_BUS
	if (ctrlpriv->mc_en) {
		struct fsl_mc_version *mc_version;

		mc_version = fsl_mc_get_version();
		if (mc_version)
			ctrlpriv->pr_support = check_version(mc_version, 10, 20,
							     0);
		else
			return -EPROBE_DEFER;
	}
#endif

	if (!reg_access)
		goto set_dma_mask;

	/*
	 * Enable DECO watchdogs and, if this is a PHYS_ADDR_T_64BIT kernel,
	 * long pointers in master configuration register.
	 * In case of SoCs with Management Complex, MC f/w performs
	 * the configuration.
	 */
	if (!ctrlpriv->mc_en)
		clrsetbits_32(&ctrl->mcr, MCFGR_AWCACHE_MASK,
			      MCFGR_AWCACHE_CACH | MCFGR_AWCACHE_BUFF |
			      MCFGR_WDENABLE | MCFGR_LARGE_BURST);

	handle_imx6_err005766(&ctrl->mcr);

	/*
	 * Read the Compile Time parameters and SCFGR to determine
	 * if virtualization is enabled for this platform
	 */
	scfgr = rd_reg32(&ctrl->scfgr);

	ctrlpriv->virt_en = 0;
	if (comp_params & CTPR_MS_VIRT_EN_INCL) {
		/* VIRT_EN_INCL = 1 & VIRT_EN_POR = 1 or
		 * VIRT_EN_INCL = 1 & VIRT_EN_POR = 0 & SCFGR_VIRT_EN = 1
		 */
		if ((comp_params & CTPR_MS_VIRT_EN_POR) ||
		    (!(comp_params & CTPR_MS_VIRT_EN_POR) &&
		     (scfgr & SCFGR_VIRT_EN)))
			ctrlpriv->virt_en = 1;
	} else {
		/* VIRT_EN_INCL = 0 && VIRT_EN_POR_VALUE = 1 */
		if (comp_params & CTPR_MS_VIRT_EN_POR)
			ctrlpriv->virt_en = 1;
	}

	if (ctrlpriv->virt_en == 1)
		clrsetbits_32(&ctrl->jrstart, 0, JRSTART_JR0_START |
			      JRSTART_JR1_START | JRSTART_JR2_START |
			      JRSTART_JR3_START);

set_dma_mask:
	ret = dma_set_mask_and_coherent(dev, caam_get_dma_mask(dev));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed (%d)\n", ret);
		return ret;
	}

	ctrlpriv->era = caam_get_era(perfmon);
	ctrlpriv->domain = iommu_get_domain_for_dev(dev);

	dfs_root = debugfs_create_dir(dev_name(dev), NULL);
	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		ret = devm_add_action_or_reset(dev, caam_remove_debugfs,
					       dfs_root);
		if (ret)
			return ret;
	}

	caam_debugfs_init(ctrlpriv, perfmon, dfs_root);

	/* Check to see if (DPAA 1.x) QI present. If so, enable */
	if (ctrlpriv->qi_present && !caam_dpaa2) {
		ctrlpriv->qi = (struct caam_queue_if __iomem __force *)
			       ((__force uint8_t *)ctrl +
				BLOCK_OFFSET * QI_BLOCK_NUMBER
			       );
		/* This is all that's required to physically enable QI */
		wr_reg32(&ctrlpriv->qi->qi_control_lo, QICTL_DQEN);

		/* If QMAN driver is present, init CAAM-QI backend */
#ifdef CONFIG_CAAM_QI
		ret = caam_qi_init(pdev);
		if (ret)
			dev_err(dev, "caam qi i/f init failed: %d\n", ret);
#endif
	}

	/* If no QI and no rings specified, quit and go home */
	if ((!ctrlpriv->qi_present) && (!ctrlpriv->total_jobrs)) {
		dev_err(dev, "no queues configured, terminating\n");
		return -ENOMEM;
	}

	comp_params = rd_reg32(&perfmon->comp_parms_ls);
	ctrlpriv->blob_present = !!(comp_params & CTPR_LS_BLOB);

	/*
	 * Some SoCs like the LS1028A (non-E) indicate CTPR_LS_BLOB support,
	 * but fail when actually using it due to missing AES support, so
	 * check both here.
	 */
	if (ctrlpriv->era < 10) {
		ctrlpriv->blob_present = ctrlpriv->blob_present &&
			(rd_reg32(&perfmon->cha_num_ls) & CHA_ID_LS_AES_MASK);
	} else {
		struct version_regs __iomem *vreg;

		vreg = ctrlpriv->total_jobrs ?
		       (struct version_regs __iomem *)&ctrlpriv->jr[0]->vreg :
		       (struct version_regs __iomem *)&ctrl->vreg;

		ctrlpriv->blob_present = ctrlpriv->blob_present &&
			(rd_reg32(&vreg->aesa) & CHA_VER_MISC_AES_NUM_MASK);
	}

	if (reg_access) {
		ret = caam_ctrl_rng_init(dev);
		if (ret)
			return ret;
	}

	caam_id = (u64)rd_reg32(&perfmon->caam_id_ms) << 32 |
		  (u64)rd_reg32(&perfmon->caam_id_ls);

	/* Report "alive" for developer to see */
	dev_info(dev, "device ID = 0x%016llx (Era %d)\n", caam_id,
		 ctrlpriv->era);
	dev_info(dev, "job rings = %d, qi = %d\n",
		 ctrlpriv->total_jobrs, ctrlpriv->qi_present);

	ret = devm_of_platform_populate(dev);
	if (ret)
		dev_err(dev, "JR platform devices creation error\n");

	return ret;
}

static struct platform_driver caam_driver = {
	.driver = {
		.name = "caam",
		.of_match_table = caam_match,
		.pm = pm_ptr(&caam_ctrl_pm_ops),
	},
	.probe = caam_probe,
};

module_platform_driver(caam_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("FSL CAAM request backend");
MODULE_AUTHOR("Freescale Semiconductor - NMG/STC");