Linux Audio

Check our new training course

Yocto distribution development and maintenance

Need a Yocto distribution for your embedded project?
Loading...
Note: File does not exist in v3.1.
  1// SPDX-License-Identifier: GPL-2.0-or-later
  2/*
  3 * Copyright (c) 2015, Daniel Thompson
  4 */
  5
  6#include <linux/clk.h>
  7#include <linux/clk-provider.h>
  8#include <linux/delay.h>
  9#include <linux/hw_random.h>
 10#include <linux/io.h>
 11#include <linux/iopoll.h>
 12#include <linux/kernel.h>
 13#include <linux/module.h>
 14#include <linux/of.h>
 15#include <linux/of_address.h>
 16#include <linux/platform_device.h>
 17#include <linux/pm_runtime.h>
 18#include <linux/reset.h>
 19#include <linux/slab.h>
 20
/* RNG control register: enable, clock divider, entropy-source configuration */
#define RNG_CR			0x00
#define RNG_CR_RNGEN		BIT(2)	/* enable the RNG core */
#define RNG_CR_CED		BIT(5)	/* set = clock error detection disabled */
#define RNG_CR_CONFIG1		GENMASK(11, 8)
#define RNG_CR_NISTC		BIT(12)
#define RNG_CR_CONFIG2		GENMASK(15, 13)
#define RNG_CR_CLKDIV_SHIFT	16
#define RNG_CR_CLKDIV		GENMASK(19, 16)	/* power-of-two RNG clock divider */
#define RNG_CR_CONFIG3		GENMASK(25, 20)
#define RNG_CR_CONDRST		BIT(30)	/* conditional (configuration) soft reset */
#define RNG_CR_CONFLOCK		BIT(31)	/* lock the configuration (see st,rng-lock-conf) */
/* Bits carrying the SoC-specific entropy source configuration (data->cr) */
#define RNG_CR_ENTROPY_SRC_MASK	(RNG_CR_CONFIG1 | RNG_CR_NISTC | RNG_CR_CONFIG2 | RNG_CR_CONFIG3)
#define RNG_CR_CONFIG_MASK	(RNG_CR_ENTROPY_SRC_MASK | RNG_CR_CED | RNG_CR_CLKDIV)

/* RNG status register */
#define RNG_SR			0x04
#define RNG_SR_DRDY		BIT(0)	/* a random word is ready in RNG_DR */
#define RNG_SR_CECS		BIT(1)	/* clock error current status */
#define RNG_SR_SECS		BIT(2)	/* seed error current status */
#define RNG_SR_CEIS		BIT(5)	/* clock error interrupt status */
#define RNG_SR_SEIS		BIT(6)	/* seed error interrupt status */

/* RNG data register: each read yields 32 bits of random data */
#define RNG_DR			0x08

/* Noise source control register (only bits [17:0] are meaningful) */
#define RNG_NSCR		0x0C
#define RNG_NSCR_MASK		GENMASK(17, 0)

/* Health test control register */
#define RNG_HTCR		0x10

/* Consecutive seed-error recovery attempts before giving up a read */
#define RNG_NB_RECOVER_TRIES	3
 50
/**
 * struct stm32_rng_data - per-compatible RNG IP characteristics
 *
 * @max_clock_rate:	maximum allowed RNG kernel clock rate; used to derive
 *			the CLKDIV divider exponent
 * @nb_clock:		number of clocks the IP needs (1 or 2; with 2, index 0
 *			must be the "core" clock)
 * @cr:			RNG_CR configuration to program. 0 keeps the hardware
 *			default configuration
 * @nscr:		noise source control value written to RNG_NSCR
 * @htcr:		health tests configuration written to RNG_HTCR
 * @has_cond_reset:	true when the IP implements the CONDRST bit
 */
struct stm32_rng_data {
	uint	max_clock_rate;
	uint	nb_clock;
	u32	cr;
	u32	nscr;
	u32	htcr;
	bool	has_cond_reset;
};
 59
/**
 * struct stm32_rng_config - RNG configuration snapshot saved across
 *			     system suspend/resume
 *
 * @cr:			RNG configuration. 0 means default hardware RNG configuration
 * @nscr:		Noise sources control configuration.
 * @htcr:		Health tests configuration.
 */
struct stm32_rng_config {
	u32 cr;
	u32 nscr;
	u32 htcr;
};
 72
/**
 * struct stm32_rng_private - per-device driver state
 *
 * @rng:	hwrng core descriptor registered for this device
 * @dev:	the underlying platform device
 * @base:	ioremapped RNG register space
 * @clk_bulk:	RNG clock(s); index 0 is the kernel ("core") clock
 * @rst:	optional reset line, pulsed at probe when present
 * @pm_conf:	configuration saved at system suspend, restored at resume
 * @data:	per-compatible characteristics (see &struct stm32_rng_data)
 * @ced:	"clock-error-detect" DT property present (keep CED enabled)
 * @lock_conf:	"st,rng-lock-conf" DT property present (set CONFLOCK at init)
 */
struct stm32_rng_private {
	struct hwrng rng;
	struct device *dev;
	void __iomem *base;
	struct clk_bulk_data *clk_bulk;
	struct reset_control *rst;
	struct stm32_rng_config pm_conf;
	const struct stm32_rng_data *data;
	bool ced;
	bool lock_conf;
};
 84
/*
 * Extracts from the STM32 RNG specification when RNG supports CONDRST.
 *
 * When a noise source (or seed) error occurs, the RNG stops generating
 * random numbers and sets to “1” both SEIS and SECS bits to indicate
 * that a seed error occurred. (...)
 *
 * 1. Software reset by writing CONDRST at 1 and at 0 (see bitfield
 * description for details). This step is needed only if SECS is set.
 * Indeed, when SEIS is set and SECS is cleared it means RNG performed
 * the reset automatically (auto-reset).
 * 2. If SECS was set in step 1 (no auto-reset) wait for CONDRST
 * to be cleared in the RNG_CR register, then confirm that SEIS is
 * cleared in the RNG_SR register. Otherwise just clear SEIS bit in
 * the RNG_SR register.
 * 3. If SECS was set in step 1 (no auto-reset) wait for SECS to be
 * cleared by RNG. The random number generation is now back to normal.
 *
 * Returns 0 when the error was concealed, -EINVAL if SEIS is still set
 * after the reset, or a (negative) poll timeout error.
 */
static int stm32_rng_conceal_seed_error_cond_reset(struct stm32_rng_private *priv)
{
	struct device *dev = priv->dev;
	u32 sr = readl_relaxed(priv->base + RNG_SR);
	u32 cr = readl_relaxed(priv->base + RNG_CR);
	int err;

	if (sr & RNG_SR_SECS) {
		/* Conceal by resetting the subsystem (step 1.) */
		writel_relaxed(cr | RNG_CR_CONDRST, priv->base + RNG_CR);
		writel_relaxed(cr & ~RNG_CR_CONDRST, priv->base + RNG_CR);
	} else {
		/* RNG auto-reset (step 2.) */
		writel_relaxed(sr & ~RNG_SR_SEIS, priv->base + RNG_SR);
		goto end;
	}

	/* Wait for the conditional reset to complete (step 2.) */
	err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, cr, !(cr & RNG_CR_CONDRST), 10,
						100000);
	if (err) {
		dev_err(dev, "%s: timeout %x\n", __func__, sr);
		return err;
	}

	/* Check SEIS is cleared (step 2.) */
	if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS)
		return -EINVAL;

	/* Wait for SECS to clear: generation is back to normal (step 3.) */
	err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, sr, !(sr & RNG_SR_SECS), 10,
						100000);
	if (err) {
		dev_err(dev, "%s: timeout %x\n", __func__, sr);
		return err;
	}

end:
	return 0;
}
141
142/*
143 * Extracts from the STM32 RNG specification, when CONDRST is not supported
144 *
145 * When a noise source (or seed) error occurs, the RNG stops generating
146 * random numbers and sets to “1” both SEIS and SECS bits to indicate
147 * that a seed error occurred. (...)
148 *
149 * The following sequence shall be used to fully recover from a seed
150 * error after the RNG initialization:
151 * 1. Clear the SEIS bit by writing it to “0”.
152 * 2. Read out 12 words from the RNG_DR register, and discard each of
153 * them in order to clean the pipeline.
154 * 3. Confirm that SEIS is still cleared. Random number generation is
155 * back to normal.
156 */
157static int stm32_rng_conceal_seed_error_sw_reset(struct stm32_rng_private *priv)
158{
159	unsigned int i = 0;
160	u32 sr = readl_relaxed(priv->base + RNG_SR);
161
162	writel_relaxed(sr & ~RNG_SR_SEIS, priv->base + RNG_SR);
163
164	for (i = 12; i != 0; i--)
165		(void)readl_relaxed(priv->base + RNG_DR);
166
167	if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS)
168		return -EINVAL;
169
170	return 0;
171}
172
173static int stm32_rng_conceal_seed_error(struct hwrng *rng)
174{
175	struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng);
176
177	dev_dbg(priv->dev, "Concealing seed error\n");
178
179	if (priv->data->has_cond_reset)
180		return stm32_rng_conceal_seed_error_cond_reset(priv);
181	else
182		return stm32_rng_conceal_seed_error_sw_reset(priv);
183};
184
185
/**
 * stm32_rng_read - hwrng .read callback, fills @data with random words
 * @rng:	hwrng descriptor registered by this driver
 * @data:	destination buffer
 * @max:	buffer size in bytes; only whole u32 words are produced
 * @wait:	when true, poll (up to 50 ms per attempt) for data readiness
 *
 * Seed errors are concealed transparently; after RNG_NB_RECOVER_TRIES
 * consecutive failed recoveries the read aborts with -ENOTRECOVERABLE.
 *
 * Return: number of bytes stored in @data, -EIO when @wait was set but
 * nothing could be read, or a negative error code.
 */
static int stm32_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
{
	struct stm32_rng_private *priv = container_of(rng, struct stm32_rng_private, rng);
	unsigned int i = 0;	/* consecutive seed-error recovery attempts */
	int retval = 0, err = 0;
	u32 sr;

	retval = pm_runtime_resume_and_get(priv->dev);
	if (retval)
		return retval;

	/* Conceal a seed error left pending from before this read */
	if (readl_relaxed(priv->base + RNG_SR) & RNG_SR_SEIS)
		stm32_rng_conceal_seed_error(rng);

	while (max >= sizeof(u32)) {
		sr = readl_relaxed(priv->base + RNG_SR);
		/*
		 * Manage timeout which is based on timer and take
		 * care of initial delay time when enabling the RNG.
		 */
		if (!sr && wait) {
			err = readl_relaxed_poll_timeout_atomic(priv->base
								   + RNG_SR,
								   sr, sr,
								   10, 50000);
			if (err) {
				dev_err(priv->dev, "%s: timeout %x!\n", __func__, sr);
				break;
			}
		} else if (!sr) {
			/* The FIFO is being filled up */
			break;
		}

		if (sr != RNG_SR_DRDY) {
			/* Seed error: try to conceal it and retry the word */
			if (sr & RNG_SR_SEIS) {
				err = stm32_rng_conceal_seed_error(rng);
				i++;
				if (err && i > RNG_NB_RECOVER_TRIES) {
					dev_err(priv->dev, "Couldn't recover from seed error\n");
					retval = -ENOTRECOVERABLE;
					goto exit_rpm;
				}

				continue;
			}

			/* Clock error is non-fatal: warn once and clear SR */
			if (WARN_ONCE((sr & RNG_SR_CEIS), "RNG clock too slow - %x\n", sr))
				writel_relaxed(0, priv->base + RNG_SR);
		}

		/* Late seed error case: DR being 0 is an error status */
		*(u32 *)data = readl_relaxed(priv->base + RNG_DR);
		if (!*(u32 *)data) {
			err = stm32_rng_conceal_seed_error(rng);
			i++;
			if (err && i > RNG_NB_RECOVER_TRIES) {
				dev_err(priv->dev, "Couldn't recover from seed error");
				retval = -ENOTRECOVERABLE;
				goto exit_rpm;
			}

			continue;
		}

		/* Successful word: reset the recovery counter and advance */
		i = 0;
		retval += sizeof(u32);
		data += sizeof(u32);
		max -= sizeof(u32);
	}

exit_rpm:
	pm_runtime_mark_last_busy(priv->dev);
	pm_runtime_put_sync_autosuspend(priv->dev);

	return retval || !wait ? retval : -EIO;
}
263
264static uint stm32_rng_clock_freq_restrain(struct hwrng *rng)
265{
266	struct stm32_rng_private *priv =
267	    container_of(rng, struct stm32_rng_private, rng);
268	unsigned long clock_rate = 0;
269	uint clock_div = 0;
270
271	clock_rate = clk_get_rate(priv->clk_bulk[0].clk);
272
273	/*
274	 * Get the exponent to apply on the CLKDIV field in RNG_CR register
275	 * No need to handle the case when clock-div > 0xF as it is physically
276	 * impossible
277	 */
278	while ((clock_rate >> clock_div) > priv->data->max_clock_rate)
279		clock_div++;
280
281	pr_debug("RNG clk rate : %lu\n", clk_get_rate(priv->clk_bulk[0].clk) >> clock_div);
282
283	return clock_div;
284}
285
/*
 * hwrng .init callback: program the RNG configuration and wait for the
 * first random word to be ready. Clocks are enabled only for the
 * duration of the call; runtime PM re-enables them around reads.
 */
static int stm32_rng_init(struct hwrng *rng)
{
	struct stm32_rng_private *priv =
	    container_of(rng, struct stm32_rng_private, rng);
	int err;
	u32 reg;

	err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
	if (err)
		return err;

	/* clear error indicators */
	writel_relaxed(0, priv->base + RNG_SR);

	reg = readl_relaxed(priv->base + RNG_CR);

	/*
	 * Keep default RNG configuration if none was specified.
	 * 0 is an invalid value as it disables all entropy sources.
	 */
	if (priv->data->has_cond_reset && priv->data->cr) {
		uint clock_div = stm32_rng_clock_freq_restrain(rng);

		/*
		 * The configuration must be written in the same access that
		 * sets CONDRST, otherwise it is not taken into account (see
		 * the matching note in stm32_rng_resume()).
		 */
		reg &= ~RNG_CR_CONFIG_MASK;
		reg |= RNG_CR_CONDRST | (priv->data->cr & RNG_CR_ENTROPY_SRC_MASK) |
		       (clock_div << RNG_CR_CLKDIV_SHIFT);
		if (priv->ced)
			reg &= ~RNG_CR_CED;
		else
			reg |= RNG_CR_CED;
		writel_relaxed(reg, priv->base + RNG_CR);

		/* Health tests and noise control registers */
		writel_relaxed(priv->data->htcr, priv->base + RNG_HTCR);
		writel_relaxed(priv->data->nscr & RNG_NSCR_MASK, priv->base + RNG_NSCR);

		/* Release the conditional reset and enable the core */
		reg &= ~RNG_CR_CONDRST;
		reg |= RNG_CR_RNGEN;
		if (priv->lock_conf)
			reg |= RNG_CR_CONFLOCK;

		writel_relaxed(reg, priv->base + RNG_CR);

		/* Hardware clears CONDRST once its reset sequence completes */
		err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, reg,
							(!(reg & RNG_CR_CONDRST)),
							10, 50000);
		if (err) {
			clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
			dev_err(priv->dev, "%s: timeout %x!\n", __func__, reg);
			return -EINVAL;
		}
	} else {
		/* Handle all RNG versions by checking if conditional reset should be set */
		if (priv->data->has_cond_reset)
			reg |= RNG_CR_CONDRST;

		if (priv->ced)
			reg &= ~RNG_CR_CED;
		else
			reg |= RNG_CR_CED;

		writel_relaxed(reg, priv->base + RNG_CR);

		if (priv->data->has_cond_reset)
			reg &= ~RNG_CR_CONDRST;

		reg |= RNG_CR_RNGEN;

		writel_relaxed(reg, priv->base + RNG_CR);
	}

	/* Wait for the first word: only DRDY may be set in the status reg */
	err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_SR, reg,
						reg & RNG_SR_DRDY,
						10, 100000);
	if (err || (reg & ~RNG_SR_DRDY)) {
		clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
		dev_err(priv->dev, "%s: timeout:%x SR: %x!\n", __func__, err, reg);

		return -EINVAL;
	}

	clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);

	return 0;
}
371
/* Balance probe's pm_runtime_enable(); everything else is devres-managed */
static void stm32_rng_remove(struct platform_device *ofdev)
{
	pm_runtime_disable(&ofdev->dev);
}
376
377static int __maybe_unused stm32_rng_runtime_suspend(struct device *dev)
378{
379	struct stm32_rng_private *priv = dev_get_drvdata(dev);
380	u32 reg;
381
382	reg = readl_relaxed(priv->base + RNG_CR);
383	reg &= ~RNG_CR_RNGEN;
384	writel_relaxed(reg, priv->base + RNG_CR);
385
386	clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
387
388	return 0;
389}
390
391static int __maybe_unused stm32_rng_suspend(struct device *dev)
392{
393	struct stm32_rng_private *priv = dev_get_drvdata(dev);
394	int err;
395
396	err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
397	if (err)
398		return err;
399
400	if (priv->data->has_cond_reset) {
401		priv->pm_conf.nscr = readl_relaxed(priv->base + RNG_NSCR);
402		priv->pm_conf.htcr = readl_relaxed(priv->base + RNG_HTCR);
403	}
404
405	/* Do not save that RNG is enabled as it will be handled at resume */
406	priv->pm_conf.cr = readl_relaxed(priv->base + RNG_CR) & ~RNG_CR_RNGEN;
407
408	writel_relaxed(priv->pm_conf.cr, priv->base + RNG_CR);
409
410	clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
411
412	return 0;
413}
414
415static int __maybe_unused stm32_rng_runtime_resume(struct device *dev)
416{
417	struct stm32_rng_private *priv = dev_get_drvdata(dev);
418	int err;
419	u32 reg;
420
421	err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
422	if (err)
423		return err;
424
425	/* Clean error indications */
426	writel_relaxed(0, priv->base + RNG_SR);
427
428	reg = readl_relaxed(priv->base + RNG_CR);
429	reg |= RNG_CR_RNGEN;
430	writel_relaxed(reg, priv->base + RNG_CR);
431
432	return 0;
433}
434
435static int __maybe_unused stm32_rng_resume(struct device *dev)
436{
437	struct stm32_rng_private *priv = dev_get_drvdata(dev);
438	int err;
439	u32 reg;
440
441	err = clk_bulk_prepare_enable(priv->data->nb_clock, priv->clk_bulk);
442	if (err)
443		return err;
444
445	/* Clean error indications */
446	writel_relaxed(0, priv->base + RNG_SR);
447
448	if (priv->data->has_cond_reset) {
449		/*
450		 * Correct configuration in bits [29:4] must be set in the same
451		 * access that set RNG_CR_CONDRST bit. Else config setting is
452		 * not taken into account. CONFIGLOCK bit must also be unset but
453		 * it is not handled at the moment.
454		 */
455		writel_relaxed(priv->pm_conf.cr | RNG_CR_CONDRST, priv->base + RNG_CR);
456
457		writel_relaxed(priv->pm_conf.nscr, priv->base + RNG_NSCR);
458		writel_relaxed(priv->pm_conf.htcr, priv->base + RNG_HTCR);
459
460		reg = readl_relaxed(priv->base + RNG_CR);
461		reg |= RNG_CR_RNGEN;
462		reg &= ~RNG_CR_CONDRST;
463		writel_relaxed(reg, priv->base + RNG_CR);
464
465		err = readl_relaxed_poll_timeout_atomic(priv->base + RNG_CR, reg,
466							reg & ~RNG_CR_CONDRST, 10, 100000);
467
468		if (err) {
469			clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
470			dev_err(priv->dev, "%s: timeout:%x CR: %x!\n", __func__, err, reg);
471			return -EINVAL;
472		}
473	} else {
474		reg = priv->pm_conf.cr;
475		reg |= RNG_CR_RNGEN;
476		writel_relaxed(reg, priv->base + RNG_CR);
477	}
478
479	clk_bulk_disable_unprepare(priv->data->nb_clock, priv->clk_bulk);
480
481	return 0;
482}
483
/* Runtime PM gates the clocks; system sleep saves/restores the RNG config */
static const struct dev_pm_ops __maybe_unused stm32_rng_pm_ops = {
	SET_RUNTIME_PM_OPS(stm32_rng_runtime_suspend,
			   stm32_rng_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(stm32_rng_suspend,
				stm32_rng_resume)
};
490
/*
 * Per-compatible data. The cr/nscr/htcr values are SoC-specific register
 * settings (presumably ST-validated entropy source configurations — see
 * the SoC reference manual; NOTE(review): confirm against the binding).
 */

/* STM32MP25: CONDRST-capable IP clocked by two clocks (core at index 0) */
static const struct stm32_rng_data stm32mp25_rng_data = {
	.has_cond_reset = true,
	.max_clock_rate = 48000000,
	.nb_clock = 2,
	.cr = 0x00F00D00,
	.nscr = 0x2B5BB,
	.htcr = 0x969D,
};

/* STM32MP13: CONDRST-capable IP with a single clock */
static const struct stm32_rng_data stm32mp13_rng_data = {
	.has_cond_reset = true,
	.max_clock_rate = 48000000,
	.nb_clock = 1,
	.cr = 0x00F00D00,
	.nscr = 0x2B5BB,
	.htcr = 0x969D,
};

/* Legacy STM32 RNG: no CONDRST, default hardware configuration (cr = 0) */
static const struct stm32_rng_data stm32_rng_data = {
	.has_cond_reset = false,
	.max_clock_rate = 48000000,
	.nb_clock = 1,
};
514
/* DT match table; .data selects the per-compatible characteristics */
static const struct of_device_id stm32_rng_match[] = {
	{
		.compatible = "st,stm32mp25-rng",
		.data = &stm32mp25_rng_data,
	},
	{
		.compatible = "st,stm32mp13-rng",
		.data = &stm32mp13_rng_data,
	},
	{
		.compatible = "st,stm32-rng",
		.data = &stm32_rng_data,
	},
	{},
};
MODULE_DEVICE_TABLE(of, stm32_rng_match);
531
/*
 * Bind the device: map registers, pulse the optional reset line, read DT
 * properties, order the clocks, enable runtime PM and register the hwrng.
 */
static int stm32_rng_probe(struct platform_device *ofdev)
{
	struct device *dev = &ofdev->dev;
	struct device_node *np = ofdev->dev.of_node;
	struct stm32_rng_private *priv;
	struct resource *res;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->base = devm_platform_get_and_ioremap_resource(ofdev, 0, &res);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* Reset line is optional: pulse it when present to start clean */
	priv->rst = devm_reset_control_get(&ofdev->dev, NULL);
	if (!IS_ERR(priv->rst)) {
		reset_control_assert(priv->rst);
		udelay(2);
		reset_control_deassert(priv->rst);
	}

	priv->ced = of_property_read_bool(np, "clock-error-detect");
	priv->lock_conf = of_property_read_bool(np, "st,rng-lock-conf");
	priv->dev = dev;

	priv->data = of_device_get_match_data(dev);
	if (!priv->data)
		return -ENODEV;

	dev_set_drvdata(dev, priv);

	priv->rng.name = dev_driver_string(dev);
	priv->rng.init = stm32_rng_init;
	priv->rng.read = stm32_rng_read;
	priv->rng.quality = 900;

	/* This driver only supports one or two clocks */
	if (!priv->data->nb_clock || priv->data->nb_clock > 2)
		return -EINVAL;

	ret = devm_clk_bulk_get_all(dev, &priv->clk_bulk);
	if (ret != priv->data->nb_clock)
		return dev_err_probe(dev, -EINVAL, "Failed to get clocks: %d\n", ret);

	if (priv->data->nb_clock == 2) {
		const char *id = priv->clk_bulk[1].id;
		struct clk *clk = priv->clk_bulk[1].clk;

		if (!priv->clk_bulk[0].id || !priv->clk_bulk[1].id)
			return dev_err_probe(dev, -EINVAL, "Missing clock name\n");

		/*
		 * The rest of the driver assumes the "core" clock sits at
		 * index 0 (e.g. the clock divider computation); swap the two
		 * entries if DT listed them the other way around.
		 */
		if (strcmp(priv->clk_bulk[0].id, "core")) {
			priv->clk_bulk[1].id = priv->clk_bulk[0].id;
			priv->clk_bulk[1].clk = priv->clk_bulk[0].clk;
			priv->clk_bulk[0].id = id;
			priv->clk_bulk[0].clk = clk;
		}
	}

	pm_runtime_set_autosuspend_delay(dev, 100);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_enable(dev);

	return devm_hwrng_register(dev, &priv->rng);
}
598
/* Platform driver glue; matches via DT (see stm32_rng_match above) */
static struct platform_driver stm32_rng_driver = {
	.driver = {
		.name = "stm32-rng",
		.pm = pm_ptr(&stm32_rng_pm_ops),
		.of_match_table = stm32_rng_match,
	},
	.probe = stm32_rng_probe,
	.remove = stm32_rng_remove,
};

module_platform_driver(stm32_rng_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Thompson <daniel.thompson@linaro.org>");
MODULE_DESCRIPTION("STMicroelectronics STM32 RNG device driver");