// SPDX-License-Identifier: GPL-2.0-only
/*
 * Reset driver for the Mobileye EyeQ5, EyeQ6L and EyeQ6H platforms.
 *
 * Controllers live in a shared register region called OLB. EyeQ5 and EyeQ6L
 * have a single OLB instance for a single reset controller. EyeQ6H has seven
 * OLB instances; three of them host reset controllers.
 *
 * Each reset controller has one or more domains. Domains are of a given type
 * (see enum eqr_domain_type), with a valid offset mask (up to 32 resets per
 * domain).
 *
 * Domain types define expected behavior: one-register-per-reset,
 * one-bit-per-reset, status detection method, busy-wait duration, etc.
 *
 * We use eqr_ as prefix, as in "EyeQ Reset", but way shorter.
 *
 * Known resets in EyeQ5 domain 0 (type EQR_EYEQ5_SARCR):
 *  3. CAN0	 4. CAN1	 5. CAN2	 6. SPI0
 *  7. SPI1	 8. SPI2	 9. SPI3	10. UART0
 * 11. UART1	12. UART2	13. I2C0	14. I2C1
 * 15. I2C2	16. I2C3	17. I2C4	18. TIMER0
 * 19. TIMER1	20. TIMER2	21. TIMER3	22. TIMER4
 * 23. WD0	24. EXT0	25. EXT1	26. GPIO
 * 27. WD1
 *
 * Known resets in EyeQ5 domain 1 (type EQR_EYEQ5_ACRP):
 *  0. VMP0	 1. VMP1	 2. VMP2	 3. VMP3
 *  4. PMA0	 5. PMA1	 6. PMAC0	 7. PMAC1
 *  8. MPC0	 9. MPC1	10. MPC2	11. MPC3
 * 12. MPC4
 *
 * Known resets in EyeQ5 domain 2 (type EQR_EYEQ5_PCIE):
 *  0. PCIE0_CORE	 1. PCIE0_APB		 2. PCIE0_LINK_AXI	 3. PCIE0_LINK_MGMT
 *  4. PCIE0_LINK_HOT	 5. PCIE0_LINK_PIPE	 6. PCIE1_CORE		 7. PCIE1_APB
 *  8. PCIE1_LINK_AXI	 9. PCIE1_LINK_MGMT	10. PCIE1_LINK_HOT	11. PCIE1_LINK_PIPE
 * 12. MULTIPHY		13. MULTIPHY_APB	15. PCIE0_LINK_MGMT	16. PCIE1_LINK_MGMT
 * 17. PCIE0_LINK_PM	18. PCIE1_LINK_PM
 *
 * Known resets in EyeQ6L domain 0 (type EQR_EYEQ5_SARCR):
 *  0. SPI0	 1. SPI1	 2. UART0	 3. I2C0
 *  4. I2C1	 5. TIMER0	 6. TIMER1	 7. TIMER2
 *  8. TIMER3	 9. WD0		10. WD1		11. EXT0
 * 12. EXT1	13. GPIO
 *
 * Known resets in EyeQ6L domain 1 (type EQR_EYEQ5_ACRP):
 *  0. VMP0	 1. VMP1	 2. VMP2	 3. VMP3
 *  4. PMA0	 5. PMA1	 6. PMAC0	 7. PMAC1
 *  8. MPC0	 9. MPC1	10. MPC2	11. MPC3
 * 12. MPC4
 *
 * Known resets in EyeQ6H west/east (type EQR_EYEQ6H_SARCR):
 *  0. CAN	 1. SPI0	 2. SPI1	 3. UART0
 *  4. UART1	 5. I2C0	 6. I2C1	 7. -hole-
 *  8. TIMER0	 9. TIMER1	10. WD		11. EXT TIMER
 * 12. GPIO
 *
 * Known resets in EyeQ6H acc (type EQR_EYEQ5_ACRP):
 *  1. XNN0	 2. XNN1	 3. XNN2	 4. XNN3
 *  5. VMP0	 6. VMP1	 7. VMP2	 8. VMP3
 *  9. PMA0	10. PMA1	11. MPC0	12. MPC1
 * 13. MPC2	14. MPC3	15. PERIPH
 *
 * Abbreviations:
 *  - PMA: Programmable Macro Array
 *  - MPC: Multi-threaded Processing Clusters
 *  - VMP: Vector Microcode Processors
 *
 * Copyright (C) 2024 Mobileye Vision Technologies Ltd.
 */

#include <linux/array_size.h>
#include <linux/auxiliary_bus.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/bug.h>
#include <linux/cleanup.h>
#include <linux/container_of.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/lockdep.h>
#include <linux/mod_devicetable.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/reset-controller.h>
#include <linux/slab.h>
#include <linux/types.h>

/*
 * A reset ID, as returned by eqr_of_xlate_*(), is a (domain, offset) pair.
 * Low byte is domain, rest is offset.
 */
#define ID_DOMAIN_MASK	GENMASK(7, 0)
#define ID_OFFSET_MASK	GENMASK(31, 8)
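/*
 * For instance, domain 1 / offset 10 encodes to
 * FIELD_PREP(ID_DOMAIN_MASK, 1) | FIELD_PREP(ID_OFFSET_MASK, 10) == 0x0a01;
 * eqr_assert(), eqr_deassert() and eqr_status() decode it with FIELD_GET().
 */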

enum eqr_domain_type {
	EQR_EYEQ5_SARCR,
	EQR_EYEQ5_ACRP,
	EQR_EYEQ5_PCIE,
	EQR_EYEQ6H_SARCR,
};

/*
 * Domain type EQR_EYEQ5_SARCR register offsets.
 */
#define EQR_EYEQ5_SARCR_REQUEST		(0x000)
#define EQR_EYEQ5_SARCR_STATUS		(0x004)

/*
 * Domain type EQR_EYEQ5_ACRP register masks.
 * Registers are: base + 4 * offset.
 */
#define EQR_EYEQ5_ACRP_PD_REQ		BIT(0)
#define EQR_EYEQ5_ACRP_ST_POWER_DOWN	BIT(27)
#define EQR_EYEQ5_ACRP_ST_ACTIVE	BIT(29)

/*
 * Domain type EQR_EYEQ6H_SARCR register offsets.
 */
#define EQR_EYEQ6H_SARCR_RST_REQUEST	(0x000)
#define EQR_EYEQ6H_SARCR_CLK_STATUS	(0x004)
#define EQR_EYEQ6H_SARCR_RST_STATUS	(0x008)
#define EQR_EYEQ6H_SARCR_CLK_REQUEST	(0x00C)

struct eqr_busy_wait_timings {
	unsigned long sleep_us;
	unsigned long timeout_us;
};

static const struct eqr_busy_wait_timings eqr_timings[] = {
	[EQR_EYEQ5_SARCR]	= {1, 10},
	[EQR_EYEQ5_ACRP]	= {1, 40 * USEC_PER_MSEC}, /* LBIST implies long timeout. */
	/* EQR_EYEQ5_PCIE does no busy waiting. */
	[EQR_EYEQ6H_SARCR]	= {1, 400},
};

#define EQR_MAX_DOMAIN_COUNT 3

struct eqr_domain_descriptor {
	enum eqr_domain_type	type;
	u32			valid_mask;
	unsigned int		offset;
};

struct eqr_match_data {
	unsigned int				domain_count;
	const struct eqr_domain_descriptor	*domains;
};

struct eqr_private {
	/*
	 * One mutex per domain for read-modify-write operations on registers.
	 * Some domains can be involved in LBIST which implies long critical
	 * sections; we wouldn't want other domains to be impacted by that.
	 */
	struct mutex			mutexes[EQR_MAX_DOMAIN_COUNT];
	void __iomem			*base;
	const struct eqr_match_data	*data;
	struct reset_controller_dev	rcdev;
};

static inline struct eqr_private *eqr_rcdev_to_priv(struct reset_controller_dev *x)
{
	return container_of(x, struct eqr_private, rcdev);
}

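/*
 * read_poll_timeout() op used by the EQR_EYEQ6H_SARCR busy-wait: it samples
 * the reset and clock status registers in a single poll iteration so that
 * both can be checked against the same condition.
 */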
static u32 eqr_double_readl(void __iomem *addr_a, void __iomem *addr_b,
			    u32 *dest_a, u32 *dest_b)
{
	*dest_a = readl(addr_a);
	*dest_b = readl(addr_b);
	return 0; /* read_poll_timeout() op argument must return something. */
}

static int eqr_busy_wait_locked(struct eqr_private *priv, struct device *dev,
				u32 domain, u32 offset, bool assert)
{
	void __iomem *base = priv->base + priv->data->domains[domain].offset;
	enum eqr_domain_type domain_type = priv->data->domains[domain].type;
	unsigned long timeout_us = eqr_timings[domain_type].timeout_us;
	unsigned long sleep_us = eqr_timings[domain_type].sleep_us;
	u32 val, mask, rst_status, clk_status;
	void __iomem *reg;
	int ret;

	lockdep_assert_held(&priv->mutexes[domain]);

	switch (domain_type) {
	case EQR_EYEQ5_SARCR:
		reg = base + EQR_EYEQ5_SARCR_STATUS;
		mask = BIT(offset);

		ret = readl_poll_timeout(reg, val, !(val & mask) == assert,
					 sleep_us, timeout_us);
		break;

	case EQR_EYEQ5_ACRP:
		reg = base + 4 * offset;
		if (assert)
			mask = EQR_EYEQ5_ACRP_ST_POWER_DOWN;
		else
			mask = EQR_EYEQ5_ACRP_ST_ACTIVE;

		ret = readl_poll_timeout(reg, val, !!(val & mask),
					 sleep_us, timeout_us);
		break;

	case EQR_EYEQ5_PCIE:
		ret = 0; /* No busy waiting. */
		break;

	case EQR_EYEQ6H_SARCR:
		/*
		 * Wait until both bits change:
		 *	readl(base + EQR_EYEQ6H_SARCR_RST_STATUS) & BIT(offset)
		 *	readl(base + EQR_EYEQ6H_SARCR_CLK_STATUS) & BIT(offset)
		 */
		mask = BIT(offset);
		ret = read_poll_timeout(eqr_double_readl, val,
					(!(rst_status & mask) == assert) &&
					(!(clk_status & mask) == assert),
					sleep_us, timeout_us, false,
					base + EQR_EYEQ6H_SARCR_RST_STATUS,
					base + EQR_EYEQ6H_SARCR_CLK_STATUS,
					&rst_status, &clk_status);
		break;

	default:
		WARN_ON(1);
		ret = -EINVAL;
		break;
	}

	if (ret == -ETIMEDOUT)
		dev_dbg(dev, "%u-%u: timeout\n", domain, offset);
	return ret;
}

static void eqr_assert_locked(struct eqr_private *priv, u32 domain, u32 offset)
{
	enum eqr_domain_type domain_type = priv->data->domains[domain].type;
	void __iomem *base, *reg;
	u32 val;

	lockdep_assert_held(&priv->mutexes[domain]);

	base = priv->base + priv->data->domains[domain].offset;

	switch (domain_type) {
	case EQR_EYEQ5_SARCR:
		reg = base + EQR_EYEQ5_SARCR_REQUEST;
		writel(readl(reg) & ~BIT(offset), reg);
		break;

	case EQR_EYEQ5_ACRP:
		reg = base + 4 * offset;
		writel(readl(reg) | EQR_EYEQ5_ACRP_PD_REQ, reg);
		break;

	case EQR_EYEQ5_PCIE:
		writel(readl(base) & ~BIT(offset), base);
		break;

	case EQR_EYEQ6H_SARCR:
		/* RST_REQUEST and CLK_REQUEST must be kept in sync. */
		val = readl(base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		val &= ~BIT(offset);
		writel(val, base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		writel(val, base + EQR_EYEQ6H_SARCR_CLK_REQUEST);
		break;

	default:
		WARN_ON(1);
		break;
	}
}

static int eqr_assert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
	u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
	u32 offset = FIELD_GET(ID_OFFSET_MASK, id);

	dev_dbg(rcdev->dev, "%u-%u: assert request\n", domain, offset);

	guard(mutex)(&priv->mutexes[domain]);

	eqr_assert_locked(priv, domain, offset);
	return eqr_busy_wait_locked(priv, rcdev->dev, domain, offset, true);
}

static void eqr_deassert_locked(struct eqr_private *priv, u32 domain,
				u32 offset)
{
	enum eqr_domain_type domain_type = priv->data->domains[domain].type;
	void __iomem *base, *reg;
	u32 val;

	lockdep_assert_held(&priv->mutexes[domain]);

	base = priv->base + priv->data->domains[domain].offset;

	switch (domain_type) {
	case EQR_EYEQ5_SARCR:
		reg = base + EQR_EYEQ5_SARCR_REQUEST;
		writel(readl(reg) | BIT(offset), reg);
		break;

	case EQR_EYEQ5_ACRP:
		reg = base + 4 * offset;
		writel(readl(reg) & ~EQR_EYEQ5_ACRP_PD_REQ, reg);
		break;

	case EQR_EYEQ5_PCIE:
		writel(readl(base) | BIT(offset), base);
		break;

	case EQR_EYEQ6H_SARCR:
		/* RST_REQUEST and CLK_REQUEST must be kept in sync. */
		val = readl(base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		val |= BIT(offset);
		writel(val, base + EQR_EYEQ6H_SARCR_RST_REQUEST);
		writel(val, base + EQR_EYEQ6H_SARCR_CLK_REQUEST);
		break;

	default:
		WARN_ON(1);
		break;
	}
}

static int eqr_deassert(struct reset_controller_dev *rcdev, unsigned long id)
{
	struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
	u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
	u32 offset = FIELD_GET(ID_OFFSET_MASK, id);

	dev_dbg(rcdev->dev, "%u-%u: deassert request\n", domain, offset);

	guard(mutex)(&priv->mutexes[domain]);

	eqr_deassert_locked(priv, domain, offset);
	return eqr_busy_wait_locked(priv, rcdev->dev, domain, offset, false);
}

static int eqr_status(struct reset_controller_dev *rcdev, unsigned long id)
{
	u32 domain = FIELD_GET(ID_DOMAIN_MASK, id);
	u32 offset = FIELD_GET(ID_OFFSET_MASK, id);
	struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);
	enum eqr_domain_type domain_type = priv->data->domains[domain].type;
	void __iomem *base, *reg;

	dev_dbg(rcdev->dev, "%u-%u: status request\n", domain, offset);

	guard(mutex)(&priv->mutexes[domain]);

	base = priv->base + priv->data->domains[domain].offset;

	switch (domain_type) {
	case EQR_EYEQ5_SARCR:
		reg = base + EQR_EYEQ5_SARCR_STATUS;
		return !(readl(reg) & BIT(offset));
	case EQR_EYEQ5_ACRP:
		reg = base + 4 * offset;
		return !(readl(reg) & EQR_EYEQ5_ACRP_ST_ACTIVE);
	case EQR_EYEQ5_PCIE:
		return !(readl(base) & BIT(offset));
	case EQR_EYEQ6H_SARCR:
		reg = base + EQR_EYEQ6H_SARCR_RST_STATUS;
		return !(readl(reg) & BIT(offset));
	default:
		return -EINVAL;
	}
}

static const struct reset_control_ops eqr_ops = {
	.assert	  = eqr_assert,
	.deassert = eqr_deassert,
	.status	  = eqr_status,
};

static int eqr_of_xlate_internal(struct reset_controller_dev *rcdev,
				 u32 domain, u32 offset)
{
	struct eqr_private *priv = eqr_rcdev_to_priv(rcdev);

	if (domain >= priv->data->domain_count || offset > 31 ||
	    !(priv->data->domains[domain].valid_mask & BIT(offset))) {
		dev_err(rcdev->dev, "%u-%u: invalid reset\n", domain, offset);
		return -EINVAL;
	}

	return FIELD_PREP(ID_DOMAIN_MASK, domain) | FIELD_PREP(ID_OFFSET_MASK, offset);
}

static int eqr_of_xlate_onecell(struct reset_controller_dev *rcdev,
				const struct of_phandle_args *reset_spec)
{
	return eqr_of_xlate_internal(rcdev, 0, reset_spec->args[0]);
}

static int eqr_of_xlate_twocells(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	return eqr_of_xlate_internal(rcdev, reset_spec->args[0], reset_spec->args[1]);
}
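
/*
 * Consumer illustration (hypothetical node names): on EyeQ5, where three
 * domains mean two-cell specifiers, "resets = <&reset 0 10>;" resolves via
 * eqr_of_xlate_twocells() to domain 0, offset 10 (UART0). Controllers with a
 * single domain (the EyeQ6H instances) take one cell, the offset.
 */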

static int eqr_probe(struct auxiliary_device *adev,
		     const struct auxiliary_device_id *id)
{
	const struct of_device_id *match;
	struct device *dev = &adev->dev;
	struct eqr_private *priv;
	unsigned int i;
	int ret;

	/*
	 * We are an auxiliary device of clk-eyeq. We do not have an OF node by
	 * default; let's reuse our parent's OF node.
	 */
	WARN_ON(dev->of_node);
	device_set_of_node_from_dev(dev, dev->parent);
	if (!dev->of_node)
		return -ENODEV;

	/*
	 * Using our newfound OF node, we can get match data. We cannot use
	 * device_get_match_data() because it does not match reused OF nodes.
	 */
	match = of_match_node(dev->driver->of_match_table, dev->of_node);
	if (!match || !match->data)
		return -ENODEV;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

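	/*
	 * The register base is handed over by the parent clk-eyeq driver as
	 * platform data; it is expected to already be an ioremapped OLB
	 * pointer, hence the __iomem cast below.
	 */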
	priv->data = match->data;
	priv->base = (void __iomem *)dev_get_platdata(dev);
	priv->rcdev.ops = &eqr_ops;
	priv->rcdev.owner = THIS_MODULE;
	priv->rcdev.dev = dev;
	priv->rcdev.of_node = dev->of_node;

	if (priv->data->domain_count == 1) {
		priv->rcdev.of_reset_n_cells = 1;
		priv->rcdev.of_xlate = eqr_of_xlate_onecell;
	} else {
		priv->rcdev.of_reset_n_cells = 2;
		priv->rcdev.of_xlate = eqr_of_xlate_twocells;
	}

	for (i = 0; i < priv->data->domain_count; i++)
		mutex_init(&priv->mutexes[i]);

	priv->rcdev.nr_resets = 0;
	for (i = 0; i < priv->data->domain_count; i++)
		priv->rcdev.nr_resets += hweight32(priv->data->domains[i].valid_mask);

	ret = devm_reset_controller_register(dev, &priv->rcdev);
	if (ret)
		return dev_err_probe(dev, ret, "failed registering reset controller\n");

	return 0;
}

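/*
 * Each valid_mask matches the "Known resets" lists at the top of the file:
 * for EyeQ5, 0xFFFFFF8 covers SARCR offsets 3-27, 0x0001FFF covers ACRP
 * offsets 0-12 and 0x007BFFF covers PCIE offsets 0-13 and 15-18.
 */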
static const struct eqr_domain_descriptor eqr_eyeq5_domains[] = {
	{
		.type = EQR_EYEQ5_SARCR,
		.valid_mask = 0xFFFFFF8,
		.offset = 0x004,
	},
	{
		.type = EQR_EYEQ5_ACRP,
		.valid_mask = 0x0001FFF,
		.offset = 0x200,
	},
	{
		.type = EQR_EYEQ5_PCIE,
		.valid_mask = 0x007BFFF,
		.offset = 0x120,
	},
};

static const struct eqr_match_data eqr_eyeq5_data = {
	.domain_count	= ARRAY_SIZE(eqr_eyeq5_domains),
	.domains	= eqr_eyeq5_domains,
};

static const struct eqr_domain_descriptor eqr_eyeq6l_domains[] = {
	{
		.type = EQR_EYEQ5_SARCR,
		.valid_mask = 0x3FFF,
		.offset = 0x004,
	},
	{
		.type = EQR_EYEQ5_ACRP,
		.valid_mask = 0x00FF,
		.offset = 0x200,
	},
};

static const struct eqr_match_data eqr_eyeq6l_data = {
	.domain_count	= ARRAY_SIZE(eqr_eyeq6l_domains),
	.domains	= eqr_eyeq6l_domains,
};

/* West and east OLBs each have an instance. */
static const struct eqr_domain_descriptor eqr_eyeq6h_we_domains[] = {
	{
		.type = EQR_EYEQ6H_SARCR,
		.valid_mask = 0x1F7F,
		.offset = 0x004,
	},
};

static const struct eqr_match_data eqr_eyeq6h_we_data = {
	.domain_count	= ARRAY_SIZE(eqr_eyeq6h_we_domains),
	.domains	= eqr_eyeq6h_we_domains,
};

static const struct eqr_domain_descriptor eqr_eyeq6h_acc_domains[] = {
	{
		.type = EQR_EYEQ5_ACRP,
		.valid_mask = 0x7FFF,
		.offset = 0x000,
	},
};

static const struct eqr_match_data eqr_eyeq6h_acc_data = {
	.domain_count	= ARRAY_SIZE(eqr_eyeq6h_acc_domains),
	.domains	= eqr_eyeq6h_acc_domains,
};

/*
 * This table describes the OLB system-controller compatibles. It is not used
 * by the driver core to match against devicetree nodes; eqr_probe() looks it
 * up manually via of_match_node() on the parent's (reused) OF node.
 */
static const struct of_device_id eqr_match_table[] = {
	{ .compatible = "mobileye,eyeq5-olb", .data = &eqr_eyeq5_data },
	{ .compatible = "mobileye,eyeq6l-olb", .data = &eqr_eyeq6l_data },
	{ .compatible = "mobileye,eyeq6h-west-olb", .data = &eqr_eyeq6h_we_data },
	{ .compatible = "mobileye,eyeq6h-east-olb", .data = &eqr_eyeq6h_we_data },
	{ .compatible = "mobileye,eyeq6h-acc-olb", .data = &eqr_eyeq6h_acc_data },
	{}
};
MODULE_DEVICE_TABLE(of, eqr_match_table);

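/*
 * Auxiliary device names that the parent clk-eyeq driver is expected to
 * create; device matching happens on these names rather than on compatibles.
 */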
static const struct auxiliary_device_id eqr_id_table[] = {
	{ .name = "clk_eyeq.reset" },
	{ .name = "clk_eyeq.reset_west" },
	{ .name = "clk_eyeq.reset_east" },
	{ .name = "clk_eyeq.reset_acc" },
	{}
};
MODULE_DEVICE_TABLE(auxiliary, eqr_id_table);

static struct auxiliary_driver eqr_driver = {
	.probe = eqr_probe,
	.id_table = eqr_id_table,
	.driver = {
		.of_match_table = eqr_match_table,
	}
};
module_auxiliary_driver(eqr_driver);