Linux Audio

Check our new training course

Loading...
Note: File does not exist in v3.5.6.
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (c) 2015, 2017-2018, 2022, The Linux Foundation. All rights reserved.
  4 */
  5
  6#include <linux/bitops.h>
  7#include <linux/delay.h>
  8#include <linux/err.h>
  9#include <linux/export.h>
 10#include <linux/jiffies.h>
 11#include <linux/kernel.h>
 12#include <linux/ktime.h>
 13#include <linux/pm_domain.h>
 14#include <linux/regmap.h>
 15#include <linux/regulator/consumer.h>
 16#include <linux/reset-controller.h>
 17#include <linux/slab.h>
 18#include "gdsc.h"
 19
/* GDSCR bit fields */
#define PWR_ON_MASK		BIT(31)	/* read-only power-on status */
#define EN_REST_WAIT_MASK	GENMASK_ULL(23, 20)	/* rest→enable wait cycles */
#define EN_FEW_WAIT_MASK	GENMASK_ULL(19, 16)	/* few→rest wait cycles */
#define CLK_DIS_WAIT_MASK	GENMASK_ULL(15, 12)	/* clock-disable wait cycles */
#define SW_OVERRIDE_MASK	BIT(2)	/* bypass the HW state machine */
#define HW_CONTROL_MASK		BIT(1)	/* hand control to hardware */
#define SW_COLLAPSE_MASK	BIT(0)	/* software collapse request */
/* Bits in the clamp_io_ctrl register (GPU GMEM domains) */
#define GMEM_CLAMP_IO_MASK	BIT(0)
#define GMEM_RESET_MASK		BIT(4)

/* CFG_GDSCR */
#define GDSC_POWER_UP_COMPLETE		BIT(16)
#define GDSC_POWER_DOWN_COMPLETE	BIT(15)
#define GDSC_RETAIN_FF_ENABLE		BIT(11)
#define CFG_GDSCR_OFFSET		0x4	/* CFG register follows GDSCR */

/* Wait 2^n CXO cycles between all states. Here, n=2 (4 cycles). */
#define EN_REST_WAIT_VAL	0x2
#define EN_FEW_WAIT_VAL		0x8
#define CLK_DIS_WAIT_VAL	0x2

/* Transition delay shifts */
#define EN_REST_WAIT_SHIFT	20
#define EN_FEW_WAIT_SHIFT	16
#define CLK_DIS_WAIT_SHIFT	12

/* Memory-core retention bits in the CXC registers */
#define RETAIN_MEM		BIT(14)
#define RETAIN_PERIPH		BIT(13)

#define STATUS_POLL_TIMEOUT_US	1500	/* max wait for a state change */
#define TIMEOUT_US		500	/* settle delay after a no-wait disable */

/* Map a generic_pm_domain back to its containing gdsc */
#define domain_to_gdsc(domain) container_of(domain, struct gdsc, pd)
 53
/* Logical power states a GDSC can be driven to and polled for. */
enum gdsc_status {
	GDSC_OFF,	/* power collapsed */
	GDSC_ON		/* powered up */
};
 58
 59/* Returns 1 if GDSC status is status, 0 if not, and < 0 on error */
 60static int gdsc_check_status(struct gdsc *sc, enum gdsc_status status)
 61{
 62	unsigned int reg;
 63	u32 val;
 64	int ret;
 65
 66	if (sc->flags & POLL_CFG_GDSCR)
 67		reg = sc->gdscr + CFG_GDSCR_OFFSET;
 68	else if (sc->gds_hw_ctrl)
 69		reg = sc->gds_hw_ctrl;
 70	else
 71		reg = sc->gdscr;
 72
 73	ret = regmap_read(sc->regmap, reg, &val);
 74	if (ret)
 75		return ret;
 76
 77	if (sc->flags & POLL_CFG_GDSCR) {
 78		switch (status) {
 79		case GDSC_ON:
 80			return !!(val & GDSC_POWER_UP_COMPLETE);
 81		case GDSC_OFF:
 82			return !!(val & GDSC_POWER_DOWN_COMPLETE);
 83		}
 84	}
 85
 86	switch (status) {
 87	case GDSC_ON:
 88		return !!(val & PWR_ON_MASK);
 89	case GDSC_OFF:
 90		return !(val & PWR_ON_MASK);
 91	}
 92
 93	return -EINVAL;
 94}
 95
 96static int gdsc_hwctrl(struct gdsc *sc, bool en)
 97{
 98	u32 val = en ? HW_CONTROL_MASK : 0;
 99
100	return regmap_update_bits(sc->regmap, sc->gdscr, HW_CONTROL_MASK, val);
101}
102
103static int gdsc_poll_status(struct gdsc *sc, enum gdsc_status status)
104{
105	ktime_t start;
106
107	start = ktime_get();
108	do {
109		if (gdsc_check_status(sc, status))
110			return 0;
111	} while (ktime_us_delta(ktime_get(), start) < STATUS_POLL_TIMEOUT_US);
112
113	if (gdsc_check_status(sc, status))
114		return 0;
115
116	return -ETIMEDOUT;
117}
118
119static int gdsc_update_collapse_bit(struct gdsc *sc, bool val)
120{
121	u32 reg, mask;
122	int ret;
123
124	if (sc->collapse_mask) {
125		reg = sc->collapse_ctrl;
126		mask = sc->collapse_mask;
127	} else {
128		reg = sc->gdscr;
129		mask = SW_COLLAPSE_MASK;
130	}
131
132	ret = regmap_update_bits(sc->regmap, reg, mask, val ? mask : 0);
133	if (ret)
134		return ret;
135
136	return 0;
137}
138
/*
 * Drive the GDSC to @status and, unless this is a no-wait disable of a
 * votable GDSC, poll until the hardware confirms the new state. Also
 * sequences the optional parent supply around the transition: enabled
 * before power-up, disabled after a confirmed power-down.
 * Returns 0 on success or a negative errno.
 */
static int gdsc_toggle_logic(struct gdsc *sc, enum gdsc_status status,
		bool wait)
{
	int ret;

	/* The parent supply must be up before the headswitch is opened. */
	if (status == GDSC_ON && sc->rsupply) {
		ret = regulator_enable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	/*
	 * NOTE(review): this write's status is discarded on the votable
	 * early-return path below (it returns 0 regardless) — presumably
	 * intentional best-effort behaviour; confirm.
	 */
	ret = gdsc_update_collapse_bit(sc, status == GDSC_OFF);

	/* If disabling votable gdscs, don't poll on status */
	if ((sc->flags & VOTABLE) && status == GDSC_OFF && !wait) {
		/*
		 * Add a short delay here to ensure that an enable
		 * right after it was disabled does not put it in an
		 * unknown state
		 */
		udelay(TIMEOUT_US);
		return 0;
	}

	if (sc->gds_hw_ctrl) {
		/*
		 * The gds hw controller asserts/de-asserts the status bit soon
		 * after it receives a power on/off request from a master.
		 * The controller then takes around 8 xo cycles to start its
		 * internal state machine and update the status bit. During
		 * this time, the status bit does not reflect the true status
		 * of the core.
		 * Add a delay of 1 us between writing to the SW_COLLAPSE bit
		 * and polling the status bit.
		 */
		udelay(1);
	}

	ret = gdsc_poll_status(sc, status);
	WARN(ret, "%s status stuck at 'o%s'", sc->pd.name, status ? "ff" : "n");

	/* Drop the supply vote only once the power-down is confirmed. */
	if (!ret && status == GDSC_OFF && sc->rsupply) {
		ret = regulator_disable(sc->rsupply);
		if (ret < 0)
			return ret;
	}

	return ret;
}
188
189static inline int gdsc_deassert_reset(struct gdsc *sc)
190{
191	int i;
192
193	for (i = 0; i < sc->reset_count; i++)
194		sc->rcdev->ops->deassert(sc->rcdev, sc->resets[i]);
195	return 0;
196}
197
198static inline int gdsc_assert_reset(struct gdsc *sc)
199{
200	int i;
201
202	for (i = 0; i < sc->reset_count; i++)
203		sc->rcdev->ops->assert(sc->rcdev, sc->resets[i]);
204	return 0;
205}
206
207static inline void gdsc_force_mem_on(struct gdsc *sc)
208{
209	int i;
210	u32 mask = RETAIN_MEM;
211
212	if (!(sc->flags & NO_RET_PERIPH))
213		mask |= RETAIN_PERIPH;
214
215	for (i = 0; i < sc->cxc_count; i++)
216		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, mask);
217}
218
219static inline void gdsc_clear_mem_on(struct gdsc *sc)
220{
221	int i;
222	u32 mask = RETAIN_MEM;
223
224	if (!(sc->flags & NO_RET_PERIPH))
225		mask |= RETAIN_PERIPH;
226
227	for (i = 0; i < sc->cxc_count; i++)
228		regmap_update_bits(sc->regmap, sc->cxcs[i], mask, 0);
229}
230
231static inline void gdsc_deassert_clamp_io(struct gdsc *sc)
232{
233	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
234			   GMEM_CLAMP_IO_MASK, 0);
235}
236
237static inline void gdsc_assert_clamp_io(struct gdsc *sc)
238{
239	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
240			   GMEM_CLAMP_IO_MASK, 1);
241}
242
243static inline void gdsc_assert_reset_aon(struct gdsc *sc)
244{
245	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
246			   GMEM_RESET_MASK, 1);
247	udelay(1);
248	regmap_update_bits(sc->regmap, sc->clamp_io_ctrl,
249			   GMEM_RESET_MASK, 0);
250}
251
252static void gdsc_retain_ff_on(struct gdsc *sc)
253{
254	u32 mask = GDSC_RETAIN_FF_ENABLE;
255
256	regmap_update_bits(sc->regmap, sc->gdscr, mask, mask);
257}
258
/*
 * genpd .power_on callback: bring the GDSC up, honouring the optional
 * reset, I/O-clamp, HW-control and flip-flop-retention quirks. For
 * always-on (PWRSTS_ON) domains only the resets are released.
 * Returns 0 on success or a negative errno.
 */
static int gdsc_enable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	/* ON-only domains have no switch to toggle; just release resets. */
	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_deassert_reset(sc);

	/* Optionally pulse the block's resets before powering up. */
	if (sc->flags & SW_RESET) {
		gdsc_assert_reset(sc);
		udelay(1);
		gdsc_deassert_reset(sc);
	}

	/* Unclamp I/O (with an optional AON reset pulse) before power-up. */
	if (sc->flags & CLAMP_IO) {
		if (sc->flags & AON_RESET)
			gdsc_assert_reset_aon(sc);
		gdsc_deassert_clamp_io(sc);
	}

	ret = gdsc_toggle_logic(sc, GDSC_ON, false);
	if (ret)
		return ret;

	/* Domains that can fully collapse need memory retention re-armed. */
	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_force_mem_on(sc);

	/*
	 * If clocks to this power domain were already on, they will take an
	 * additional 4 clock cycles to re-enable after the power domain is
	 * enabled. Delay to account for this. A delay is also needed to ensure
	 * clocks are not enabled within 400ns of enabling power to the
	 * memories.
	 */
	udelay(1);

	/* Turn on HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, true);
		if (ret)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle.  In case a firmware ends up polling status
		 * bits for the gdsc, it might read an 'on' status before
		 * the GDSC can finish the power cycle.
		 * We wait 1us before returning to ensure the firmware
		 * can't immediately poll the status bits.
		 */
		udelay(1);
	}

	if (sc->flags & RETAIN_FF_ENABLE)
		gdsc_retain_ff_on(sc);

	return 0;
}
316
/*
 * genpd .power_off callback: take the GDSC down, first reclaiming control
 * from hardware if HW trigger mode was enabled. ON-only domains are merely
 * held in reset; retention-only domains are left on for HW to collapse.
 * Returns 0 on success or a negative errno.
 */
static int gdsc_disable(struct generic_pm_domain *domain)
{
	struct gdsc *sc = domain_to_gdsc(domain);
	int ret;

	if (sc->pwrsts == PWRSTS_ON)
		return gdsc_assert_reset(sc);

	/* Turn off HW trigger mode if supported */
	if (sc->flags & HW_CTRL) {
		ret = gdsc_hwctrl(sc, false);
		if (ret < 0)
			return ret;
		/*
		 * Wait for the GDSC to go through a power down and
		 * up cycle.  In case we end up polling status
		 * bits for the gdsc before the power cycle is completed
		 * it might read an 'on' status wrongly.
		 */
		udelay(1);

		/* SW now owns the GDSC; it must be back ON before collapsing. */
		ret = gdsc_poll_status(sc, GDSC_ON);
		if (ret)
			return ret;
	}

	if (sc->pwrsts & PWRSTS_OFF)
		gdsc_clear_mem_on(sc);

	/*
	 * If the GDSC supports only a Retention state, apart from ON,
	 * leave it in ON state.
	 * There is no SW control to transition the GDSC into
	 * Retention state. This happens in HW when the parent
	 * domain goes down to a Low power state
	 */
	if (sc->pwrsts == PWRSTS_RET_ON)
		return 0;

	ret = gdsc_toggle_logic(sc, GDSC_OFF, domain->synced_poweroff);
	if (ret)
		return ret;

	/* Clamp I/O only after the domain is confirmed down. */
	if (sc->flags & CLAMP_IO)
		gdsc_assert_clamp_io(sc);

	return 0;
}
365
366static int gdsc_set_hwmode(struct generic_pm_domain *domain, struct device *dev, bool mode)
367{
368	struct gdsc *sc = domain_to_gdsc(domain);
369	int ret;
370
371	ret = gdsc_hwctrl(sc, mode);
372	if (ret)
373		return ret;
374
375	/*
376	 * Wait for the GDSC to go through a power down and
377	 * up cycle. If we poll the status register before the
378	 * power cycle is finished we might read incorrect values.
379	 */
380	udelay(1);
381
382	/*
383	 * When the GDSC is switched to HW mode, HW can disable the GDSC.
384	 * When the GDSC is switched back to SW mode, the GDSC will be enabled
385	 * again, hence we need to poll for GDSC to complete the power up.
386	 */
387	if (!mode)
388		return gdsc_poll_status(sc, GDSC_ON);
389
390	return 0;
391}
392
393static bool gdsc_get_hwmode(struct generic_pm_domain *domain, struct device *dev)
394{
395	struct gdsc *sc = domain_to_gdsc(domain);
396	u32 val;
397
398	regmap_read(sc->regmap, sc->gdscr, &val);
399
400	return !!(val & HW_CONTROL_MASK);
401}
402
/*
 * One-time setup for a single GDSC: program the state-machine wait times,
 * force ON-only domains up, synchronize the kernel's supply/vote/hwctrl
 * state with whatever state the bootloader left the GDSC in, and register
 * the genpd. Returns 0 on success or a negative errno.
 */
static int gdsc_init(struct gdsc *sc)
{
	u32 mask, val;
	int on, ret;

	/*
	 * Disable HW trigger: collapse/restore occur based on registers writes.
	 * Disable SW override: Use hardware state-machine for sequencing.
	 * Configure wait time between states.
	 */
	mask = HW_CONTROL_MASK | SW_OVERRIDE_MASK |
	       EN_REST_WAIT_MASK | EN_FEW_WAIT_MASK | CLK_DIS_WAIT_MASK;

	/* Fall back to the common defaults where the SoC didn't override. */
	if (!sc->en_rest_wait_val)
		sc->en_rest_wait_val = EN_REST_WAIT_VAL;
	if (!sc->en_few_wait_val)
		sc->en_few_wait_val = EN_FEW_WAIT_VAL;
	if (!sc->clk_dis_wait_val)
		sc->clk_dis_wait_val = CLK_DIS_WAIT_VAL;

	val = sc->en_rest_wait_val << EN_REST_WAIT_SHIFT |
		sc->en_few_wait_val << EN_FEW_WAIT_SHIFT |
		sc->clk_dis_wait_val << CLK_DIS_WAIT_SHIFT;

	ret = regmap_update_bits(sc->regmap, sc->gdscr, mask, val);
	if (ret)
		return ret;

	/* Force gdsc ON if only ON state is supported */
	if (sc->pwrsts == PWRSTS_ON) {
		ret = gdsc_toggle_logic(sc, GDSC_ON, false);
		if (ret)
			return ret;
	}

	on = gdsc_check_status(sc, GDSC_ON);
	if (on < 0)
		return on;

	if (on) {
		/* The regulator must be on, sync the kernel state */
		if (sc->rsupply) {
			ret = regulator_enable(sc->rsupply);
			if (ret < 0)
				return ret;
		}

		/*
		 * Votable GDSCs can be ON due to Vote from other masters.
		 * If a Votable GDSC is ON, make sure we have a Vote.
		 */
		if (sc->flags & VOTABLE) {
			ret = gdsc_update_collapse_bit(sc, false);
			if (ret)
				goto err_disable_supply;
		}

		/* Turn on HW trigger mode if supported */
		if (sc->flags & HW_CTRL) {
			ret = gdsc_hwctrl(sc, true);
			if (ret < 0)
				goto err_disable_supply;
		}

		/*
		 * Make sure the retain bit is set if the GDSC is already on,
		 * otherwise we end up turning off the GDSC and destroying all
		 * the register contents that we thought we were saving.
		 */
		if (sc->flags & RETAIN_FF_ENABLE)
			gdsc_retain_ff_on(sc);
	} else if (sc->flags & ALWAYS_ON) {
		/* If ALWAYS_ON GDSCs are not ON, turn them ON */
		gdsc_enable(&sc->pd);
		on = true;
	}

	/* Seed the retention bits to match the domain's current state. */
	if (on || (sc->pwrsts & PWRSTS_RET))
		gdsc_force_mem_on(sc);
	else
		gdsc_clear_mem_on(sc);

	if (sc->flags & ALWAYS_ON)
		sc->pd.flags |= GENPD_FLAG_ALWAYS_ON;
	/* Let SoC drivers override the callbacks; install defaults otherwise. */
	if (!sc->pd.power_off)
		sc->pd.power_off = gdsc_disable;
	if (!sc->pd.power_on)
		sc->pd.power_on = gdsc_enable;
	if (sc->flags & HW_CTRL_TRIGGER) {
		sc->pd.set_hwmode_dev = gdsc_set_hwmode;
		sc->pd.get_hwmode_dev = gdsc_get_hwmode;
	}

	/* Register with genpd, reporting the actual hardware state. */
	ret = pm_genpd_init(&sc->pd, NULL, !on);
	if (ret)
		goto err_disable_supply;

	return 0;

err_disable_supply:
	/* Undo the regulator_enable() done for an already-on GDSC above. */
	if (on && sc->rsupply)
		regulator_disable(sc->rsupply);

	return ret;
}
508
/*
 * Register all GDSCs described by @desc: resolve optional parent supplies,
 * initialize each gdsc, wire up parent/child genpd relationships, and
 * expose the set as a onecell genpd provider on the device's OF node.
 * Returns 0 on success or a negative errno.
 */
int gdsc_register(struct gdsc_desc *desc,
		  struct reset_controller_dev *rcdev, struct regmap *regmap)
{
	int i, ret;
	struct genpd_onecell_data *data;
	struct device *dev = desc->dev;
	struct gdsc **scs = desc->scs;
	size_t num = desc->num;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	data->domains = devm_kcalloc(dev, num, sizeof(*data->domains),
				     GFP_KERNEL);
	if (!data->domains)
		return -ENOMEM;

	/* Resolve named parent supplies; a missing supply is not fatal. */
	for (i = 0; i < num; i++) {
		if (!scs[i] || !scs[i]->supply)
			continue;

		scs[i]->rsupply = devm_regulator_get_optional(dev, scs[i]->supply);
		if (IS_ERR(scs[i]->rsupply)) {
			ret = PTR_ERR(scs[i]->rsupply);
			if (ret != -ENODEV)
				return ret;

			/* -ENODEV: run without supply management. */
			scs[i]->rsupply = NULL;
		}
	}

	data->num_domains = num;
	for (i = 0; i < num; i++) {
		/* Sparse tables are allowed; holes stay NULL in the provider. */
		if (!scs[i])
			continue;
		scs[i]->regmap = regmap;
		scs[i]->rcdev = rcdev;
		ret = gdsc_init(scs[i]);
		if (ret)
			return ret;
		data->domains[i] = &scs[i]->pd;
	}

	/* Add subdomains */
	/* NOTE(review): pm_genpd_add_subdomain() failures are ignored here —
	 * presumably best-effort; confirm. */
	for (i = 0; i < num; i++) {
		if (!scs[i])
			continue;
		if (scs[i]->parent)
			pm_genpd_add_subdomain(scs[i]->parent, &scs[i]->pd);
		else if (!IS_ERR_OR_NULL(dev->pm_domain))
			pm_genpd_add_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
	}

	return of_genpd_add_provider_onecell(dev->of_node, data);
}
565
566void gdsc_unregister(struct gdsc_desc *desc)
567{
568	int i;
569	struct device *dev = desc->dev;
570	struct gdsc **scs = desc->scs;
571	size_t num = desc->num;
572
573	/* Remove subdomains */
574	for (i = 0; i < num; i++) {
575		if (!scs[i])
576			continue;
577		if (scs[i]->parent)
578			pm_genpd_remove_subdomain(scs[i]->parent, &scs[i]->pd);
579		else if (!IS_ERR_OR_NULL(dev->pm_domain))
580			pm_genpd_remove_subdomain(pd_to_genpd(dev->pm_domain), &scs[i]->pd);
581	}
582	of_genpd_del_provider(dev->of_node);
583}
584
585/*
586 * On SDM845+ the GPU GX domain is *almost* entirely controlled by the GMU
587 * running in the CX domain so the CPU doesn't need to know anything about the
588 * GX domain EXCEPT....
589 *
590 * Hardware constraints dictate that the GX be powered down before the CX. If
591 * the GMU crashes it could leave the GX on. In order to successfully bring back
592 * the device the CPU needs to disable the GX headswitch. There being no sane
593 * way to reach in and touch that register from deep inside the GPU driver we
594 * need to set up the infrastructure to be able to ensure that the GPU can
595 * ensure that the GX is off during this super special case. We do this by
596 * defining a GX gdsc with a dummy enable function and a "default" disable
597 * function.
598 *
599 * This allows us to attach with genpd_dev_pm_attach_by_name() in the GPU
600 * driver. During power up, nothing will happen from the CPU (and the GMU will
601 * power up normally but during power down this will ensure that the GX domain
602 * is *really* off - this gives us a semi standard way of doing what we need.
603 */
604int gdsc_gx_do_nothing_enable(struct generic_pm_domain *domain)
605{
606	struct gdsc *sc = domain_to_gdsc(domain);
607	int ret = 0;
608
609	/* Enable the parent supply, when controlled through the regulator framework. */
610	if (sc->rsupply)
611		ret = regulator_enable(sc->rsupply);
612
613	/* Do nothing with the GDSC itself */
614
615	return ret;
616}
617EXPORT_SYMBOL_GPL(gdsc_gx_do_nothing_enable);