v5.4
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * A devfreq driver for NVIDIA Tegra SoCs
  4 *
  5 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
  6 * Copyright (C) 2014 Google, Inc
  7 */
  8
  9#include <linux/clk.h>
 10#include <linux/cpufreq.h>
 11#include <linux/devfreq.h>
 12#include <linux/interrupt.h>
 13#include <linux/io.h>
 14#include <linux/module.h>
 15#include <linux/mod_devicetable.h>
 16#include <linux/platform_device.h>
 17#include <linux/pm_opp.h>
 18#include <linux/reset.h>
 19
 20#include "governor.h"
 21
 22#define ACTMON_GLB_STATUS					0x0
 23#define ACTMON_GLB_PERIOD_CTRL					0x4
 24
 25#define ACTMON_DEV_CTRL						0x0
 26#define ACTMON_DEV_CTRL_K_VAL_SHIFT				10
 27#define ACTMON_DEV_CTRL_ENB_PERIODIC				BIT(18)
 28#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN			BIT(20)
 29#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN			BIT(21)
 30#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT	23
 31#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT	26
 32#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN		BIT(29)
 33#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN		BIT(30)
 34#define ACTMON_DEV_CTRL_ENB					BIT(31)
 35
 36#define ACTMON_DEV_UPPER_WMARK					0x4
 37#define ACTMON_DEV_LOWER_WMARK					0x8
 38#define ACTMON_DEV_INIT_AVG					0xc
 39#define ACTMON_DEV_AVG_UPPER_WMARK				0x10
 40#define ACTMON_DEV_AVG_LOWER_WMARK				0x14
 41#define ACTMON_DEV_COUNT_WEIGHT					0x18
 42#define ACTMON_DEV_AVG_COUNT					0x20
 43#define ACTMON_DEV_INTR_STATUS					0x24
 44
 45#define ACTMON_INTR_STATUS_CLEAR				0xffffffff
 46
 47#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER			BIT(31)
 48#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER			BIT(30)
 49
 50#define ACTMON_ABOVE_WMARK_WINDOW				1
 51#define ACTMON_BELOW_WMARK_WINDOW				3
 52#define ACTMON_BOOST_FREQ_STEP					16000
 53
 54/*
 55 * Activity counter is incremented every 256 memory transactions, and each
  56 * transaction takes 4 EMC clocks for Tegra124, so the COUNT_WEIGHT is
 57 * 4 * 256 = 1024.
 58 */
 59#define ACTMON_COUNT_WEIGHT					0x400
 60
 61/*
 62 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
 63 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
 64 */
 65#define ACTMON_AVERAGE_WINDOW_LOG2			6
 66#define ACTMON_SAMPLING_PERIOD				12 /* ms */
 67#define ACTMON_DEFAULT_AVG_BAND				6  /* 1/10 of % */
 68
 69#define KHZ							1000
 70
 71/* Assume that the bus is saturated if the utilization is 25% */
 72#define BUS_SATURATION_RATIO					25
 73
 74/**
 75 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
 76 * device
 77 *
 78 * Coefficients and thresholds are percentages unless otherwise noted
 79 */
 80struct tegra_devfreq_device_config {
 81	u32		offset;
 82	u32		irq_mask;
 83
 84	/* Factors applied to boost_freq every consecutive watermark breach */
 85	unsigned int	boost_up_coeff;
 86	unsigned int	boost_down_coeff;
 87
 88	/* Define the watermark bounds when applied to the current avg */
 89	unsigned int	boost_up_threshold;
 90	unsigned int	boost_down_threshold;
 91
 92	/*
 93	 * Threshold of activity (cycles) below which the CPU frequency isn't
 94	 * to be taken into account. This is to avoid increasing the EMC
 95	 * frequency when the CPU is very busy but not accessing the bus often.
 96	 */
 97	u32		avg_dependency_threshold;
 98};
 99
100enum tegra_actmon_device {
101	MCALL = 0,
102	MCCPU,
103};
104
105static struct tegra_devfreq_device_config actmon_device_configs[] = {
106	{
107		/* MCALL: All memory accesses (including from the CPUs) */
108		.offset = 0x1c0,
109		.irq_mask = 1 << 26,
110		.boost_up_coeff = 200,
111		.boost_down_coeff = 50,
112		.boost_up_threshold = 60,
113		.boost_down_threshold = 40,
114	},
115	{
116		/* MCCPU: memory accesses from the CPUs */
117		.offset = 0x200,
118		.irq_mask = 1 << 25,
119		.boost_up_coeff = 800,
120		.boost_down_coeff = 90,
121		.boost_up_threshold = 27,
122		.boost_down_threshold = 10,
123		.avg_dependency_threshold = 50000,
124	},
125};
126
127/**
128 * struct tegra_devfreq_device - state specific to an ACTMON device
129 *
130 * Frequencies are in kHz.
131 */
132struct tegra_devfreq_device {
133	const struct tegra_devfreq_device_config *config;
134	void __iomem *regs;
135
136	/* Average event count sampled in the last interrupt */
137	u32 avg_count;
138
139	/*
140	 * Extra frequency to increase the target by due to consecutive
141	 * watermark breaches.
142	 */
143	unsigned long boost_freq;
144
145	/* Optimal frequency calculated from the stats for this device */
146	unsigned long target_freq;
147};
148
149struct tegra_devfreq {
150	struct devfreq		*devfreq;
151
152	struct reset_control	*reset;
153	struct clk		*clock;
154	void __iomem		*regs;
155
156	struct clk		*emc_clock;
157	unsigned long		max_freq;
158	unsigned long		cur_freq;
159	struct notifier_block	rate_change_nb;
160
161	struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
162
163	int irq;
164};
165
166struct tegra_actmon_emc_ratio {
167	unsigned long cpu_freq;
168	unsigned long emc_freq;
169};
170
171static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
172	{ 1400000, ULONG_MAX },
173	{ 1200000,    750000 },
174	{ 1100000,    600000 },
175	{ 1000000,    500000 },
176	{  800000,    375000 },
177	{  500000,    200000 },
178	{  250000,    100000 },
179};
180
181static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
182{
183	return readl_relaxed(tegra->regs + offset);
184}
185
186static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
187{
188	writel_relaxed(val, tegra->regs + offset);
189}
190
191static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
192{
193	return readl_relaxed(dev->regs + offset);
194}
195
196static void device_writel(struct tegra_devfreq_device *dev, u32 val,
197			  u32 offset)
198{
199	writel_relaxed(val, dev->regs + offset);
200}
201
202static unsigned long do_percent(unsigned long val, unsigned int pct)
203{
204	return val * pct / 100;
205}
206
207static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
208					   struct tegra_devfreq_device *dev)
209{
210	u32 avg = dev->avg_count;
211	u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
212	u32 band = avg_band_freq * ACTMON_SAMPLING_PERIOD;
213
214	device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
215
216	avg = max(dev->avg_count, band);
217	device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
218}
219
220static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
221				       struct tegra_devfreq_device *dev)
222{
223	u32 val = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
224
225	device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
226		      ACTMON_DEV_UPPER_WMARK);
227
228	device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
229		      ACTMON_DEV_LOWER_WMARK);
230}
231
232static void actmon_write_barrier(struct tegra_devfreq *tegra)
233{
234	/* ensure the update has reached the ACTMON */
235	readl(tegra->regs + ACTMON_GLB_STATUS);
236}
237
238static void actmon_isr_device(struct tegra_devfreq *tegra,
239			      struct tegra_devfreq_device *dev)
240{
241	u32 intr_status, dev_ctrl;
242
243	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
244	tegra_devfreq_update_avg_wmark(tegra, dev);
245
246	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
247	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);
248
249	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
250		/*
251		 * new_boost = min(old_boost * up_coef + step, max_freq)
252		 */
253		dev->boost_freq = do_percent(dev->boost_freq,
254					     dev->config->boost_up_coeff);
255		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
256
257		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
258
259		if (dev->boost_freq >= tegra->max_freq)
260			dev->boost_freq = tegra->max_freq;
261		else
262			dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
263	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
264		/*
265		 * new_boost = old_boost * down_coef
266		 * or 0 if (old_boost * down_coef < step / 2)
267		 */
268		dev->boost_freq = do_percent(dev->boost_freq,
269					     dev->config->boost_down_coeff);
270
271		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
272
273		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1))
274			dev->boost_freq = 0;
275		else
276			dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
277	}
278
279	if (dev->config->avg_dependency_threshold) {
280		if (dev->avg_count >= dev->config->avg_dependency_threshold)
281			dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
282		else if (dev->boost_freq == 0)
283			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
284	}
285
286	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
287
288	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
289
290	actmon_write_barrier(tegra);
291}
292
293static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
294					    unsigned long cpu_freq)
295{
296	unsigned int i;
297	struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
298
299	for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
300		if (cpu_freq >= ratio->cpu_freq) {
301			if (ratio->emc_freq >= tegra->max_freq)
302				return tegra->max_freq;
303			else
304				return ratio->emc_freq;
305		}
306	}
307
308	return 0;
309}
310
311static void actmon_update_target(struct tegra_devfreq *tegra,
312				 struct tegra_devfreq_device *dev)
313{
314	unsigned long cpu_freq = 0;
315	unsigned long static_cpu_emc_freq = 0;
316	unsigned int avg_sustain_coef;
317
318	if (dev->config->avg_dependency_threshold) {
319		cpu_freq = cpufreq_get(0);
320		static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
321	}
322
323	dev->target_freq = dev->avg_count / ACTMON_SAMPLING_PERIOD;
324	avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
325	dev->target_freq = do_percent(dev->target_freq, avg_sustain_coef);
326	dev->target_freq += dev->boost_freq;
327
328	if (dev->avg_count >= dev->config->avg_dependency_threshold)
329		dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
330}
331
332static irqreturn_t actmon_thread_isr(int irq, void *data)
333{
334	struct tegra_devfreq *tegra = data;
335	bool handled = false;
336	unsigned int i;
337	u32 val;
338
339	mutex_lock(&tegra->devfreq->lock);
340
341	val = actmon_readl(tegra, ACTMON_GLB_STATUS);
342	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
343		if (val & tegra->devices[i].config->irq_mask) {
344			actmon_isr_device(tegra, tegra->devices + i);
345			handled = true;
346		}
347	}
348
349	if (handled)
350		update_devfreq(tegra->devfreq);
351
352	mutex_unlock(&tegra->devfreq->lock);
353
354	return handled ? IRQ_HANDLED : IRQ_NONE;
355}
356
357static int tegra_actmon_rate_notify_cb(struct notifier_block *nb,
358				       unsigned long action, void *ptr)
359{
360	struct clk_notifier_data *data = ptr;
361	struct tegra_devfreq *tegra;
362	struct tegra_devfreq_device *dev;
363	unsigned int i;
364
365	if (action != POST_RATE_CHANGE)
366		return NOTIFY_OK;
367
368	tegra = container_of(nb, struct tegra_devfreq, rate_change_nb);
369
370	tegra->cur_freq = data->new_rate / KHZ;
371
372	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
373		dev = &tegra->devices[i];
374
375		tegra_devfreq_update_wmark(tegra, dev);
376	}
377
378	actmon_write_barrier(tegra);
379
380	return NOTIFY_OK;
381}
382
383static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
384					  struct tegra_devfreq_device *dev)
385{
386	u32 val = 0;
387
388	dev->target_freq = tegra->cur_freq;
389
390	dev->avg_count = tegra->cur_freq * ACTMON_SAMPLING_PERIOD;
391	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
392
393	tegra_devfreq_update_avg_wmark(tegra, dev);
394	tegra_devfreq_update_wmark(tegra, dev);
395
396	device_writel(dev, ACTMON_COUNT_WEIGHT, ACTMON_DEV_COUNT_WEIGHT);
397	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
398
399	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
400	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
401		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
402	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
403		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
404	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
405		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
406	val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
407	val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
408	val |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
409	val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
410	val |= ACTMON_DEV_CTRL_ENB;
411
412	device_writel(dev, val, ACTMON_DEV_CTRL);
413}
414
415static void tegra_actmon_start(struct tegra_devfreq *tegra)
416{
417	unsigned int i;
418
419	disable_irq(tegra->irq);
420
421	actmon_writel(tegra, ACTMON_SAMPLING_PERIOD - 1,
422		      ACTMON_GLB_PERIOD_CTRL);
423
424	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
425		tegra_actmon_configure_device(tegra, &tegra->devices[i]);
426
427	actmon_write_barrier(tegra);
428
429	enable_irq(tegra->irq);
430}
431
432static void tegra_actmon_stop(struct tegra_devfreq *tegra)
433{
434	unsigned int i;
435
436	disable_irq(tegra->irq);
437
438	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
439		device_writel(&tegra->devices[i], 0x00000000, ACTMON_DEV_CTRL);
440		device_writel(&tegra->devices[i], ACTMON_INTR_STATUS_CLEAR,
441			      ACTMON_DEV_INTR_STATUS);
442	}
443
444	actmon_write_barrier(tegra);
445
446	enable_irq(tegra->irq);
447}
448
449static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
450				u32 flags)
451{
452	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
453	struct devfreq *devfreq = tegra->devfreq;
454	struct dev_pm_opp *opp;
455	unsigned long rate;
456	int err;
457
458	opp = devfreq_recommended_opp(dev, freq, flags);
459	if (IS_ERR(opp)) {
460		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
461		return PTR_ERR(opp);
462	}
463	rate = dev_pm_opp_get_freq(opp);
464	dev_pm_opp_put(opp);
465
466	err = clk_set_min_rate(tegra->emc_clock, rate);
467	if (err)
468		return err;
469
470	err = clk_set_rate(tegra->emc_clock, 0);
471	if (err)
472		goto restore_min_rate;
473
474	return 0;
475
476restore_min_rate:
477	clk_set_min_rate(tegra->emc_clock, devfreq->previous_freq);
478
479	return err;
480}
481
482static int tegra_devfreq_get_dev_status(struct device *dev,
483					struct devfreq_dev_status *stat)
484{
485	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
486	struct tegra_devfreq_device *actmon_dev;
487	unsigned long cur_freq;
488
489	cur_freq = READ_ONCE(tegra->cur_freq);
490
491	/* To be used by the tegra governor */
492	stat->private_data = tegra;
493
494	/* The below are to be used by the other governors */
495	stat->current_frequency = cur_freq * KHZ;
496
497	actmon_dev = &tegra->devices[MCALL];
498
499	/* Number of cycles spent on memory access */
500	stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
501
502	/* The bus can be considered to be saturated way before 100% */
503	stat->busy_time *= 100 / BUS_SATURATION_RATIO;
504
505	/* Number of cycles in a sampling period */
506	stat->total_time = ACTMON_SAMPLING_PERIOD * cur_freq;
507
508	stat->busy_time = min(stat->busy_time, stat->total_time);
509
510	return 0;
511}
512
513static struct devfreq_dev_profile tegra_devfreq_profile = {
514	.polling_ms	= 0,
515	.target		= tegra_devfreq_target,
516	.get_dev_status	= tegra_devfreq_get_dev_status,
517};
518
519static int tegra_governor_get_target(struct devfreq *devfreq,
520				     unsigned long *freq)
521{
522	struct devfreq_dev_status *stat;
523	struct tegra_devfreq *tegra;
524	struct tegra_devfreq_device *dev;
525	unsigned long target_freq = 0;
526	unsigned int i;
527	int err;
528
529	err = devfreq_update_stats(devfreq);
530	if (err)
531		return err;
532
533	stat = &devfreq->last_status;
534
535	tegra = stat->private_data;
536
537	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
538		dev = &tegra->devices[i];
539
540		actmon_update_target(tegra, dev);
541
542		target_freq = max(target_freq, dev->target_freq);
543	}
544
545	*freq = target_freq * KHZ;
546
547	return 0;
548}
549
550static int tegra_governor_event_handler(struct devfreq *devfreq,
551					unsigned int event, void *data)
552{
553	struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
554
555	switch (event) {
556	case DEVFREQ_GOV_START:
557		devfreq_monitor_start(devfreq);
558		tegra_actmon_start(tegra);
559		break;
560
561	case DEVFREQ_GOV_STOP:
562		tegra_actmon_stop(tegra);
563		devfreq_monitor_stop(devfreq);
564		break;
565
566	case DEVFREQ_GOV_SUSPEND:
567		tegra_actmon_stop(tegra);
568		devfreq_monitor_suspend(devfreq);
569		break;
570
571	case DEVFREQ_GOV_RESUME:
572		devfreq_monitor_resume(devfreq);
573		tegra_actmon_start(tegra);
574		break;
575	}
576
577	return 0;
578}
579
580static struct devfreq_governor tegra_devfreq_governor = {
581	.name = "tegra_actmon",
582	.get_target_freq = tegra_governor_get_target,
583	.event_handler = tegra_governor_event_handler,
584	.immutable = true,
585};
586
587static int tegra_devfreq_probe(struct platform_device *pdev)
588{
589	struct tegra_devfreq *tegra;
590	struct tegra_devfreq_device *dev;
591	unsigned int i;
592	unsigned long rate;
593	int err;
594
595	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
596	if (!tegra)
597		return -ENOMEM;
598
599	tegra->regs = devm_platform_ioremap_resource(pdev, 0);
600	if (IS_ERR(tegra->regs))
601		return PTR_ERR(tegra->regs);
602
603	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
604	if (IS_ERR(tegra->reset)) {
605		dev_err(&pdev->dev, "Failed to get reset\n");
606		return PTR_ERR(tegra->reset);
607	}
608
609	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
610	if (IS_ERR(tegra->clock)) {
611		dev_err(&pdev->dev, "Failed to get actmon clock\n");
612		return PTR_ERR(tegra->clock);
613	}
614
615	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
616	if (IS_ERR(tegra->emc_clock)) {
617		dev_err(&pdev->dev, "Failed to get emc clock\n");
618		return PTR_ERR(tegra->emc_clock);
619	}
620
621	tegra->irq = platform_get_irq(pdev, 0);
622	if (tegra->irq < 0) {
623		err = tegra->irq;
624		dev_err(&pdev->dev, "Failed to get IRQ: %d\n", err);
625		return err;
626	}
627
628	reset_control_assert(tegra->reset);
629
630	err = clk_prepare_enable(tegra->clock);
631	if (err) {
632		dev_err(&pdev->dev,
633			"Failed to prepare and enable ACTMON clock\n");
634		return err;
635	}
636
637	reset_control_deassert(tegra->reset);
638
639	tegra->max_freq = clk_round_rate(tegra->emc_clock, ULONG_MAX) / KHZ;
640	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
641
642	for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
643		dev = tegra->devices + i;
644		dev->config = actmon_device_configs + i;
645		dev->regs = tegra->regs + dev->config->offset;
646	}
647
648	for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
649		rate = clk_round_rate(tegra->emc_clock, rate);
650
651		err = dev_pm_opp_add(&pdev->dev, rate, 0);
652		if (err) {
653			dev_err(&pdev->dev, "Failed to add OPP: %d\n", err);
654			goto remove_opps;
655		}
656	}
657
658	platform_set_drvdata(pdev, tegra);
659
660	tegra->rate_change_nb.notifier_call = tegra_actmon_rate_notify_cb;
661	err = clk_notifier_register(tegra->emc_clock, &tegra->rate_change_nb);
662	if (err) {
663		dev_err(&pdev->dev,
664			"Failed to register rate change notifier\n");
665		goto remove_opps;
666	}
667
668	err = devfreq_add_governor(&tegra_devfreq_governor);
669	if (err) {
670		dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
671		goto unreg_notifier;
672	}
673
674	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
675	tegra->devfreq = devfreq_add_device(&pdev->dev,
676					    &tegra_devfreq_profile,
677					    "tegra_actmon",
678					    NULL);
679	if (IS_ERR(tegra->devfreq)) {
680		err = PTR_ERR(tegra->devfreq);
681		goto remove_governor;
682	}
683
684	err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
685					actmon_thread_isr, IRQF_ONESHOT,
686					"tegra-devfreq", tegra);
687	if (err) {
688		dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
689		goto remove_devfreq;
690	}
691
692	return 0;
693
694remove_devfreq:
695	devfreq_remove_device(tegra->devfreq);
696
697remove_governor:
698	devfreq_remove_governor(&tegra_devfreq_governor);
699
700unreg_notifier:
701	clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
702
703remove_opps:
704	dev_pm_opp_remove_all_dynamic(&pdev->dev);
705
706	reset_control_reset(tegra->reset);
707	clk_disable_unprepare(tegra->clock);
708
709	return err;
710}
711
712static int tegra_devfreq_remove(struct platform_device *pdev)
713{
714	struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
715
716	devfreq_remove_device(tegra->devfreq);
717	devfreq_remove_governor(&tegra_devfreq_governor);
718
719	clk_notifier_unregister(tegra->emc_clock, &tegra->rate_change_nb);
720	dev_pm_opp_remove_all_dynamic(&pdev->dev);
721
722	reset_control_reset(tegra->reset);
723	clk_disable_unprepare(tegra->clock);
724
725	return 0;
726}
727
728static const struct of_device_id tegra_devfreq_of_match[] = {
729	{ .compatible = "nvidia,tegra30-actmon" },
730	{ .compatible = "nvidia,tegra124-actmon" },
731	{ },
732};
733
734MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
735
736static struct platform_driver tegra_devfreq_driver = {
737	.probe	= tegra_devfreq_probe,
738	.remove	= tegra_devfreq_remove,
739	.driver = {
740		.name = "tegra-devfreq",
741		.of_match_table = tegra_devfreq_of_match,
742	},
743};
744module_platform_driver(tegra_devfreq_driver);
745
746MODULE_LICENSE("GPL v2");
747MODULE_DESCRIPTION("Tegra devfreq driver");
748MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
v5.14.15
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * A devfreq driver for NVIDIA Tegra SoCs
  4 *
  5 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
  6 * Copyright (C) 2014 Google, Inc
  7 */
  8
  9#include <linux/clk.h>
 10#include <linux/cpufreq.h>
 11#include <linux/devfreq.h>
 12#include <linux/interrupt.h>
 13#include <linux/io.h>
 14#include <linux/irq.h>
 15#include <linux/module.h>
 16#include <linux/of_device.h>
 17#include <linux/platform_device.h>
 18#include <linux/pm_opp.h>
 19#include <linux/reset.h>
 20#include <linux/workqueue.h>
 21
 22#include <soc/tegra/fuse.h>
 23
 24#include "governor.h"
 25
 26#define ACTMON_GLB_STATUS					0x0
 27#define ACTMON_GLB_PERIOD_CTRL					0x4
 28
 29#define ACTMON_DEV_CTRL						0x0
 30#define ACTMON_DEV_CTRL_K_VAL_SHIFT				10
 31#define ACTMON_DEV_CTRL_ENB_PERIODIC				BIT(18)
 32#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN			BIT(20)
 33#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN			BIT(21)
 34#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT	23
 35#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT	26
 36#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN		BIT(29)
 37#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN		BIT(30)
 38#define ACTMON_DEV_CTRL_ENB					BIT(31)
 39
 40#define ACTMON_DEV_CTRL_STOP					0x00000000
 41
 42#define ACTMON_DEV_UPPER_WMARK					0x4
 43#define ACTMON_DEV_LOWER_WMARK					0x8
 44#define ACTMON_DEV_INIT_AVG					0xc
 45#define ACTMON_DEV_AVG_UPPER_WMARK				0x10
 46#define ACTMON_DEV_AVG_LOWER_WMARK				0x14
 47#define ACTMON_DEV_COUNT_WEIGHT					0x18
 48#define ACTMON_DEV_AVG_COUNT					0x20
 49#define ACTMON_DEV_INTR_STATUS					0x24
 50
 51#define ACTMON_INTR_STATUS_CLEAR				0xffffffff
 52
 53#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER			BIT(31)
 54#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER			BIT(30)
 55
 56#define ACTMON_ABOVE_WMARK_WINDOW				1
 57#define ACTMON_BELOW_WMARK_WINDOW				3
 58#define ACTMON_BOOST_FREQ_STEP					16000
 59
 60/*
 61 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
 62 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
 63 */
 64#define ACTMON_AVERAGE_WINDOW_LOG2			6
 65#define ACTMON_SAMPLING_PERIOD				12 /* ms */
 66#define ACTMON_DEFAULT_AVG_BAND				6  /* 1/10 of % */
 67
 68#define KHZ							1000
 69
 70#define KHZ_MAX						(ULONG_MAX / KHZ)
 71
 72/* Assume that the bus is saturated if the utilization is 25% */
 73#define BUS_SATURATION_RATIO					25
 74
 75/**
 76 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
 77 * device
 78 *
 79 * Coefficients and thresholds are percentages unless otherwise noted
 80 */
 81struct tegra_devfreq_device_config {
 82	u32		offset;
 83	u32		irq_mask;
 84
 85	/* Factors applied to boost_freq every consecutive watermark breach */
 86	unsigned int	boost_up_coeff;
 87	unsigned int	boost_down_coeff;
 88
 89	/* Define the watermark bounds when applied to the current avg */
 90	unsigned int	boost_up_threshold;
 91	unsigned int	boost_down_threshold;
 92
 93	/*
 94	 * Threshold of activity (cycles translated to kHz) below which the
 95	 * CPU frequency isn't to be taken into account. This is to avoid
 96	 * increasing the EMC frequency when the CPU is very busy but not
 97	 * accessing the bus often.
 98	 */
 99	u32		avg_dependency_threshold;
100};
101
102enum tegra_actmon_device {
103	MCALL = 0,
104	MCCPU,
105};
106
107static const struct tegra_devfreq_device_config tegra124_device_configs[] = {
108	{
109		/* MCALL: All memory accesses (including from the CPUs) */
110		.offset = 0x1c0,
111		.irq_mask = 1 << 26,
112		.boost_up_coeff = 200,
113		.boost_down_coeff = 50,
114		.boost_up_threshold = 60,
115		.boost_down_threshold = 40,
116	},
117	{
118		/* MCCPU: memory accesses from the CPUs */
119		.offset = 0x200,
120		.irq_mask = 1 << 25,
121		.boost_up_coeff = 800,
122		.boost_down_coeff = 40,
123		.boost_up_threshold = 27,
124		.boost_down_threshold = 10,
125		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
126	},
127};
128
129static const struct tegra_devfreq_device_config tegra30_device_configs[] = {
130	{
131		/* MCALL: All memory accesses (including from the CPUs) */
132		.offset = 0x1c0,
133		.irq_mask = 1 << 26,
134		.boost_up_coeff = 200,
135		.boost_down_coeff = 50,
136		.boost_up_threshold = 20,
137		.boost_down_threshold = 10,
138	},
139	{
140		/* MCCPU: memory accesses from the CPUs */
141		.offset = 0x200,
142		.irq_mask = 1 << 25,
143		.boost_up_coeff = 800,
144		.boost_down_coeff = 40,
145		.boost_up_threshold = 27,
146		.boost_down_threshold = 10,
147		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
148	},
149};
150
151/**
152 * struct tegra_devfreq_device - state specific to an ACTMON device
153 *
154 * Frequencies are in kHz.
155 */
156struct tegra_devfreq_device {
157	const struct tegra_devfreq_device_config *config;
158	void __iomem *regs;
159
160	/* Average event count sampled in the last interrupt */
161	u32 avg_count;
162
163	/*
164	 * Extra frequency to increase the target by due to consecutive
165	 * watermark breaches.
166	 */
167	unsigned long boost_freq;
168
169	/* Optimal frequency calculated from the stats for this device */
170	unsigned long target_freq;
171};
172
173struct tegra_devfreq_soc_data {
174	const struct tegra_devfreq_device_config *configs;
175	/* Weight value for count measurements */
176	unsigned int count_weight;
177};
178
179struct tegra_devfreq {
180	struct devfreq		*devfreq;
181	struct opp_table	*opp_table;
182
183	struct reset_control	*reset;
184	struct clk		*clock;
185	void __iomem		*regs;
186
187	struct clk		*emc_clock;
188	unsigned long		max_freq;
189	unsigned long		cur_freq;
190	struct notifier_block	clk_rate_change_nb;
191
192	struct delayed_work	cpufreq_update_work;
193	struct notifier_block	cpu_rate_change_nb;
194
195	struct tegra_devfreq_device devices[2];
196
197	unsigned int		irq;
198
199	bool			started;
200
201	const struct tegra_devfreq_soc_data *soc;
202};
203
204struct tegra_actmon_emc_ratio {
205	unsigned long cpu_freq;
206	unsigned long emc_freq;
207};
208
209static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
210	{ 1400000,    KHZ_MAX },
211	{ 1200000,    750000 },
212	{ 1100000,    600000 },
213	{ 1000000,    500000 },
214	{  800000,    375000 },
215	{  500000,    200000 },
216	{  250000,    100000 },
217};
218
219static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
220{
221	return readl_relaxed(tegra->regs + offset);
222}
223
224static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
225{
226	writel_relaxed(val, tegra->regs + offset);
227}
228
229static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
230{
231	return readl_relaxed(dev->regs + offset);
232}
233
234static void device_writel(struct tegra_devfreq_device *dev, u32 val,
235			  u32 offset)
236{
237	writel_relaxed(val, dev->regs + offset);
238}
239
240static unsigned long do_percent(unsigned long long val, unsigned int pct)
241{
242	val = val * pct;
243	do_div(val, 100);
244
245	/*
246	 * High freq + high boosting percent + large polling interval are
247	 * resulting in integer overflow when watermarks are calculated.
248	 */
249	return min_t(u64, val, U32_MAX);
250}
251
252static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
253					   struct tegra_devfreq_device *dev)
254{
255	u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
256	u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
257	u32 avg;
258
259	avg = min(dev->avg_count, U32_MAX - band);
260	device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
261
262	avg = max(dev->avg_count, band);
263	device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
264}
265
266static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
267				       struct tegra_devfreq_device *dev)
268{
269	u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
270
271	device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
272		      ACTMON_DEV_UPPER_WMARK);
273
274	device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
275		      ACTMON_DEV_LOWER_WMARK);
276}
277
278static void actmon_isr_device(struct tegra_devfreq *tegra,
279			      struct tegra_devfreq_device *dev)
280{
281	u32 intr_status, dev_ctrl;
282
283	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
284	tegra_devfreq_update_avg_wmark(tegra, dev);
285
286	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
287	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);
288
289	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
290		/*
291		 * new_boost = min(old_boost * up_coef + step, max_freq)
292		 */
293		dev->boost_freq = do_percent(dev->boost_freq,
294					     dev->config->boost_up_coeff);
295		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
296
297		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
298
299		if (dev->boost_freq >= tegra->max_freq) {
300			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
301			dev->boost_freq = tegra->max_freq;
302		}
303	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
304		/*
305		 * new_boost = old_boost * down_coef
306		 * or 0 if (old_boost * down_coef < step / 2)
307		 */
308		dev->boost_freq = do_percent(dev->boost_freq,
309					     dev->config->boost_down_coeff);
310
311		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
312
313		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
314			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
315			dev->boost_freq = 0;
316		}
317	}
318
319	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
320
321	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
322}
323
324static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
325					    unsigned long cpu_freq)
326{
327	unsigned int i;
328	const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
329
330	for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
331		if (cpu_freq >= ratio->cpu_freq) {
332			if (ratio->emc_freq >= tegra->max_freq)
333				return tegra->max_freq;
334			else
335				return ratio->emc_freq;
336		}
337	}
338
339	return 0;
340}
341
342static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
343					       struct tegra_devfreq_device *dev)
344{
345	unsigned int avg_sustain_coef;
346	unsigned long target_freq;
347
348	target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
349	avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
350	target_freq = do_percent(target_freq, avg_sustain_coef);
351
352	return target_freq;
353}
354
355static void actmon_update_target(struct tegra_devfreq *tegra,
356				 struct tegra_devfreq_device *dev)
357{
358	unsigned long cpu_freq = 0;
359	unsigned long static_cpu_emc_freq = 0;
360
361	dev->target_freq = actmon_device_target_freq(tegra, dev);
362
363	if (dev->config->avg_dependency_threshold &&
364	    dev->config->avg_dependency_threshold <= dev->target_freq) {
365		cpu_freq = cpufreq_quick_get(0);
366		static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
367
368		dev->target_freq += dev->boost_freq;
369		dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
370	} else {
371		dev->target_freq += dev->boost_freq;
372	}
373}
374
375static irqreturn_t actmon_thread_isr(int irq, void *data)
376{
377	struct tegra_devfreq *tegra = data;
378	bool handled = false;
379	unsigned int i;
380	u32 val;
381
382	mutex_lock(&tegra->devfreq->lock);
383
384	val = actmon_readl(tegra, ACTMON_GLB_STATUS);
385	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
386		if (val & tegra->devices[i].config->irq_mask) {
387			actmon_isr_device(tegra, tegra->devices + i);
388			handled = true;
389		}
390	}
391
392	if (handled)
393		update_devfreq(tegra->devfreq);
394
395	mutex_unlock(&tegra->devfreq->lock);
396
397	return handled ? IRQ_HANDLED : IRQ_NONE;
398}
399
400static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
401				      unsigned long action, void *ptr)
402{
403	struct clk_notifier_data *data = ptr;
404	struct tegra_devfreq *tegra;
405	struct tegra_devfreq_device *dev;
406	unsigned int i;
407
408	if (action != POST_RATE_CHANGE)
409		return NOTIFY_OK;
410
411	tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);
412
413	tegra->cur_freq = data->new_rate / KHZ;
414
415	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
416		dev = &tegra->devices[i];
417
418		tegra_devfreq_update_wmark(tegra, dev);
419	}
420
421	return NOTIFY_OK;
422}
423
424static void tegra_actmon_delayed_update(struct work_struct *work)
425{
426	struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
427						   cpufreq_update_work.work);
428
429	mutex_lock(&tegra->devfreq->lock);
430	update_devfreq(tegra->devfreq);
431	mutex_unlock(&tegra->devfreq->lock);
432}
433
434static unsigned long
435tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
436				  unsigned int cpu_freq)
437{
438	struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
439	unsigned long static_cpu_emc_freq, dev_freq;
440
441	dev_freq = actmon_device_target_freq(tegra, actmon_dev);
442
443	/* check whether CPU's freq is taken into account at all */
444	if (dev_freq < actmon_dev->config->avg_dependency_threshold)
445		return 0;
446
447	static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
448
449	if (dev_freq + actmon_dev->boost_freq >= static_cpu_emc_freq)
450		return 0;
451
452	return static_cpu_emc_freq;
453}
454
455static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
456				      unsigned long action, void *ptr)
457{
458	struct cpufreq_freqs *freqs = ptr;
459	struct tegra_devfreq *tegra;
460	unsigned long old, new, delay;
461
462	if (action != CPUFREQ_POSTCHANGE)
463		return NOTIFY_OK;
464
465	tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);
466
467	/*
468	 * Quickly check whether CPU frequency should be taken into account
469	 * at all, without blocking CPUFreq's core.
470	 */
471	if (mutex_trylock(&tegra->devfreq->lock)) {
472		old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
473		new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
474		mutex_unlock(&tegra->devfreq->lock);
475
476		/*
477		 * If CPU's frequency shouldn't be taken into account at
478		 * the moment, then there is no need to update the devfreq's
479		 * state because ISR will re-check CPU's frequency on the
480		 * next interrupt.
481		 */
482		if (old == new)
483			return NOTIFY_OK;
484	}
485
486	/*
487	 * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
488	 * to allow asynchronous notifications. This means we can't block
489	 * here for too long, otherwise CPUFreq's core will complain with a
490	 * warning splat.
491	 */
492	delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
493	schedule_delayed_work(&tegra->cpufreq_update_work, delay);
494
495	return NOTIFY_OK;
496}
497
498static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
499					  struct tegra_devfreq_device *dev)
500{
501	u32 val = 0;
502
503	/* reset boosting on governor's restart */
504	dev->boost_freq = 0;
505
506	dev->target_freq = tegra->cur_freq;
507
508	dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
509	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
510
511	tegra_devfreq_update_avg_wmark(tegra, dev);
512	tegra_devfreq_update_wmark(tegra, dev);
513
514	device_writel(dev, tegra->soc->count_weight, ACTMON_DEV_COUNT_WEIGHT);
515	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
516
517	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
518	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
519		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
520	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
521		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
522	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
523		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
524	val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
525	val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
526	val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
527	val |= ACTMON_DEV_CTRL_ENB;
528
529	device_writel(dev, val, ACTMON_DEV_CTRL);
530}
531
532static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
533{
534	struct tegra_devfreq_device *dev = tegra->devices;
535	unsigned int i;
536
537	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
538		device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
539		device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
540			      ACTMON_DEV_INTR_STATUS);
541	}
542}
543
544static int tegra_actmon_resume(struct tegra_devfreq *tegra)
545{
546	unsigned int i;
547	int err;
548
549	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
550		return 0;
551
552	actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
553		      ACTMON_GLB_PERIOD_CTRL);
554
555	/*
556	 * CLK notifications are needed in order to reconfigure the upper
557	 * consecutive watermark in accordance to the actual clock rate
558	 * to avoid unnecessary upper interrupts.
559	 */
560	err = clk_notifier_register(tegra->emc_clock,
561				    &tegra->clk_rate_change_nb);
562	if (err) {
563		dev_err(tegra->devfreq->dev.parent,
564			"Failed to register rate change notifier\n");
565		return err;
566	}
567
568	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
569
570	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
571		tegra_actmon_configure_device(tegra, &tegra->devices[i]);
572
573	/*
574	 * We are estimating CPU's memory bandwidth requirement based on
575	 * amount of memory accesses and system's load, judging by CPU's
576	 * frequency. We also don't want to receive events about CPU's
577	 * frequency transaction when governor is stopped, hence notifier
578	 * is registered dynamically.
579	 */
580	err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
581					CPUFREQ_TRANSITION_NOTIFIER);
582	if (err) {
583		dev_err(tegra->devfreq->dev.parent,
584			"Failed to register rate change notifier: %d\n", err);
585		goto err_stop;
586	}
587
588	enable_irq(tegra->irq);
589
590	return 0;
591
592err_stop:
593	tegra_actmon_stop_devices(tegra);
594
595	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
596
597	return err;
598}
599
600static int tegra_actmon_start(struct tegra_devfreq *tegra)
601{
602	int ret = 0;
603
604	if (!tegra->started) {
605		tegra->started = true;
606
607		ret = tegra_actmon_resume(tegra);
608		if (ret)
609			tegra->started = false;
610	}
611
612	return ret;
613}
614
615static void tegra_actmon_pause(struct tegra_devfreq *tegra)
616{
617	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
618		return;
619
620	disable_irq(tegra->irq);
621
622	cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
623				    CPUFREQ_TRANSITION_NOTIFIER);
624
625	cancel_delayed_work_sync(&tegra->cpufreq_update_work);
626
627	tegra_actmon_stop_devices(tegra);
628
629	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
630}
631
632static void tegra_actmon_stop(struct tegra_devfreq *tegra)
633{
634	tegra_actmon_pause(tegra);
635	tegra->started = false;
636}
637
638static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
639				u32 flags)
640{
641	struct dev_pm_opp *opp;
642	int ret;
643
644	opp = devfreq_recommended_opp(dev, freq, flags);
645	if (IS_ERR(opp)) {
646		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
647		return PTR_ERR(opp);
648	}
649
650	ret = dev_pm_opp_set_opp(dev, opp);
651	dev_pm_opp_put(opp);
652
653	return ret;
654}
655
656static int tegra_devfreq_get_dev_status(struct device *dev,
657					struct devfreq_dev_status *stat)
658{
659	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
660	struct tegra_devfreq_device *actmon_dev;
661	unsigned long cur_freq;
662
663	cur_freq = READ_ONCE(tegra->cur_freq);
664
665	/* To be used by the tegra governor */
666	stat->private_data = tegra;
667
668	/* The below are to be used by the other governors */
669	stat->current_frequency = cur_freq * KHZ;
670
671	actmon_dev = &tegra->devices[MCALL];
672
673	/* Number of cycles spent on memory access */
674	stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
675
676	/* The bus can be considered to be saturated way before 100% */
677	stat->busy_time *= 100 / BUS_SATURATION_RATIO;
678
679	/* Number of cycles in a sampling period */
680	stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;
681
682	stat->busy_time = min(stat->busy_time, stat->total_time);
683
684	return 0;
685}
686
687static struct devfreq_dev_profile tegra_devfreq_profile = {
688	.polling_ms	= ACTMON_SAMPLING_PERIOD,
689	.target		= tegra_devfreq_target,
690	.get_dev_status	= tegra_devfreq_get_dev_status,
691	.is_cooling_device = true,
692};
693
694static int tegra_governor_get_target(struct devfreq *devfreq,
695				     unsigned long *freq)
696{
697	struct devfreq_dev_status *stat;
698	struct tegra_devfreq *tegra;
699	struct tegra_devfreq_device *dev;
700	unsigned long target_freq = 0;
701	unsigned int i;
702	int err;
703
704	err = devfreq_update_stats(devfreq);
705	if (err)
706		return err;
707
708	stat = &devfreq->last_status;
709
710	tegra = stat->private_data;
711
712	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
713		dev = &tegra->devices[i];
714
715		actmon_update_target(tegra, dev);
716
717		target_freq = max(target_freq, dev->target_freq);
718	}
719
720	/*
721	 * tegra-devfreq driver operates with KHz units, while OPP table
722	 * entries use Hz units. Hence we need to convert the units for the
723	 * devfreq core.
724	 */
725	*freq = target_freq * KHZ;
726
727	return 0;
728}
729
730static int tegra_governor_event_handler(struct devfreq *devfreq,
731					unsigned int event, void *data)
732{
733	struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
734	unsigned int *new_delay = data;
735	int ret = 0;
736
737	/*
738	 * Couple devfreq-device with the governor early because it is
739	 * needed at the moment of governor's start (used by ISR).
740	 */
741	tegra->devfreq = devfreq;
742
743	switch (event) {
744	case DEVFREQ_GOV_START:
745		devfreq_monitor_start(devfreq);
746		ret = tegra_actmon_start(tegra);
747		break;
748
749	case DEVFREQ_GOV_STOP:
750		tegra_actmon_stop(tegra);
751		devfreq_monitor_stop(devfreq);
752		break;
753
754	case DEVFREQ_GOV_UPDATE_INTERVAL:
755		/*
756		 * ACTMON hardware supports up to 256 milliseconds for the
757		 * sampling period.
758		 */
759		if (*new_delay > 256) {
760			ret = -EINVAL;
761			break;
762		}
763
764		tegra_actmon_pause(tegra);
765		devfreq_update_interval(devfreq, new_delay);
766		ret = tegra_actmon_resume(tegra);
767		break;
768
769	case DEVFREQ_GOV_SUSPEND:
770		tegra_actmon_stop(tegra);
771		devfreq_monitor_suspend(devfreq);
772		break;
773
774	case DEVFREQ_GOV_RESUME:
775		devfreq_monitor_resume(devfreq);
776		ret = tegra_actmon_start(tegra);
777		break;
778	}
779
780	return ret;
781}
782
783static struct devfreq_governor tegra_devfreq_governor = {
784	.name = "tegra_actmon",
785	.attrs = DEVFREQ_GOV_ATTR_POLLING_INTERVAL,
786	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE
787		| DEVFREQ_GOV_FLAG_IRQ_DRIVEN,
788	.get_target_freq = tegra_governor_get_target,
789	.event_handler = tegra_governor_event_handler,
 
790};
791
792static int tegra_devfreq_probe(struct platform_device *pdev)
793{
794	u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
795	struct tegra_devfreq_device *dev;
796	struct tegra_devfreq *tegra;
797	struct devfreq *devfreq;
798	unsigned int i;
799	long rate;
800	int err;
801
802	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
803	if (!tegra)
804		return -ENOMEM;
805
806	tegra->soc = of_device_get_match_data(&pdev->dev);
807
808	tegra->regs = devm_platform_ioremap_resource(pdev, 0);
809	if (IS_ERR(tegra->regs))
810		return PTR_ERR(tegra->regs);
811
812	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
813	if (IS_ERR(tegra->reset)) {
814		dev_err(&pdev->dev, "Failed to get reset\n");
815		return PTR_ERR(tegra->reset);
816	}
817
818	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
819	if (IS_ERR(tegra->clock)) {
820		dev_err(&pdev->dev, "Failed to get actmon clock\n");
821		return PTR_ERR(tegra->clock);
822	}
823
824	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
825	if (IS_ERR(tegra->emc_clock))
826		return dev_err_probe(&pdev->dev, PTR_ERR(tegra->emc_clock),
827				     "Failed to get emc clock\n");
828
829	err = platform_get_irq(pdev, 0);
830	if (err < 0)
831		return err;
832
833	tegra->irq = err;
834
835	irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);
836
837	err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
838					actmon_thread_isr, IRQF_ONESHOT,
839					"tegra-devfreq", tegra);
840	if (err) {
841		dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
842		return err;
843	}
844
845	tegra->opp_table = dev_pm_opp_set_supported_hw(&pdev->dev,
846						       &hw_version, 1);
847	err = PTR_ERR_OR_ZERO(tegra->opp_table);
848	if (err) {
849		dev_err(&pdev->dev, "Failed to set supported HW: %d\n", err);
850		return err;
851	}
852
853	err = dev_pm_opp_of_add_table_noclk(&pdev->dev, 0);
854	if (err) {
855		dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
856		goto put_hw;
857	}
858
859	err = clk_prepare_enable(tegra->clock);
860	if (err) {
861		dev_err(&pdev->dev,
862			"Failed to prepare and enable ACTMON clock\n");
863		goto remove_table;
864	}
865
866	err = reset_control_reset(tegra->reset);
867	if (err) {
868		dev_err(&pdev->dev, "Failed to reset hardware: %d\n", err);
869		goto disable_clk;
870	}
871
872	rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
873	if (rate < 0) {
874		dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
875		err = rate;
876		goto disable_clk;
877	}
878
879	tegra->max_freq = rate / KHZ;
880
881	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
882		dev = tegra->devices + i;
883		dev->config = tegra->soc->configs + i;
884		dev->regs = tegra->regs + dev->config->offset;
885	}
886
887	platform_set_drvdata(pdev, tegra);
888
889	tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
890	tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;
891
892	INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
893			  tegra_actmon_delayed_update);
894
895	err = devfreq_add_governor(&tegra_devfreq_governor);
896	if (err) {
897		dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
898		goto remove_opps;
899	}
900
901	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
902
903	devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
904				     "tegra_actmon", NULL);
905	if (IS_ERR(devfreq)) {
906		err = PTR_ERR(devfreq);
907		goto remove_governor;
908	}
909
910	return 0;
911
912remove_governor:
913	devfreq_remove_governor(&tegra_devfreq_governor);
914
915remove_opps:
916	dev_pm_opp_remove_all_dynamic(&pdev->dev);
917
918	reset_control_reset(tegra->reset);
919disable_clk:
920	clk_disable_unprepare(tegra->clock);
921remove_table:
922	dev_pm_opp_of_remove_table(&pdev->dev);
923put_hw:
924	dev_pm_opp_put_supported_hw(tegra->opp_table);
925
926	return err;
927}
928
929static int tegra_devfreq_remove(struct platform_device *pdev)
930{
931	struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
932
933	devfreq_remove_device(tegra->devfreq);
934	devfreq_remove_governor(&tegra_devfreq_governor);
935
936	reset_control_reset(tegra->reset);
937	clk_disable_unprepare(tegra->clock);
938
939	dev_pm_opp_of_remove_table(&pdev->dev);
940	dev_pm_opp_put_supported_hw(tegra->opp_table);
941
942	return 0;
943}
944
945static const struct tegra_devfreq_soc_data tegra124_soc = {
946	.configs = tegra124_device_configs,
947
948	/*
949	 * Activity counter is incremented every 256 memory transactions,
950	 * and each transaction takes 4 EMC clocks.
951	 */
952	.count_weight = 4 * 256,
953};
954
955static const struct tegra_devfreq_soc_data tegra30_soc = {
956	.configs = tegra30_device_configs,
957	.count_weight = 2 * 256,
958};
959
960static const struct of_device_id tegra_devfreq_of_match[] = {
961	{ .compatible = "nvidia,tegra30-actmon",  .data = &tegra30_soc, },
962	{ .compatible = "nvidia,tegra124-actmon", .data = &tegra124_soc, },
963	{ },
964};
965
966MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
967
968static struct platform_driver tegra_devfreq_driver = {
969	.probe	= tegra_devfreq_probe,
970	.remove	= tegra_devfreq_remove,
971	.driver = {
972		.name = "tegra-devfreq",
973		.of_match_table = tegra_devfreq_of_match,
974	},
975};
976module_platform_driver(tegra_devfreq_driver);
977
978MODULE_LICENSE("GPL v2");
979MODULE_DESCRIPTION("Tegra devfreq driver");
980MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");