v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * A devfreq driver for NVIDIA Tegra SoCs
  4 *
  5 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
  6 * Copyright (C) 2014 Google, Inc
  7 */
  8
  9#include <linux/clk.h>
 10#include <linux/cpufreq.h>
 11#include <linux/devfreq.h>
 12#include <linux/interrupt.h>
 13#include <linux/io.h>
 14#include <linux/irq.h>
 15#include <linux/module.h>
 16#include <linux/of_device.h>
 17#include <linux/platform_device.h>
 18#include <linux/pm_opp.h>
 19#include <linux/reset.h>
 20#include <linux/workqueue.h>
 21
 22#include "governor.h"
 23
 24#define ACTMON_GLB_STATUS					0x0
 25#define ACTMON_GLB_PERIOD_CTRL					0x4
 26
 27#define ACTMON_DEV_CTRL						0x0
 28#define ACTMON_DEV_CTRL_K_VAL_SHIFT				10
 29#define ACTMON_DEV_CTRL_ENB_PERIODIC				BIT(18)
 30#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN			BIT(20)
 31#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN			BIT(21)
 32#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT	23
 33#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT	26
 34#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN		BIT(29)
 35#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN		BIT(30)
 36#define ACTMON_DEV_CTRL_ENB					BIT(31)
 37
 38#define ACTMON_DEV_CTRL_STOP					0x00000000
 39
 40#define ACTMON_DEV_UPPER_WMARK					0x4
 41#define ACTMON_DEV_LOWER_WMARK					0x8
 42#define ACTMON_DEV_INIT_AVG					0xc
 43#define ACTMON_DEV_AVG_UPPER_WMARK				0x10
 44#define ACTMON_DEV_AVG_LOWER_WMARK				0x14
 45#define ACTMON_DEV_COUNT_WEIGHT					0x18
 46#define ACTMON_DEV_AVG_COUNT					0x20
 47#define ACTMON_DEV_INTR_STATUS					0x24
 48
 49#define ACTMON_INTR_STATUS_CLEAR				0xffffffff
 50
 51#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER			BIT(31)
 52#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER			BIT(30)
 53
 54#define ACTMON_ABOVE_WMARK_WINDOW				1
 55#define ACTMON_BELOW_WMARK_WINDOW				3
 56#define ACTMON_BOOST_FREQ_STEP					16000
 57
 58/*
 59 * Activity counter is incremented every 256 memory transactions, and each
 60 * transaction takes 4 EMC clocks for Tegra124, so the COUNT_WEIGHT is
 61 * 4 * 256 = 1024.
 62 */
 63#define ACTMON_COUNT_WEIGHT					0x400
 64
 65/*
 66 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
 67 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
 68 */
 69#define ACTMON_AVERAGE_WINDOW_LOG2			6
 70#define ACTMON_SAMPLING_PERIOD				12 /* ms */
 71#define ACTMON_DEFAULT_AVG_BAND				6  /* 1/10 of % */
 72
 73#define KHZ							1000
 74
 75#define KHZ_MAX						(ULONG_MAX / KHZ)
 76
 77/* Assume that the bus is saturated if the utilization is 25% */
 78#define BUS_SATURATION_RATIO					25
 79
 80/**
 81 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
 82 * device
 83 *
 84 * Coefficients and thresholds are percentages unless otherwise noted
 85 */
 86struct tegra_devfreq_device_config {
 87	u32		offset;
 88	u32		irq_mask;
 89
 90	/* Factors applied to boost_freq every consecutive watermark breach */
 91	unsigned int	boost_up_coeff;
 92	unsigned int	boost_down_coeff;
 93
 94	/* Define the watermark bounds when applied to the current avg */
 95	unsigned int	boost_up_threshold;
 96	unsigned int	boost_down_threshold;
 97
 98	/*
 99	 * Threshold of activity (cycles translated to kHz) below which the
100	 * CPU frequency isn't to be taken into account. This is to avoid
101	 * increasing the EMC frequency when the CPU is very busy but not
102	 * accessing the bus often.
103	 */
104	u32		avg_dependency_threshold;
105};
106
107enum tegra_actmon_device {
108	MCALL = 0,
109	MCCPU,
110};
111
112static const struct tegra_devfreq_device_config actmon_device_configs[] = {
113	{
114		/* MCALL: All memory accesses (including from the CPUs) */
115		.offset = 0x1c0,
116		.irq_mask = 1 << 26,
117		.boost_up_coeff = 200,
118		.boost_down_coeff = 50,
119		.boost_up_threshold = 60,
120		.boost_down_threshold = 40,
121	},
122	{
123		/* MCCPU: memory accesses from the CPUs */
124		.offset = 0x200,
125		.irq_mask = 1 << 25,
126		.boost_up_coeff = 800,
127		.boost_down_coeff = 40,
128		.boost_up_threshold = 27,
129		.boost_down_threshold = 10,
130		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
131	},
132};
133
134/**
135 * struct tegra_devfreq_device - state specific to an ACTMON device
136 *
137 * Frequencies are in kHz.
138 */
139struct tegra_devfreq_device {
140	const struct tegra_devfreq_device_config *config;
141	void __iomem *regs;
142
143	/* Average event count sampled in the last interrupt */
144	u32 avg_count;
145
146	/*
147	 * Extra frequency to increase the target by due to consecutive
148	 * watermark breaches.
149	 */
150	unsigned long boost_freq;
151
152	/* Optimal frequency calculated from the stats for this device */
153	unsigned long target_freq;
154};
155
156struct tegra_devfreq {
157	struct devfreq		*devfreq;
158
159	struct reset_control	*reset;
160	struct clk		*clock;
161	void __iomem		*regs;
162
163	struct clk		*emc_clock;
164	unsigned long		max_freq;
165	unsigned long		cur_freq;
166	struct notifier_block	clk_rate_change_nb;
167
168	struct delayed_work	cpufreq_update_work;
169	struct notifier_block	cpu_rate_change_nb;
170
171	struct tegra_devfreq_device devices[ARRAY_SIZE(actmon_device_configs)];
172
173	unsigned int		irq;
174
175	bool			started;
176};
177
178struct tegra_actmon_emc_ratio {
179	unsigned long cpu_freq;
180	unsigned long emc_freq;
181};
182
183static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
184	{ 1400000,    KHZ_MAX },
185	{ 1200000,    750000 },
186	{ 1100000,    600000 },
187	{ 1000000,    500000 },
188	{  800000,    375000 },
189	{  500000,    200000 },
190	{  250000,    100000 },
191};
192
193static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
194{
195	return readl_relaxed(tegra->regs + offset);
196}
197
198static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
199{
200	writel_relaxed(val, tegra->regs + offset);
201}
202
203static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
204{
205	return readl_relaxed(dev->regs + offset);
206}
207
208static void device_writel(struct tegra_devfreq_device *dev, u32 val,
209			  u32 offset)
210{
211	writel_relaxed(val, dev->regs + offset);
212}
213
214static unsigned long do_percent(unsigned long long val, unsigned int pct)
215{
216	val = val * pct;
217	do_div(val, 100);
218
219	/*
220	 * A high frequency, high boost percentage and long polling interval
221	 * can result in an integer overflow when the watermarks are calculated.
222	 */
223	return min_t(u64, val, U32_MAX);
224}
225
226static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
227					   struct tegra_devfreq_device *dev)
228{
229	u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
230	u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
231	u32 avg;
232
233	avg = min(dev->avg_count, U32_MAX - band);
234	device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
235
236	avg = max(dev->avg_count, band);
237	device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
238}
239
240static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
241				       struct tegra_devfreq_device *dev)
242{
243	u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
244
245	device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
246		      ACTMON_DEV_UPPER_WMARK);
247
248	device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
249		      ACTMON_DEV_LOWER_WMARK);
250}
251
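/*
 * Note on the boost handling below: a CONSECUTIVE_UPPER interrupt grows
 * boost_freq geometrically (by boost_up_coeff percent) plus a fixed
 * ACTMON_BOOST_FREQ_STEP, clamped to max_freq; a CONSECUTIVE_LOWER
 * interrupt shrinks it by boost_down_coeff percent and drops it to zero
 * once it falls below half a step.
 */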
252static void actmon_isr_device(struct tegra_devfreq *tegra,
253			      struct tegra_devfreq_device *dev)
254{
255	u32 intr_status, dev_ctrl;
256
257	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
258	tegra_devfreq_update_avg_wmark(tegra, dev);
259
260	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
261	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);
262
263	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
264		/*
265		 * new_boost = min(old_boost * up_coef + step, max_freq)
266		 */
267		dev->boost_freq = do_percent(dev->boost_freq,
268					     dev->config->boost_up_coeff);
269		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
270
271		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
272
273		if (dev->boost_freq >= tegra->max_freq) {
274			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
275			dev->boost_freq = tegra->max_freq;
276		}
277	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
278		/*
279		 * new_boost = old_boost * down_coef
280		 * or 0 if (old_boost * down_coef < step / 2)
281		 */
282		dev->boost_freq = do_percent(dev->boost_freq,
283					     dev->config->boost_down_coeff);
284
285		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
286
287		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
288			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
289			dev->boost_freq = 0;
290		}
291	}
292
293	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
294
295	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
296}
297
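/*
 * Note: actmon_emc_ratios is sorted by descending CPU frequency, so the
 * first entry that the current CPU rate reaches gives the static EMC
 * floor; below the lowest entry no floor is requested and 0 is returned.
 */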
298static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
299					    unsigned long cpu_freq)
300{
301	unsigned int i;
302	const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
303
304	for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
305		if (cpu_freq >= ratio->cpu_freq) {
306			if (ratio->emc_freq >= tegra->max_freq)
307				return tegra->max_freq;
308			else
309				return ratio->emc_freq;
310		}
311	}
312
313	return 0;
314}
315
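/*
 * Note: avg_count holds activity counts accumulated over one sampling
 * window, so dividing by polling_ms yields a rate in kHz;
 * avg_sustain_coef then scales it up so that the resulting frequency
 * keeps the bus at boost_up_threshold percent utilization.
 */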
316static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
317					       struct tegra_devfreq_device *dev)
318{
319	unsigned int avg_sustain_coef;
320	unsigned long target_freq;
321
322	target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
323	avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
324	target_freq = do_percent(target_freq, avg_sustain_coef);
325
326	return target_freq;
327}
328
329static void actmon_update_target(struct tegra_devfreq *tegra,
330				 struct tegra_devfreq_device *dev)
331{
332	unsigned long cpu_freq = 0;
333	unsigned long static_cpu_emc_freq = 0;
334
335	dev->target_freq = actmon_device_target_freq(tegra, dev);
336
337	if (dev->config->avg_dependency_threshold &&
338	    dev->config->avg_dependency_threshold <= dev->target_freq) {
339		cpu_freq = cpufreq_quick_get(0);
340		static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
341
342		dev->target_freq += dev->boost_freq;
343		dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
344	} else {
345		dev->target_freq += dev->boost_freq;
346	}
347}
348
349static irqreturn_t actmon_thread_isr(int irq, void *data)
350{
351	struct tegra_devfreq *tegra = data;
352	bool handled = false;
353	unsigned int i;
354	u32 val;
355
356	mutex_lock(&tegra->devfreq->lock);
357
358	val = actmon_readl(tegra, ACTMON_GLB_STATUS);
359	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
360		if (val & tegra->devices[i].config->irq_mask) {
361			actmon_isr_device(tegra, tegra->devices + i);
362			handled = true;
363		}
364	}
365
366	if (handled)
367		update_devfreq(tegra->devfreq);
368
369	mutex_unlock(&tegra->devfreq->lock);
370
371	return handled ? IRQ_HANDLED : IRQ_NONE;
372}
373
374static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
375				      unsigned long action, void *ptr)
376{
377	struct clk_notifier_data *data = ptr;
378	struct tegra_devfreq *tegra;
379	struct tegra_devfreq_device *dev;
380	unsigned int i;
381
382	if (action != POST_RATE_CHANGE)
383		return NOTIFY_OK;
384
385	tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);
386
387	tegra->cur_freq = data->new_rate / KHZ;
388
389	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
390		dev = &tegra->devices[i];
391
392		tegra_devfreq_update_wmark(tegra, dev);
393	}
394
395	return NOTIFY_OK;
396}
397
398static void tegra_actmon_delayed_update(struct work_struct *work)
399{
400	struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
401						   cpufreq_update_work.work);
402
403	mutex_lock(&tegra->devfreq->lock);
404	update_devfreq(tegra->devfreq);
405	mutex_unlock(&tegra->devfreq->lock);
406}
407
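/*
 * Note: this returns the EMC floor implied by the given CPU frequency,
 * or 0 when the MCCPU activity is below avg_dependency_threshold or the
 * monitored activity (plus boost) already covers that floor.
 */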
408static unsigned long
409tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
410				  unsigned int cpu_freq)
411{
412	struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
413	unsigned long static_cpu_emc_freq, dev_freq;
414
415	dev_freq = actmon_device_target_freq(tegra, actmon_dev);
416
417	/* check whether CPU's freq is taken into account at all */
418	if (dev_freq < actmon_dev->config->avg_dependency_threshold)
419		return 0;
420
421	static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
422
423	if (dev_freq + actmon_dev->boost_freq >= static_cpu_emc_freq)
424		return 0;
425
426	return static_cpu_emc_freq;
427}
428
429static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
430				      unsigned long action, void *ptr)
431{
432	struct cpufreq_freqs *freqs = ptr;
433	struct tegra_devfreq *tegra;
434	unsigned long old, new, delay;
435
436	if (action != CPUFREQ_POSTCHANGE)
437		return NOTIFY_OK;
438
439	tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);
440
441	/*
442	 * Quickly check whether CPU frequency should be taken into account
443	 * at all, without blocking CPUFreq's core.
444	 */
445	if (mutex_trylock(&tegra->devfreq->lock)) {
446		old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
447		new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
448		mutex_unlock(&tegra->devfreq->lock);
449
450		/*
451		 * If CPU's frequency shouldn't be taken into account at
452		 * the moment, then there is no need to update the devfreq's
453		 * state because ISR will re-check CPU's frequency on the
454		 * next interrupt.
455		 */
456		if (old == new)
457			return NOTIFY_OK;
458	}
459
460	/*
461	 * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
462	 * to allow asynchronous notifications. This means we can't block
463	 * here for too long, otherwise CPUFreq's core will complain with a
464	 * warning splat.
465	 */
466	delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
467	schedule_delayed_work(&tegra->cpufreq_update_work, delay);
468
469	return NOTIFY_OK;
470}
471
472static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
473					  struct tegra_devfreq_device *dev)
474{
475	u32 val = 0;
476
477	/* reset boosting on governor's restart */
478	dev->boost_freq = 0;
479
480	dev->target_freq = tegra->cur_freq;
481
482	dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
483	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
484
485	tegra_devfreq_update_avg_wmark(tegra, dev);
486	tegra_devfreq_update_wmark(tegra, dev);
487
488	device_writel(dev, ACTMON_COUNT_WEIGHT, ACTMON_DEV_COUNT_WEIGHT);
489	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
490
491	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
492	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
493		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
494	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
495		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
496	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
497		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
498	val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
499	val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
500	val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
501	val |= ACTMON_DEV_CTRL_ENB;
502
503	device_writel(dev, val, ACTMON_DEV_CTRL);
504}
505
506static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
507{
508	struct tegra_devfreq_device *dev = tegra->devices;
509	unsigned int i;
510
511	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
512		device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
513		device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
514			      ACTMON_DEV_INTR_STATUS);
515	}
516}
517
518static int tegra_actmon_resume(struct tegra_devfreq *tegra)
519{
520	unsigned int i;
521	int err;
522
523	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
524		return 0;
525
526	actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
527		      ACTMON_GLB_PERIOD_CTRL);
528
529	/*
530	 * CLK notifications are needed in order to reconfigure the upper
531	 * consecutive watermark in accordance with the actual clock rate
532	 * to avoid unnecessary upper interrupts.
533	 */
534	err = clk_notifier_register(tegra->emc_clock,
535				    &tegra->clk_rate_change_nb);
536	if (err) {
537		dev_err(tegra->devfreq->dev.parent,
538			"Failed to register rate change notifier\n");
539		return err;
540	}
541
542	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
543
544	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
545		tegra_actmon_configure_device(tegra, &tegra->devices[i]);
546
547	/*
548	 * We are estimating the CPU's memory bandwidth requirement based on
549	 * the amount of memory accesses and the system's load, judging by the
550	 * CPU's frequency. We also don't want to receive events about CPU
551	 * frequency transitions when the governor is stopped, hence the
552	 * notifier is registered dynamically.
553	 */
554	err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
555					CPUFREQ_TRANSITION_NOTIFIER);
556	if (err) {
557		dev_err(tegra->devfreq->dev.parent,
558			"Failed to register rate change notifier: %d\n", err);
559		goto err_stop;
560	}
561
562	enable_irq(tegra->irq);
563
564	return 0;
565
566err_stop:
567	tegra_actmon_stop_devices(tegra);
568
569	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
570
571	return err;
572}
573
574static int tegra_actmon_start(struct tegra_devfreq *tegra)
575{
576	int ret = 0;
577
578	if (!tegra->started) {
579		tegra->started = true;
580
581		ret = tegra_actmon_resume(tegra);
582		if (ret)
583			tegra->started = false;
584	}
585
586	return ret;
587}
588
589static void tegra_actmon_pause(struct tegra_devfreq *tegra)
590{
591	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
592		return;
593
594	disable_irq(tegra->irq);
595
596	cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
597				    CPUFREQ_TRANSITION_NOTIFIER);
598
599	cancel_delayed_work_sync(&tegra->cpufreq_update_work);
600
601	tegra_actmon_stop_devices(tegra);
602
603	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
604}
605
606static void tegra_actmon_stop(struct tegra_devfreq *tegra)
607{
608	tegra_actmon_pause(tegra);
609	tegra->started = false;
610}
611
612static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
613				u32 flags)
614{
615	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
616	struct devfreq *devfreq = tegra->devfreq;
617	struct dev_pm_opp *opp;
618	unsigned long rate;
619	int err;
620
621	opp = devfreq_recommended_opp(dev, freq, flags);
622	if (IS_ERR(opp)) {
623		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
624		return PTR_ERR(opp);
625	}
626	rate = dev_pm_opp_get_freq(opp);
627	dev_pm_opp_put(opp);
628
629	err = clk_set_min_rate(tegra->emc_clock, rate * KHZ);
630	if (err)
631		return err;
632
633	err = clk_set_rate(tegra->emc_clock, 0);
634	if (err)
635		goto restore_min_rate;
636
637	return 0;
638
639restore_min_rate:
640	clk_set_min_rate(tegra->emc_clock, devfreq->previous_freq);
641
642	return err;
643}
644
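/*
 * Note: busy_time and total_time are reported in ACTMON count units
 * rather than in time units; only their ratio matters to the generic
 * governors, and busy_time is pre-scaled by 100 / BUS_SATURATION_RATIO
 * so that 25% raw utilization already reads as fully busy.
 */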
645static int tegra_devfreq_get_dev_status(struct device *dev,
646					struct devfreq_dev_status *stat)
647{
648	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
649	struct tegra_devfreq_device *actmon_dev;
650	unsigned long cur_freq;
651
652	cur_freq = READ_ONCE(tegra->cur_freq);
653
654	/* To be used by the tegra governor */
655	stat->private_data = tegra;
656
657	/* The below are to be used by the other governors */
658	stat->current_frequency = cur_freq;
659
660	actmon_dev = &tegra->devices[MCALL];
661
662	/* Number of cycles spent on memory access */
663	stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
664
665	/* The bus can be considered to be saturated way before 100% */
666	stat->busy_time *= 100 / BUS_SATURATION_RATIO;
667
668	/* Number of cycles in a sampling period */
669	stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;
670
671	stat->busy_time = min(stat->busy_time, stat->total_time);
672
673	return 0;
674}
675
676static struct devfreq_dev_profile tegra_devfreq_profile = {
677	.polling_ms	= ACTMON_SAMPLING_PERIOD,
678	.target		= tegra_devfreq_target,
679	.get_dev_status	= tegra_devfreq_get_dev_status,
680};
681
682static int tegra_governor_get_target(struct devfreq *devfreq,
683				     unsigned long *freq)
684{
685	struct devfreq_dev_status *stat;
686	struct tegra_devfreq *tegra;
687	struct tegra_devfreq_device *dev;
688	unsigned long target_freq = 0;
689	unsigned int i;
690	int err;
691
692	err = devfreq_update_stats(devfreq);
693	if (err)
694		return err;
695
696	stat = &devfreq->last_status;
697
698	tegra = stat->private_data;
699
700	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
701		dev = &tegra->devices[i];
702
703		actmon_update_target(tegra, dev);
704
705		target_freq = max(target_freq, dev->target_freq);
706	}
707
708	*freq = target_freq;
709
710	return 0;
711}
712
713static int tegra_governor_event_handler(struct devfreq *devfreq,
714					unsigned int event, void *data)
715{
716	struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
717	unsigned int *new_delay = data;
718	int ret = 0;
719
720	/*
721	 * Couple devfreq-device with the governor early because it is
722	 * needed at the moment of governor's start (used by ISR).
723	 */
724	tegra->devfreq = devfreq;
725
726	switch (event) {
727	case DEVFREQ_GOV_START:
728		devfreq_monitor_start(devfreq);
729		ret = tegra_actmon_start(tegra);
730		break;
731
732	case DEVFREQ_GOV_STOP:
733		tegra_actmon_stop(tegra);
734		devfreq_monitor_stop(devfreq);
735		break;
736
737	case DEVFREQ_GOV_UPDATE_INTERVAL:
738		/*
739		 * ACTMON hardware supports up to 256 milliseconds for the
740		 * sampling period.
741		 */
742		if (*new_delay > 256) {
743			ret = -EINVAL;
744			break;
745		}
746
747		tegra_actmon_pause(tegra);
748		devfreq_update_interval(devfreq, new_delay);
749		ret = tegra_actmon_resume(tegra);
750		break;
751
752	case DEVFREQ_GOV_SUSPEND:
753		tegra_actmon_stop(tegra);
754		devfreq_monitor_suspend(devfreq);
755		break;
756
757	case DEVFREQ_GOV_RESUME:
758		devfreq_monitor_resume(devfreq);
759		ret = tegra_actmon_start(tegra);
760		break;
761	}
762
763	return ret;
764}
765
766static struct devfreq_governor tegra_devfreq_governor = {
767	.name = "tegra_actmon",
768	.get_target_freq = tegra_governor_get_target,
769	.event_handler = tegra_governor_event_handler,
770	.immutable = true,
771	.interrupt_driven = true,
772};
773
774static int tegra_devfreq_probe(struct platform_device *pdev)
775{
776	struct tegra_devfreq_device *dev;
777	struct tegra_devfreq *tegra;
778	struct devfreq *devfreq;
779	unsigned int i;
780	long rate;
781	int err;
782
783	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
784	if (!tegra)
785		return -ENOMEM;
786
787	tegra->regs = devm_platform_ioremap_resource(pdev, 0);
788	if (IS_ERR(tegra->regs))
789		return PTR_ERR(tegra->regs);
790
791	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
792	if (IS_ERR(tegra->reset)) {
793		dev_err(&pdev->dev, "Failed to get reset\n");
794		return PTR_ERR(tegra->reset);
795	}
796
797	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
798	if (IS_ERR(tegra->clock)) {
799		dev_err(&pdev->dev, "Failed to get actmon clock\n");
800		return PTR_ERR(tegra->clock);
801	}
802
803	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
804	if (IS_ERR(tegra->emc_clock)) {
805		dev_err(&pdev->dev, "Failed to get emc clock\n");
806		return PTR_ERR(tegra->emc_clock);
807	}
808
809	err = platform_get_irq(pdev, 0);
810	if (err < 0)
811		return err;
812
813	tegra->irq = err;
814
815	irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);
816
817	err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
818					actmon_thread_isr, IRQF_ONESHOT,
819					"tegra-devfreq", tegra);
820	if (err) {
821		dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
822		return err;
823	}
824
825	reset_control_assert(tegra->reset);
826
827	err = clk_prepare_enable(tegra->clock);
828	if (err) {
829		dev_err(&pdev->dev,
830			"Failed to prepare and enable ACTMON clock\n");
831		return err;
832	}
833
834	reset_control_deassert(tegra->reset);
835
836	rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
837	if (rate < 0) {
838		dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
839		err = rate;
840		goto disable_clk;
841	}
842
843	tegra->max_freq = rate / KHZ;
844
845	for (i = 0; i < ARRAY_SIZE(actmon_device_configs); i++) {
846		dev = tegra->devices + i;
847		dev->config = actmon_device_configs + i;
848		dev->regs = tegra->regs + dev->config->offset;
849	}
850
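	/*
	 * Note: the loop below walks every rate the EMC clock can provide,
	 * relying on clk_round_rate() returning the next supported rate at
	 * or above the request, and registers each one as a dynamic OPP in
	 * kHz units.
	 */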
851	for (rate = 0; rate <= tegra->max_freq * KHZ; rate++) {
852		rate = clk_round_rate(tegra->emc_clock, rate);
853
854		if (rate < 0) {
855			dev_err(&pdev->dev,
856				"Failed to round clock rate: %ld\n", rate);
857			err = rate;
858			goto remove_opps;
859		}
860
861		err = dev_pm_opp_add(&pdev->dev, rate / KHZ, 0);
862		if (err) {
863			dev_err(&pdev->dev, "Failed to add OPP: %d\n", err);
864			goto remove_opps;
865		}
866	}
867
868	platform_set_drvdata(pdev, tegra);
869
870	tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
871	tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;
872
873	INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
874			  tegra_actmon_delayed_update);
875
876	err = devfreq_add_governor(&tegra_devfreq_governor);
877	if (err) {
878		dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
879		goto remove_opps;
880	}
881
882	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
883	tegra_devfreq_profile.initial_freq /= KHZ;
884
885	devfreq = devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
886				     "tegra_actmon", NULL);
887	if (IS_ERR(devfreq)) {
888		err = PTR_ERR(devfreq);
889		goto remove_governor;
890	}
891
892	return 0;
893
894remove_governor:
895	devfreq_remove_governor(&tegra_devfreq_governor);
896
897remove_opps:
898	dev_pm_opp_remove_all_dynamic(&pdev->dev);
899
900	reset_control_reset(tegra->reset);
901disable_clk:
902	clk_disable_unprepare(tegra->clock);
903
904	return err;
905}
906
907static int tegra_devfreq_remove(struct platform_device *pdev)
908{
909	struct tegra_devfreq *tegra = platform_get_drvdata(pdev);
910
911	devfreq_remove_device(tegra->devfreq);
912	devfreq_remove_governor(&tegra_devfreq_governor);
913
914	dev_pm_opp_remove_all_dynamic(&pdev->dev);
915
916	reset_control_reset(tegra->reset);
917	clk_disable_unprepare(tegra->clock);
918
919	return 0;
920}
921
922static const struct of_device_id tegra_devfreq_of_match[] = {
923	{ .compatible = "nvidia,tegra30-actmon" },
924	{ .compatible = "nvidia,tegra124-actmon" },
925	{ },
926};
927
928MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
929
930static struct platform_driver tegra_devfreq_driver = {
931	.probe	= tegra_devfreq_probe,
932	.remove	= tegra_devfreq_remove,
933	.driver = {
934		.name = "tegra-devfreq",
935		.of_match_table = tegra_devfreq_of_match,
936	},
937};
938module_platform_driver(tegra_devfreq_driver);
939
940MODULE_LICENSE("GPL v2");
941MODULE_DESCRIPTION("Tegra devfreq driver");
942MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");
v6.13.7
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * A devfreq driver for NVIDIA Tegra SoCs
  4 *
  5 * Copyright (c) 2014 NVIDIA CORPORATION. All rights reserved.
  6 * Copyright (C) 2014 Google, Inc
  7 */
  8
  9#include <linux/clk.h>
 10#include <linux/cpufreq.h>
 11#include <linux/devfreq.h>
 12#include <linux/interrupt.h>
 13#include <linux/io.h>
 14#include <linux/irq.h>
 15#include <linux/module.h>
 16#include <linux/of.h>
 17#include <linux/platform_device.h>
 18#include <linux/pm_opp.h>
 19#include <linux/reset.h>
 20#include <linux/workqueue.h>
 21
 22#include <soc/tegra/fuse.h>
 23
 24#include "governor.h"
 25
 26#define ACTMON_GLB_STATUS					0x0
 27#define ACTMON_GLB_PERIOD_CTRL					0x4
 28
 29#define ACTMON_DEV_CTRL						0x0
 30#define ACTMON_DEV_CTRL_K_VAL_SHIFT				10
 31#define ACTMON_DEV_CTRL_ENB_PERIODIC				BIT(18)
 32#define ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN			BIT(20)
 33#define ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN			BIT(21)
 34#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT	23
 35#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT	26
 36#define ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN		BIT(29)
 37#define ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN		BIT(30)
 38#define ACTMON_DEV_CTRL_ENB					BIT(31)
 39
 40#define ACTMON_DEV_CTRL_STOP					0x00000000
 41
 42#define ACTMON_DEV_UPPER_WMARK					0x4
 43#define ACTMON_DEV_LOWER_WMARK					0x8
 44#define ACTMON_DEV_INIT_AVG					0xc
 45#define ACTMON_DEV_AVG_UPPER_WMARK				0x10
 46#define ACTMON_DEV_AVG_LOWER_WMARK				0x14
 47#define ACTMON_DEV_COUNT_WEIGHT					0x18
 48#define ACTMON_DEV_AVG_COUNT					0x20
 49#define ACTMON_DEV_INTR_STATUS					0x24
 50
 51#define ACTMON_INTR_STATUS_CLEAR				0xffffffff
 52
 53#define ACTMON_DEV_INTR_CONSECUTIVE_UPPER			BIT(31)
 54#define ACTMON_DEV_INTR_CONSECUTIVE_LOWER			BIT(30)
 55
 56#define ACTMON_ABOVE_WMARK_WINDOW				1
 57#define ACTMON_BELOW_WMARK_WINDOW				3
 58#define ACTMON_BOOST_FREQ_STEP					16000
 59
 60/*
 61 * ACTMON_AVERAGE_WINDOW_LOG2: default value for @DEV_CTRL_K_VAL, which
 62 * translates to 2 ^ (K_VAL + 1). ex: 2 ^ (6 + 1) = 128
 63 */
 64#define ACTMON_AVERAGE_WINDOW_LOG2			6
 65#define ACTMON_SAMPLING_PERIOD				12 /* ms */
 66#define ACTMON_DEFAULT_AVG_BAND				6  /* 1/10 of % */
 67
 68#define KHZ							1000
 69
 70#define KHZ_MAX						(ULONG_MAX / KHZ)
 71
 72/* Assume that the bus is saturated if the utilization is 25% */
 73#define BUS_SATURATION_RATIO					25
 74
 75/**
 76 * struct tegra_devfreq_device_config - configuration specific to an ACTMON
 77 * device
 78 *
 79 * Coefficients and thresholds are percentages unless otherwise noted
 80 */
 81struct tegra_devfreq_device_config {
 82	u32		offset;
 83	u32		irq_mask;
 84
 85	/* Factors applied to boost_freq every consecutive watermark breach */
 86	unsigned int	boost_up_coeff;
 87	unsigned int	boost_down_coeff;
 88
 89	/* Define the watermark bounds when applied to the current avg */
 90	unsigned int	boost_up_threshold;
 91	unsigned int	boost_down_threshold;
 92
 93	/*
 94	 * Threshold of activity (cycles translated to kHz) below which the
 95	 * CPU frequency isn't to be taken into account. This is to avoid
 96	 * increasing the EMC frequency when the CPU is very busy but not
 97	 * accessing the bus often.
 98	 */
 99	u32		avg_dependency_threshold;
100};
101
102enum tegra_actmon_device {
103	MCALL = 0,
104	MCCPU,
105};
106
107static const struct tegra_devfreq_device_config tegra124_device_configs[] = {
108	{
109		/* MCALL: All memory accesses (including from the CPUs) */
110		.offset = 0x1c0,
111		.irq_mask = 1 << 26,
112		.boost_up_coeff = 200,
113		.boost_down_coeff = 50,
114		.boost_up_threshold = 60,
115		.boost_down_threshold = 40,
116	},
117	{
118		/* MCCPU: memory accesses from the CPUs */
119		.offset = 0x200,
120		.irq_mask = 1 << 25,
121		.boost_up_coeff = 800,
122		.boost_down_coeff = 40,
123		.boost_up_threshold = 27,
124		.boost_down_threshold = 10,
125		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
126	},
127};
128
129static const struct tegra_devfreq_device_config tegra30_device_configs[] = {
130	{
131		/* MCALL: All memory accesses (including from the CPUs) */
132		.offset = 0x1c0,
133		.irq_mask = 1 << 26,
134		.boost_up_coeff = 200,
135		.boost_down_coeff = 50,
136		.boost_up_threshold = 20,
137		.boost_down_threshold = 10,
138	},
139	{
140		/* MCCPU: memory accesses from the CPUs */
141		.offset = 0x200,
142		.irq_mask = 1 << 25,
143		.boost_up_coeff = 800,
144		.boost_down_coeff = 40,
145		.boost_up_threshold = 27,
146		.boost_down_threshold = 10,
147		.avg_dependency_threshold = 16000, /* 16MHz in kHz units */
148	},
149};
150
151/**
152 * struct tegra_devfreq_device - state specific to an ACTMON device
153 *
154 * Frequencies are in kHz.
155 */
156struct tegra_devfreq_device {
157	const struct tegra_devfreq_device_config *config;
158	void __iomem *regs;
159
160	/* Average event count sampled in the last interrupt */
161	u32 avg_count;
162
163	/*
164	 * Extra frequency to increase the target by due to consecutive
165	 * watermark breaches.
166	 */
167	unsigned long boost_freq;
168
169	/* Optimal frequency calculated from the stats for this device */
170	unsigned long target_freq;
171};
172
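/*
 * Per-SoC data, selected in probe via of_device_get_match_data(): Tegra30
 * and Tegra124 use different MCALL thresholds and a different count weight
 * (2 vs. 4 EMC clocks per 256-transaction counter increment).
 */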
173struct tegra_devfreq_soc_data {
174	const struct tegra_devfreq_device_config *configs;
175	/* Weight value for count measurements */
176	unsigned int count_weight;
177};
178
179struct tegra_devfreq {
180	struct devfreq		*devfreq;
181
182	struct reset_control	*reset;
183	struct clk		*clock;
184	void __iomem		*regs;
185
186	struct clk		*emc_clock;
187	unsigned long		max_freq;
188	unsigned long		cur_freq;
189	struct notifier_block	clk_rate_change_nb;
190
191	struct delayed_work	cpufreq_update_work;
192	struct notifier_block	cpu_rate_change_nb;
193
194	struct tegra_devfreq_device devices[2];
195
196	unsigned int		irq;
197
198	bool			started;
199
200	const struct tegra_devfreq_soc_data *soc;
201};
202
203struct tegra_actmon_emc_ratio {
204	unsigned long cpu_freq;
205	unsigned long emc_freq;
206};
207
208static const struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
209	{ 1400000,    KHZ_MAX },
210	{ 1200000,    750000 },
211	{ 1100000,    600000 },
212	{ 1000000,    500000 },
213	{  800000,    375000 },
214	{  500000,    200000 },
215	{  250000,    100000 },
216};
217
218static u32 actmon_readl(struct tegra_devfreq *tegra, u32 offset)
219{
220	return readl_relaxed(tegra->regs + offset);
221}
222
223static void actmon_writel(struct tegra_devfreq *tegra, u32 val, u32 offset)
224{
225	writel_relaxed(val, tegra->regs + offset);
226}
227
228static u32 device_readl(struct tegra_devfreq_device *dev, u32 offset)
229{
230	return readl_relaxed(dev->regs + offset);
231}
232
233static void device_writel(struct tegra_devfreq_device *dev, u32 val,
234			  u32 offset)
235{
236	writel_relaxed(val, dev->regs + offset);
237}
238
239static unsigned long do_percent(unsigned long long val, unsigned int pct)
240{
241	val = val * pct;
242	do_div(val, 100);
243
244	/*
245	 * A high frequency, high boost percentage and long polling interval
246	 * can result in an integer overflow when the watermarks are calculated.
247	 */
248	return min_t(u64, val, U32_MAX);
249}
250
251static void tegra_devfreq_update_avg_wmark(struct tegra_devfreq *tegra,
252					   struct tegra_devfreq_device *dev)
253{
254	u32 avg_band_freq = tegra->max_freq * ACTMON_DEFAULT_AVG_BAND / KHZ;
255	u32 band = avg_band_freq * tegra->devfreq->profile->polling_ms;
256	u32 avg;
257
258	avg = min(dev->avg_count, U32_MAX - band);
259	device_writel(dev, avg + band, ACTMON_DEV_AVG_UPPER_WMARK);
260
261	avg = max(dev->avg_count, band);
262	device_writel(dev, avg - band, ACTMON_DEV_AVG_LOWER_WMARK);
263}
264
265static void tegra_devfreq_update_wmark(struct tegra_devfreq *tegra,
266				       struct tegra_devfreq_device *dev)
267{
268	u32 val = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
269
270	device_writel(dev, do_percent(val, dev->config->boost_up_threshold),
271		      ACTMON_DEV_UPPER_WMARK);
272
273	device_writel(dev, do_percent(val, dev->config->boost_down_threshold),
274		      ACTMON_DEV_LOWER_WMARK);
275}
276
277static void actmon_isr_device(struct tegra_devfreq *tegra,
278			      struct tegra_devfreq_device *dev)
279{
280	u32 intr_status, dev_ctrl;
281
282	dev->avg_count = device_readl(dev, ACTMON_DEV_AVG_COUNT);
283	tegra_devfreq_update_avg_wmark(tegra, dev);
284
285	intr_status = device_readl(dev, ACTMON_DEV_INTR_STATUS);
286	dev_ctrl = device_readl(dev, ACTMON_DEV_CTRL);
287
288	if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_UPPER) {
289		/*
290		 * new_boost = min(old_boost * up_coef + step, max_freq)
291		 */
292		dev->boost_freq = do_percent(dev->boost_freq,
293					     dev->config->boost_up_coeff);
294		dev->boost_freq += ACTMON_BOOST_FREQ_STEP;
295
296		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
297
298		if (dev->boost_freq >= tegra->max_freq) {
299			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
300			dev->boost_freq = tegra->max_freq;
301		}
302	} else if (intr_status & ACTMON_DEV_INTR_CONSECUTIVE_LOWER) {
303		/*
304		 * new_boost = old_boost * down_coef
305		 * or 0 if (old_boost * down_coef < step / 2)
306		 */
307		dev->boost_freq = do_percent(dev->boost_freq,
308					     dev->config->boost_down_coeff);
309
310		dev_ctrl |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
311
312		if (dev->boost_freq < (ACTMON_BOOST_FREQ_STEP >> 1)) {
313			dev_ctrl &= ~ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_EN;
314			dev->boost_freq = 0;
315		}
316	}
317
318	device_writel(dev, dev_ctrl, ACTMON_DEV_CTRL);
319
320	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
321}
322
323static unsigned long actmon_cpu_to_emc_rate(struct tegra_devfreq *tegra,
324					    unsigned long cpu_freq)
325{
326	unsigned int i;
327	const struct tegra_actmon_emc_ratio *ratio = actmon_emc_ratios;
328
329	for (i = 0; i < ARRAY_SIZE(actmon_emc_ratios); i++, ratio++) {
330		if (cpu_freq >= ratio->cpu_freq) {
331			if (ratio->emc_freq >= tegra->max_freq)
332				return tegra->max_freq;
333			else
334				return ratio->emc_freq;
335		}
336	}
337
338	return 0;
339}
340
341static unsigned long actmon_device_target_freq(struct tegra_devfreq *tegra,
342					       struct tegra_devfreq_device *dev)
343{
344	unsigned int avg_sustain_coef;
345	unsigned long target_freq;
346
347	target_freq = dev->avg_count / tegra->devfreq->profile->polling_ms;
348	avg_sustain_coef = 100 * 100 / dev->config->boost_up_threshold;
349	target_freq = do_percent(target_freq, avg_sustain_coef);
350
351	return target_freq;
352}
353
354static void actmon_update_target(struct tegra_devfreq *tegra,
355				 struct tegra_devfreq_device *dev)
356{
357	unsigned long cpu_freq = 0;
358	unsigned long static_cpu_emc_freq = 0;
359
360	dev->target_freq = actmon_device_target_freq(tegra, dev);
361
362	if (dev->config->avg_dependency_threshold &&
363	    dev->config->avg_dependency_threshold <= dev->target_freq) {
364		cpu_freq = cpufreq_quick_get(0);
365		static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
366
367		dev->target_freq += dev->boost_freq;
368		dev->target_freq = max(dev->target_freq, static_cpu_emc_freq);
369	} else {
370		dev->target_freq += dev->boost_freq;
371	}
372}
373
374static irqreturn_t actmon_thread_isr(int irq, void *data)
375{
376	struct tegra_devfreq *tegra = data;
377	bool handled = false;
378	unsigned int i;
379	u32 val;
380
381	mutex_lock(&tegra->devfreq->lock);
382
383	val = actmon_readl(tegra, ACTMON_GLB_STATUS);
384	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
385		if (val & tegra->devices[i].config->irq_mask) {
386			actmon_isr_device(tegra, tegra->devices + i);
387			handled = true;
388		}
389	}
390
391	if (handled)
392		update_devfreq(tegra->devfreq);
393
394	mutex_unlock(&tegra->devfreq->lock);
395
396	return handled ? IRQ_HANDLED : IRQ_NONE;
397}
398
399static int tegra_actmon_clk_notify_cb(struct notifier_block *nb,
400				      unsigned long action, void *ptr)
401{
402	struct clk_notifier_data *data = ptr;
403	struct tegra_devfreq *tegra;
404	struct tegra_devfreq_device *dev;
405	unsigned int i;
406
407	if (action != POST_RATE_CHANGE)
408		return NOTIFY_OK;
409
410	tegra = container_of(nb, struct tegra_devfreq, clk_rate_change_nb);
411
412	tegra->cur_freq = data->new_rate / KHZ;
413
414	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
415		dev = &tegra->devices[i];
416
417		tegra_devfreq_update_wmark(tegra, dev);
418	}
419
420	return NOTIFY_OK;
421}
422
423static void tegra_actmon_delayed_update(struct work_struct *work)
424{
425	struct tegra_devfreq *tegra = container_of(work, struct tegra_devfreq,
426						   cpufreq_update_work.work);
427
428	mutex_lock(&tegra->devfreq->lock);
429	update_devfreq(tegra->devfreq);
430	mutex_unlock(&tegra->devfreq->lock);
431}
432
433static unsigned long
434tegra_actmon_cpufreq_contribution(struct tegra_devfreq *tegra,
435				  unsigned int cpu_freq)
436{
437	struct tegra_devfreq_device *actmon_dev = &tegra->devices[MCCPU];
438	unsigned long static_cpu_emc_freq, dev_freq;
439
440	dev_freq = actmon_device_target_freq(tegra, actmon_dev);
441
442	/* check whether CPU's freq is taken into account at all */
443	if (dev_freq < actmon_dev->config->avg_dependency_threshold)
444		return 0;
445
446	static_cpu_emc_freq = actmon_cpu_to_emc_rate(tegra, cpu_freq);
447
448	if (dev_freq + actmon_dev->boost_freq >= static_cpu_emc_freq)
449		return 0;
450
451	return static_cpu_emc_freq;
452}
453
454static int tegra_actmon_cpu_notify_cb(struct notifier_block *nb,
455				      unsigned long action, void *ptr)
456{
457	struct cpufreq_freqs *freqs = ptr;
458	struct tegra_devfreq *tegra;
459	unsigned long old, new, delay;
460
461	if (action != CPUFREQ_POSTCHANGE)
462		return NOTIFY_OK;
463
464	tegra = container_of(nb, struct tegra_devfreq, cpu_rate_change_nb);
465
466	/*
467	 * Quickly check whether CPU frequency should be taken into account
468	 * at all, without blocking CPUFreq's core.
469	 */
470	if (mutex_trylock(&tegra->devfreq->lock)) {
471		old = tegra_actmon_cpufreq_contribution(tegra, freqs->old);
472		new = tegra_actmon_cpufreq_contribution(tegra, freqs->new);
473		mutex_unlock(&tegra->devfreq->lock);
474
475		/*
476		 * If CPU's frequency shouldn't be taken into account at
477		 * the moment, then there is no need to update the devfreq's
478		 * state because ISR will re-check CPU's frequency on the
479		 * next interrupt.
480		 */
481		if (old == new)
482			return NOTIFY_OK;
483	}
484
485	/*
486	 * CPUFreq driver should support CPUFREQ_ASYNC_NOTIFICATION in order
487	 * to allow asynchronous notifications. This means we can't block
488	 * here for too long, otherwise CPUFreq's core will complain with a
489	 * warning splat.
490	 */
491	delay = msecs_to_jiffies(ACTMON_SAMPLING_PERIOD);
492	schedule_delayed_work(&tegra->cpufreq_update_work, delay);
493
494	return NOTIFY_OK;
495}
496
497static void tegra_actmon_configure_device(struct tegra_devfreq *tegra,
498					  struct tegra_devfreq_device *dev)
499{
500	u32 val = 0;
501
502	/* reset boosting on governor's restart */
503	dev->boost_freq = 0;
504
505	dev->target_freq = tegra->cur_freq;
506
507	dev->avg_count = tegra->cur_freq * tegra->devfreq->profile->polling_ms;
508	device_writel(dev, dev->avg_count, ACTMON_DEV_INIT_AVG);
509
510	tegra_devfreq_update_avg_wmark(tegra, dev);
511	tegra_devfreq_update_wmark(tegra, dev);
512
513	device_writel(dev, tegra->soc->count_weight, ACTMON_DEV_COUNT_WEIGHT);
514	device_writel(dev, ACTMON_INTR_STATUS_CLEAR, ACTMON_DEV_INTR_STATUS);
515
516	val |= ACTMON_DEV_CTRL_ENB_PERIODIC;
517	val |= (ACTMON_AVERAGE_WINDOW_LOG2 - 1)
518		<< ACTMON_DEV_CTRL_K_VAL_SHIFT;
519	val |= (ACTMON_BELOW_WMARK_WINDOW - 1)
520		<< ACTMON_DEV_CTRL_CONSECUTIVE_BELOW_WMARK_NUM_SHIFT;
521	val |= (ACTMON_ABOVE_WMARK_WINDOW - 1)
522		<< ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_NUM_SHIFT;
523	val |= ACTMON_DEV_CTRL_AVG_ABOVE_WMARK_EN;
524	val |= ACTMON_DEV_CTRL_AVG_BELOW_WMARK_EN;
525	val |= ACTMON_DEV_CTRL_CONSECUTIVE_ABOVE_WMARK_EN;
526	val |= ACTMON_DEV_CTRL_ENB;
527
528	device_writel(dev, val, ACTMON_DEV_CTRL);
529}
530
531static void tegra_actmon_stop_devices(struct tegra_devfreq *tegra)
532{
533	struct tegra_devfreq_device *dev = tegra->devices;
534	unsigned int i;
535
536	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++, dev++) {
537		device_writel(dev, ACTMON_DEV_CTRL_STOP, ACTMON_DEV_CTRL);
538		device_writel(dev, ACTMON_INTR_STATUS_CLEAR,
539			      ACTMON_DEV_INTR_STATUS);
540	}
541}
542
543static int tegra_actmon_resume(struct tegra_devfreq *tegra)
544{
545	unsigned int i;
546	int err;
547
548	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
549		return 0;
550
551	actmon_writel(tegra, tegra->devfreq->profile->polling_ms - 1,
552		      ACTMON_GLB_PERIOD_CTRL);
553
554	/*
555	 * CLK notifications are needed in order to reconfigure the upper
556	 * consecutive watermark in accordance with the actual clock rate
557	 * to avoid unnecessary upper interrupts.
558	 */
559	err = clk_notifier_register(tegra->emc_clock,
560				    &tegra->clk_rate_change_nb);
561	if (err) {
562		dev_err(tegra->devfreq->dev.parent,
563			"Failed to register rate change notifier\n");
564		return err;
565	}
566
567	tegra->cur_freq = clk_get_rate(tegra->emc_clock) / KHZ;
568
569	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++)
570		tegra_actmon_configure_device(tegra, &tegra->devices[i]);
571
572	/*
573	 * We are estimating the CPU's memory bandwidth requirement based on
574	 * the amount of memory accesses and the system's load, judging by the
575	 * CPU's frequency. We also don't want to receive events about CPU
576	 * frequency transitions when the governor is stopped, hence the
577	 * notifier is registered dynamically.
578	 */
579	err = cpufreq_register_notifier(&tegra->cpu_rate_change_nb,
580					CPUFREQ_TRANSITION_NOTIFIER);
581	if (err) {
582		dev_err(tegra->devfreq->dev.parent,
583			"Failed to register rate change notifier: %d\n", err);
584		goto err_stop;
585	}
586
587	enable_irq(tegra->irq);
588
589	return 0;
590
591err_stop:
592	tegra_actmon_stop_devices(tegra);
593
594	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
595
596	return err;
597}
598
599static int tegra_actmon_start(struct tegra_devfreq *tegra)
600{
601	int ret = 0;
602
603	if (!tegra->started) {
604		tegra->started = true;
605
606		ret = tegra_actmon_resume(tegra);
607		if (ret)
608			tegra->started = false;
609	}
610
611	return ret;
612}
613
614static void tegra_actmon_pause(struct tegra_devfreq *tegra)
615{
616	if (!tegra->devfreq->profile->polling_ms || !tegra->started)
617		return;
618
619	disable_irq(tegra->irq);
620
621	cpufreq_unregister_notifier(&tegra->cpu_rate_change_nb,
622				    CPUFREQ_TRANSITION_NOTIFIER);
623
624	cancel_delayed_work_sync(&tegra->cpufreq_update_work);
625
626	tegra_actmon_stop_devices(tegra);
627
628	clk_notifier_unregister(tegra->emc_clock, &tegra->clk_rate_change_nb);
629}
630
631static void tegra_actmon_stop(struct tegra_devfreq *tegra)
632{
633	tegra_actmon_pause(tegra);
634	tegra->started = false;
635}
636
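/*
 * Note: here the new rate is applied through the OPP core with
 * dev_pm_opp_set_opp(); direct clk handling is skipped by the nop
 * config_clks callback that probe registers in its dev_pm_opp_config.
 */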
637static int tegra_devfreq_target(struct device *dev, unsigned long *freq,
638				u32 flags)
639{
640	struct dev_pm_opp *opp;
641	int ret;
642
643	opp = devfreq_recommended_opp(dev, freq, flags);
644	if (IS_ERR(opp)) {
645		dev_err(dev, "Failed to find opp for %lu Hz\n", *freq);
646		return PTR_ERR(opp);
647	}
648
649	ret = dev_pm_opp_set_opp(dev, opp);
650	dev_pm_opp_put(opp);
651
652	return ret;
653}
654
655static int tegra_devfreq_get_dev_status(struct device *dev,
656					struct devfreq_dev_status *stat)
657{
658	struct tegra_devfreq *tegra = dev_get_drvdata(dev);
659	struct tegra_devfreq_device *actmon_dev;
660	unsigned long cur_freq;
661
662	cur_freq = READ_ONCE(tegra->cur_freq);
663
664	/* To be used by the tegra governor */
665	stat->private_data = tegra;
666
667	/* The below are to be used by the other governors */
668	stat->current_frequency = cur_freq * KHZ;
669
670	actmon_dev = &tegra->devices[MCALL];
671
672	/* Number of cycles spent on memory access */
673	stat->busy_time = device_readl(actmon_dev, ACTMON_DEV_AVG_COUNT);
674
675	/* The bus can be considered to be saturated way before 100% */
676	stat->busy_time *= 100 / BUS_SATURATION_RATIO;
677
678	/* Number of cycles in a sampling period */
679	stat->total_time = tegra->devfreq->profile->polling_ms * cur_freq;
680
681	stat->busy_time = min(stat->busy_time, stat->total_time);
682
683	return 0;
684}
685
686static struct devfreq_dev_profile tegra_devfreq_profile = {
687	.polling_ms	= ACTMON_SAMPLING_PERIOD,
688	.target		= tegra_devfreq_target,
689	.get_dev_status	= tegra_devfreq_get_dev_status,
690	.is_cooling_device = true,
691};
692
693static int tegra_governor_get_target(struct devfreq *devfreq,
694				     unsigned long *freq)
695{
696	struct devfreq_dev_status *stat;
697	struct tegra_devfreq *tegra;
698	struct tegra_devfreq_device *dev;
699	unsigned long target_freq = 0;
700	unsigned int i;
701	int err;
702
703	err = devfreq_update_stats(devfreq);
704	if (err)
705		return err;
706
707	stat = &devfreq->last_status;
708
709	tegra = stat->private_data;
710
711	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
712		dev = &tegra->devices[i];
713
714		actmon_update_target(tegra, dev);
715
716		target_freq = max(target_freq, dev->target_freq);
717	}
718
719	/*
720	 * tegra-devfreq driver operates with KHz units, while OPP table
721	 * entries use Hz units. Hence we need to convert the units for the
722	 * devfreq core.
723	 */
724	*freq = target_freq * KHZ;
725
726	return 0;
727}
728
729static int tegra_governor_event_handler(struct devfreq *devfreq,
730					unsigned int event, void *data)
731{
732	struct tegra_devfreq *tegra = dev_get_drvdata(devfreq->dev.parent);
733	unsigned int *new_delay = data;
734	int ret = 0;
735
736	/*
737	 * Couple devfreq-device with the governor early because it is
738	 * needed at the moment of governor's start (used by ISR).
739	 */
740	tegra->devfreq = devfreq;
741
742	switch (event) {
743	case DEVFREQ_GOV_START:
744		devfreq_monitor_start(devfreq);
745		ret = tegra_actmon_start(tegra);
746		break;
747
748	case DEVFREQ_GOV_STOP:
749		tegra_actmon_stop(tegra);
750		devfreq_monitor_stop(devfreq);
751		break;
752
753	case DEVFREQ_GOV_UPDATE_INTERVAL:
754		/*
755		 * ACTMON hardware supports up to 256 milliseconds for the
756		 * sampling period.
757		 */
758		if (*new_delay > 256) {
759			ret = -EINVAL;
760			break;
761		}
762
763		tegra_actmon_pause(tegra);
764		devfreq_update_interval(devfreq, new_delay);
765		ret = tegra_actmon_resume(tegra);
766		break;
767
768	case DEVFREQ_GOV_SUSPEND:
769		tegra_actmon_stop(tegra);
770		devfreq_monitor_suspend(devfreq);
771		break;
772
773	case DEVFREQ_GOV_RESUME:
774		devfreq_monitor_resume(devfreq);
775		ret = tegra_actmon_start(tegra);
776		break;
777	}
778
779	return ret;
780}
781
782static struct devfreq_governor tegra_devfreq_governor = {
783	.name = "tegra_actmon",
784	.attrs = DEVFREQ_GOV_ATTR_POLLING_INTERVAL,
785	.flags = DEVFREQ_GOV_FLAG_IMMUTABLE
786		| DEVFREQ_GOV_FLAG_IRQ_DRIVEN,
787	.get_target_freq = tegra_governor_get_target,
788	.event_handler = tegra_governor_event_handler,
789};
790
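/*
 * Note: devm_tegra_devfreq_deinit_hw() is registered with
 * devm_add_action_or_reset() inside devm_tegra_devfreq_init_hw(), so the
 * reset/clock teardown runs automatically on probe failure or unbind.
 */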
791static void devm_tegra_devfreq_deinit_hw(void *data)
792{
793	struct tegra_devfreq *tegra = data;
794
795	reset_control_reset(tegra->reset);
796	clk_disable_unprepare(tegra->clock);
797}
798
799static int devm_tegra_devfreq_init_hw(struct device *dev,
800				      struct tegra_devfreq *tegra)
801{
802	int err;
803
804	err = clk_prepare_enable(tegra->clock);
805	if (err) {
806		dev_err(dev, "Failed to prepare and enable ACTMON clock\n");
807		return err;
808	}
809
810	err = devm_add_action_or_reset(dev, devm_tegra_devfreq_deinit_hw,
811				       tegra);
812	if (err)
813		return err;
814
815	err = reset_control_reset(tegra->reset);
816	if (err) {
817		dev_err(dev, "Failed to reset hardware: %d\n", err);
818		return err;
819	}
820
821	return err;
822}
823
824static int tegra_devfreq_config_clks_nop(struct device *dev,
825					 struct opp_table *opp_table,
826					 struct dev_pm_opp *opp, void *data,
827					 bool scaling_down)
828{
829	/* We want to skip clk configuration via dev_pm_opp_set_opp() */
830	return 0;
831}
832
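/*
 * Note on probe below: hw_version encodes the SoC speedo ID as a bit
 * mask so that the OPP core only keeps OPP table entries whose
 * opp-supported-hw property matches this chip.
 */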
833static int tegra_devfreq_probe(struct platform_device *pdev)
834{
835	u32 hw_version = BIT(tegra_sku_info.soc_speedo_id);
836	struct tegra_devfreq_device *dev;
837	struct tegra_devfreq *tegra;
838	struct devfreq *devfreq;
839	unsigned int i;
840	long rate;
841	int err;
842	const char *clk_names[] = { "actmon", NULL };
843	struct dev_pm_opp_config config = {
844		.supported_hw = &hw_version,
845		.supported_hw_count = 1,
846		.clk_names = clk_names,
847		.config_clks = tegra_devfreq_config_clks_nop,
848	};
849
850	tegra = devm_kzalloc(&pdev->dev, sizeof(*tegra), GFP_KERNEL);
851	if (!tegra)
852		return -ENOMEM;
853
854	tegra->soc = of_device_get_match_data(&pdev->dev);
855
856	tegra->regs = devm_platform_ioremap_resource(pdev, 0);
857	if (IS_ERR(tegra->regs))
858		return PTR_ERR(tegra->regs);
859
860	tegra->reset = devm_reset_control_get(&pdev->dev, "actmon");
861	if (IS_ERR(tegra->reset)) {
862		dev_err(&pdev->dev, "Failed to get reset\n");
863		return PTR_ERR(tegra->reset);
864	}
865
866	tegra->clock = devm_clk_get(&pdev->dev, "actmon");
867	if (IS_ERR(tegra->clock)) {
868		dev_err(&pdev->dev, "Failed to get actmon clock\n");
869		return PTR_ERR(tegra->clock);
870	}
871
872	tegra->emc_clock = devm_clk_get(&pdev->dev, "emc");
873	if (IS_ERR(tegra->emc_clock))
874		return dev_err_probe(&pdev->dev, PTR_ERR(tegra->emc_clock),
875				     "Failed to get emc clock\n");
876
877	err = platform_get_irq(pdev, 0);
878	if (err < 0)
879		return err;
880
881	tegra->irq = err;
882
883	irq_set_status_flags(tegra->irq, IRQ_NOAUTOEN);
884
885	err = devm_request_threaded_irq(&pdev->dev, tegra->irq, NULL,
886					actmon_thread_isr, IRQF_ONESHOT,
887					"tegra-devfreq", tegra);
888	if (err) {
889		dev_err(&pdev->dev, "Interrupt request failed: %d\n", err);
890		return err;
891	}
892
893	err = devm_pm_opp_set_config(&pdev->dev, &config);
894	if (err) {
895		dev_err(&pdev->dev, "Failed to set OPP config: %d\n", err);
896		return err;
897	}
898
899	err = devm_pm_opp_of_add_table_indexed(&pdev->dev, 0);
900	if (err) {
901		dev_err(&pdev->dev, "Failed to add OPP table: %d\n", err);
902		return err;
903	}
904
905	err = devm_tegra_devfreq_init_hw(&pdev->dev, tegra);
906	if (err)
907		return err;
908
909	rate = clk_round_rate(tegra->emc_clock, ULONG_MAX);
910	if (rate <= 0) {
911		dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate);
912		return rate ?: -EINVAL;
913	}
914
915	tegra->max_freq = rate / KHZ;
916
917	for (i = 0; i < ARRAY_SIZE(tegra->devices); i++) {
918		dev = tegra->devices + i;
919		dev->config = tegra->soc->configs + i;
920		dev->regs = tegra->regs + dev->config->offset;
921	}
922
923	platform_set_drvdata(pdev, tegra);
924
925	tegra->clk_rate_change_nb.notifier_call = tegra_actmon_clk_notify_cb;
926	tegra->cpu_rate_change_nb.notifier_call = tegra_actmon_cpu_notify_cb;
927
928	INIT_DELAYED_WORK(&tegra->cpufreq_update_work,
929			  tegra_actmon_delayed_update);
930
931	err = devm_devfreq_add_governor(&pdev->dev, &tegra_devfreq_governor);
932	if (err) {
933		dev_err(&pdev->dev, "Failed to add governor: %d\n", err);
934		return err;
935	}
936
937	tegra_devfreq_profile.initial_freq = clk_get_rate(tegra->emc_clock);
938
939	devfreq = devm_devfreq_add_device(&pdev->dev, &tegra_devfreq_profile,
940					  "tegra_actmon", NULL);
941	if (IS_ERR(devfreq)) {
942		dev_err(&pdev->dev, "Failed to add device: %pe\n", devfreq);
943		return PTR_ERR(devfreq);
944	}
945
946	return 0;
947}
948
949static const struct tegra_devfreq_soc_data tegra124_soc = {
950	.configs = tegra124_device_configs,
951
952	/*
953	 * Activity counter is incremented every 256 memory transactions,
954	 * and each transaction takes 4 EMC clocks.
955	 */
956	.count_weight = 4 * 256,
957};
958
959static const struct tegra_devfreq_soc_data tegra30_soc = {
960	.configs = tegra30_device_configs,
961	.count_weight = 2 * 256,
962};
963
964static const struct of_device_id tegra_devfreq_of_match[] = {
965	{ .compatible = "nvidia,tegra30-actmon",  .data = &tegra30_soc, },
966	{ .compatible = "nvidia,tegra124-actmon", .data = &tegra124_soc, },
967	{ },
968};
969
970MODULE_DEVICE_TABLE(of, tegra_devfreq_of_match);
971
972static struct platform_driver tegra_devfreq_driver = {
973	.probe	= tegra_devfreq_probe,
974	.driver = {
975		.name = "tegra-devfreq",
976		.of_match_table = tegra_devfreq_of_match,
977	},
978};
979module_platform_driver(tegra_devfreq_driver);
980
981MODULE_LICENSE("GPL v2");
982MODULE_DESCRIPTION("Tegra devfreq driver");
983MODULE_AUTHOR("Tomeu Vizoso <tomeu.vizoso@collabora.com>");