// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora ltd. */

#include <linux/clk.h>
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
#include <linux/nvmem-consumer.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>

#include "panfrost_device.h"
#include "panfrost_devfreq.h"

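/*
 * Fold the time elapsed since the last update into either the busy or the
 * idle accumulator, depending on whether any jobs are currently on the
 * hardware. All callers hold pfdevfreq->lock.
 */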
static void panfrost_devfreq_update_utilization(struct panfrost_devfreq *pfdevfreq)
{
        ktime_t now, last;

        now = ktime_get();
        last = pfdevfreq->time_last_update;

        if (pfdevfreq->busy_count > 0)
                pfdevfreq->busy_time += ktime_sub(now, last);
        else
                pfdevfreq->idle_time += ktime_sub(now, last);

        pfdevfreq->time_last_update = now;
}

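/*
 * devfreq ->target callback: clamp the requested frequency to the nearest
 * valid OPP, program it through the OPP core and, on success, cache the new
 * rate in current_frequency.
 */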
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
                                   u32 flags)
{
        struct panfrost_device *ptdev = dev_get_drvdata(dev);
        struct dev_pm_opp *opp;
        int err;

        opp = devfreq_recommended_opp(dev, freq, flags);
        if (IS_ERR(opp))
                return PTR_ERR(opp);
        dev_pm_opp_put(opp);

        err = dev_pm_opp_set_rate(dev, *freq);
        if (!err)
                ptdev->pfdevfreq.current_frequency = *freq;

        return err;
}

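/* Clear the busy/idle accumulators and restart the measurement window. */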
static void panfrost_devfreq_reset(struct panfrost_devfreq *pfdevfreq)
{
        pfdevfreq->busy_time = 0;
        pfdevfreq->idle_time = 0;
        pfdevfreq->time_last_update = ktime_get();
}

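/*
 * devfreq ->get_dev_status callback: report the busy and total time
 * accumulated since the previous poll, then reset the counters so that each
 * report covers exactly one polling interval.
 */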
static int panfrost_devfreq_get_dev_status(struct device *dev,
                                           struct devfreq_dev_status *status)
{
        struct panfrost_device *pfdev = dev_get_drvdata(dev);
        struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
        unsigned long irqflags;

        status->current_frequency = clk_get_rate(pfdev->clock);

        spin_lock_irqsave(&pfdevfreq->lock, irqflags);

        panfrost_devfreq_update_utilization(pfdevfreq);

        status->total_time = ktime_to_ns(ktime_add(pfdevfreq->busy_time,
                                                   pfdevfreq->idle_time));

        status->busy_time = ktime_to_ns(pfdevfreq->busy_time);

        panfrost_devfreq_reset(pfdevfreq);

        spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);

        dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n",
                status->busy_time, status->total_time,
                status->busy_time / (status->total_time / 100),
                status->current_frequency / 1000 / 1000);

        return 0;
}

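/*
 * The devfreq profile: DEVFREQ_TIMER_DELAYED keeps the 50 ms utilization
 * polling on a regular schedule instead of deferring it until the next CPU
 * wakeup.
 */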
static struct devfreq_dev_profile panfrost_devfreq_profile = {
        .timer = DEVFREQ_TIMER_DELAYED,
        .polling_ms = 50, /* ~3 frames */
        .target = panfrost_devfreq_target,
        .get_dev_status = panfrost_devfreq_get_dev_status,
};

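/*
 * Read the optional "speed-bin" nvmem cell and, when it is present, restrict
 * the OPP table to the entries supported by that bin. Platforms without a
 * speed-bin cell keep the full table.
 */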
static int panfrost_read_speedbin(struct device *dev)
{
        u32 val;
        int ret;

        ret = nvmem_cell_read_variable_le_u32(dev, "speed-bin", &val);
        if (ret) {
                /*
                 * -ENOENT (or -EOPNOTSUPP) means that this platform doesn't
                 * support speedbins, as it didn't declare any speed-bin nvmem
                 * cell: in this case, we keep going without it; any other
                 * error means that we were supposed to read the bin value but
                 * failed to do so.
                 */
                if (ret != -ENOENT && ret != -EOPNOTSUPP) {
                        DRM_DEV_ERROR(dev, "Cannot read speed-bin (%d).", ret);
                        return ret;
                }

                return 0;
        }
        DRM_DEV_DEBUG(dev, "Using speed-bin = 0x%x\n", val);

        return devm_pm_opp_set_supported_hw(dev, &val, 1);
}

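/*
 * Probe-time setup: read the speed bin, register the supply and the OPP
 * table with the OPP core, pin the clock to a valid OPP, then create the
 * devfreq device with the simple_ondemand governor and hook it up to a
 * devfreq cooling device. A missing OPP table means the driver simply runs
 * without devfreq rather than failing the probe.
 */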
int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
        int ret;
        struct dev_pm_opp *opp;
        unsigned long cur_freq;
        struct device *dev = &pfdev->pdev->dev;
        struct devfreq *devfreq;
        struct thermal_cooling_device *cooling;
        struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;
        unsigned long freq = ULONG_MAX;

        if (pfdev->comp->num_supplies > 1) {
                /*
                 * GPUs with more than one supply require platform-specific
                 * handling: continue without devfreq.
                 */
                DRM_DEV_INFO(dev, "More than 1 supply is not supported yet\n");
                return 0;
        }

        ret = panfrost_read_speedbin(dev);
        if (ret)
                return ret;

        ret = devm_pm_opp_set_regulators(dev, pfdev->comp->supply_names);
        if (ret) {
                /* Continue if the optional regulator is missing */
                if (ret != -ENODEV) {
                        if (ret != -EPROBE_DEFER)
                                DRM_DEV_ERROR(dev, "Couldn't set OPP regulators\n");
                        return ret;
                }
        }

        ret = devm_pm_opp_of_add_table(dev);
        if (ret) {
                /* Optional, continue without devfreq */
                if (ret == -ENODEV)
                        ret = 0;
                return ret;
        }
        pfdevfreq->opp_of_table_added = true;

        spin_lock_init(&pfdevfreq->lock);

        panfrost_devfreq_reset(pfdevfreq);

        cur_freq = clk_get_rate(pfdev->clock);

        opp = devfreq_recommended_opp(dev, &cur_freq, 0);
        if (IS_ERR(opp))
                return PTR_ERR(opp);

        panfrost_devfreq_profile.initial_freq = cur_freq;

        /*
         * We could wait until panfrost_devfreq_target() to set this value, but
         * since the simple_ondemand governor works asynchronously, there's a
         * chance that, by the time someone opens the device's fdinfo file, the
         * current frequency hasn't been updated yet, so let's just do an early
         * set.
         */
        pfdevfreq->current_frequency = cur_freq;

        /*
         * Set the recommended OPP: this will enable and configure the
         * regulator, if any, and will avoid a switch-off by
         * regulator_late_cleanup().
         */
        ret = dev_pm_opp_set_opp(dev, opp);
        dev_pm_opp_put(opp);
        if (ret) {
                DRM_DEV_ERROR(dev, "Couldn't set recommended OPP\n");
                return ret;
        }

        /* Find the fastest defined rate */
        opp = dev_pm_opp_find_freq_floor(dev, &freq);
        if (IS_ERR(opp))
                return PTR_ERR(opp);
        pfdevfreq->fast_rate = freq;

        dev_pm_opp_put(opp);

        /*
         * Set up default thresholds for the simple_ondemand governor.
         * The values are chosen based on experiments.
         */
        pfdevfreq->gov_data.upthreshold = 45;
        pfdevfreq->gov_data.downdifferential = 5;

        devfreq = devm_devfreq_add_device(dev, &panfrost_devfreq_profile,
                                          DEVFREQ_GOV_SIMPLE_ONDEMAND,
                                          &pfdevfreq->gov_data);
        if (IS_ERR(devfreq)) {
                DRM_DEV_ERROR(dev, "Couldn't initialize GPU devfreq\n");
                return PTR_ERR(devfreq);
        }
        pfdevfreq->devfreq = devfreq;

        cooling = devfreq_cooling_em_register(devfreq, NULL);
        if (IS_ERR(cooling))
                DRM_DEV_INFO(dev, "Failed to register cooling device\n");
        else
                pfdevfreq->cooling = cooling;

        return 0;
}

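/*
 * Unregister the cooling device; the devfreq device and the OPP resources
 * are devm-managed and torn down automatically.
 */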
void panfrost_devfreq_fini(struct panfrost_device *pfdev)
{
        struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

        if (pfdevfreq->cooling) {
                devfreq_cooling_unregister(pfdevfreq->cooling);
                pfdevfreq->cooling = NULL;
        }
}

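/*
 * Restart devfreq monitoring when the GPU is resumed, discarding any
 * busy/idle time accumulated while it was suspended. The suspend counterpart
 * below simply pauses monitoring.
 */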
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
        struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

        if (!pfdevfreq->devfreq)
                return;

        panfrost_devfreq_reset(pfdevfreq);

        devfreq_resume_device(pfdevfreq->devfreq);
}

void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
{
        struct panfrost_devfreq *pfdevfreq = &pfdev->pfdevfreq;

        if (!pfdevfreq->devfreq)
                return;

        devfreq_suspend_device(pfdevfreq->devfreq);
}

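/*
 * Mark the start of a period of GPU activity: fold the elapsed time into the
 * current counters, then bump busy_count so subsequent time is accounted as
 * busy.
 */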
void panfrost_devfreq_record_busy(struct panfrost_devfreq *pfdevfreq)
{
        unsigned long irqflags;

        if (!pfdevfreq->devfreq)
                return;

        spin_lock_irqsave(&pfdevfreq->lock, irqflags);

        panfrost_devfreq_update_utilization(pfdevfreq);

        pfdevfreq->busy_count++;

        spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}

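/*
 * Mark the end of a period of GPU activity: account the elapsed time, then
 * drop busy_count, warning if the busy/idle recording ever becomes
 * unbalanced.
 */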
void panfrost_devfreq_record_idle(struct panfrost_devfreq *pfdevfreq)
{
        unsigned long irqflags;

        if (!pfdevfreq->devfreq)
                return;

        spin_lock_irqsave(&pfdevfreq->lock, irqflags);

        panfrost_devfreq_update_utilization(pfdevfreq);

        WARN_ON(--pfdevfreq->busy_count < 0);

        spin_unlock_irqrestore(&pfdevfreq->lock, irqflags);
}