v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * Energy Model of devices
 *
 * Copyright (c) 2018-2021, Arm ltd.
 * Written by: Quentin Perret, Arm ltd.
 * Improvements provided by: Lukasz Luba, Arm ltd.
 */

#define pr_fmt(fmt) "energy_model: " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/energy_model.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>

/*
 * Mutex serializing the registrations of performance domains and letting
 * callbacks defined by drivers sleep.
 */
static DEFINE_MUTEX(em_pd_mutex);

static bool _is_cpu_device(struct device *dev)
{
	return (dev->bus == &cpu_subsys);
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *rootdir;

static void em_debug_create_ps(struct em_perf_state *ps, struct dentry *pd)
{
	struct dentry *d;
	char name[24];

	snprintf(name, sizeof(name), "ps:%lu", ps->frequency);

	/* Create per-ps directory */
	d = debugfs_create_dir(name, pd);
	debugfs_create_ulong("frequency", 0444, d, &ps->frequency);
	debugfs_create_ulong("power", 0444, d, &ps->power);
	debugfs_create_ulong("cost", 0444, d, &ps->cost);
	debugfs_create_ulong("inefficient", 0444, d, &ps->flags);
}

static int em_debug_cpus_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "%*pbl\n", cpumask_pr_args(to_cpumask(s->private)));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);

static int em_debug_flags_show(struct seq_file *s, void *unused)
{
	struct em_perf_domain *pd = s->private;

	seq_printf(s, "%#lx\n", pd->flags);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_flags);

static void em_debug_create_pd(struct device *dev)
{
	struct dentry *d;
	int i;

	/* Create the directory of the performance domain */
	d = debugfs_create_dir(dev_name(dev), rootdir);

	if (_is_cpu_device(dev))
		debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
				    &em_debug_cpus_fops);

	debugfs_create_file("flags", 0444, d, dev->em_pd,
			    &em_debug_flags_fops);

	/* Create a sub-directory for each performance state */
	for (i = 0; i < dev->em_pd->nr_perf_states; i++)
		em_debug_create_ps(&dev->em_pd->table[i], d);

}

static void em_debug_remove_pd(struct device *dev)
{
	struct dentry *debug_dir;

	debug_dir = debugfs_lookup(dev_name(dev), rootdir);
	debugfs_remove_recursive(debug_dir);
}

static int __init em_debug_init(void)
{
	/* Create /sys/kernel/debug/energy_model directory */
	rootdir = debugfs_create_dir("energy_model", NULL);

	return 0;
}
fs_initcall(em_debug_init);
#else /* CONFIG_DEBUG_FS */
static void em_debug_create_pd(struct device *dev) {}
static void em_debug_remove_pd(struct device *dev) {}
#endif

static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
				int nr_states, struct em_data_callback *cb,
				unsigned long flags)
{
	unsigned long power, freq, prev_freq = 0, prev_cost = ULONG_MAX;
	struct em_perf_state *table;
	int i, ret;
	u64 fmax;

	table = kcalloc(nr_states, sizeof(*table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	/* Build the list of performance states for this performance domain */
	for (i = 0, freq = 0; i < nr_states; i++, freq++) {
		/*
		 * active_power() is a driver callback which ceils 'freq' to
		 * lowest performance state of 'dev' above 'freq' and updates
		 * 'power' and 'freq' accordingly.
		 */
		ret = cb->active_power(dev, &power, &freq);
		if (ret) {
			dev_err(dev, "EM: invalid perf. state: %d\n",
				ret);
			goto free_ps_table;
		}

		/*
		 * We expect the driver callback to increase the frequency for
		 * higher performance states.
		 */
		if (freq <= prev_freq) {
			dev_err(dev, "EM: non-increasing freq: %lu\n",
				freq);
			goto free_ps_table;
		}

		/*
		 * The power returned by active_power() is expected to be
		 * positive and in range.
		 */
		if (!power || power > EM_MAX_POWER) {
			dev_err(dev, "EM: invalid power: %lu\n",
				power);
			goto free_ps_table;
		}

		table[i].power = power;
		table[i].frequency = prev_freq = freq;
	}

	/* Compute the cost of each performance state. */
	fmax = (u64) table[nr_states - 1].frequency;
	for (i = nr_states - 1; i >= 0; i--) {
		unsigned long power_res, cost;

		if (flags & EM_PERF_DOMAIN_ARTIFICIAL) {
			ret = cb->get_cost(dev, table[i].frequency, &cost);
			if (ret || !cost || cost > EM_MAX_POWER) {
				dev_err(dev, "EM: invalid cost %lu %d\n",
					cost, ret);
				goto free_ps_table;
			}
		} else {
			power_res = table[i].power;
			cost = div64_u64(fmax * power_res, table[i].frequency);
		}

		table[i].cost = cost;

		if (table[i].cost >= prev_cost) {
			table[i].flags = EM_PERF_STATE_INEFFICIENT;
			dev_dbg(dev, "EM: OPP:%lu is inefficient\n",
				table[i].frequency);
		} else {
			prev_cost = table[i].cost;
		}
	}

	pd->table = table;
	pd->nr_perf_states = nr_states;

	return 0;

free_ps_table:
	kfree(table);
	return -EINVAL;
}

static int em_create_pd(struct device *dev, int nr_states,
			struct em_data_callback *cb, cpumask_t *cpus,
			unsigned long flags)
{
	struct em_perf_domain *pd;
	struct device *cpu_dev;
	int cpu, ret, num_cpus;

	if (_is_cpu_device(dev)) {
		num_cpus = cpumask_weight(cpus);

		/* Prevent the max possible energy calculation from overflowing */
		if (num_cpus > EM_MAX_NUM_CPUS) {
			dev_err(dev, "EM: too many CPUs, overflow possible\n");
			return -EINVAL;
		}

		pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;

		cpumask_copy(em_span_cpus(pd), cpus);
	} else {
		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;
	}

	ret = em_create_perf_table(dev, pd, nr_states, cb, flags);
	if (ret) {
		kfree(pd);
		return ret;
	}

	if (_is_cpu_device(dev))
		for_each_cpu(cpu, cpus) {
			cpu_dev = get_cpu_device(cpu);
			cpu_dev->em_pd = pd;
		}

	dev->em_pd = pd;

	return 0;
}

static void em_cpufreq_update_efficiencies(struct device *dev)
{
	struct em_perf_domain *pd = dev->em_pd;
	struct em_perf_state *table;
	struct cpufreq_policy *policy;
	int found = 0;
	int i;

	if (!_is_cpu_device(dev) || !pd)
		return;

	policy = cpufreq_cpu_get(cpumask_first(em_span_cpus(pd)));
	if (!policy) {
		dev_warn(dev, "EM: Access to CPUFreq policy failed");
		return;
	}

	table = pd->table;

	for (i = 0; i < pd->nr_perf_states; i++) {
		if (!(table[i].flags & EM_PERF_STATE_INEFFICIENT))
			continue;

		if (!cpufreq_table_set_inefficient(policy, table[i].frequency))
			found++;
	}

	cpufreq_cpu_put(policy);

	if (!found)
		return;

	/*
	 * Efficiencies have been installed in CPUFreq, inefficient frequencies
	 * will be skipped. The EM can do the same.
	 */
	pd->flags |= EM_PERF_DOMAIN_SKIP_INEFFICIENCIES;
}

/**
 * em_pd_get() - Return the performance domain for a device
 * @dev : Device to find the performance domain for
 *
 * Returns the performance domain to which @dev belongs, or NULL if it doesn't
 * exist.
 */
struct em_perf_domain *em_pd_get(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev))
		return NULL;

	return dev->em_pd;
}
EXPORT_SYMBOL_GPL(em_pd_get);

/**
 * em_cpu_get() - Return the performance domain for a CPU
 * @cpu : CPU to find the performance domain for
 *
 * Returns the performance domain to which @cpu belongs, or NULL if it doesn't
 * exist.
 */
struct em_perf_domain *em_cpu_get(int cpu)
{
	struct device *cpu_dev;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return NULL;

	return em_pd_get(cpu_dev);
}
EXPORT_SYMBOL_GPL(em_cpu_get);

/**
 * em_dev_register_perf_domain() - Register the Energy Model (EM) for a device
 * @dev		: Device for which the EM is to register
 * @nr_states	: Number of performance states to register
 * @cb		: Callback functions providing the data of the Energy Model
 * @cpus	: Pointer to cpumask_t, which in case of a CPU device is
 *		obligatory. It can be taken from e.g. 'policy->cpus'. For other
 *		types of devices this should be set to NULL.
 * @microwatts	: Flag indicating that the power values are in micro-Watts or
 *		in some other scale. It must be set properly.
 *
 * Create Energy Model tables for a performance domain using the callbacks
 * defined in cb.
 *
 * It is important to set @microwatts to the correct value. Some kernel
 * sub-systems might rely on this flag and check if all devices in the EM are
 * using the same scale.
 *
 * If multiple clients register the same performance domain, all but the first
 * registration will be ignored.
 *
 * Return 0 on success
 */
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				struct em_data_callback *cb, cpumask_t *cpus,
				bool microwatts)
{
	unsigned long cap, prev_cap = 0;
	unsigned long flags = 0;
	int cpu, ret;

	if (!dev || !nr_states || !cb)
		return -EINVAL;

	/*
	 * Use a mutex to serialize the registration of performance domains and
	 * let the driver-defined callback functions sleep.
	 */
	mutex_lock(&em_pd_mutex);

	if (dev->em_pd) {
		ret = -EEXIST;
		goto unlock;
	}

	if (_is_cpu_device(dev)) {
		if (!cpus) {
			dev_err(dev, "EM: invalid CPU mask\n");
			ret = -EINVAL;
			goto unlock;
		}

		for_each_cpu(cpu, cpus) {
			if (em_cpu_get(cpu)) {
				dev_err(dev, "EM: exists for CPU%d\n", cpu);
				ret = -EEXIST;
				goto unlock;
			}
			/*
			 * All CPUs of a domain must have the same
			 * micro-architecture since they all share the same
			 * table.
			 */
			cap = arch_scale_cpu_capacity(cpu);
			if (prev_cap && prev_cap != cap) {
				dev_err(dev, "EM: CPUs of %*pbl must have the same capacity\n",
					cpumask_pr_args(cpus));

				ret = -EINVAL;
				goto unlock;
			}
			prev_cap = cap;
		}
	}

	if (microwatts)
		flags |= EM_PERF_DOMAIN_MICROWATTS;
	else if (cb->get_cost)
		flags |= EM_PERF_DOMAIN_ARTIFICIAL;

	ret = em_create_pd(dev, nr_states, cb, cpus, flags);
	if (ret)
		goto unlock;

	dev->em_pd->flags |= flags;

	em_cpufreq_update_efficiencies(dev);

	em_debug_create_pd(dev);
	dev_info(dev, "EM: created perf domain\n");

unlock:
	mutex_unlock(&em_pd_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);

/**
 * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device
 * @dev		: Device for which the EM is registered
 *
 * Unregister the EM for the specified @dev (but not a CPU device).
 */
void em_dev_unregister_perf_domain(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || !dev->em_pd)
		return;

	if (_is_cpu_device(dev))
		return;

	/*
	 * The mutex separates all register/unregister requests and protects
	 * from potential clean-up/setup issues in the debugfs directories.
	 * The debugfs directory name is the same as device's name.
	 */
	mutex_lock(&em_pd_mutex);
	em_debug_remove_pd(dev);

	kfree(dev->em_pd->table);
	kfree(dev->em_pd);
	dev->em_pd = NULL;
	mutex_unlock(&em_pd_mutex);
}
EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain);
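
For contrast with the v6.9.4 listing that follows, here is a minimal sketch of how a driver could register an EM against this v6.2 API. The foo_* names and the OPP numbers are illustrative assumptions, not kernel code; in-tree CPUfreq drivers usually derive this data from the OPP library (for instance via dev_pm_opp_of_register_em()) rather than a hand-rolled table.

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/energy_model.h>
#include <linux/kernel.h>

/* Hypothetical OPP table: frequency in kHz, active power in micro-Watts. */
static const struct { unsigned long freq, power; } foo_opps[] = {
	{  500000, 150000 },
	{ 1000000, 400000 },
	{ 1500000, 900000 },
};

/*
 * The active_power() contract documented in em_create_perf_table() above:
 * ceil *freq to the lowest supported OPP at or above it and report that
 * OPP's power, so the core can build a monotonically increasing table.
 */
static int foo_active_power(struct device *dev, unsigned long *power,
			    unsigned long *freq)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(foo_opps); i++) {
		if (foo_opps[i].freq >= *freq) {
			*freq = foo_opps[i].freq;
			*power = foo_opps[i].power;
			return 0;
		}
	}

	return -EINVAL;
}

static struct em_data_callback foo_em_cb = EM_DATA_CB(foo_active_power);

static int foo_register_em(struct cpufreq_policy *policy)
{
	struct device *cpu_dev = get_cpu_device(policy->cpu);

	/* Power values above are in micro-Watts, hence microwatts = true. */
	return em_dev_register_perf_domain(cpu_dev, ARRAY_SIZE(foo_opps),
					   &foo_em_cb, policy->cpus, true);
}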
v6.9.4
// SPDX-License-Identifier: GPL-2.0
/*
 * Energy Model of devices
 *
 * Copyright (c) 2018-2021, Arm ltd.
 * Written by: Quentin Perret, Arm ltd.
 * Improvements provided by: Lukasz Luba, Arm ltd.
 */

#define pr_fmt(fmt) "energy_model: " fmt

#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/cpumask.h>
#include <linux/debugfs.h>
#include <linux/energy_model.h>
#include <linux/sched/topology.h>
#include <linux/slab.h>

/*
 * Mutex serializing the registrations of performance domains and letting
 * callbacks defined by drivers sleep.
 */
static DEFINE_MUTEX(em_pd_mutex);

static void em_cpufreq_update_efficiencies(struct device *dev,
					   struct em_perf_state *table);
static void em_check_capacity_update(void);
static void em_update_workfn(struct work_struct *work);
static DECLARE_DELAYED_WORK(em_update_work, em_update_workfn);

static bool _is_cpu_device(struct device *dev)
{
	return (dev->bus == &cpu_subsys);
}

#ifdef CONFIG_DEBUG_FS
static struct dentry *rootdir;

struct em_dbg_info {
	struct em_perf_domain *pd;
	int ps_id;
};

#define DEFINE_EM_DBG_SHOW(name, fname)					\
static int em_debug_##fname##_show(struct seq_file *s, void *unused)	\
{									\
	struct em_dbg_info *em_dbg = s->private;			\
	struct em_perf_state *table;					\
	unsigned long val;						\
									\
	rcu_read_lock();						\
	table = em_perf_state_from_pd(em_dbg->pd);			\
	val = table[em_dbg->ps_id].name;				\
	rcu_read_unlock();						\
									\
	seq_printf(s, "%lu\n", val);					\
	return 0;							\
}									\
DEFINE_SHOW_ATTRIBUTE(em_debug_##fname)

DEFINE_EM_DBG_SHOW(frequency, frequency);
DEFINE_EM_DBG_SHOW(power, power);
DEFINE_EM_DBG_SHOW(cost, cost);
DEFINE_EM_DBG_SHOW(performance, performance);
DEFINE_EM_DBG_SHOW(flags, inefficiency);

static void em_debug_create_ps(struct em_perf_domain *em_pd,
			       struct em_dbg_info *em_dbg, int i,
			       struct dentry *pd)
{
	struct em_perf_state *table;
	unsigned long freq;
	struct dentry *d;
	char name[24];

	em_dbg[i].pd = em_pd;
	em_dbg[i].ps_id = i;

	rcu_read_lock();
	table = em_perf_state_from_pd(em_pd);
	freq = table[i].frequency;
	rcu_read_unlock();

	snprintf(name, sizeof(name), "ps:%lu", freq);

	/* Create per-ps directory */
	d = debugfs_create_dir(name, pd);
	debugfs_create_file("frequency", 0444, d, &em_dbg[i],
			    &em_debug_frequency_fops);
	debugfs_create_file("power", 0444, d, &em_dbg[i],
			    &em_debug_power_fops);
	debugfs_create_file("cost", 0444, d, &em_dbg[i],
			    &em_debug_cost_fops);
	debugfs_create_file("performance", 0444, d, &em_dbg[i],
			    &em_debug_performance_fops);
	debugfs_create_file("inefficient", 0444, d, &em_dbg[i],
			    &em_debug_inefficiency_fops);
}

static int em_debug_cpus_show(struct seq_file *s, void *unused)
{
	seq_printf(s, "%*pbl\n", cpumask_pr_args(to_cpumask(s->private)));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_cpus);

static int em_debug_flags_show(struct seq_file *s, void *unused)
{
	struct em_perf_domain *pd = s->private;

	seq_printf(s, "%#lx\n", pd->flags);

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(em_debug_flags);

static void em_debug_create_pd(struct device *dev)
{
	struct em_dbg_info *em_dbg;
	struct dentry *d;
	int i;

	/* Create the directory of the performance domain */
	d = debugfs_create_dir(dev_name(dev), rootdir);

	if (_is_cpu_device(dev))
		debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
				    &em_debug_cpus_fops);

	debugfs_create_file("flags", 0444, d, dev->em_pd,
			    &em_debug_flags_fops);

	em_dbg = devm_kcalloc(dev, dev->em_pd->nr_perf_states,
			      sizeof(*em_dbg), GFP_KERNEL);
	if (!em_dbg)
		return;

	/* Create a sub-directory for each performance state */
	for (i = 0; i < dev->em_pd->nr_perf_states; i++)
		em_debug_create_ps(dev->em_pd, em_dbg, i, d);

}

static void em_debug_remove_pd(struct device *dev)
{
	debugfs_lookup_and_remove(dev_name(dev), rootdir);
}

static int __init em_debug_init(void)
{
	/* Create /sys/kernel/debug/energy_model directory */
	rootdir = debugfs_create_dir("energy_model", NULL);

	return 0;
}
fs_initcall(em_debug_init);
#else /* CONFIG_DEBUG_FS */
static void em_debug_create_pd(struct device *dev) {}
static void em_debug_remove_pd(struct device *dev) {}
#endif

static void em_destroy_table_rcu(struct rcu_head *rp)
{
	struct em_perf_table __rcu *table;

	table = container_of(rp, struct em_perf_table, rcu);
	kfree(table);
}

static void em_release_table_kref(struct kref *kref)
{
	struct em_perf_table __rcu *table;

	/* It was the last owner of this table so we can free */
	table = container_of(kref, struct em_perf_table, kref);

	call_rcu(&table->rcu, em_destroy_table_rcu);
}

/**
 * em_table_free() - Handles safe free of the EM table when needed
 * @table : EM table which is going to be freed
 *
 * No return values.
 */
void em_table_free(struct em_perf_table __rcu *table)
{
	kref_put(&table->kref, em_release_table_kref);
}

/**
 * em_table_alloc() - Allocate a new EM table
 * @pd		: EM performance domain for which this must be done
 *
 * Allocate a new EM table and initialize its kref to indicate that it
 * has a user.
 * Returns allocated table or NULL.
 */
struct em_perf_table __rcu *em_table_alloc(struct em_perf_domain *pd)
{
	struct em_perf_table __rcu *table;
	int table_size;

	table_size = sizeof(struct em_perf_state) * pd->nr_perf_states;

	table = kzalloc(sizeof(*table) + table_size, GFP_KERNEL);
	if (!table)
		return NULL;

	kref_init(&table->kref);

	return table;
}

static void em_init_performance(struct device *dev, struct em_perf_domain *pd,
				struct em_perf_state *table, int nr_states)
{
	u64 fmax, max_cap;
	int i, cpu;

	/* This is needed only for CPUs; EAS skips other devices */
	if (!_is_cpu_device(dev))
		return;

	cpu = cpumask_first(em_span_cpus(pd));

	/*
	 * Calculate the performance value for each frequency using a
	 * linear relationship. The final CPU capacity might not be ready at
	 * boot time, but the EM will be updated a bit later with the
	 * correct one.
	 */
	fmax = (u64) table[nr_states - 1].frequency;
	max_cap = (u64) arch_scale_cpu_capacity(cpu);
	for (i = 0; i < nr_states; i++)
		table[i].performance = div64_u64(max_cap * table[i].frequency,
						 fmax);
}

static int em_compute_costs(struct device *dev, struct em_perf_state *table,
			    struct em_data_callback *cb, int nr_states,
			    unsigned long flags)
{
	unsigned long prev_cost = ULONG_MAX;
	int i, ret;

	/* Compute the cost of each performance state. */
	for (i = nr_states - 1; i >= 0; i--) {
		unsigned long power_res, cost;

		if ((flags & EM_PERF_DOMAIN_ARTIFICIAL) && cb->get_cost) {
			ret = cb->get_cost(dev, table[i].frequency, &cost);
			if (ret || !cost || cost > EM_MAX_POWER) {
				dev_err(dev, "EM: invalid cost %lu %d\n",
					cost, ret);
				return -EINVAL;
			}
		} else {
			/* increase resolution of 'cost' precision */
			power_res = table[i].power * 10;
			cost = power_res / table[i].performance;
		}

		table[i].cost = cost;

		if (table[i].cost >= prev_cost) {
			table[i].flags = EM_PERF_STATE_INEFFICIENT;
			dev_dbg(dev, "EM: OPP:%lu is inefficient\n",
				table[i].frequency);
		} else {
			prev_cost = table[i].cost;
		}
	}

	return 0;
}

/**
 * em_dev_compute_costs() - Calculate cost values for new runtime EM table
 * @dev		: Device for which the EM table is to be updated
 * @table	: The new EM table that is going to get the costs calculated
 * @nr_states	: Number of performance states
 *
 * Calculate the em_perf_state::cost values for a new runtime EM table. The
 * values are used for EAS during task placement. It also calculates and sets
 * the efficiency flag for each performance state. When the function finishes
 * successfully, the EM table is ready to be updated and used by EAS.
 *
 * Return 0 on success or a proper error in case of failure.
 */
int em_dev_compute_costs(struct device *dev, struct em_perf_state *table,
			 int nr_states)
{
	return em_compute_costs(dev, table, NULL, nr_states, 0);
}

/**
 * em_dev_update_perf_domain() - Update runtime EM table for a device
 * @dev		: Device for which the EM is to be updated
 * @new_table	: The new EM table that is going to be used from now
 *
 * Update EM runtime modifiable table for the @dev using the provided @table.
 *
 * This function uses a mutex to serialize writers, so it must not be called
 * from a non-sleeping context.
 *
 * Return 0 on success or an error code on failure.
 */
int em_dev_update_perf_domain(struct device *dev,
			      struct em_perf_table __rcu *new_table)
{
	struct em_perf_table __rcu *old_table;
	struct em_perf_domain *pd;

	if (!dev)
		return -EINVAL;

	/* Serialize update/unregister or concurrent updates */
	mutex_lock(&em_pd_mutex);

	if (!dev->em_pd) {
		mutex_unlock(&em_pd_mutex);
		return -EINVAL;
	}
	pd = dev->em_pd;

	kref_get(&new_table->kref);

	old_table = pd->em_table;
	rcu_assign_pointer(pd->em_table, new_table);

	em_cpufreq_update_efficiencies(dev, new_table->state);

	em_table_free(old_table);

	mutex_unlock(&em_pd_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(em_dev_update_perf_domain);

static int em_create_perf_table(struct device *dev, struct em_perf_domain *pd,
				struct em_perf_state *table,
				struct em_data_callback *cb,
				unsigned long flags)
{
	unsigned long power, freq, prev_freq = 0;
	int nr_states = pd->nr_perf_states;
	int i, ret;

	/* Build the list of performance states for this performance domain */
	for (i = 0, freq = 0; i < nr_states; i++, freq++) {
		/*
		 * active_power() is a driver callback which ceils 'freq' to
		 * lowest performance state of 'dev' above 'freq' and updates
		 * 'power' and 'freq' accordingly.
		 */
		ret = cb->active_power(dev, &power, &freq);
		if (ret) {
			dev_err(dev, "EM: invalid perf. state: %d\n",
				ret);
			return -EINVAL;
		}

		/*
		 * We expect the driver callback to increase the frequency for
		 * higher performance states.
		 */
		if (freq <= prev_freq) {
			dev_err(dev, "EM: non-increasing freq: %lu\n",
				freq);
			return -EINVAL;
		}

		/*
		 * The power returned by active_power() is expected to be
		 * positive and in range.
		 */
		if (!power || power > EM_MAX_POWER) {
			dev_err(dev, "EM: invalid power: %lu\n",
				power);
			return -EINVAL;
		}

		table[i].power = power;
		table[i].frequency = prev_freq = freq;
	}

	em_init_performance(dev, pd, table, nr_states);

	ret = em_compute_costs(dev, table, cb, nr_states, flags);
	if (ret)
		return -EINVAL;

	return 0;
}

static int em_create_pd(struct device *dev, int nr_states,
			struct em_data_callback *cb, cpumask_t *cpus,
			unsigned long flags)
{
	struct em_perf_table __rcu *em_table;
	struct em_perf_domain *pd;
	struct device *cpu_dev;
	int cpu, ret, num_cpus;

	if (_is_cpu_device(dev)) {
		num_cpus = cpumask_weight(cpus);

		/* Prevent the max possible energy calculation from overflowing */
		if (num_cpus > EM_MAX_NUM_CPUS) {
			dev_err(dev, "EM: too many CPUs, overflow possible\n");
			return -EINVAL;
		}

		pd = kzalloc(sizeof(*pd) + cpumask_size(), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;

		cpumask_copy(em_span_cpus(pd), cpus);
	} else {
		pd = kzalloc(sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;
	}

	pd->nr_perf_states = nr_states;

	em_table = em_table_alloc(pd);
	if (!em_table)
		goto free_pd;

	ret = em_create_perf_table(dev, pd, em_table->state, cb, flags);
	if (ret)
		goto free_pd_table;

	rcu_assign_pointer(pd->em_table, em_table);

	if (_is_cpu_device(dev))
		for_each_cpu(cpu, cpus) {
			cpu_dev = get_cpu_device(cpu);
			cpu_dev->em_pd = pd;
		}

	dev->em_pd = pd;

	return 0;

free_pd_table:
	kfree(em_table);
free_pd:
	kfree(pd);
	return -EINVAL;
}

static void
em_cpufreq_update_efficiencies(struct device *dev, struct em_perf_state *table)
{
	struct em_perf_domain *pd = dev->em_pd;
	struct cpufreq_policy *policy;
	int found = 0;
	int i, cpu;

	if (!_is_cpu_device(dev))
		return;

	/* Try to get a CPU which is active and in this PD */
	cpu = cpumask_first_and(em_span_cpus(pd), cpu_active_mask);
	if (cpu >= nr_cpu_ids) {
		dev_warn(dev, "EM: No online CPU for CPUFreq policy\n");
		return;
	}

	policy = cpufreq_cpu_get(cpu);
	if (!policy) {
		dev_warn(dev, "EM: Access to CPUFreq policy failed\n");
		return;
	}

	for (i = 0; i < pd->nr_perf_states; i++) {
		if (!(table[i].flags & EM_PERF_STATE_INEFFICIENT))
			continue;

		if (!cpufreq_table_set_inefficient(policy, table[i].frequency))
			found++;
	}

	cpufreq_cpu_put(policy);

	if (!found)
		return;

	/*
	 * Efficiencies have been installed in CPUFreq, inefficient frequencies
	 * will be skipped. The EM can do the same.
	 */
	pd->flags |= EM_PERF_DOMAIN_SKIP_INEFFICIENCIES;
}

/**
 * em_pd_get() - Return the performance domain for a device
 * @dev : Device to find the performance domain for
 *
 * Returns the performance domain to which @dev belongs, or NULL if it doesn't
 * exist.
 */
struct em_perf_domain *em_pd_get(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev))
		return NULL;

	return dev->em_pd;
}
EXPORT_SYMBOL_GPL(em_pd_get);

/**
 * em_cpu_get() - Return the performance domain for a CPU
 * @cpu : CPU to find the performance domain for
 *
 * Returns the performance domain to which @cpu belongs, or NULL if it doesn't
 * exist.
 */
struct em_perf_domain *em_cpu_get(int cpu)
{
	struct device *cpu_dev;

	cpu_dev = get_cpu_device(cpu);
	if (!cpu_dev)
		return NULL;

	return em_pd_get(cpu_dev);
}
EXPORT_SYMBOL_GPL(em_cpu_get);

/**
 * em_dev_register_perf_domain() - Register the Energy Model (EM) for a device
 * @dev		: Device for which the EM is to register
 * @nr_states	: Number of performance states to register
 * @cb		: Callback functions providing the data of the Energy Model
 * @cpus	: Pointer to cpumask_t, which in case of a CPU device is
 *		obligatory. It can be taken from e.g. 'policy->cpus'. For other
 *		types of devices this should be set to NULL.
 * @microwatts	: Flag indicating that the power values are in micro-Watts or
 *		in some other scale. It must be set properly.
 *
 * Create Energy Model tables for a performance domain using the callbacks
 * defined in cb.
 *
 * It is important to set @microwatts to the correct value. Some kernel
 * sub-systems might rely on this flag and check if all devices in the EM are
 * using the same scale.
 *
 * If multiple clients register the same performance domain, all but the first
 * registration will be ignored.
 *
 * Return 0 on success
 */
int em_dev_register_perf_domain(struct device *dev, unsigned int nr_states,
				struct em_data_callback *cb, cpumask_t *cpus,
				bool microwatts)
{
	unsigned long cap, prev_cap = 0;
	unsigned long flags = 0;
	int cpu, ret;

	if (!dev || !nr_states || !cb)
		return -EINVAL;

	/*
	 * Use a mutex to serialize the registration of performance domains and
	 * let the driver-defined callback functions sleep.
	 */
	mutex_lock(&em_pd_mutex);

	if (dev->em_pd) {
		ret = -EEXIST;
		goto unlock;
	}

	if (_is_cpu_device(dev)) {
		if (!cpus) {
			dev_err(dev, "EM: invalid CPU mask\n");
			ret = -EINVAL;
			goto unlock;
		}

		for_each_cpu(cpu, cpus) {
			if (em_cpu_get(cpu)) {
				dev_err(dev, "EM: exists for CPU%d\n", cpu);
				ret = -EEXIST;
				goto unlock;
			}
			/*
			 * All CPUs of a domain must have the same
			 * micro-architecture since they all share the same
			 * table.
			 */
			cap = arch_scale_cpu_capacity(cpu);
			if (prev_cap && prev_cap != cap) {
				dev_err(dev, "EM: CPUs of %*pbl must have the same capacity\n",
					cpumask_pr_args(cpus));

				ret = -EINVAL;
				goto unlock;
			}
			prev_cap = cap;
		}
	}

	if (microwatts)
		flags |= EM_PERF_DOMAIN_MICROWATTS;
	else if (cb->get_cost)
		flags |= EM_PERF_DOMAIN_ARTIFICIAL;

	/*
	 * EM only supports uW (exception is artificial EM).
	 * Therefore, check and force the drivers to provide
	 * power in uW.
	 */
	if (!microwatts && !(flags & EM_PERF_DOMAIN_ARTIFICIAL)) {
		dev_err(dev, "EM: only supports uW power values\n");
		ret = -EINVAL;
		goto unlock;
	}

	ret = em_create_pd(dev, nr_states, cb, cpus, flags);
	if (ret)
		goto unlock;

	dev->em_pd->flags |= flags;

	em_cpufreq_update_efficiencies(dev, dev->em_pd->em_table->state);

	em_debug_create_pd(dev);
	dev_info(dev, "EM: created perf domain\n");

unlock:
	mutex_unlock(&em_pd_mutex);

	if (_is_cpu_device(dev))
		em_check_capacity_update();

	return ret;
}
EXPORT_SYMBOL_GPL(em_dev_register_perf_domain);

/**
 * em_dev_unregister_perf_domain() - Unregister Energy Model (EM) for a device
 * @dev		: Device for which the EM is registered
 *
 * Unregister the EM for the specified @dev (but not a CPU device).
 */
void em_dev_unregister_perf_domain(struct device *dev)
{
	if (IS_ERR_OR_NULL(dev) || !dev->em_pd)
		return;

	if (_is_cpu_device(dev))
		return;

	/*
	 * The mutex separates all register/unregister requests and protects
	 * from potential clean-up/setup issues in the debugfs directories.
	 * The debugfs directory name is the same as device's name.
	 */
	mutex_lock(&em_pd_mutex);
	em_debug_remove_pd(dev);

	em_table_free(dev->em_pd->em_table);

	kfree(dev->em_pd);
	dev->em_pd = NULL;
	mutex_unlock(&em_pd_mutex);
}
EXPORT_SYMBOL_GPL(em_dev_unregister_perf_domain);

/*
 * Adjustment of CPU performance values after boot, when all CPU capacities
 * are correctly calculated.
 */
static void em_adjust_new_capacity(struct device *dev,
				   struct em_perf_domain *pd,
				   u64 max_cap)
{
	struct em_perf_table __rcu *em_table;
	struct em_perf_state *ps, *new_ps;
	int ret, ps_size;

	em_table = em_table_alloc(pd);
	if (!em_table) {
		dev_warn(dev, "EM: allocation failed\n");
		return;
	}

	new_ps = em_table->state;

	rcu_read_lock();
	ps = em_perf_state_from_pd(pd);
	/* Initialize data based on old table */
	ps_size = sizeof(struct em_perf_state) * pd->nr_perf_states;
	memcpy(new_ps, ps, ps_size);

	rcu_read_unlock();

	em_init_performance(dev, pd, new_ps, pd->nr_perf_states);
	ret = em_compute_costs(dev, new_ps, NULL, pd->nr_perf_states,
			       pd->flags);
	if (ret) {
		dev_warn(dev, "EM: compute costs failed\n");
		return;
	}

	ret = em_dev_update_perf_domain(dev, em_table);
	if (ret)
		dev_warn(dev, "EM: update failed %d\n", ret);

	/*
	 * This is a one-time update, so give up the ownership in this updater.
	 * The EM framework has incremented the usage counter and from now on
	 * will keep the reference (and free the memory when needed).
	 */
	em_table_free(em_table);
}

static void em_check_capacity_update(void)
{
	cpumask_var_t cpu_done_mask;
	struct em_perf_state *table;
	struct em_perf_domain *pd;
	unsigned long cpu_capacity;
	int cpu;

	if (!zalloc_cpumask_var(&cpu_done_mask, GFP_KERNEL)) {
		pr_warn("no free memory\n");
		return;
	}

	/* Check if CPU capacity has changed and, if so, update the EM */
	for_each_possible_cpu(cpu) {
		struct cpufreq_policy *policy;
		unsigned long em_max_perf;
		struct device *dev;

		if (cpumask_test_cpu(cpu, cpu_done_mask))
			continue;

		policy = cpufreq_cpu_get(cpu);
		if (!policy) {
			pr_debug("Accessing cpu%d policy failed\n", cpu);
			schedule_delayed_work(&em_update_work,
					      msecs_to_jiffies(1000));
			break;
		}
		cpufreq_cpu_put(policy);

		pd = em_cpu_get(cpu);
		if (!pd || em_is_artificial(pd))
			continue;

		cpumask_or(cpu_done_mask, cpu_done_mask,
			   em_span_cpus(pd));

		cpu_capacity = arch_scale_cpu_capacity(cpu);

		rcu_read_lock();
		table = em_perf_state_from_pd(pd);
		em_max_perf = table[pd->nr_perf_states - 1].performance;
		rcu_read_unlock();

		/*
		 * Check if the CPU capacity has been adjusted during boot
		 * and trigger the update for new performance values.
		 */
		if (em_max_perf == cpu_capacity)
			continue;

		pr_debug("updating cpu%d cpu_cap=%lu old capacity=%lu\n",
			 cpu, cpu_capacity, em_max_perf);

		dev = get_cpu_device(cpu);
		em_adjust_new_capacity(dev, pd, cpu_capacity);
	}

	free_cpumask_var(cpu_done_mask);
}

static void em_update_workfn(struct work_struct *work)
{
	em_check_capacity_update();
}
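
To show how the runtime-modifiable table API above fits together, here is a hedged sketch of an updater (say, a thermal driver) scaling down the power values of a registered EM. foo_scale_em_power() and its divisor parameter are illustrative assumptions, not kernel code; the flow mirrors em_adjust_new_capacity() above: allocate a fresh table, seed it from the published one under RCU, modify it, recompute costs with em_dev_compute_costs(), publish it with em_dev_update_perf_domain(), then drop the updater's reference with em_table_free().

#include <linux/energy_model.h>
#include <linux/rcupdate.h>
#include <linux/string.h>

/* Hypothetical updater: divide every em_perf_state::power by 'divisor'. */
static int foo_scale_em_power(struct device *dev, unsigned long divisor)
{
	struct em_perf_table __rcu *new_table;
	struct em_perf_state *ps, *new_ps;
	struct em_perf_domain *pd;
	int ret, i;

	pd = em_pd_get(dev);
	if (!pd || !divisor)
		return -EINVAL;

	new_table = em_table_alloc(pd);
	if (!new_table)
		return -ENOMEM;
	new_ps = new_table->state;

	/* Seed the new table from the currently published one. */
	rcu_read_lock();
	ps = em_perf_state_from_pd(pd);
	memcpy(new_ps, ps, sizeof(*ps) * pd->nr_perf_states);
	rcu_read_unlock();

	for (i = 0; i < pd->nr_perf_states; i++)
		new_ps[i].power /= divisor;

	/* Recompute costs and inefficiency flags for the new power values. */
	ret = em_dev_compute_costs(dev, new_ps, pd->nr_perf_states);
	if (ret)
		goto out;

	/* Publish: readers switch over under RCU, the old table is freed. */
	ret = em_dev_update_perf_domain(dev, new_table);
out:
	/* Drop this updater's reference; on success the EM core holds its own. */
	em_table_free(new_table);
	return ret;
}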