/*
 * This module exposes the interface to kernel space for specifying
 * QoS dependencies. It provides infrastructure for registration of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 *
 * There are 3 basic classes of QoS parameter: latency, timeout and throughput,
 * each with defined units:
 * latency: usec
 * timeout: usec <-- currently not used.
 * throughput: kbs (kilobytes/sec)
 *
 * There is an array of pm_qos_objects, each wrapping a constraints list and a
 * notifier chain.
 *
 * User mode requests on a QoS parameter register themselves with the
 * subsystem by opening the device node /dev/... and writing their request to
 * the node. As long as the process holds the file handle open, the request
 * continues to be accounted for. Upon file release the user-mode request is
 * removed and a new QoS target is computed. This way, when the application
 * closes the file descriptor or exits, its request is cleaned up and the
 * target value for the class is recomputed automatically.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
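
/*
 * Illustrative sketch of the user-space usage described above (not part of
 * this file; the 20 usec value is just an example, using the cpu_dma_latency
 * class registered below):
 *
 *      #include <fcntl.h>
 *      #include <stdint.h>
 *      #include <unistd.h>
 *
 *      int fd = open("/dev/cpu_dma_latency", O_RDWR);
 *      int32_t latency_us = 20;        // request <= 20 usec wakeup latency
 *      write(fd, &latency_us, sizeof(latency_us));
 *      // ... latency-sensitive work; the request stays active ...
 *      close(fd);                      // releasing the fd drops the request
 */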
29
30/*#define DEBUG*/
31
32#include <linux/pm_qos.h>
33#include <linux/sched.h>
34#include <linux/spinlock.h>
35#include <linux/slab.h>
36#include <linux/time.h>
37#include <linux/fs.h>
38#include <linux/device.h>
39#include <linux/miscdevice.h>
40#include <linux/string.h>
41#include <linux/platform_device.h>
42#include <linux/init.h>
43#include <linux/kernel.h>
44
45#include <linux/uaccess.h>
46#include <linux/export.h>
47
48/*
49 * locking rule: all changes to constraints or notifiers lists
50 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
51 * held, taken with _irqsave. One lock to rule them all
52 */
53struct pm_qos_object {
54 struct pm_qos_constraints *constraints;
55 struct miscdevice pm_qos_power_miscdev;
56 char *name;
57};
58
59static DEFINE_SPINLOCK(pm_qos_lock);
60
61static struct pm_qos_object null_pm_qos;
62
63static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
64static struct pm_qos_constraints cpu_dma_constraints = {
65 .list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
66 .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
67 .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
68 .type = PM_QOS_MIN,
69 .notifiers = &cpu_dma_lat_notifier,
70};
71static struct pm_qos_object cpu_dma_pm_qos = {
72 .constraints = &cpu_dma_constraints,
73 .name = "cpu_dma_latency",
74};
75
76static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
77static struct pm_qos_constraints network_lat_constraints = {
78 .list = PLIST_HEAD_INIT(network_lat_constraints.list),
79 .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
80 .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
81 .type = PM_QOS_MIN,
82 .notifiers = &network_lat_notifier,
83};
84static struct pm_qos_object network_lat_pm_qos = {
85 .constraints = &network_lat_constraints,
86 .name = "network_latency",
87};
88
89
90static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
91static struct pm_qos_constraints network_tput_constraints = {
92 .list = PLIST_HEAD_INIT(network_tput_constraints.list),
93 .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
94 .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
95 .type = PM_QOS_MAX,
96 .notifiers = &network_throughput_notifier,
97};
98static struct pm_qos_object network_throughput_pm_qos = {
99 .constraints = &network_tput_constraints,
100 .name = "network_throughput",
101};
102
103
104static struct pm_qos_object *pm_qos_array[] = {
105 &null_pm_qos,
106 &cpu_dma_pm_qos,
107 &network_lat_pm_qos,
108 &network_throughput_pm_qos
109};
110
111static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
112 size_t count, loff_t *f_pos);
113static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
114 size_t count, loff_t *f_pos);
115static int pm_qos_power_open(struct inode *inode, struct file *filp);
116static int pm_qos_power_release(struct inode *inode, struct file *filp);
117
118static const struct file_operations pm_qos_power_fops = {
119 .write = pm_qos_power_write,
120 .read = pm_qos_power_read,
121 .open = pm_qos_power_open,
122 .release = pm_qos_power_release,
123 .llseek = noop_llseek,
124};
125
126/* unlocked internal variant */
127static inline int pm_qos_get_value(struct pm_qos_constraints *c)
128{
129 if (plist_head_empty(&c->list))
130 return c->default_value;
131
132 switch (c->type) {
133 case PM_QOS_MIN:
134 return plist_first(&c->list)->prio;
135
136 case PM_QOS_MAX:
137 return plist_last(&c->list)->prio;
138
139 default:
140 /* runtime check for not using enum */
141 BUG();
142 }
143}
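
/*
 * Aggregation example (illustrative values): with three requests of 100, 20
 * and 50 usec on a PM_QOS_MIN class such as cpu_dma_latency, the plist is
 * ordered by value, plist_first() yields 20 and that becomes the aggregate
 * target.  On a PM_QOS_MAX class such as network_throughput, plist_last()
 * would yield 100 instead.
 */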
144
145s32 pm_qos_read_value(struct pm_qos_constraints *c)
146{
147 return c->target_value;
148}
149
150static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
151{
152 c->target_value = value;
153}
154
155/**
156 * pm_qos_update_target - manages the constraints list and calls the notifiers
157 * if needed
158 * @c: constraints data struct
159 * @node: request to add to the list, to update or to remove
160 * @action: action to take on the constraints list
161 * @value: value of the request to add or update
162 *
163 * This function returns 1 if the aggregated constraint value has changed, 0
164 * otherwise.
165 */
166int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
167 enum pm_qos_req_action action, int value)
168{
169 unsigned long flags;
170 int prev_value, curr_value, new_value;
171
172 spin_lock_irqsave(&pm_qos_lock, flags);
173 prev_value = pm_qos_get_value(c);
174 if (value == PM_QOS_DEFAULT_VALUE)
175 new_value = c->default_value;
176 else
177 new_value = value;
178
179 switch (action) {
180 case PM_QOS_REMOVE_REQ:
181 plist_del(node, &c->list);
182 break;
183 case PM_QOS_UPDATE_REQ:
184 /*
185 * to change the list, we atomically remove, reinit
186 * with new value and add, then see if the extremal
187 * changed
188 */
189 plist_del(node, &c->list);
190 case PM_QOS_ADD_REQ:
191 plist_node_init(node, new_value);
192 plist_add(node, &c->list);
193 break;
194 default:
195 /* no action */
196 ;
197 }
198
199 curr_value = pm_qos_get_value(c);
200 pm_qos_set_value(c, curr_value);
201
202 spin_unlock_irqrestore(&pm_qos_lock, flags);
203
204 if (prev_value != curr_value) {
205 blocking_notifier_call_chain(c->notifiers,
206 (unsigned long)curr_value,
207 NULL);
208 return 1;
209 } else {
210 return 0;
211 }
212}
213
214/**
215 * pm_qos_request - returns current system wide qos expectation
216 * @pm_qos_class: identification of which qos value is requested
217 *
218 * This function returns the current target value.
219 */
220int pm_qos_request(int pm_qos_class)
221{
222 return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
223}
224EXPORT_SYMBOL_GPL(pm_qos_request);
225
226int pm_qos_request_active(struct pm_qos_request *req)
227{
228 return req->pm_qos_class != 0;
229}
230EXPORT_SYMBOL_GPL(pm_qos_request_active);
231
/**
 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
 * @work: work struct for the delayed work (timeout)
 *
 * When the timeout expires, the request falls back to the default value.
 */
238static void pm_qos_work_fn(struct work_struct *work)
239{
240 struct pm_qos_request *req = container_of(to_delayed_work(work),
241 struct pm_qos_request,
242 work);
243
244 pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
245}
246
247/**
248 * pm_qos_add_request - inserts new qos request into the list
249 * @req: pointer to a preallocated handle
250 * @pm_qos_class: identifies which list of qos request to use
251 * @value: defines the qos request
252 *
253 * This function inserts a new entry in the pm_qos_class list of requested qos
254 * performance characteristics. It recomputes the aggregate QoS expectations
255 * for the pm_qos_class of parameters and initializes the pm_qos_request
256 * handle. Caller needs to save this handle for later use in updates and
257 * removal.
258 */
259
260void pm_qos_add_request(struct pm_qos_request *req,
261 int pm_qos_class, s32 value)
262{
263 if (!req) /*guard against callers passing in null */
264 return;
265
266 if (pm_qos_request_active(req)) {
267 WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
268 return;
269 }
270 req->pm_qos_class = pm_qos_class;
271 INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
272 pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
273 &req->node, PM_QOS_ADD_REQ, value);
274}
275EXPORT_SYMBOL_GPL(pm_qos_add_request);
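
/*
 * Kernel usage sketch (the request variable and the 20/100 usec values are
 * hypothetical, not taken from this file):
 *
 *      static struct pm_qos_request my_latency_req;
 *
 *      // before starting latency-sensitive work:
 *      pm_qos_add_request(&my_latency_req, PM_QOS_CPU_DMA_LATENCY, 20);
 *      // ... if the requirement changes later:
 *      pm_qos_update_request(&my_latency_req, 100);
 *      // ... and when done:
 *      pm_qos_remove_request(&my_latency_req);
 */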
276
277/**
278 * pm_qos_update_request - modifies an existing qos request
279 * @req : handle to list element holding a pm_qos request to use
280 * @value: defines the qos request
281 *
282 * Updates an existing qos request for the pm_qos_class of parameters along
283 * with updating the target pm_qos_class value.
284 *
285 * Attempts are made to make this code callable on hot code paths.
286 */
287void pm_qos_update_request(struct pm_qos_request *req,
288 s32 new_value)
289{
290 if (!req) /*guard against callers passing in null */
291 return;
292
293 if (!pm_qos_request_active(req)) {
294 WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
295 return;
296 }
297
298 if (delayed_work_pending(&req->work))
299 cancel_delayed_work_sync(&req->work);
300
301 if (new_value != req->node.prio)
302 pm_qos_update_target(
303 pm_qos_array[req->pm_qos_class]->constraints,
304 &req->node, PM_QOS_UPDATE_REQ, new_value);
305}
306EXPORT_SYMBOL_GPL(pm_qos_update_request);
307
/**
 * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
 * @req: handle to list element holding a pm_qos request to use
 * @new_value: the temporary qos request value
 * @timeout_us: the effective duration of this qos request in usecs.
 *
 * After timeout_us, this qos request is cancelled automatically.
 */
316void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
317 unsigned long timeout_us)
318{
319 if (!req)
320 return;
321 if (WARN(!pm_qos_request_active(req),
322 "%s called for unknown object.", __func__))
323 return;
324
325 if (delayed_work_pending(&req->work))
326 cancel_delayed_work_sync(&req->work);
327
328 if (new_value != req->node.prio)
329 pm_qos_update_target(
330 pm_qos_array[req->pm_qos_class]->constraints,
331 &req->node, PM_QOS_UPDATE_REQ, new_value);
332
333 schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
334}
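
/*
 * Sketch of a time-bounded request (names and values are hypothetical):
 * hold a 20 usec constraint for at most 2 ms, e.g. while waiting for an
 * interrupt, without having to remember to relax it afterwards:
 *
 *      pm_qos_update_request_timeout(&my_latency_req, 20, 2000);
 *
 * After 2000 usec, pm_qos_work_fn() resets the request to
 * PM_QOS_DEFAULT_VALUE.
 */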
335
/**
 * pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove the pm qos request from the list of constraints and
 * recompute the current target value for the pm_qos_class. Call this
 * on slow code paths.
 */
344void pm_qos_remove_request(struct pm_qos_request *req)
345{
346 if (!req) /*guard against callers passing in null */
347 return;
348 /* silent return to keep pcm code cleaner */
349
350 if (!pm_qos_request_active(req)) {
351 WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
352 return;
353 }
354
355 if (delayed_work_pending(&req->work))
356 cancel_delayed_work_sync(&req->work);
357
358 pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
359 &req->node, PM_QOS_REMOVE_REQ,
360 PM_QOS_DEFAULT_VALUE);
361 memset(req, 0, sizeof(*req));
362}
363EXPORT_SYMBOL_GPL(pm_qos_remove_request);
364
365/**
366 * pm_qos_add_notifier - sets notification entry for changes to target value
367 * @pm_qos_class: identifies which qos target changes should be notified.
368 * @notifier: notifier block managed by caller.
369 *
370 * will register the notifier into a notification chain that gets called
371 * upon changes to the pm_qos_class target value.
372 */
373int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
374{
375 int retval;
376
377 retval = blocking_notifier_chain_register(
378 pm_qos_array[pm_qos_class]->constraints->notifiers,
379 notifier);
380
381 return retval;
382}
383EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
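
/*
 * Notifier usage sketch (the callback and variable names are hypothetical):
 * get called whenever the aggregate CPU/DMA latency target changes.
 *
 *      static int my_lat_notify(struct notifier_block *nb,
 *                               unsigned long new_target, void *unused)
 *      {
 *              pr_info("cpu_dma_latency target is now %lu usec\n", new_target);
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_lat_nb = {
 *              .notifier_call = my_lat_notify,
 *      };
 *
 *      pm_qos_add_notifier(PM_QOS_CPU_DMA_LATENCY, &my_lat_nb);
 */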
384
385/**
386 * pm_qos_remove_notifier - deletes notification entry from chain.
387 * @pm_qos_class: identifies which qos target changes are notified.
388 * @notifier: notifier block to be removed.
389 *
390 * will remove the notifier from the notification chain that gets called
391 * upon changes to the pm_qos_class target value.
392 */
393int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
394{
395 int retval;
396
397 retval = blocking_notifier_chain_unregister(
398 pm_qos_array[pm_qos_class]->constraints->notifiers,
399 notifier);
400
401 return retval;
402}
403EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
404
405/* User space interface to PM QoS classes via misc devices */
406static int register_pm_qos_misc(struct pm_qos_object *qos)
407{
408 qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
409 qos->pm_qos_power_miscdev.name = qos->name;
410 qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
411
412 return misc_register(&qos->pm_qos_power_miscdev);
413}
414
415static int find_pm_qos_object_by_minor(int minor)
416{
417 int pm_qos_class;
418
419 for (pm_qos_class = 0;
420 pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
421 if (minor ==
422 pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
423 return pm_qos_class;
424 }
425 return -1;
426}
427
428static int pm_qos_power_open(struct inode *inode, struct file *filp)
429{
430 long pm_qos_class;
431
432 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
433 if (pm_qos_class >= 0) {
434 struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
435 if (!req)
436 return -ENOMEM;
437
438 pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
439 filp->private_data = req;
440
441 return 0;
442 }
443 return -EPERM;
444}
445
446static int pm_qos_power_release(struct inode *inode, struct file *filp)
447{
448 struct pm_qos_request *req;
449
450 req = filp->private_data;
451 pm_qos_remove_request(req);
452 kfree(req);
453
454 return 0;
455}
456
457
458static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
459 size_t count, loff_t *f_pos)
460{
461 s32 value;
462 unsigned long flags;
463 struct pm_qos_request *req = filp->private_data;
464
465 if (!req)
466 return -EINVAL;
467 if (!pm_qos_request_active(req))
468 return -EINVAL;
469
470 spin_lock_irqsave(&pm_qos_lock, flags);
471 value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
472 spin_unlock_irqrestore(&pm_qos_lock, flags);
473
474 return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
475}
476
477static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
478 size_t count, loff_t *f_pos)
479{
480 s32 value;
481 struct pm_qos_request *req;
482
483 if (count == sizeof(s32)) {
484 if (copy_from_user(&value, buf, sizeof(s32)))
485 return -EFAULT;
486 } else if (count <= 11) { /* ASCII perhaps? */
487 char ascii_value[11];
488 unsigned long int ulval;
489 int ret;
490
491 if (copy_from_user(ascii_value, buf, count))
492 return -EFAULT;
493
494 if (count > 10) {
495 if (ascii_value[10] == '\n')
496 ascii_value[10] = '\0';
497 else
498 return -EINVAL;
499 } else {
500 ascii_value[count] = '\0';
501 }
502 ret = strict_strtoul(ascii_value, 16, &ulval);
503 if (ret) {
504 pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret);
505 return -EINVAL;
506 }
507 value = (s32)lower_32_bits(ulval);
508 } else {
509 return -EINVAL;
510 }
511
512 req = filp->private_data;
513 pm_qos_update_request(req, value);
514
515 return count;
516}
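
/*
 * Note on the write format above: an ASCII write is parsed as a hexadecimal
 * number, so writing the string "14" requests 0x14 = 20 usec, while a binary
 * write of exactly sizeof(s32) bytes is taken as a native-endian 32-bit value.
 */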
517
518
519static int __init pm_qos_power_init(void)
520{
521 int ret = 0;
522 int i;
523
524 BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
525
526 for (i = 1; i < PM_QOS_NUM_CLASSES; i++) {
527 ret = register_pm_qos_misc(pm_qos_array[i]);
528 if (ret < 0) {
529 printk(KERN_ERR "pm_qos_param: %s setup failed\n",
530 pm_qos_array[i]->name);
531 return ret;
532 }
533 }
534
535 return ret;
536}
537
538late_initcall(pm_qos_power_init);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Power Management Quality of Service (PM QoS) support base.
4 *
5 * Copyright (C) 2020 Intel Corporation
6 *
7 * Authors:
8 * Mark Gross <mgross@linux.intel.com>
9 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
10 *
11 * Provided here is an interface for specifying PM QoS dependencies. It allows
12 * entities depending on QoS constraints to register their requests which are
13 * aggregated as appropriate to produce effective constraints (target values)
14 * that can be monitored by entities needing to respect them, either by polling
15 * or through a built-in notification mechanism.
16 *
17 * In addition to the basic functionality, more specific interfaces for managing
18 * global CPU latency QoS requests and frequency QoS requests are provided.
19 */
20
21/*#define DEBUG*/
22
23#include <linux/pm_qos.h>
24#include <linux/sched.h>
25#include <linux/spinlock.h>
26#include <linux/slab.h>
27#include <linux/time.h>
28#include <linux/fs.h>
29#include <linux/device.h>
30#include <linux/miscdevice.h>
31#include <linux/string.h>
32#include <linux/platform_device.h>
33#include <linux/init.h>
34#include <linux/kernel.h>
35#include <linux/debugfs.h>
36#include <linux/seq_file.h>
37
38#include <linux/uaccess.h>
39#include <linux/export.h>
40#include <trace/events/power.h>
41
42/*
43 * locking rule: all changes to constraints or notifiers lists
44 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
45 * held, taken with _irqsave. One lock to rule them all
46 */
47static DEFINE_SPINLOCK(pm_qos_lock);
48
49/**
50 * pm_qos_read_value - Return the current effective constraint value.
51 * @c: List of PM QoS constraint requests.
52 */
53s32 pm_qos_read_value(struct pm_qos_constraints *c)
54{
55 return READ_ONCE(c->target_value);
56}
57
58static int pm_qos_get_value(struct pm_qos_constraints *c)
59{
60 if (plist_head_empty(&c->list))
61 return c->no_constraint_value;
62
63 switch (c->type) {
64 case PM_QOS_MIN:
65 return plist_first(&c->list)->prio;
66
67 case PM_QOS_MAX:
68 return plist_last(&c->list)->prio;
69
70 default:
71 WARN(1, "Unknown PM QoS type in %s\n", __func__);
72 return PM_QOS_DEFAULT_VALUE;
73 }
74}
75
76static void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
77{
78 WRITE_ONCE(c->target_value, value);
79}
80
81/**
82 * pm_qos_update_target - Update a list of PM QoS constraint requests.
83 * @c: List of PM QoS requests.
84 * @node: Target list entry.
85 * @action: Action to carry out (add, update or remove).
86 * @value: New request value for the target list entry.
87 *
88 * Update the given list of PM QoS constraint requests, @c, by carrying an
89 * @action involving the @node list entry and @value on it.
90 *
91 * The recognized values of @action are PM_QOS_ADD_REQ (store @value in @node
92 * and add it to the list), PM_QOS_UPDATE_REQ (remove @node from the list, store
93 * @value in it and add it to the list again), and PM_QOS_REMOVE_REQ (remove
94 * @node from the list, ignore @value).
95 *
96 * Return: 1 if the aggregate constraint value has changed, 0 otherwise.
97 */
98int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
99 enum pm_qos_req_action action, int value)
100{
101 int prev_value, curr_value, new_value;
102 unsigned long flags;
103
104 spin_lock_irqsave(&pm_qos_lock, flags);
105
106 prev_value = pm_qos_get_value(c);
107 if (value == PM_QOS_DEFAULT_VALUE)
108 new_value = c->default_value;
109 else
110 new_value = value;
111
112 switch (action) {
113 case PM_QOS_REMOVE_REQ:
114 plist_del(node, &c->list);
115 break;
116 case PM_QOS_UPDATE_REQ:
117 /*
118 * To change the list, atomically remove, reinit with new value
119 * and add, then see if the aggregate has changed.
120 */
121 plist_del(node, &c->list);
122 fallthrough;
123 case PM_QOS_ADD_REQ:
124 plist_node_init(node, new_value);
125 plist_add(node, &c->list);
126 break;
127 default:
128 /* no action */
129 ;
130 }
131
132 curr_value = pm_qos_get_value(c);
133 pm_qos_set_value(c, curr_value);
134
135 spin_unlock_irqrestore(&pm_qos_lock, flags);
136
137 trace_pm_qos_update_target(action, prev_value, curr_value);
138
139 if (prev_value == curr_value)
140 return 0;
141
142 if (c->notifiers)
143 blocking_notifier_call_chain(c->notifiers, curr_value, NULL);
144
145 return 1;
146}
147
148/**
149 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
150 * @pqf: Device PM QoS flags set to remove the request from.
151 * @req: Request to remove from the set.
152 */
153static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
154 struct pm_qos_flags_request *req)
155{
156 s32 val = 0;
157
158 list_del(&req->node);
159 list_for_each_entry(req, &pqf->list, node)
160 val |= req->flags;
161
162 pqf->effective_flags = val;
163}
164
/**
 * pm_qos_update_flags - Update a set of PM QoS flags.
 * @pqf: Set of PM QoS flags to update.
 * @req: Request to add to the set, to modify, or to remove from the set.
 * @action: Action to take on the set.
 * @val: Value of the request to add or modify.
 *
 * Return: true if the effective flags have changed, false otherwise.
 */
174bool pm_qos_update_flags(struct pm_qos_flags *pqf,
175 struct pm_qos_flags_request *req,
176 enum pm_qos_req_action action, s32 val)
177{
178 unsigned long irqflags;
179 s32 prev_value, curr_value;
180
181 spin_lock_irqsave(&pm_qos_lock, irqflags);
182
183 prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
184
185 switch (action) {
186 case PM_QOS_REMOVE_REQ:
187 pm_qos_flags_remove_req(pqf, req);
188 break;
189 case PM_QOS_UPDATE_REQ:
190 pm_qos_flags_remove_req(pqf, req);
191 fallthrough;
192 case PM_QOS_ADD_REQ:
193 req->flags = val;
194 INIT_LIST_HEAD(&req->node);
195 list_add_tail(&req->node, &pqf->list);
196 pqf->effective_flags |= val;
197 break;
198 default:
199 /* no action */
200 ;
201 }
202
203 curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
204
205 spin_unlock_irqrestore(&pm_qos_lock, irqflags);
206
207 trace_pm_qos_update_flags(action, prev_value, curr_value);
208
209 return prev_value != curr_value;
210}
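
/*
 * Flags aggregation example (illustrative; such requests would normally be
 * managed by the device PM QoS code): with two requests whose flags are
 * PM_QOS_FLAG_NO_POWER_OFF and 0, effective_flags is their bitwise OR, i.e.
 * PM_QOS_FLAG_NO_POWER_OFF; removing the first request recomputes it to 0.
 */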
211
212#ifdef CONFIG_CPU_IDLE
213/* Definitions related to the CPU latency QoS. */
214
215static struct pm_qos_constraints cpu_latency_constraints = {
216 .list = PLIST_HEAD_INIT(cpu_latency_constraints.list),
217 .target_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
218 .default_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
219 .no_constraint_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
220 .type = PM_QOS_MIN,
221};
222
223static inline bool cpu_latency_qos_value_invalid(s32 value)
224{
225 return value < 0 && value != PM_QOS_DEFAULT_VALUE;
226}
227
228/**
229 * cpu_latency_qos_limit - Return current system-wide CPU latency QoS limit.
230 */
231s32 cpu_latency_qos_limit(void)
232{
233 return pm_qos_read_value(&cpu_latency_constraints);
234}
235
236/**
237 * cpu_latency_qos_request_active - Check the given PM QoS request.
238 * @req: PM QoS request to check.
239 *
240 * Return: 'true' if @req has been added to the CPU latency QoS list, 'false'
241 * otherwise.
242 */
243bool cpu_latency_qos_request_active(struct pm_qos_request *req)
244{
245 return req->qos == &cpu_latency_constraints;
246}
247EXPORT_SYMBOL_GPL(cpu_latency_qos_request_active);
248
249static void cpu_latency_qos_apply(struct pm_qos_request *req,
250 enum pm_qos_req_action action, s32 value)
251{
252 int ret = pm_qos_update_target(req->qos, &req->node, action, value);
253 if (ret > 0)
254 wake_up_all_idle_cpus();
255}
256
257/**
258 * cpu_latency_qos_add_request - Add new CPU latency QoS request.
259 * @req: Pointer to a preallocated handle.
260 * @value: Requested constraint value.
261 *
262 * Use @value to initialize the request handle pointed to by @req, insert it as
263 * a new entry to the CPU latency QoS list and recompute the effective QoS
264 * constraint for that list.
265 *
266 * Callers need to save the handle for later use in updates and removal of the
267 * QoS request represented by it.
268 */
269void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value)
270{
271 if (!req || cpu_latency_qos_value_invalid(value))
272 return;
273
274 if (cpu_latency_qos_request_active(req)) {
275 WARN(1, KERN_ERR "%s called for already added request\n", __func__);
276 return;
277 }
278
279 trace_pm_qos_add_request(value);
280
281 req->qos = &cpu_latency_constraints;
282 cpu_latency_qos_apply(req, PM_QOS_ADD_REQ, value);
283}
284EXPORT_SYMBOL_GPL(cpu_latency_qos_add_request);
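
/*
 * Usage sketch for the CPU latency QoS interface (the request variable and
 * the values are hypothetical):
 *
 *      static struct pm_qos_request my_pm_qos;
 *
 *      cpu_latency_qos_add_request(&my_pm_qos, 20);    // cap wakeups at 20 usec
 *      // ...
 *      cpu_latency_qos_update_request(&my_pm_qos, 100);
 *      // ...
 *      cpu_latency_qos_remove_request(&my_pm_qos);
 */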
285
286/**
287 * cpu_latency_qos_update_request - Modify existing CPU latency QoS request.
288 * @req : QoS request to update.
289 * @new_value: New requested constraint value.
290 *
291 * Use @new_value to update the QoS request represented by @req in the CPU
292 * latency QoS list along with updating the effective constraint value for that
293 * list.
294 */
295void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value)
296{
297 if (!req || cpu_latency_qos_value_invalid(new_value))
298 return;
299
300 if (!cpu_latency_qos_request_active(req)) {
301 WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
302 return;
303 }
304
305 trace_pm_qos_update_request(new_value);
306
307 if (new_value == req->node.prio)
308 return;
309
310 cpu_latency_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
311}
312EXPORT_SYMBOL_GPL(cpu_latency_qos_update_request);
313
314/**
315 * cpu_latency_qos_remove_request - Remove existing CPU latency QoS request.
316 * @req: QoS request to remove.
317 *
318 * Remove the CPU latency QoS request represented by @req from the CPU latency
319 * QoS list along with updating the effective constraint value for that list.
320 */
321void cpu_latency_qos_remove_request(struct pm_qos_request *req)
322{
323 if (!req)
324 return;
325
326 if (!cpu_latency_qos_request_active(req)) {
327 WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
328 return;
329 }
330
331 trace_pm_qos_remove_request(PM_QOS_DEFAULT_VALUE);
332
333 cpu_latency_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
334 memset(req, 0, sizeof(*req));
335}
336EXPORT_SYMBOL_GPL(cpu_latency_qos_remove_request);
337
338/* User space interface to the CPU latency QoS via misc device. */
339
340static int cpu_latency_qos_open(struct inode *inode, struct file *filp)
341{
342 struct pm_qos_request *req;
343
344 req = kzalloc(sizeof(*req), GFP_KERNEL);
345 if (!req)
346 return -ENOMEM;
347
348 cpu_latency_qos_add_request(req, PM_QOS_DEFAULT_VALUE);
349 filp->private_data = req;
350
351 return 0;
352}
353
354static int cpu_latency_qos_release(struct inode *inode, struct file *filp)
355{
356 struct pm_qos_request *req = filp->private_data;
357
358 filp->private_data = NULL;
359
360 cpu_latency_qos_remove_request(req);
361 kfree(req);
362
363 return 0;
364}
365
366static ssize_t cpu_latency_qos_read(struct file *filp, char __user *buf,
367 size_t count, loff_t *f_pos)
368{
369 struct pm_qos_request *req = filp->private_data;
370 unsigned long flags;
371 s32 value;
372
373 if (!req || !cpu_latency_qos_request_active(req))
374 return -EINVAL;
375
376 spin_lock_irqsave(&pm_qos_lock, flags);
377 value = pm_qos_get_value(&cpu_latency_constraints);
378 spin_unlock_irqrestore(&pm_qos_lock, flags);
379
380 return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
381}
382
383static ssize_t cpu_latency_qos_write(struct file *filp, const char __user *buf,
384 size_t count, loff_t *f_pos)
385{
386 s32 value;
387
388 if (count == sizeof(s32)) {
389 if (copy_from_user(&value, buf, sizeof(s32)))
390 return -EFAULT;
391 } else {
392 int ret;
393
394 ret = kstrtos32_from_user(buf, count, 16, &value);
395 if (ret)
396 return ret;
397 }
398
399 cpu_latency_qos_update_request(filp->private_data, value);
400
401 return count;
402}
403
404static const struct file_operations cpu_latency_qos_fops = {
405 .write = cpu_latency_qos_write,
406 .read = cpu_latency_qos_read,
407 .open = cpu_latency_qos_open,
408 .release = cpu_latency_qos_release,
409 .llseek = noop_llseek,
410};
411
412static struct miscdevice cpu_latency_qos_miscdev = {
413 .minor = MISC_DYNAMIC_MINOR,
414 .name = "cpu_dma_latency",
415 .fops = &cpu_latency_qos_fops,
416};
417
418static int __init cpu_latency_qos_init(void)
419{
420 int ret;
421
422 ret = misc_register(&cpu_latency_qos_miscdev);
423 if (ret < 0)
424 pr_err("%s: %s setup failed\n", __func__,
425 cpu_latency_qos_miscdev.name);
426
427 return ret;
428}
429late_initcall(cpu_latency_qos_init);
430#endif /* CONFIG_CPU_IDLE */
431
432/* Definitions related to the frequency QoS below. */
433
434static inline bool freq_qos_value_invalid(s32 value)
435{
436 return value < 0 && value != PM_QOS_DEFAULT_VALUE;
437}
438
439/**
440 * freq_constraints_init - Initialize frequency QoS constraints.
441 * @qos: Frequency QoS constraints to initialize.
442 */
443void freq_constraints_init(struct freq_constraints *qos)
444{
445 struct pm_qos_constraints *c;
446
447 c = &qos->min_freq;
448 plist_head_init(&c->list);
449 c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
450 c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
451 c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
452 c->type = PM_QOS_MAX;
453 c->notifiers = &qos->min_freq_notifiers;
454 BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
455
456 c = &qos->max_freq;
457 plist_head_init(&c->list);
458 c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
459 c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
460 c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
461 c->type = PM_QOS_MIN;
462 c->notifiers = &qos->max_freq_notifiers;
463 BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
464}
465
466/**
467 * freq_qos_read_value - Get frequency QoS constraint for a given list.
468 * @qos: Constraints to evaluate.
469 * @type: QoS request type.
470 */
471s32 freq_qos_read_value(struct freq_constraints *qos,
472 enum freq_qos_req_type type)
473{
474 s32 ret;
475
476 switch (type) {
477 case FREQ_QOS_MIN:
478 ret = IS_ERR_OR_NULL(qos) ?
479 FREQ_QOS_MIN_DEFAULT_VALUE :
480 pm_qos_read_value(&qos->min_freq);
481 break;
482 case FREQ_QOS_MAX:
483 ret = IS_ERR_OR_NULL(qos) ?
484 FREQ_QOS_MAX_DEFAULT_VALUE :
485 pm_qos_read_value(&qos->max_freq);
486 break;
487 default:
488 WARN_ON(1);
489 ret = 0;
490 }
491
492 return ret;
493}
494
495/**
496 * freq_qos_apply - Add/modify/remove frequency QoS request.
497 * @req: Constraint request to apply.
498 * @action: Action to perform (add/update/remove).
499 * @value: Value to assign to the QoS request.
500 *
501 * This is only meant to be called from inside pm_qos, not drivers.
502 */
503int freq_qos_apply(struct freq_qos_request *req,
504 enum pm_qos_req_action action, s32 value)
505{
506 int ret;
507
508 switch(req->type) {
509 case FREQ_QOS_MIN:
510 ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
511 action, value);
512 break;
513 case FREQ_QOS_MAX:
514 ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
515 action, value);
516 break;
517 default:
518 ret = -EINVAL;
519 }
520
521 return ret;
522}
523
524/**
525 * freq_qos_add_request - Insert new frequency QoS request into a given list.
526 * @qos: Constraints to update.
527 * @req: Preallocated request object.
528 * @type: Request type.
529 * @value: Request value.
530 *
531 * Insert a new entry into the @qos list of requests, recompute the effective
532 * QoS constraint value for that list and initialize the @req object. The
533 * caller needs to save that object for later use in updates and removal.
534 *
535 * Return 1 if the effective constraint value has changed, 0 if the effective
536 * constraint value has not changed, or a negative error code on failures.
537 */
538int freq_qos_add_request(struct freq_constraints *qos,
539 struct freq_qos_request *req,
540 enum freq_qos_req_type type, s32 value)
541{
542 int ret;
543
544 if (IS_ERR_OR_NULL(qos) || !req || freq_qos_value_invalid(value))
545 return -EINVAL;
546
547 if (WARN(freq_qos_request_active(req),
548 "%s() called for active request\n", __func__))
549 return -EINVAL;
550
551 req->qos = qos;
552 req->type = type;
553 ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
554 if (ret < 0) {
555 req->qos = NULL;
556 req->type = 0;
557 }
558
559 return ret;
560}
561EXPORT_SYMBOL_GPL(freq_qos_add_request);
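
/*
 * Frequency QoS usage sketch (the request variable is hypothetical and the
 * constraints object is assumed to be the one embedded in a cpufreq policy;
 * values are in kHz for cpufreq users):
 *
 *      static struct freq_qos_request my_min_freq_req;
 *
 *      // ask for a minimum of 800 MHz:
 *      freq_qos_add_request(&policy->constraints, &my_min_freq_req,
 *                           FREQ_QOS_MIN, 800000);
 *      // ... raise the floor later:
 *      freq_qos_update_request(&my_min_freq_req, 1200000);
 *      // ... and drop it when done:
 *      freq_qos_remove_request(&my_min_freq_req);
 */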
562
563/**
564 * freq_qos_update_request - Modify existing frequency QoS request.
565 * @req: Request to modify.
566 * @new_value: New request value.
567 *
568 * Update an existing frequency QoS request along with the effective constraint
569 * value for the list of requests it belongs to.
570 *
571 * Return 1 if the effective constraint value has changed, 0 if the effective
572 * constraint value has not changed, or a negative error code on failures.
573 */
574int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
575{
576 if (!req || freq_qos_value_invalid(new_value))
577 return -EINVAL;
578
579 if (WARN(!freq_qos_request_active(req),
580 "%s() called for unknown object\n", __func__))
581 return -EINVAL;
582
583 if (req->pnode.prio == new_value)
584 return 0;
585
586 return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
587}
588EXPORT_SYMBOL_GPL(freq_qos_update_request);
589
590/**
591 * freq_qos_remove_request - Remove frequency QoS request from its list.
592 * @req: Request to remove.
593 *
594 * Remove the given frequency QoS request from the list of constraints it
595 * belongs to and recompute the effective constraint value for that list.
596 *
597 * Return 1 if the effective constraint value has changed, 0 if the effective
598 * constraint value has not changed, or a negative error code on failures.
599 */
600int freq_qos_remove_request(struct freq_qos_request *req)
601{
602 int ret;
603
604 if (!req)
605 return -EINVAL;
606
607 if (WARN(!freq_qos_request_active(req),
608 "%s() called for unknown object\n", __func__))
609 return -EINVAL;
610
611 ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
612 req->qos = NULL;
613 req->type = 0;
614
615 return ret;
616}
617EXPORT_SYMBOL_GPL(freq_qos_remove_request);
618
619/**
620 * freq_qos_add_notifier - Add frequency QoS change notifier.
621 * @qos: List of requests to add the notifier to.
622 * @type: Request type.
623 * @notifier: Notifier block to add.
624 */
625int freq_qos_add_notifier(struct freq_constraints *qos,
626 enum freq_qos_req_type type,
627 struct notifier_block *notifier)
628{
629 int ret;
630
631 if (IS_ERR_OR_NULL(qos) || !notifier)
632 return -EINVAL;
633
634 switch (type) {
635 case FREQ_QOS_MIN:
636 ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
637 notifier);
638 break;
639 case FREQ_QOS_MAX:
640 ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
641 notifier);
642 break;
643 default:
644 WARN_ON(1);
645 ret = -EINVAL;
646 }
647
648 return ret;
649}
650EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
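
/*
 * Notifier sketch (the callback and variable names are hypothetical): run
 * whenever the effective minimum frequency constraint of a given constraints
 * set changes.
 *
 *      static int my_min_freq_notify(struct notifier_block *nb,
 *                                    unsigned long new_min, void *unused)
 *      {
 *              pr_debug("effective min freq is now %lu kHz\n", new_min);
 *              return NOTIFY_OK;
 *      }
 *
 *      static struct notifier_block my_min_freq_nb = {
 *              .notifier_call = my_min_freq_notify,
 *      };
 *
 *      freq_qos_add_notifier(&policy->constraints, FREQ_QOS_MIN,
 *                            &my_min_freq_nb);
 */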
651
652/**
653 * freq_qos_remove_notifier - Remove frequency QoS change notifier.
654 * @qos: List of requests to remove the notifier from.
655 * @type: Request type.
656 * @notifier: Notifier block to remove.
657 */
658int freq_qos_remove_notifier(struct freq_constraints *qos,
659 enum freq_qos_req_type type,
660 struct notifier_block *notifier)
661{
662 int ret;
663
664 if (IS_ERR_OR_NULL(qos) || !notifier)
665 return -EINVAL;
666
667 switch (type) {
668 case FREQ_QOS_MIN:
669 ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
670 notifier);
671 break;
672 case FREQ_QOS_MAX:
673 ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
674 notifier);
675 break;
676 default:
677 WARN_ON(1);
678 ret = -EINVAL;
679 }
680
681 return ret;
682}
683EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);