Loading...
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Power Management Quality of Service (PM QoS) support base.
4 *
5 * Copyright (C) 2020 Intel Corporation
6 *
7 * Authors:
8 * Mark Gross <mgross@linux.intel.com>
9 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
10 *
11 * Provided here is an interface for specifying PM QoS dependencies. It allows
12 * entities depending on QoS constraints to register their requests which are
13 * aggregated as appropriate to produce effective constraints (target values)
14 * that can be monitored by entities needing to respect them, either by polling
15 * or through a built-in notification mechanism.
16 *
17 * In addition to the basic functionality, more specific interfaces for managing
18 * global CPU latency QoS requests and frequency QoS requests are provided.
19 */
20
21/*#define DEBUG*/
22
23#include <linux/pm_qos.h>
24#include <linux/sched.h>
25#include <linux/spinlock.h>
26#include <linux/slab.h>
27#include <linux/time.h>
28#include <linux/fs.h>
29#include <linux/device.h>
30#include <linux/miscdevice.h>
31#include <linux/string.h>
32#include <linux/platform_device.h>
33#include <linux/init.h>
34#include <linux/kernel.h>
35#include <linux/debugfs.h>
36#include <linux/seq_file.h>
37
38#include <linux/uaccess.h>
39#include <linux/export.h>
40#include <trace/events/power.h>
41
42/*
43 * locking rule: all changes to constraints or notifiers lists
44 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
45 * held, taken with _irqsave. One lock to rule them all
46 */
47static DEFINE_SPINLOCK(pm_qos_lock);
48
49/**
50 * pm_qos_read_value - Return the current effective constraint value.
51 * @c: List of PM QoS constraint requests.
52 */
53s32 pm_qos_read_value(struct pm_qos_constraints *c)
54{
55 return READ_ONCE(c->target_value);
56}
57
58static int pm_qos_get_value(struct pm_qos_constraints *c)
59{
60 if (plist_head_empty(&c->list))
61 return c->no_constraint_value;
62
63 switch (c->type) {
64 case PM_QOS_MIN:
65 return plist_first(&c->list)->prio;
66
67 case PM_QOS_MAX:
68 return plist_last(&c->list)->prio;
69
70 default:
71 WARN(1, "Unknown PM QoS type in %s\n", __func__);
72 return PM_QOS_DEFAULT_VALUE;
73 }
74}
75
76static void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
77{
78 WRITE_ONCE(c->target_value, value);
79}
80
81/**
82 * pm_qos_update_target - Update a list of PM QoS constraint requests.
83 * @c: List of PM QoS requests.
84 * @node: Target list entry.
85 * @action: Action to carry out (add, update or remove).
86 * @value: New request value for the target list entry.
87 *
88 * Update the given list of PM QoS constraint requests, @c, by carrying an
89 * @action involving the @node list entry and @value on it.
90 *
91 * The recognized values of @action are PM_QOS_ADD_REQ (store @value in @node
92 * and add it to the list), PM_QOS_UPDATE_REQ (remove @node from the list, store
93 * @value in it and add it to the list again), and PM_QOS_REMOVE_REQ (remove
94 * @node from the list, ignore @value).
95 *
96 * Return: 1 if the aggregate constraint value has changed, 0 otherwise.
97 */
98int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
99 enum pm_qos_req_action action, int value)
100{
101 int prev_value, curr_value, new_value;
102 unsigned long flags;
103
104 spin_lock_irqsave(&pm_qos_lock, flags);
105
106 prev_value = pm_qos_get_value(c);
107 if (value == PM_QOS_DEFAULT_VALUE)
108 new_value = c->default_value;
109 else
110 new_value = value;
111
112 switch (action) {
113 case PM_QOS_REMOVE_REQ:
114 plist_del(node, &c->list);
115 break;
116 case PM_QOS_UPDATE_REQ:
117 /*
118 * To change the list, atomically remove, reinit with new value
119 * and add, then see if the aggregate has changed.
120 */
121 plist_del(node, &c->list);
122 fallthrough;
123 case PM_QOS_ADD_REQ:
124 plist_node_init(node, new_value);
125 plist_add(node, &c->list);
126 break;
127 default:
128 /* no action */
129 ;
130 }
131
132 curr_value = pm_qos_get_value(c);
133 pm_qos_set_value(c, curr_value);
134
135 spin_unlock_irqrestore(&pm_qos_lock, flags);
136
137 trace_pm_qos_update_target(action, prev_value, curr_value);
138
139 if (prev_value == curr_value)
140 return 0;
141
142 if (c->notifiers)
143 blocking_notifier_call_chain(c->notifiers, curr_value, NULL);
144
145 return 1;
146}
147
148/**
149 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
150 * @pqf: Device PM QoS flags set to remove the request from.
151 * @req: Request to remove from the set.
152 */
153static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
154 struct pm_qos_flags_request *req)
155{
156 s32 val = 0;
157
158 list_del(&req->node);
159 list_for_each_entry(req, &pqf->list, node)
160 val |= req->flags;
161
162 pqf->effective_flags = val;
163}
164
165/**
166 * pm_qos_update_flags - Update a set of PM QoS flags.
167 * @pqf: Set of PM QoS flags to update.
168 * @req: Request to add to the set, to modify, or to remove from the set.
169 * @action: Action to take on the set.
170 * @val: Value of the request to add or modify.
171 *
172 * Return: 1 if the aggregate constraint value has changed, 0 otherwise.
173 */
174bool pm_qos_update_flags(struct pm_qos_flags *pqf,
175 struct pm_qos_flags_request *req,
176 enum pm_qos_req_action action, s32 val)
177{
178 unsigned long irqflags;
179 s32 prev_value, curr_value;
180
181 spin_lock_irqsave(&pm_qos_lock, irqflags);
182
183 prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
184
185 switch (action) {
186 case PM_QOS_REMOVE_REQ:
187 pm_qos_flags_remove_req(pqf, req);
188 break;
189 case PM_QOS_UPDATE_REQ:
190 pm_qos_flags_remove_req(pqf, req);
191 fallthrough;
192 case PM_QOS_ADD_REQ:
193 req->flags = val;
194 INIT_LIST_HEAD(&req->node);
195 list_add_tail(&req->node, &pqf->list);
196 pqf->effective_flags |= val;
197 break;
198 default:
199 /* no action */
200 ;
201 }
202
203 curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
204
205 spin_unlock_irqrestore(&pm_qos_lock, irqflags);
206
207 trace_pm_qos_update_flags(action, prev_value, curr_value);
208
209 return prev_value != curr_value;
210}
211
212#ifdef CONFIG_CPU_IDLE
213/* Definitions related to the CPU latency QoS. */
214
215static struct pm_qos_constraints cpu_latency_constraints = {
216 .list = PLIST_HEAD_INIT(cpu_latency_constraints.list),
217 .target_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
218 .default_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
219 .no_constraint_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
220 .type = PM_QOS_MIN,
221};
222
223/**
224 * cpu_latency_qos_limit - Return current system-wide CPU latency QoS limit.
225 */
226s32 cpu_latency_qos_limit(void)
227{
228 return pm_qos_read_value(&cpu_latency_constraints);
229}
230
231/**
232 * cpu_latency_qos_request_active - Check the given PM QoS request.
233 * @req: PM QoS request to check.
234 *
235 * Return: 'true' if @req has been added to the CPU latency QoS list, 'false'
236 * otherwise.
237 */
238bool cpu_latency_qos_request_active(struct pm_qos_request *req)
239{
240 return req->qos == &cpu_latency_constraints;
241}
242EXPORT_SYMBOL_GPL(cpu_latency_qos_request_active);
243
244static void cpu_latency_qos_apply(struct pm_qos_request *req,
245 enum pm_qos_req_action action, s32 value)
246{
247 int ret = pm_qos_update_target(req->qos, &req->node, action, value);
248 if (ret > 0)
249 wake_up_all_idle_cpus();
250}
251
252/**
253 * cpu_latency_qos_add_request - Add new CPU latency QoS request.
254 * @req: Pointer to a preallocated handle.
255 * @value: Requested constraint value.
256 *
257 * Use @value to initialize the request handle pointed to by @req, insert it as
258 * a new entry to the CPU latency QoS list and recompute the effective QoS
259 * constraint for that list.
260 *
261 * Callers need to save the handle for later use in updates and removal of the
262 * QoS request represented by it.
263 */
264void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value)
265{
266 if (!req)
267 return;
268
269 if (cpu_latency_qos_request_active(req)) {
270 WARN(1, KERN_ERR "%s called for already added request\n", __func__);
271 return;
272 }
273
274 trace_pm_qos_add_request(value);
275
276 req->qos = &cpu_latency_constraints;
277 cpu_latency_qos_apply(req, PM_QOS_ADD_REQ, value);
278}
279EXPORT_SYMBOL_GPL(cpu_latency_qos_add_request);
280
281/**
282 * cpu_latency_qos_update_request - Modify existing CPU latency QoS request.
283 * @req : QoS request to update.
284 * @new_value: New requested constraint value.
285 *
286 * Use @new_value to update the QoS request represented by @req in the CPU
287 * latency QoS list along with updating the effective constraint value for that
288 * list.
289 */
290void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value)
291{
292 if (!req)
293 return;
294
295 if (!cpu_latency_qos_request_active(req)) {
296 WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
297 return;
298 }
299
300 trace_pm_qos_update_request(new_value);
301
302 if (new_value == req->node.prio)
303 return;
304
305 cpu_latency_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
306}
307EXPORT_SYMBOL_GPL(cpu_latency_qos_update_request);
308
309/**
310 * cpu_latency_qos_remove_request - Remove existing CPU latency QoS request.
311 * @req: QoS request to remove.
312 *
313 * Remove the CPU latency QoS request represented by @req from the CPU latency
314 * QoS list along with updating the effective constraint value for that list.
315 */
316void cpu_latency_qos_remove_request(struct pm_qos_request *req)
317{
318 if (!req)
319 return;
320
321 if (!cpu_latency_qos_request_active(req)) {
322 WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
323 return;
324 }
325
326 trace_pm_qos_remove_request(PM_QOS_DEFAULT_VALUE);
327
328 cpu_latency_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
329 memset(req, 0, sizeof(*req));
330}
331EXPORT_SYMBOL_GPL(cpu_latency_qos_remove_request);
332
333/* User space interface to the CPU latency QoS via misc device. */
334
335static int cpu_latency_qos_open(struct inode *inode, struct file *filp)
336{
337 struct pm_qos_request *req;
338
339 req = kzalloc(sizeof(*req), GFP_KERNEL);
340 if (!req)
341 return -ENOMEM;
342
343 cpu_latency_qos_add_request(req, PM_QOS_DEFAULT_VALUE);
344 filp->private_data = req;
345
346 return 0;
347}
348
349static int cpu_latency_qos_release(struct inode *inode, struct file *filp)
350{
351 struct pm_qos_request *req = filp->private_data;
352
353 filp->private_data = NULL;
354
355 cpu_latency_qos_remove_request(req);
356 kfree(req);
357
358 return 0;
359}
360
361static ssize_t cpu_latency_qos_read(struct file *filp, char __user *buf,
362 size_t count, loff_t *f_pos)
363{
364 struct pm_qos_request *req = filp->private_data;
365 unsigned long flags;
366 s32 value;
367
368 if (!req || !cpu_latency_qos_request_active(req))
369 return -EINVAL;
370
371 spin_lock_irqsave(&pm_qos_lock, flags);
372 value = pm_qos_get_value(&cpu_latency_constraints);
373 spin_unlock_irqrestore(&pm_qos_lock, flags);
374
375 return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
376}
377
378static ssize_t cpu_latency_qos_write(struct file *filp, const char __user *buf,
379 size_t count, loff_t *f_pos)
380{
381 s32 value;
382
383 if (count == sizeof(s32)) {
384 if (copy_from_user(&value, buf, sizeof(s32)))
385 return -EFAULT;
386 } else {
387 int ret;
388
389 ret = kstrtos32_from_user(buf, count, 16, &value);
390 if (ret)
391 return ret;
392 }
393
394 cpu_latency_qos_update_request(filp->private_data, value);
395
396 return count;
397}
398
399static const struct file_operations cpu_latency_qos_fops = {
400 .write = cpu_latency_qos_write,
401 .read = cpu_latency_qos_read,
402 .open = cpu_latency_qos_open,
403 .release = cpu_latency_qos_release,
404 .llseek = noop_llseek,
405};
406
407static struct miscdevice cpu_latency_qos_miscdev = {
408 .minor = MISC_DYNAMIC_MINOR,
409 .name = "cpu_dma_latency",
410 .fops = &cpu_latency_qos_fops,
411};
412
413static int __init cpu_latency_qos_init(void)
414{
415 int ret;
416
417 ret = misc_register(&cpu_latency_qos_miscdev);
418 if (ret < 0)
419 pr_err("%s: %s setup failed\n", __func__,
420 cpu_latency_qos_miscdev.name);
421
422 return ret;
423}
424late_initcall(cpu_latency_qos_init);
425#endif /* CONFIG_CPU_IDLE */
426
427/* Definitions related to the frequency QoS below. */
428
429/**
430 * freq_constraints_init - Initialize frequency QoS constraints.
431 * @qos: Frequency QoS constraints to initialize.
432 */
433void freq_constraints_init(struct freq_constraints *qos)
434{
435 struct pm_qos_constraints *c;
436
437 c = &qos->min_freq;
438 plist_head_init(&c->list);
439 c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
440 c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
441 c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
442 c->type = PM_QOS_MAX;
443 c->notifiers = &qos->min_freq_notifiers;
444 BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
445
446 c = &qos->max_freq;
447 plist_head_init(&c->list);
448 c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
449 c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
450 c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
451 c->type = PM_QOS_MIN;
452 c->notifiers = &qos->max_freq_notifiers;
453 BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
454}
455
456/**
457 * freq_qos_read_value - Get frequency QoS constraint for a given list.
458 * @qos: Constraints to evaluate.
459 * @type: QoS request type.
460 */
461s32 freq_qos_read_value(struct freq_constraints *qos,
462 enum freq_qos_req_type type)
463{
464 s32 ret;
465
466 switch (type) {
467 case FREQ_QOS_MIN:
468 ret = IS_ERR_OR_NULL(qos) ?
469 FREQ_QOS_MIN_DEFAULT_VALUE :
470 pm_qos_read_value(&qos->min_freq);
471 break;
472 case FREQ_QOS_MAX:
473 ret = IS_ERR_OR_NULL(qos) ?
474 FREQ_QOS_MAX_DEFAULT_VALUE :
475 pm_qos_read_value(&qos->max_freq);
476 break;
477 default:
478 WARN_ON(1);
479 ret = 0;
480 }
481
482 return ret;
483}
484
485/**
486 * freq_qos_apply - Add/modify/remove frequency QoS request.
487 * @req: Constraint request to apply.
488 * @action: Action to perform (add/update/remove).
489 * @value: Value to assign to the QoS request.
490 *
491 * This is only meant to be called from inside pm_qos, not drivers.
492 */
493int freq_qos_apply(struct freq_qos_request *req,
494 enum pm_qos_req_action action, s32 value)
495{
496 int ret;
497
498 switch(req->type) {
499 case FREQ_QOS_MIN:
500 ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
501 action, value);
502 break;
503 case FREQ_QOS_MAX:
504 ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
505 action, value);
506 break;
507 default:
508 ret = -EINVAL;
509 }
510
511 return ret;
512}
513
514/**
515 * freq_qos_add_request - Insert new frequency QoS request into a given list.
516 * @qos: Constraints to update.
517 * @req: Preallocated request object.
518 * @type: Request type.
519 * @value: Request value.
520 *
521 * Insert a new entry into the @qos list of requests, recompute the effective
522 * QoS constraint value for that list and initialize the @req object. The
523 * caller needs to save that object for later use in updates and removal.
524 *
525 * Return 1 if the effective constraint value has changed, 0 if the effective
526 * constraint value has not changed, or a negative error code on failures.
527 */
528int freq_qos_add_request(struct freq_constraints *qos,
529 struct freq_qos_request *req,
530 enum freq_qos_req_type type, s32 value)
531{
532 int ret;
533
534 if (IS_ERR_OR_NULL(qos) || !req || value < 0)
535 return -EINVAL;
536
537 if (WARN(freq_qos_request_active(req),
538 "%s() called for active request\n", __func__))
539 return -EINVAL;
540
541 req->qos = qos;
542 req->type = type;
543 ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
544 if (ret < 0) {
545 req->qos = NULL;
546 req->type = 0;
547 }
548
549 return ret;
550}
551EXPORT_SYMBOL_GPL(freq_qos_add_request);
552
553/**
554 * freq_qos_update_request - Modify existing frequency QoS request.
555 * @req: Request to modify.
556 * @new_value: New request value.
557 *
558 * Update an existing frequency QoS request along with the effective constraint
559 * value for the list of requests it belongs to.
560 *
561 * Return 1 if the effective constraint value has changed, 0 if the effective
562 * constraint value has not changed, or a negative error code on failures.
563 */
564int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
565{
566 if (!req || new_value < 0)
567 return -EINVAL;
568
569 if (WARN(!freq_qos_request_active(req),
570 "%s() called for unknown object\n", __func__))
571 return -EINVAL;
572
573 if (req->pnode.prio == new_value)
574 return 0;
575
576 return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
577}
578EXPORT_SYMBOL_GPL(freq_qos_update_request);
579
580/**
581 * freq_qos_remove_request - Remove frequency QoS request from its list.
582 * @req: Request to remove.
583 *
584 * Remove the given frequency QoS request from the list of constraints it
585 * belongs to and recompute the effective constraint value for that list.
586 *
587 * Return 1 if the effective constraint value has changed, 0 if the effective
588 * constraint value has not changed, or a negative error code on failures.
589 */
590int freq_qos_remove_request(struct freq_qos_request *req)
591{
592 int ret;
593
594 if (!req)
595 return -EINVAL;
596
597 if (WARN(!freq_qos_request_active(req),
598 "%s() called for unknown object\n", __func__))
599 return -EINVAL;
600
601 ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
602 req->qos = NULL;
603 req->type = 0;
604
605 return ret;
606}
607EXPORT_SYMBOL_GPL(freq_qos_remove_request);
608
609/**
610 * freq_qos_add_notifier - Add frequency QoS change notifier.
611 * @qos: List of requests to add the notifier to.
612 * @type: Request type.
613 * @notifier: Notifier block to add.
614 */
615int freq_qos_add_notifier(struct freq_constraints *qos,
616 enum freq_qos_req_type type,
617 struct notifier_block *notifier)
618{
619 int ret;
620
621 if (IS_ERR_OR_NULL(qos) || !notifier)
622 return -EINVAL;
623
624 switch (type) {
625 case FREQ_QOS_MIN:
626 ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
627 notifier);
628 break;
629 case FREQ_QOS_MAX:
630 ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
631 notifier);
632 break;
633 default:
634 WARN_ON(1);
635 ret = -EINVAL;
636 }
637
638 return ret;
639}
640EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
641
642/**
643 * freq_qos_remove_notifier - Remove frequency QoS change notifier.
644 * @qos: List of requests to remove the notifier from.
645 * @type: Request type.
646 * @notifier: Notifier block to remove.
647 */
648int freq_qos_remove_notifier(struct freq_constraints *qos,
649 enum freq_qos_req_type type,
650 struct notifier_block *notifier)
651{
652 int ret;
653
654 if (IS_ERR_OR_NULL(qos) || !notifier)
655 return -EINVAL;
656
657 switch (type) {
658 case FREQ_QOS_MIN:
659 ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
660 notifier);
661 break;
662 case FREQ_QOS_MAX:
663 ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
664 notifier);
665 break;
666 default:
667 WARN_ON(1);
668 ret = -EINVAL;
669 }
670
671 return ret;
672}
673EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
1/*
2 * This module exposes the interface to kernel space for specifying
3 * QoS dependencies. It provides infrastructure for registration of:
4 *
5 * Dependents on a QoS value : register requests
6 * Watchers of QoS value : get notified when target QoS value changes
7 *
8 * This QoS design is best effort based. Dependents register their QoS needs.
9 * Watchers register to keep track of the current QoS needs of the system.
10 *
11 * There are 3 basic classes of QoS parameter: latency, timeout, throughput
12 * each have defined units:
13 * latency: usec
14 * timeout: usec <-- currently not used.
15 * throughput: kbs (kilo byte / sec)
16 *
17 * There are lists of pm_qos_objects each one wrapping requests, notifiers
18 *
19 * User mode requests on a QOS parameter register themselves to the
20 * subsystem by opening the device node /dev/... and writing there request to
21 * the node. As long as the process holds a file handle open to the node the
22 * client continues to be accounted for. Upon file release the usermode
23 * request is removed and a new qos target is computed. This way when the
24 * request that the application has is cleaned up when closes the file
25 * pointer or exits the pm_qos_object will get an opportunity to clean up.
26 *
27 * Mark Gross <mgross@linux.intel.com>
28 */
29
30/*#define DEBUG*/
31
32#include <linux/pm_qos.h>
33#include <linux/sched.h>
34#include <linux/spinlock.h>
35#include <linux/slab.h>
36#include <linux/time.h>
37#include <linux/fs.h>
38#include <linux/device.h>
39#include <linux/miscdevice.h>
40#include <linux/string.h>
41#include <linux/platform_device.h>
42#include <linux/init.h>
43#include <linux/kernel.h>
44
45#include <linux/uaccess.h>
46#include <linux/export.h>
47#include <trace/events/power.h>
48
49/*
50 * locking rule: all changes to constraints or notifiers lists
51 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
52 * held, taken with _irqsave. One lock to rule them all
53 */
54struct pm_qos_object {
55 struct pm_qos_constraints *constraints;
56 struct miscdevice pm_qos_power_miscdev;
57 char *name;
58};
59
60static DEFINE_SPINLOCK(pm_qos_lock);
61
62static struct pm_qos_object null_pm_qos;
63
64static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
65static struct pm_qos_constraints cpu_dma_constraints = {
66 .list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
67 .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
68 .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
69 .no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
70 .type = PM_QOS_MIN,
71 .notifiers = &cpu_dma_lat_notifier,
72};
73static struct pm_qos_object cpu_dma_pm_qos = {
74 .constraints = &cpu_dma_constraints,
75 .name = "cpu_dma_latency",
76};
77
78static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
79static struct pm_qos_constraints network_lat_constraints = {
80 .list = PLIST_HEAD_INIT(network_lat_constraints.list),
81 .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
82 .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
83 .no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
84 .type = PM_QOS_MIN,
85 .notifiers = &network_lat_notifier,
86};
87static struct pm_qos_object network_lat_pm_qos = {
88 .constraints = &network_lat_constraints,
89 .name = "network_latency",
90};
91
92
93static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
94static struct pm_qos_constraints network_tput_constraints = {
95 .list = PLIST_HEAD_INIT(network_tput_constraints.list),
96 .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
97 .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
98 .no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
99 .type = PM_QOS_MAX,
100 .notifiers = &network_throughput_notifier,
101};
102static struct pm_qos_object network_throughput_pm_qos = {
103 .constraints = &network_tput_constraints,
104 .name = "network_throughput",
105};
106
107
108static struct pm_qos_object *pm_qos_array[] = {
109 &null_pm_qos,
110 &cpu_dma_pm_qos,
111 &network_lat_pm_qos,
112 &network_throughput_pm_qos
113};
114
115static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
116 size_t count, loff_t *f_pos);
117static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
118 size_t count, loff_t *f_pos);
119static int pm_qos_power_open(struct inode *inode, struct file *filp);
120static int pm_qos_power_release(struct inode *inode, struct file *filp);
121
122static const struct file_operations pm_qos_power_fops = {
123 .write = pm_qos_power_write,
124 .read = pm_qos_power_read,
125 .open = pm_qos_power_open,
126 .release = pm_qos_power_release,
127 .llseek = noop_llseek,
128};
129
130/* unlocked internal variant */
131static inline int pm_qos_get_value(struct pm_qos_constraints *c)
132{
133 if (plist_head_empty(&c->list))
134 return c->no_constraint_value;
135
136 switch (c->type) {
137 case PM_QOS_MIN:
138 return plist_first(&c->list)->prio;
139
140 case PM_QOS_MAX:
141 return plist_last(&c->list)->prio;
142
143 default:
144 /* runtime check for not using enum */
145 BUG();
146 return PM_QOS_DEFAULT_VALUE;
147 }
148}
149
150s32 pm_qos_read_value(struct pm_qos_constraints *c)
151{
152 return c->target_value;
153}
154
155static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
156{
157 c->target_value = value;
158}
159
160/**
161 * pm_qos_update_target - manages the constraints list and calls the notifiers
162 * if needed
163 * @c: constraints data struct
164 * @node: request to add to the list, to update or to remove
165 * @action: action to take on the constraints list
166 * @value: value of the request to add or update
167 *
168 * This function returns 1 if the aggregated constraint value has changed, 0
169 * otherwise.
170 */
171int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
172 enum pm_qos_req_action action, int value)
173{
174 unsigned long flags;
175 int prev_value, curr_value, new_value;
176 int ret;
177
178 spin_lock_irqsave(&pm_qos_lock, flags);
179 prev_value = pm_qos_get_value(c);
180 if (value == PM_QOS_DEFAULT_VALUE)
181 new_value = c->default_value;
182 else
183 new_value = value;
184
185 switch (action) {
186 case PM_QOS_REMOVE_REQ:
187 plist_del(node, &c->list);
188 break;
189 case PM_QOS_UPDATE_REQ:
190 /*
191 * to change the list, we atomically remove, reinit
192 * with new value and add, then see if the extremal
193 * changed
194 */
195 plist_del(node, &c->list);
196 case PM_QOS_ADD_REQ:
197 plist_node_init(node, new_value);
198 plist_add(node, &c->list);
199 break;
200 default:
201 /* no action */
202 ;
203 }
204
205 curr_value = pm_qos_get_value(c);
206 pm_qos_set_value(c, curr_value);
207
208 spin_unlock_irqrestore(&pm_qos_lock, flags);
209
210 trace_pm_qos_update_target(action, prev_value, curr_value);
211 if (prev_value != curr_value) {
212 ret = 1;
213 if (c->notifiers)
214 blocking_notifier_call_chain(c->notifiers,
215 (unsigned long)curr_value,
216 NULL);
217 } else {
218 ret = 0;
219 }
220 return ret;
221}
222
223/**
224 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
225 * @pqf: Device PM QoS flags set to remove the request from.
226 * @req: Request to remove from the set.
227 */
228static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
229 struct pm_qos_flags_request *req)
230{
231 s32 val = 0;
232
233 list_del(&req->node);
234 list_for_each_entry(req, &pqf->list, node)
235 val |= req->flags;
236
237 pqf->effective_flags = val;
238}
239
240/**
241 * pm_qos_update_flags - Update a set of PM QoS flags.
242 * @pqf: Set of flags to update.
243 * @req: Request to add to the set, to modify, or to remove from the set.
244 * @action: Action to take on the set.
245 * @val: Value of the request to add or modify.
246 *
247 * Update the given set of PM QoS flags and call notifiers if the aggregate
248 * value has changed. Returns 1 if the aggregate constraint value has changed,
249 * 0 otherwise.
250 */
251bool pm_qos_update_flags(struct pm_qos_flags *pqf,
252 struct pm_qos_flags_request *req,
253 enum pm_qos_req_action action, s32 val)
254{
255 unsigned long irqflags;
256 s32 prev_value, curr_value;
257
258 spin_lock_irqsave(&pm_qos_lock, irqflags);
259
260 prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
261
262 switch (action) {
263 case PM_QOS_REMOVE_REQ:
264 pm_qos_flags_remove_req(pqf, req);
265 break;
266 case PM_QOS_UPDATE_REQ:
267 pm_qos_flags_remove_req(pqf, req);
268 case PM_QOS_ADD_REQ:
269 req->flags = val;
270 INIT_LIST_HEAD(&req->node);
271 list_add_tail(&req->node, &pqf->list);
272 pqf->effective_flags |= val;
273 break;
274 default:
275 /* no action */
276 ;
277 }
278
279 curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
280
281 spin_unlock_irqrestore(&pm_qos_lock, irqflags);
282
283 trace_pm_qos_update_flags(action, prev_value, curr_value);
284 return prev_value != curr_value;
285}
286
287/**
288 * pm_qos_request - returns current system wide qos expectation
289 * @pm_qos_class: identification of which qos value is requested
290 *
291 * This function returns the current target value.
292 */
293int pm_qos_request(int pm_qos_class)
294{
295 return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
296}
297EXPORT_SYMBOL_GPL(pm_qos_request);
298
299int pm_qos_request_active(struct pm_qos_request *req)
300{
301 return req->pm_qos_class != 0;
302}
303EXPORT_SYMBOL_GPL(pm_qos_request_active);
304
305static void __pm_qos_update_request(struct pm_qos_request *req,
306 s32 new_value)
307{
308 trace_pm_qos_update_request(req->pm_qos_class, new_value);
309
310 if (new_value != req->node.prio)
311 pm_qos_update_target(
312 pm_qos_array[req->pm_qos_class]->constraints,
313 &req->node, PM_QOS_UPDATE_REQ, new_value);
314}
315
316/**
317 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
318 * @work: work struct for the delayed work (timeout)
319 *
320 * This cancels the timeout request by falling back to the default at timeout.
321 */
322static void pm_qos_work_fn(struct work_struct *work)
323{
324 struct pm_qos_request *req = container_of(to_delayed_work(work),
325 struct pm_qos_request,
326 work);
327
328 __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
329}
330
331/**
332 * pm_qos_add_request - inserts new qos request into the list
333 * @req: pointer to a preallocated handle
334 * @pm_qos_class: identifies which list of qos request to use
335 * @value: defines the qos request
336 *
337 * This function inserts a new entry in the pm_qos_class list of requested qos
338 * performance characteristics. It recomputes the aggregate QoS expectations
339 * for the pm_qos_class of parameters and initializes the pm_qos_request
340 * handle. Caller needs to save this handle for later use in updates and
341 * removal.
342 */
343
344void pm_qos_add_request(struct pm_qos_request *req,
345 int pm_qos_class, s32 value)
346{
347 if (!req) /*guard against callers passing in null */
348 return;
349
350 if (pm_qos_request_active(req)) {
351 WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
352 return;
353 }
354 req->pm_qos_class = pm_qos_class;
355 INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
356 trace_pm_qos_add_request(pm_qos_class, value);
357 pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
358 &req->node, PM_QOS_ADD_REQ, value);
359}
360EXPORT_SYMBOL_GPL(pm_qos_add_request);
361
362/**
363 * pm_qos_update_request - modifies an existing qos request
364 * @req : handle to list element holding a pm_qos request to use
365 * @value: defines the qos request
366 *
367 * Updates an existing qos request for the pm_qos_class of parameters along
368 * with updating the target pm_qos_class value.
369 *
370 * Attempts are made to make this code callable on hot code paths.
371 */
372void pm_qos_update_request(struct pm_qos_request *req,
373 s32 new_value)
374{
375 if (!req) /*guard against callers passing in null */
376 return;
377
378 if (!pm_qos_request_active(req)) {
379 WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
380 return;
381 }
382
383 cancel_delayed_work_sync(&req->work);
384 __pm_qos_update_request(req, new_value);
385}
386EXPORT_SYMBOL_GPL(pm_qos_update_request);
387
388/**
389 * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
390 * @req : handle to list element holding a pm_qos request to use
391 * @new_value: defines the temporal qos request
392 * @timeout_us: the effective duration of this qos request in usecs.
393 *
394 * After timeout_us, this qos request is cancelled automatically.
395 */
396void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
397 unsigned long timeout_us)
398{
399 if (!req)
400 return;
401 if (WARN(!pm_qos_request_active(req),
402 "%s called for unknown object.", __func__))
403 return;
404
405 cancel_delayed_work_sync(&req->work);
406
407 trace_pm_qos_update_request_timeout(req->pm_qos_class,
408 new_value, timeout_us);
409 if (new_value != req->node.prio)
410 pm_qos_update_target(
411 pm_qos_array[req->pm_qos_class]->constraints,
412 &req->node, PM_QOS_UPDATE_REQ, new_value);
413
414 schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
415}
416
417/**
418 * pm_qos_remove_request - modifies an existing qos request
419 * @req: handle to request list element
420 *
421 * Will remove pm qos request from the list of constraints and
422 * recompute the current target value for the pm_qos_class. Call this
423 * on slow code paths.
424 */
425void pm_qos_remove_request(struct pm_qos_request *req)
426{
427 if (!req) /*guard against callers passing in null */
428 return;
429 /* silent return to keep pcm code cleaner */
430
431 if (!pm_qos_request_active(req)) {
432 WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
433 return;
434 }
435
436 cancel_delayed_work_sync(&req->work);
437
438 trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
439 pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
440 &req->node, PM_QOS_REMOVE_REQ,
441 PM_QOS_DEFAULT_VALUE);
442 memset(req, 0, sizeof(*req));
443}
444EXPORT_SYMBOL_GPL(pm_qos_remove_request);
445
446/**
447 * pm_qos_add_notifier - sets notification entry for changes to target value
448 * @pm_qos_class: identifies which qos target changes should be notified.
449 * @notifier: notifier block managed by caller.
450 *
451 * will register the notifier into a notification chain that gets called
452 * upon changes to the pm_qos_class target value.
453 */
454int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
455{
456 int retval;
457
458 retval = blocking_notifier_chain_register(
459 pm_qos_array[pm_qos_class]->constraints->notifiers,
460 notifier);
461
462 return retval;
463}
464EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
465
466/**
467 * pm_qos_remove_notifier - deletes notification entry from chain.
468 * @pm_qos_class: identifies which qos target changes are notified.
469 * @notifier: notifier block to be removed.
470 *
471 * will remove the notifier from the notification chain that gets called
472 * upon changes to the pm_qos_class target value.
473 */
474int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
475{
476 int retval;
477
478 retval = blocking_notifier_chain_unregister(
479 pm_qos_array[pm_qos_class]->constraints->notifiers,
480 notifier);
481
482 return retval;
483}
484EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
485
486/* User space interface to PM QoS classes via misc devices */
487static int register_pm_qos_misc(struct pm_qos_object *qos)
488{
489 qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
490 qos->pm_qos_power_miscdev.name = qos->name;
491 qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
492
493 return misc_register(&qos->pm_qos_power_miscdev);
494}
495
496static int find_pm_qos_object_by_minor(int minor)
497{
498 int pm_qos_class;
499
500 for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
501 pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
502 if (minor ==
503 pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
504 return pm_qos_class;
505 }
506 return -1;
507}
508
509static int pm_qos_power_open(struct inode *inode, struct file *filp)
510{
511 long pm_qos_class;
512
513 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
514 if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
515 struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
516 if (!req)
517 return -ENOMEM;
518
519 pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
520 filp->private_data = req;
521
522 return 0;
523 }
524 return -EPERM;
525}
526
527static int pm_qos_power_release(struct inode *inode, struct file *filp)
528{
529 struct pm_qos_request *req;
530
531 req = filp->private_data;
532 pm_qos_remove_request(req);
533 kfree(req);
534
535 return 0;
536}
537
538
539static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
540 size_t count, loff_t *f_pos)
541{
542 s32 value;
543 unsigned long flags;
544 struct pm_qos_request *req = filp->private_data;
545
546 if (!req)
547 return -EINVAL;
548 if (!pm_qos_request_active(req))
549 return -EINVAL;
550
551 spin_lock_irqsave(&pm_qos_lock, flags);
552 value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
553 spin_unlock_irqrestore(&pm_qos_lock, flags);
554
555 return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
556}
557
558static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
559 size_t count, loff_t *f_pos)
560{
561 s32 value;
562 struct pm_qos_request *req;
563
564 if (count == sizeof(s32)) {
565 if (copy_from_user(&value, buf, sizeof(s32)))
566 return -EFAULT;
567 } else {
568 int ret;
569
570 ret = kstrtos32_from_user(buf, count, 16, &value);
571 if (ret)
572 return ret;
573 }
574
575 req = filp->private_data;
576 pm_qos_update_request(req, value);
577
578 return count;
579}
580
581
582static int __init pm_qos_power_init(void)
583{
584 int ret = 0;
585 int i;
586
587 BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
588
589 for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
590 ret = register_pm_qos_misc(pm_qos_array[i]);
591 if (ret < 0) {
592 printk(KERN_ERR "pm_qos_param: %s setup failed\n",
593 pm_qos_array[i]->name);
594 return ret;
595 }
596 }
597
598 return ret;
599}
600
601late_initcall(pm_qos_power_init);