1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Power Management Quality of Service (PM QoS) support base.
4 *
5 * Copyright (C) 2020 Intel Corporation
6 *
7 * Authors:
8 * Mark Gross <mgross@linux.intel.com>
9 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
10 *
11 * Provided here is an interface for specifying PM QoS dependencies. It allows
12 * entities depending on QoS constraints to register their requests which are
13 * aggregated as appropriate to produce effective constraints (target values)
14 * that can be monitored by entities needing to respect them, either by polling
15 * or through a built-in notification mechanism.
16 *
17 * In addition to the basic functionality, more specific interfaces for managing
18 * global CPU latency QoS requests and frequency QoS requests are provided.
19 */
20
21/*#define DEBUG*/
22
23#include <linux/pm_qos.h>
24#include <linux/sched.h>
25#include <linux/spinlock.h>
26#include <linux/slab.h>
27#include <linux/time.h>
28#include <linux/fs.h>
29#include <linux/device.h>
30#include <linux/miscdevice.h>
31#include <linux/string.h>
32#include <linux/platform_device.h>
33#include <linux/init.h>
34#include <linux/kernel.h>
35#include <linux/debugfs.h>
36#include <linux/seq_file.h>
37
38#include <linux/uaccess.h>
39#include <linux/export.h>
40#include <trace/events/power.h>
41
/*
 * Locking rule: all changes to constraints or notifiers lists need to happen
 * with pm_qos_lock held, taken with _irqsave.  One lock to rule them all.
 */
47static DEFINE_SPINLOCK(pm_qos_lock);
48
49/**
50 * pm_qos_read_value - Return the current effective constraint value.
51 * @c: List of PM QoS constraint requests.
52 */
53s32 pm_qos_read_value(struct pm_qos_constraints *c)
54{
55 return READ_ONCE(c->target_value);
56}
57
58static int pm_qos_get_value(struct pm_qos_constraints *c)
59{
60 if (plist_head_empty(&c->list))
61 return c->no_constraint_value;
62
63 switch (c->type) {
64 case PM_QOS_MIN:
65 return plist_first(&c->list)->prio;
66
67 case PM_QOS_MAX:
68 return plist_last(&c->list)->prio;
69
70 default:
71 WARN(1, "Unknown PM QoS type in %s\n", __func__);
72 return PM_QOS_DEFAULT_VALUE;
73 }
74}
75
76static void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
77{
78 WRITE_ONCE(c->target_value, value);
79}
80
81/**
82 * pm_qos_update_target - Update a list of PM QoS constraint requests.
83 * @c: List of PM QoS requests.
84 * @node: Target list entry.
85 * @action: Action to carry out (add, update or remove).
86 * @value: New request value for the target list entry.
87 *
 * Update the given list of PM QoS constraint requests, @c, by carrying out an
 * @action involving the @node list entry and @value on it.
90 *
91 * The recognized values of @action are PM_QOS_ADD_REQ (store @value in @node
92 * and add it to the list), PM_QOS_UPDATE_REQ (remove @node from the list, store
93 * @value in it and add it to the list again), and PM_QOS_REMOVE_REQ (remove
94 * @node from the list, ignore @value).
95 *
96 * Return: 1 if the aggregate constraint value has changed, 0 otherwise.
97 */
98int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
99 enum pm_qos_req_action action, int value)
100{
101 int prev_value, curr_value, new_value;
102 unsigned long flags;
103
104 spin_lock_irqsave(&pm_qos_lock, flags);
105
106 prev_value = pm_qos_get_value(c);
107 if (value == PM_QOS_DEFAULT_VALUE)
108 new_value = c->default_value;
109 else
110 new_value = value;
111
112 switch (action) {
113 case PM_QOS_REMOVE_REQ:
114 plist_del(node, &c->list);
115 break;
116 case PM_QOS_UPDATE_REQ:
117 /*
118 * To change the list, atomically remove, reinit with new value
119 * and add, then see if the aggregate has changed.
120 */
121 plist_del(node, &c->list);
122 fallthrough;
123 case PM_QOS_ADD_REQ:
124 plist_node_init(node, new_value);
125 plist_add(node, &c->list);
126 break;
127 default:
128 /* no action */
129 ;
130 }
131
132 curr_value = pm_qos_get_value(c);
133 pm_qos_set_value(c, curr_value);
134
135 spin_unlock_irqrestore(&pm_qos_lock, flags);
136
137 trace_pm_qos_update_target(action, prev_value, curr_value);
138
139 if (prev_value == curr_value)
140 return 0;
141
142 if (c->notifiers)
143 blocking_notifier_call_chain(c->notifiers, curr_value, NULL);
144
145 return 1;
146}
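
/*
 * Illustrative sketch (not taken from this file): a subsystem can keep its
 * own constraint set and drive it through pm_qos_update_target().  The
 * example_* names are hypothetical; the struct fields and the helper call
 * match the definitions above.
 */
static struct pm_qos_constraints example_constraints = {
	.list = PLIST_HEAD_INIT(example_constraints.list),
	.target_value = PM_QOS_DEFAULT_VALUE,
	.default_value = PM_QOS_DEFAULT_VALUE,
	.no_constraint_value = PM_QOS_DEFAULT_VALUE,
	.type = PM_QOS_MIN,
};

static void __maybe_unused example_add_and_drop_request(struct plist_node *node)
{
	/* Add a 100 us request and recompute the effective (minimum) value. */
	pm_qos_update_target(&example_constraints, node, PM_QOS_ADD_REQ, 100);

	/* ... */

	/* Remove it again; the value argument is ignored on removal. */
	pm_qos_update_target(&example_constraints, node, PM_QOS_REMOVE_REQ,
			     PM_QOS_DEFAULT_VALUE);
}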
147
148/**
149 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
150 * @pqf: Device PM QoS flags set to remove the request from.
151 * @req: Request to remove from the set.
152 */
153static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
154 struct pm_qos_flags_request *req)
155{
156 s32 val = 0;
157
158 list_del(&req->node);
159 list_for_each_entry(req, &pqf->list, node)
160 val |= req->flags;
161
162 pqf->effective_flags = val;
163}
164
165/**
166 * pm_qos_update_flags - Update a set of PM QoS flags.
167 * @pqf: Set of PM QoS flags to update.
168 * @req: Request to add to the set, to modify, or to remove from the set.
169 * @action: Action to take on the set.
170 * @val: Value of the request to add or modify.
171 *
 * Return: true if the effective flags value has changed, false otherwise.
173 */
174bool pm_qos_update_flags(struct pm_qos_flags *pqf,
175 struct pm_qos_flags_request *req,
176 enum pm_qos_req_action action, s32 val)
177{
178 unsigned long irqflags;
179 s32 prev_value, curr_value;
180
181 spin_lock_irqsave(&pm_qos_lock, irqflags);
182
183 prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
184
185 switch (action) {
186 case PM_QOS_REMOVE_REQ:
187 pm_qos_flags_remove_req(pqf, req);
188 break;
189 case PM_QOS_UPDATE_REQ:
190 pm_qos_flags_remove_req(pqf, req);
191 fallthrough;
192 case PM_QOS_ADD_REQ:
193 req->flags = val;
194 INIT_LIST_HEAD(&req->node);
195 list_add_tail(&req->node, &pqf->list);
196 pqf->effective_flags |= val;
197 break;
198 default:
199 /* no action */
200 ;
201 }
202
203 curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
204
205 spin_unlock_irqrestore(&pm_qos_lock, irqflags);
206
207 trace_pm_qos_update_flags(action, prev_value, curr_value);
208
209 return prev_value != curr_value;
210}
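
/*
 * Illustrative sketch (hypothetical caller, not part of the interface): a
 * flags set updated with pm_qos_update_flags().  PM_QOS_FLAG_NO_POWER_OFF
 * comes from <linux/pm_qos.h>; the example_* names are made up.
 */
static struct pm_qos_flags example_flags = {
	.list = LIST_HEAD_INIT(example_flags.list),
};

static void __maybe_unused example_forbid_power_off(struct pm_qos_flags_request *req)
{
	/* The return value tells whether the effective flags changed. */
	if (pm_qos_update_flags(&example_flags, req, PM_QOS_ADD_REQ,
				PM_QOS_FLAG_NO_POWER_OFF))
		pr_debug("effective PM QoS flags changed\n");
}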
211
212#ifdef CONFIG_CPU_IDLE
213/* Definitions related to the CPU latency QoS. */
214
215static struct pm_qos_constraints cpu_latency_constraints = {
216 .list = PLIST_HEAD_INIT(cpu_latency_constraints.list),
217 .target_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
218 .default_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
219 .no_constraint_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
220 .type = PM_QOS_MIN,
221};
222
223/**
224 * cpu_latency_qos_limit - Return current system-wide CPU latency QoS limit.
225 */
226s32 cpu_latency_qos_limit(void)
227{
228 return pm_qos_read_value(&cpu_latency_constraints);
229}
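
/*
 * Illustrative sketch: how a consumer of the limit (for example an idle
 * governor) might poll it.  The helper below is hypothetical.
 */
static bool __maybe_unused example_idle_state_usable(s32 exit_latency_us)
{
	return exit_latency_us <= cpu_latency_qos_limit();
}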
230
231/**
232 * cpu_latency_qos_request_active - Check the given PM QoS request.
233 * @req: PM QoS request to check.
234 *
235 * Return: 'true' if @req has been added to the CPU latency QoS list, 'false'
236 * otherwise.
237 */
238bool cpu_latency_qos_request_active(struct pm_qos_request *req)
239{
240 return req->qos == &cpu_latency_constraints;
241}
242EXPORT_SYMBOL_GPL(cpu_latency_qos_request_active);
243
244static void cpu_latency_qos_apply(struct pm_qos_request *req,
245 enum pm_qos_req_action action, s32 value)
246{
247 int ret = pm_qos_update_target(req->qos, &req->node, action, value);
248 if (ret > 0)
249 wake_up_all_idle_cpus();
250}
251
252/**
253 * cpu_latency_qos_add_request - Add new CPU latency QoS request.
254 * @req: Pointer to a preallocated handle.
255 * @value: Requested constraint value.
256 *
257 * Use @value to initialize the request handle pointed to by @req, insert it as
258 * a new entry to the CPU latency QoS list and recompute the effective QoS
259 * constraint for that list.
260 *
261 * Callers need to save the handle for later use in updates and removal of the
262 * QoS request represented by it.
263 */
264void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value)
265{
266 if (!req)
267 return;
268
269 if (cpu_latency_qos_request_active(req)) {
		WARN(1, "%s called for already added request\n", __func__);
271 return;
272 }
273
274 trace_pm_qos_add_request(value);
275
276 req->qos = &cpu_latency_constraints;
277 cpu_latency_qos_apply(req, PM_QOS_ADD_REQ, value);
278}
279EXPORT_SYMBOL_GPL(cpu_latency_qos_add_request);
280
281/**
282 * cpu_latency_qos_update_request - Modify existing CPU latency QoS request.
283 * @req : QoS request to update.
284 * @new_value: New requested constraint value.
285 *
286 * Use @new_value to update the QoS request represented by @req in the CPU
287 * latency QoS list along with updating the effective constraint value for that
288 * list.
289 */
290void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value)
291{
292 if (!req)
293 return;
294
295 if (!cpu_latency_qos_request_active(req)) {
		WARN(1, "%s called for unknown object\n", __func__);
297 return;
298 }
299
300 trace_pm_qos_update_request(new_value);
301
302 if (new_value == req->node.prio)
303 return;
304
305 cpu_latency_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
306}
307EXPORT_SYMBOL_GPL(cpu_latency_qos_update_request);
308
309/**
310 * cpu_latency_qos_remove_request - Remove existing CPU latency QoS request.
311 * @req: QoS request to remove.
312 *
313 * Remove the CPU latency QoS request represented by @req from the CPU latency
314 * QoS list along with updating the effective constraint value for that list.
315 */
316void cpu_latency_qos_remove_request(struct pm_qos_request *req)
317{
318 if (!req)
319 return;
320
321 if (!cpu_latency_qos_request_active(req)) {
		WARN(1, "%s called for unknown object\n", __func__);
323 return;
324 }
325
326 trace_pm_qos_remove_request(PM_QOS_DEFAULT_VALUE);
327
328 cpu_latency_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
329 memset(req, 0, sizeof(*req));
330}
331EXPORT_SYMBOL_GPL(cpu_latency_qos_remove_request);
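
/*
 * Illustrative sketch of the request life cycle from a hypothetical driver.
 * The request object must stay allocated between add and remove; only the
 * cpu_latency_qos_*() calls come from this interface.
 */
static struct pm_qos_request example_latency_req;

static void __maybe_unused example_start_low_latency_io(void)
{
	/* Ask for no more than 20 us of CPU wakeup latency. */
	cpu_latency_qos_add_request(&example_latency_req, 20);
}

static void __maybe_unused example_stop_low_latency_io(void)
{
	cpu_latency_qos_remove_request(&example_latency_req);
}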
332
333/* User space interface to the CPU latency QoS via misc device. */
334
335static int cpu_latency_qos_open(struct inode *inode, struct file *filp)
336{
337 struct pm_qos_request *req;
338
339 req = kzalloc(sizeof(*req), GFP_KERNEL);
340 if (!req)
341 return -ENOMEM;
342
343 cpu_latency_qos_add_request(req, PM_QOS_DEFAULT_VALUE);
344 filp->private_data = req;
345
346 return 0;
347}
348
349static int cpu_latency_qos_release(struct inode *inode, struct file *filp)
350{
351 struct pm_qos_request *req = filp->private_data;
352
353 filp->private_data = NULL;
354
355 cpu_latency_qos_remove_request(req);
356 kfree(req);
357
358 return 0;
359}
360
361static ssize_t cpu_latency_qos_read(struct file *filp, char __user *buf,
362 size_t count, loff_t *f_pos)
363{
364 struct pm_qos_request *req = filp->private_data;
365 unsigned long flags;
366 s32 value;
367
368 if (!req || !cpu_latency_qos_request_active(req))
369 return -EINVAL;
370
371 spin_lock_irqsave(&pm_qos_lock, flags);
372 value = pm_qos_get_value(&cpu_latency_constraints);
373 spin_unlock_irqrestore(&pm_qos_lock, flags);
374
375 return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
376}
377
378static ssize_t cpu_latency_qos_write(struct file *filp, const char __user *buf,
379 size_t count, loff_t *f_pos)
380{
381 s32 value;
382
383 if (count == sizeof(s32)) {
384 if (copy_from_user(&value, buf, sizeof(s32)))
385 return -EFAULT;
386 } else {
387 int ret;
388
389 ret = kstrtos32_from_user(buf, count, 16, &value);
390 if (ret)
391 return ret;
392 }
393
394 cpu_latency_qos_update_request(filp->private_data, value);
395
396 return count;
397}
398
399static const struct file_operations cpu_latency_qos_fops = {
400 .write = cpu_latency_qos_write,
401 .read = cpu_latency_qos_read,
402 .open = cpu_latency_qos_open,
403 .release = cpu_latency_qos_release,
404 .llseek = noop_llseek,
405};
406
407static struct miscdevice cpu_latency_qos_miscdev = {
408 .minor = MISC_DYNAMIC_MINOR,
409 .name = "cpu_dma_latency",
410 .fops = &cpu_latency_qos_fops,
411};
412
413static int __init cpu_latency_qos_init(void)
414{
415 int ret;
416
417 ret = misc_register(&cpu_latency_qos_miscdev);
418 if (ret < 0)
419 pr_err("%s: %s setup failed\n", __func__,
420 cpu_latency_qos_miscdev.name);
421
422 return ret;
423}
424late_initcall(cpu_latency_qos_init);
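
/*
 * Illustrative user-space sketch (shown as a comment, not kernel code): a
 * request stays active for as long as the file descriptor is held open and
 * is dropped automatically on close.
 *
 *	int fd = open("/dev/cpu_dma_latency", O_RDWR);
 *	int32_t us = 20;
 *
 *	write(fd, &us, sizeof(us));	// binary s32; a hex ASCII string also works
 *	// ... latency-sensitive phase ...
 *	close(fd);			// the request is removed on release
 */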
425#endif /* CONFIG_CPU_IDLE */
426
427/* Definitions related to the frequency QoS below. */
428
429/**
430 * freq_constraints_init - Initialize frequency QoS constraints.
431 * @qos: Frequency QoS constraints to initialize.
432 */
433void freq_constraints_init(struct freq_constraints *qos)
434{
435 struct pm_qos_constraints *c;
436
437 c = &qos->min_freq;
438 plist_head_init(&c->list);
439 c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
440 c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
441 c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
442 c->type = PM_QOS_MAX;
443 c->notifiers = &qos->min_freq_notifiers;
444 BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
445
446 c = &qos->max_freq;
447 plist_head_init(&c->list);
448 c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
449 c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
450 c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
451 c->type = PM_QOS_MIN;
452 c->notifiers = &qos->max_freq_notifiers;
453 BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
454}
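
/*
 * Illustrative sketch: a hypothetical frequency domain embedding a
 * struct freq_constraints and initializing it once during setup.
 */
struct example_freq_domain {
	struct freq_constraints qos;
	/* ... other, hypothetical per-domain data ... */
};

static void __maybe_unused example_freq_domain_setup(struct example_freq_domain *d)
{
	freq_constraints_init(&d->qos);
}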
455
456/**
457 * freq_qos_read_value - Get frequency QoS constraint for a given list.
458 * @qos: Constraints to evaluate.
459 * @type: QoS request type.
460 */
461s32 freq_qos_read_value(struct freq_constraints *qos,
462 enum freq_qos_req_type type)
463{
464 s32 ret;
465
466 switch (type) {
467 case FREQ_QOS_MIN:
468 ret = IS_ERR_OR_NULL(qos) ?
469 FREQ_QOS_MIN_DEFAULT_VALUE :
470 pm_qos_read_value(&qos->min_freq);
471 break;
472 case FREQ_QOS_MAX:
473 ret = IS_ERR_OR_NULL(qos) ?
474 FREQ_QOS_MAX_DEFAULT_VALUE :
475 pm_qos_read_value(&qos->max_freq);
476 break;
477 default:
478 WARN_ON(1);
479 ret = 0;
480 }
481
482 return ret;
483}
484
485/**
486 * freq_qos_apply - Add/modify/remove frequency QoS request.
487 * @req: Constraint request to apply.
488 * @action: Action to perform (add/update/remove).
489 * @value: Value to assign to the QoS request.
490 *
491 * This is only meant to be called from inside pm_qos, not drivers.
492 */
493int freq_qos_apply(struct freq_qos_request *req,
494 enum pm_qos_req_action action, s32 value)
495{
496 int ret;
497
	switch (req->type) {
499 case FREQ_QOS_MIN:
500 ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
501 action, value);
502 break;
503 case FREQ_QOS_MAX:
504 ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
505 action, value);
506 break;
507 default:
508 ret = -EINVAL;
509 }
510
511 return ret;
512}
513
514/**
515 * freq_qos_add_request - Insert new frequency QoS request into a given list.
516 * @qos: Constraints to update.
517 * @req: Preallocated request object.
518 * @type: Request type.
519 * @value: Request value.
520 *
521 * Insert a new entry into the @qos list of requests, recompute the effective
522 * QoS constraint value for that list and initialize the @req object. The
523 * caller needs to save that object for later use in updates and removal.
524 *
525 * Return 1 if the effective constraint value has changed, 0 if the effective
526 * constraint value has not changed, or a negative error code on failures.
527 */
528int freq_qos_add_request(struct freq_constraints *qos,
529 struct freq_qos_request *req,
530 enum freq_qos_req_type type, s32 value)
531{
532 int ret;
533
534 if (IS_ERR_OR_NULL(qos) || !req || value < 0)
535 return -EINVAL;
536
537 if (WARN(freq_qos_request_active(req),
538 "%s() called for active request\n", __func__))
539 return -EINVAL;
540
541 req->qos = qos;
542 req->type = type;
543 ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
544 if (ret < 0) {
545 req->qos = NULL;
546 req->type = 0;
547 }
548
549 return ret;
550}
551EXPORT_SYMBOL_GPL(freq_qos_add_request);
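
/*
 * Illustrative sketch: adding, updating and dropping a minimum-frequency
 * request against an already initialized constraints object.  The values
 * are arbitrary; the unit is whatever the owner of @qos uses consistently.
 */
static void __maybe_unused example_min_freq_request(struct freq_constraints *qos,
						    struct freq_qos_request *req)
{
	if (freq_qos_add_request(qos, req, FREQ_QOS_MIN, 800000) < 0)
		return;

	/* The same request object is reused for updates and removal. */
	freq_qos_update_request(req, 1000000);
	freq_qos_remove_request(req);
}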
552
553/**
554 * freq_qos_update_request - Modify existing frequency QoS request.
555 * @req: Request to modify.
556 * @new_value: New request value.
557 *
558 * Update an existing frequency QoS request along with the effective constraint
559 * value for the list of requests it belongs to.
560 *
561 * Return 1 if the effective constraint value has changed, 0 if the effective
562 * constraint value has not changed, or a negative error code on failures.
563 */
564int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
565{
566 if (!req || new_value < 0)
567 return -EINVAL;
568
569 if (WARN(!freq_qos_request_active(req),
570 "%s() called for unknown object\n", __func__))
571 return -EINVAL;
572
573 if (req->pnode.prio == new_value)
574 return 0;
575
576 return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
577}
578EXPORT_SYMBOL_GPL(freq_qos_update_request);
579
580/**
581 * freq_qos_remove_request - Remove frequency QoS request from its list.
582 * @req: Request to remove.
583 *
584 * Remove the given frequency QoS request from the list of constraints it
585 * belongs to and recompute the effective constraint value for that list.
586 *
587 * Return 1 if the effective constraint value has changed, 0 if the effective
588 * constraint value has not changed, or a negative error code on failures.
589 */
590int freq_qos_remove_request(struct freq_qos_request *req)
591{
592 int ret;
593
594 if (!req)
595 return -EINVAL;
596
597 if (WARN(!freq_qos_request_active(req),
598 "%s() called for unknown object\n", __func__))
599 return -EINVAL;
600
601 ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
602 req->qos = NULL;
603 req->type = 0;
604
605 return ret;
606}
607EXPORT_SYMBOL_GPL(freq_qos_remove_request);
608
609/**
610 * freq_qos_add_notifier - Add frequency QoS change notifier.
611 * @qos: List of requests to add the notifier to.
612 * @type: Request type.
613 * @notifier: Notifier block to add.
614 */
615int freq_qos_add_notifier(struct freq_constraints *qos,
616 enum freq_qos_req_type type,
617 struct notifier_block *notifier)
618{
619 int ret;
620
621 if (IS_ERR_OR_NULL(qos) || !notifier)
622 return -EINVAL;
623
624 switch (type) {
625 case FREQ_QOS_MIN:
626 ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
627 notifier);
628 break;
629 case FREQ_QOS_MAX:
630 ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
631 notifier);
632 break;
633 default:
634 WARN_ON(1);
635 ret = -EINVAL;
636 }
637
638 return ret;
639}
640EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
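
/*
 * Illustrative sketch of a notifier: the new effective constraint value is
 * passed to the callback as the second argument (see the
 * blocking_notifier_call_chain() invocation in pm_qos_update_target()).
 * The example_* names are hypothetical.
 */
static int example_min_freq_notify(struct notifier_block *nb,
				   unsigned long value, void *data)
{
	pr_debug("new effective minimum frequency: %lu\n", value);
	return NOTIFY_OK;
}

static struct notifier_block example_min_freq_nb = {
	.notifier_call = example_min_freq_notify,
};

static void __maybe_unused example_watch_min_freq(struct freq_constraints *qos)
{
	freq_qos_add_notifier(qos, FREQ_QOS_MIN, &example_min_freq_nb);
}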
641
642/**
643 * freq_qos_remove_notifier - Remove frequency QoS change notifier.
644 * @qos: List of requests to remove the notifier from.
645 * @type: Request type.
646 * @notifier: Notifier block to remove.
647 */
648int freq_qos_remove_notifier(struct freq_constraints *qos,
649 enum freq_qos_req_type type,
650 struct notifier_block *notifier)
651{
652 int ret;
653
654 if (IS_ERR_OR_NULL(qos) || !notifier)
655 return -EINVAL;
656
657 switch (type) {
658 case FREQ_QOS_MIN:
659 ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
660 notifier);
661 break;
662 case FREQ_QOS_MAX:
663 ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
664 notifier);
665 break;
666 default:
667 WARN_ON(1);
668 ret = -EINVAL;
669 }
670
671 return ret;
672}
673EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);
/*
 * This module exposes the interface to kernel space for specifying
 * QoS dependencies.  It provides infrastructure for registration of:
 *
 * Dependents on a QoS value : register requests
 * Watchers of QoS value : get notified when target QoS value changes
 *
 * This QoS design is best effort based.  Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 *
 * There are 3 basic classes of QoS parameter: latency, timeout and throughput,
 * each with defined units:
 * latency: usec
 * timeout: usec <-- currently not used.
 * throughput: kbs (kilobytes/sec)
 *
 * There are lists of pm_qos_objects, each one wrapping requests and notifiers.
 *
 * User mode requests on a QoS parameter register themselves with the
 * subsystem by opening the device node /dev/... and writing their request to
 * the node.  As long as the process holds a file handle open to the node, the
 * client continues to be accounted for.  Upon file release the user mode
 * request is removed and a new QoS target is computed, so the request is
 * cleaned up when the application closes the file descriptor or exits and the
 * pm_qos_object gets an opportunity to clean up.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
29
30/*#define DEBUG*/
31
32#include <linux/pm_qos.h>
33#include <linux/sched.h>
34#include <linux/spinlock.h>
35#include <linux/slab.h>
36#include <linux/time.h>
37#include <linux/fs.h>
38#include <linux/device.h>
39#include <linux/miscdevice.h>
40#include <linux/string.h>
41#include <linux/platform_device.h>
42#include <linux/init.h>
43#include <linux/kernel.h>
44#include <linux/debugfs.h>
45#include <linux/seq_file.h>
46
47#include <linux/uaccess.h>
48#include <linux/export.h>
49#include <trace/events/power.h>
50
51/*
52 * locking rule: all changes to constraints or notifiers lists
53 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
54 * held, taken with _irqsave. One lock to rule them all
55 */
56struct pm_qos_object {
57 struct pm_qos_constraints *constraints;
58 struct miscdevice pm_qos_power_miscdev;
59 char *name;
60};
61
62static DEFINE_SPINLOCK(pm_qos_lock);
63
64static struct pm_qos_object null_pm_qos;
65
66static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
67static struct pm_qos_constraints cpu_dma_constraints = {
68 .list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
69 .target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
70 .default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
71 .no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
72 .type = PM_QOS_MIN,
73 .notifiers = &cpu_dma_lat_notifier,
74};
75static struct pm_qos_object cpu_dma_pm_qos = {
76 .constraints = &cpu_dma_constraints,
77 .name = "cpu_dma_latency",
78};
79
80static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
81static struct pm_qos_constraints network_lat_constraints = {
82 .list = PLIST_HEAD_INIT(network_lat_constraints.list),
83 .target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
84 .default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
85 .no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
86 .type = PM_QOS_MIN,
87 .notifiers = &network_lat_notifier,
88};
89static struct pm_qos_object network_lat_pm_qos = {
90 .constraints = &network_lat_constraints,
91 .name = "network_latency",
92};
93
94
95static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
96static struct pm_qos_constraints network_tput_constraints = {
97 .list = PLIST_HEAD_INIT(network_tput_constraints.list),
98 .target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
99 .default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
100 .no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
101 .type = PM_QOS_MAX,
102 .notifiers = &network_throughput_notifier,
103};
104static struct pm_qos_object network_throughput_pm_qos = {
105 .constraints = &network_tput_constraints,
106 .name = "network_throughput",
107};
108
109
110static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
111static struct pm_qos_constraints memory_bw_constraints = {
112 .list = PLIST_HEAD_INIT(memory_bw_constraints.list),
113 .target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
114 .default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
115 .no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
116 .type = PM_QOS_SUM,
117 .notifiers = &memory_bandwidth_notifier,
118};
119static struct pm_qos_object memory_bandwidth_pm_qos = {
120 .constraints = &memory_bw_constraints,
121 .name = "memory_bandwidth",
122};
123
124
125static struct pm_qos_object *pm_qos_array[] = {
126 &null_pm_qos,
127 &cpu_dma_pm_qos,
128 &network_lat_pm_qos,
129 &network_throughput_pm_qos,
130 &memory_bandwidth_pm_qos,
131};
132
133static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
134 size_t count, loff_t *f_pos);
135static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
136 size_t count, loff_t *f_pos);
137static int pm_qos_power_open(struct inode *inode, struct file *filp);
138static int pm_qos_power_release(struct inode *inode, struct file *filp);
139
140static const struct file_operations pm_qos_power_fops = {
141 .write = pm_qos_power_write,
142 .read = pm_qos_power_read,
143 .open = pm_qos_power_open,
144 .release = pm_qos_power_release,
145 .llseek = noop_llseek,
146};
147
148/* unlocked internal variant */
149static inline int pm_qos_get_value(struct pm_qos_constraints *c)
150{
151 struct plist_node *node;
152 int total_value = 0;
153
154 if (plist_head_empty(&c->list))
155 return c->no_constraint_value;
156
157 switch (c->type) {
158 case PM_QOS_MIN:
159 return plist_first(&c->list)->prio;
160
161 case PM_QOS_MAX:
162 return plist_last(&c->list)->prio;
163
164 case PM_QOS_SUM:
165 plist_for_each(node, &c->list)
166 total_value += node->prio;
167
168 return total_value;
169
170 default:
171 /* runtime check for not using enum */
172 BUG();
173 return PM_QOS_DEFAULT_VALUE;
174 }
175}
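
/*
 * Illustrative sketch of PM_QOS_SUM aggregation as used by the memory
 * bandwidth class: two concurrent requests of 100 and 200 add up to an
 * effective value of 300.  The example_* objects are hypothetical.
 */
static struct pm_qos_request example_bw_req_a, example_bw_req_b;

static void __maybe_unused example_sum_two_bandwidth_requests(void)
{
	pm_qos_add_request(&example_bw_req_a, PM_QOS_MEMORY_BANDWIDTH, 100);
	pm_qos_add_request(&example_bw_req_b, PM_QOS_MEMORY_BANDWIDTH, 200);

	/* pm_qos_request(PM_QOS_MEMORY_BANDWIDTH) now reports 300. */
}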
176
177s32 pm_qos_read_value(struct pm_qos_constraints *c)
178{
179 return c->target_value;
180}
181
182static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
183{
184 c->target_value = value;
185}
186
188static int pm_qos_dbg_show_requests(struct seq_file *s, void *unused)
189{
190 struct pm_qos_object *qos = (struct pm_qos_object *)s->private;
191 struct pm_qos_constraints *c;
192 struct pm_qos_request *req;
193 char *type;
194 unsigned long flags;
195 int tot_reqs = 0;
196 int active_reqs = 0;
197
198 if (IS_ERR_OR_NULL(qos)) {
199 pr_err("%s: bad qos param!\n", __func__);
200 return -EINVAL;
201 }
202 c = qos->constraints;
203 if (IS_ERR_OR_NULL(c)) {
204 pr_err("%s: Bad constraints on qos?\n", __func__);
205 return -EINVAL;
206 }
207
208 /* Lock to ensure we have a snapshot */
209 spin_lock_irqsave(&pm_qos_lock, flags);
210 if (plist_head_empty(&c->list)) {
211 seq_puts(s, "Empty!\n");
212 goto out;
213 }
214
215 switch (c->type) {
216 case PM_QOS_MIN:
217 type = "Minimum";
218 break;
219 case PM_QOS_MAX:
220 type = "Maximum";
221 break;
222 case PM_QOS_SUM:
223 type = "Sum";
224 break;
225 default:
226 type = "Unknown";
227 }
228
229 plist_for_each_entry(req, &c->list, node) {
230 char *state = "Default";
231
232 if ((req->node).prio != c->default_value) {
233 active_reqs++;
234 state = "Active";
235 }
236 tot_reqs++;
237 seq_printf(s, "%d: %d: %s\n", tot_reqs,
238 (req->node).prio, state);
239 }
240
241 seq_printf(s, "Type=%s, Value=%d, Requests: active=%d / total=%d\n",
242 type, pm_qos_get_value(c), active_reqs, tot_reqs);
243
244out:
245 spin_unlock_irqrestore(&pm_qos_lock, flags);
246 return 0;
247}
248
249static int pm_qos_dbg_open(struct inode *inode, struct file *file)
250{
251 return single_open(file, pm_qos_dbg_show_requests,
252 inode->i_private);
253}
254
255static const struct file_operations pm_qos_debug_fops = {
256 .open = pm_qos_dbg_open,
257 .read = seq_read,
258 .llseek = seq_lseek,
259 .release = single_release,
260};
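
/*
 * The per-class debugfs file backed by the fops above prints one line per
 * request followed by a summary, e.g. (values are illustrative only):
 *
 *	1: 100: Active
 *	2: 250: Active
 *	Type=Minimum, Value=100, Requests: active=2 / total=2
 */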
261
262/**
263 * pm_qos_update_target - manages the constraints list and calls the notifiers
264 * if needed
265 * @c: constraints data struct
266 * @node: request to add to the list, to update or to remove
267 * @action: action to take on the constraints list
268 * @value: value of the request to add or update
269 *
270 * This function returns 1 if the aggregated constraint value has changed, 0
271 * otherwise.
272 */
273int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
274 enum pm_qos_req_action action, int value)
275{
276 unsigned long flags;
277 int prev_value, curr_value, new_value;
278 int ret;
279
280 spin_lock_irqsave(&pm_qos_lock, flags);
281 prev_value = pm_qos_get_value(c);
282 if (value == PM_QOS_DEFAULT_VALUE)
283 new_value = c->default_value;
284 else
285 new_value = value;
286
287 switch (action) {
288 case PM_QOS_REMOVE_REQ:
289 plist_del(node, &c->list);
290 break;
291 case PM_QOS_UPDATE_REQ:
292 /*
293 * to change the list, we atomically remove, reinit
294 * with new value and add, then see if the extremal
295 * changed
296 */
		plist_del(node, &c->list);
		/* fall through */
298 case PM_QOS_ADD_REQ:
299 plist_node_init(node, new_value);
300 plist_add(node, &c->list);
301 break;
302 default:
303 /* no action */
304 ;
305 }
306
307 curr_value = pm_qos_get_value(c);
308 pm_qos_set_value(c, curr_value);
309
310 spin_unlock_irqrestore(&pm_qos_lock, flags);
311
312 trace_pm_qos_update_target(action, prev_value, curr_value);
313 if (prev_value != curr_value) {
314 ret = 1;
315 if (c->notifiers)
316 blocking_notifier_call_chain(c->notifiers,
317 (unsigned long)curr_value,
318 NULL);
319 } else {
320 ret = 0;
321 }
322 return ret;
323}
324
325/**
326 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
327 * @pqf: Device PM QoS flags set to remove the request from.
328 * @req: Request to remove from the set.
329 */
330static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
331 struct pm_qos_flags_request *req)
332{
333 s32 val = 0;
334
335 list_del(&req->node);
336 list_for_each_entry(req, &pqf->list, node)
337 val |= req->flags;
338
339 pqf->effective_flags = val;
340}
341
342/**
343 * pm_qos_update_flags - Update a set of PM QoS flags.
344 * @pqf: Set of flags to update.
345 * @req: Request to add to the set, to modify, or to remove from the set.
346 * @action: Action to take on the set.
347 * @val: Value of the request to add or modify.
348 *
349 * Update the given set of PM QoS flags and call notifiers if the aggregate
 * value has changed.  Returns true if the aggregate constraint value has
 * changed, false otherwise.
352 */
353bool pm_qos_update_flags(struct pm_qos_flags *pqf,
354 struct pm_qos_flags_request *req,
355 enum pm_qos_req_action action, s32 val)
356{
357 unsigned long irqflags;
358 s32 prev_value, curr_value;
359
360 spin_lock_irqsave(&pm_qos_lock, irqflags);
361
362 prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
363
364 switch (action) {
365 case PM_QOS_REMOVE_REQ:
366 pm_qos_flags_remove_req(pqf, req);
367 break;
368 case PM_QOS_UPDATE_REQ:
		pm_qos_flags_remove_req(pqf, req);
		/* fall through */
370 case PM_QOS_ADD_REQ:
371 req->flags = val;
372 INIT_LIST_HEAD(&req->node);
373 list_add_tail(&req->node, &pqf->list);
374 pqf->effective_flags |= val;
375 break;
376 default:
377 /* no action */
378 ;
379 }
380
381 curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
382
383 spin_unlock_irqrestore(&pm_qos_lock, irqflags);
384
385 trace_pm_qos_update_flags(action, prev_value, curr_value);
386 return prev_value != curr_value;
387}
388
389/**
390 * pm_qos_request - returns current system wide qos expectation
391 * @pm_qos_class: identification of which qos value is requested
392 *
393 * This function returns the current target value.
394 */
395int pm_qos_request(int pm_qos_class)
396{
397 return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
398}
399EXPORT_SYMBOL_GPL(pm_qos_request);
400
401int pm_qos_request_active(struct pm_qos_request *req)
402{
403 return req->pm_qos_class != 0;
404}
405EXPORT_SYMBOL_GPL(pm_qos_request_active);
406
407static void __pm_qos_update_request(struct pm_qos_request *req,
408 s32 new_value)
409{
410 trace_pm_qos_update_request(req->pm_qos_class, new_value);
411
412 if (new_value != req->node.prio)
413 pm_qos_update_target(
414 pm_qos_array[req->pm_qos_class]->constraints,
415 &req->node, PM_QOS_UPDATE_REQ, new_value);
416}
417
418/**
419 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
420 * @work: work struct for the delayed work (timeout)
421 *
422 * This cancels the timeout request by falling back to the default at timeout.
423 */
424static void pm_qos_work_fn(struct work_struct *work)
425{
426 struct pm_qos_request *req = container_of(to_delayed_work(work),
427 struct pm_qos_request,
428 work);
429
430 __pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
431}
432
433/**
434 * pm_qos_add_request - inserts new qos request into the list
435 * @req: pointer to a preallocated handle
436 * @pm_qos_class: identifies which list of qos request to use
437 * @value: defines the qos request
438 *
439 * This function inserts a new entry in the pm_qos_class list of requested qos
440 * performance characteristics. It recomputes the aggregate QoS expectations
441 * for the pm_qos_class of parameters and initializes the pm_qos_request
442 * handle. Caller needs to save this handle for later use in updates and
443 * removal.
444 */
446void pm_qos_add_request(struct pm_qos_request *req,
447 int pm_qos_class, s32 value)
448{
449 if (!req) /*guard against callers passing in null */
450 return;
451
452 if (pm_qos_request_active(req)) {
		WARN(1, "pm_qos_add_request() called for already added request\n");
454 return;
455 }
456 req->pm_qos_class = pm_qos_class;
457 INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
458 trace_pm_qos_add_request(pm_qos_class, value);
459 pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
460 &req->node, PM_QOS_ADD_REQ, value);
461}
462EXPORT_SYMBOL_GPL(pm_qos_add_request);
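
/*
 * Illustrative sketch of the class-based API from a hypothetical driver:
 * bound the CPU/DMA latency while the device is in use.
 */
static struct pm_qos_request example_dma_lat_req;

static void __maybe_unused example_legacy_open(void)
{
	pm_qos_add_request(&example_dma_lat_req, PM_QOS_CPU_DMA_LATENCY, 100);
}

static void __maybe_unused example_legacy_close(void)
{
	pm_qos_remove_request(&example_dma_lat_req);
}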
463
464/**
465 * pm_qos_update_request - modifies an existing qos request
466 * @req : handle to list element holding a pm_qos request to use
467 * @value: defines the qos request
468 *
469 * Updates an existing qos request for the pm_qos_class of parameters along
470 * with updating the target pm_qos_class value.
471 *
472 * Attempts are made to make this code callable on hot code paths.
473 */
474void pm_qos_update_request(struct pm_qos_request *req,
475 s32 new_value)
476{
477 if (!req) /*guard against callers passing in null */
478 return;
479
480 if (!pm_qos_request_active(req)) {
		WARN(1, "pm_qos_update_request() called for unknown object\n");
482 return;
483 }
484
485 cancel_delayed_work_sync(&req->work);
486 __pm_qos_update_request(req, new_value);
487}
488EXPORT_SYMBOL_GPL(pm_qos_update_request);
489
490/**
491 * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
492 * @req : handle to list element holding a pm_qos request to use
 * @new_value: defines the temporary qos request
494 * @timeout_us: the effective duration of this qos request in usecs.
495 *
496 * After timeout_us, this qos request is cancelled automatically.
497 */
498void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
499 unsigned long timeout_us)
500{
501 if (!req)
502 return;
503 if (WARN(!pm_qos_request_active(req),
504 "%s called for unknown object.", __func__))
505 return;
506
507 cancel_delayed_work_sync(&req->work);
508
509 trace_pm_qos_update_request_timeout(req->pm_qos_class,
510 new_value, timeout_us);
511 if (new_value != req->node.prio)
512 pm_qos_update_target(
513 pm_qos_array[req->pm_qos_class]->constraints,
514 &req->node, PM_QOS_UPDATE_REQ, new_value);
515
516 schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
517}
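
/*
 * Illustrative sketch: keep the requested value at 50 us for the next 10 ms,
 * after which the request falls back to the class default value via
 * pm_qos_work_fn().  The request must already have been added with
 * pm_qos_add_request(); the numbers are arbitrary.
 */
static void __maybe_unused example_latency_burst(struct pm_qos_request *req)
{
	pm_qos_update_request_timeout(req, 50, 10000 /* us */);
}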
518
519/**
 * pm_qos_remove_request - removes an existing qos request
521 * @req: handle to request list element
522 *
523 * Will remove pm qos request from the list of constraints and
524 * recompute the current target value for the pm_qos_class. Call this
525 * on slow code paths.
526 */
527void pm_qos_remove_request(struct pm_qos_request *req)
528{
529 if (!req) /*guard against callers passing in null */
530 return;
531 /* silent return to keep pcm code cleaner */
532
533 if (!pm_qos_request_active(req)) {
		WARN(1, "pm_qos_remove_request() called for unknown object\n");
535 return;
536 }
537
538 cancel_delayed_work_sync(&req->work);
539
540 trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
541 pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
542 &req->node, PM_QOS_REMOVE_REQ,
543 PM_QOS_DEFAULT_VALUE);
544 memset(req, 0, sizeof(*req));
545}
546EXPORT_SYMBOL_GPL(pm_qos_remove_request);
547
548/**
549 * pm_qos_add_notifier - sets notification entry for changes to target value
550 * @pm_qos_class: identifies which qos target changes should be notified.
551 * @notifier: notifier block managed by caller.
552 *
553 * will register the notifier into a notification chain that gets called
554 * upon changes to the pm_qos_class target value.
555 */
556int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
557{
558 int retval;
559
560 retval = blocking_notifier_chain_register(
561 pm_qos_array[pm_qos_class]->constraints->notifiers,
562 notifier);
563
564 return retval;
565}
566EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
567
568/**
569 * pm_qos_remove_notifier - deletes notification entry from chain.
570 * @pm_qos_class: identifies which qos target changes are notified.
571 * @notifier: notifier block to be removed.
572 *
573 * will remove the notifier from the notification chain that gets called
574 * upon changes to the pm_qos_class target value.
575 */
576int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
577{
578 int retval;
579
580 retval = blocking_notifier_chain_unregister(
581 pm_qos_array[pm_qos_class]->constraints->notifiers,
582 notifier);
583
584 return retval;
585}
586EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
587
588/* User space interface to PM QoS classes via misc devices */
589static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d)
590{
591 qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
592 qos->pm_qos_power_miscdev.name = qos->name;
593 qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;
594
595 if (d) {
596 (void)debugfs_create_file(qos->name, S_IRUGO, d,
597 (void *)qos, &pm_qos_debug_fops);
598 }
599
600 return misc_register(&qos->pm_qos_power_miscdev);
601}
602
603static int find_pm_qos_object_by_minor(int minor)
604{
605 int pm_qos_class;
606
607 for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
608 pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
609 if (minor ==
610 pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
611 return pm_qos_class;
612 }
613 return -1;
614}
615
616static int pm_qos_power_open(struct inode *inode, struct file *filp)
617{
618 long pm_qos_class;
619
620 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
621 if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
622 struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
623 if (!req)
624 return -ENOMEM;
625
626 pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
627 filp->private_data = req;
628
629 return 0;
630 }
631 return -EPERM;
632}
633
634static int pm_qos_power_release(struct inode *inode, struct file *filp)
635{
636 struct pm_qos_request *req;
637
638 req = filp->private_data;
639 pm_qos_remove_request(req);
640 kfree(req);
641
642 return 0;
643}
644
645
646static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
647 size_t count, loff_t *f_pos)
648{
649 s32 value;
650 unsigned long flags;
651 struct pm_qos_request *req = filp->private_data;
652
653 if (!req)
654 return -EINVAL;
655 if (!pm_qos_request_active(req))
656 return -EINVAL;
657
658 spin_lock_irqsave(&pm_qos_lock, flags);
659 value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
660 spin_unlock_irqrestore(&pm_qos_lock, flags);
661
662 return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
663}
664
665static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
666 size_t count, loff_t *f_pos)
667{
668 s32 value;
669 struct pm_qos_request *req;
670
671 if (count == sizeof(s32)) {
672 if (copy_from_user(&value, buf, sizeof(s32)))
673 return -EFAULT;
674 } else {
675 int ret;
676
677 ret = kstrtos32_from_user(buf, count, 16, &value);
678 if (ret)
679 return ret;
680 }
681
682 req = filp->private_data;
683 pm_qos_update_request(req, value);
684
685 return count;
686}
687
688
689static int __init pm_qos_power_init(void)
690{
691 int ret = 0;
692 int i;
693 struct dentry *d;
694
695 BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);
696
697 d = debugfs_create_dir("pm_qos", NULL);
698 if (IS_ERR_OR_NULL(d))
699 d = NULL;
700
701 for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
702 ret = register_pm_qos_misc(pm_qos_array[i], d);
703 if (ret < 0) {
704 printk(KERN_ERR "pm_qos_param: %s setup failed\n",
705 pm_qos_array[i]->name);
706 return ret;
707 }
708 }
709
710 return ret;
711}
712
713late_initcall(pm_qos_power_init);