1/*
2 * This module exposes the interface to kernel space for specifying
3 * QoS dependencies. It provides infrastructure for registration of:
4 *
5 * Dependents on a QoS value : register requests
6 * Watchers of QoS value : get notified when target QoS value changes
7 *
8 * This QoS design is best effort based. Dependents register their QoS needs.
9 * Watchers register to keep track of the current QoS needs of the system.
10 *
11 * There are 3 basic classes of QoS parameter: latency, timeout, throughput
12 * each have defined units:
13 * latency: usec
14 * timeout: usec <-- currently not used.
15 * throughput: kbs (kilo byte / sec)
16 *
17 * There are lists of pm_qos_objects each one wrapping requests, notifiers
18 *
 * User mode requests on a QOS parameter register themselves to the
 * subsystem by opening the device node /dev/... and writing their request to
 * the node. As long as the process holds a file handle open to the node the
 * client continues to be accounted for. Upon file release the usermode
 * request is removed and a new qos target is computed. This way the
 * request is cleaned up when the application closes the file handle or
 * exits, and the pm_qos_object gets an opportunity to clean up.
26 *
27 * Mark Gross <mgross@linux.intel.com>
28 */
29
30/*#define DEBUG*/
31
32#include <linux/pm_qos.h>
33#include <linux/sched.h>
34#include <linux/spinlock.h>
35#include <linux/slab.h>
36#include <linux/time.h>
37#include <linux/fs.h>
38#include <linux/device.h>
39#include <linux/miscdevice.h>
40#include <linux/string.h>
41#include <linux/platform_device.h>
42#include <linux/init.h>
43#include <linux/kernel.h>
44
45#include <linux/uaccess.h>
46#include <linux/export.h>
47#include <trace/events/power.h>
48
49/*
50 * locking rule: all changes to constraints or notifiers lists
51 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
52 * held, taken with _irqsave. One lock to rule them all
53 */
/*
 * One instance per PM QoS class: bundles the class's constraints list with
 * the misc device that exposes the class to user space.
 */
struct pm_qos_object {
	struct pm_qos_constraints *constraints;
	struct miscdevice pm_qos_power_miscdev;
	char *name;	/* class name; also used as the misc device name */
};
59
static DEFINE_SPINLOCK(pm_qos_lock);

/*
 * Placeholder for class index 0; never registered as a device (see
 * pm_qos_power_init(), which starts at PM_QOS_CPU_DMA_LATENCY).
 */
static struct pm_qos_object null_pm_qos;

static BLOCKING_NOTIFIER_HEAD(cpu_dma_lat_notifier);
static struct pm_qos_constraints cpu_dma_constraints = {
	.list = PLIST_HEAD_INIT(cpu_dma_constraints.list),
	.target_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
	.default_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
	.no_constraint_value = PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE,
	.type = PM_QOS_MIN,	/* effective value is the smallest request */
	.notifiers = &cpu_dma_lat_notifier,
};
static struct pm_qos_object cpu_dma_pm_qos = {
	.constraints = &cpu_dma_constraints,
	.name = "cpu_dma_latency",
};

static BLOCKING_NOTIFIER_HEAD(network_lat_notifier);
static struct pm_qos_constraints network_lat_constraints = {
	.list = PLIST_HEAD_INIT(network_lat_constraints.list),
	.target_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
	.default_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
	.no_constraint_value = PM_QOS_NETWORK_LAT_DEFAULT_VALUE,
	.type = PM_QOS_MIN,
	.notifiers = &network_lat_notifier,
};
static struct pm_qos_object network_lat_pm_qos = {
	.constraints = &network_lat_constraints,
	.name = "network_latency",
};


static BLOCKING_NOTIFIER_HEAD(network_throughput_notifier);
static struct pm_qos_constraints network_tput_constraints = {
	.list = PLIST_HEAD_INIT(network_tput_constraints.list),
	.target_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
	.default_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
	.no_constraint_value = PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE,
	.type = PM_QOS_MAX,	/* effective value is the largest request */
	.notifiers = &network_throughput_notifier,
};
static struct pm_qos_object network_throughput_pm_qos = {
	.constraints = &network_tput_constraints,
	.name = "network_throughput",
};


/* Indexed by pm_qos_class; must stay in sync with the class enum. */
static struct pm_qos_object *pm_qos_array[] = {
	&null_pm_qos,
	&cpu_dma_pm_qos,
	&network_lat_pm_qos,
	&network_throughput_pm_qos
};
114
/* Forward declarations for the per-class misc device file operations. */
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos);
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
		size_t count, loff_t *f_pos);
static int pm_qos_power_open(struct inode *inode, struct file *filp);
static int pm_qos_power_release(struct inode *inode, struct file *filp);

static const struct file_operations pm_qos_power_fops = {
	.write = pm_qos_power_write,
	.read = pm_qos_power_read,
	.open = pm_qos_power_open,
	.release = pm_qos_power_release,
	.llseek = noop_llseek,	/* the device nodes are not seekable */
};
129
130/* unlocked internal variant */
131static inline int pm_qos_get_value(struct pm_qos_constraints *c)
132{
133 if (plist_head_empty(&c->list))
134 return c->no_constraint_value;
135
136 switch (c->type) {
137 case PM_QOS_MIN:
138 return plist_first(&c->list)->prio;
139
140 case PM_QOS_MAX:
141 return plist_last(&c->list)->prio;
142
143 default:
144 /* runtime check for not using enum */
145 BUG();
146 return PM_QOS_DEFAULT_VALUE;
147 }
148}
149
150s32 pm_qos_read_value(struct pm_qos_constraints *c)
151{
152 return c->target_value;
153}
154
155static inline void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
156{
157 c->target_value = value;
158}
159
160/**
161 * pm_qos_update_target - manages the constraints list and calls the notifiers
162 * if needed
163 * @c: constraints data struct
164 * @node: request to add to the list, to update or to remove
165 * @action: action to take on the constraints list
166 * @value: value of the request to add or update
167 *
168 * This function returns 1 if the aggregated constraint value has changed, 0
169 * otherwise.
170 */
171int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
172 enum pm_qos_req_action action, int value)
173{
174 unsigned long flags;
175 int prev_value, curr_value, new_value;
176 int ret;
177
178 spin_lock_irqsave(&pm_qos_lock, flags);
179 prev_value = pm_qos_get_value(c);
180 if (value == PM_QOS_DEFAULT_VALUE)
181 new_value = c->default_value;
182 else
183 new_value = value;
184
185 switch (action) {
186 case PM_QOS_REMOVE_REQ:
187 plist_del(node, &c->list);
188 break;
189 case PM_QOS_UPDATE_REQ:
190 /*
191 * to change the list, we atomically remove, reinit
192 * with new value and add, then see if the extremal
193 * changed
194 */
195 plist_del(node, &c->list);
196 case PM_QOS_ADD_REQ:
197 plist_node_init(node, new_value);
198 plist_add(node, &c->list);
199 break;
200 default:
201 /* no action */
202 ;
203 }
204
205 curr_value = pm_qos_get_value(c);
206 pm_qos_set_value(c, curr_value);
207
208 spin_unlock_irqrestore(&pm_qos_lock, flags);
209
210 trace_pm_qos_update_target(action, prev_value, curr_value);
211 if (prev_value != curr_value) {
212 ret = 1;
213 if (c->notifiers)
214 blocking_notifier_call_chain(c->notifiers,
215 (unsigned long)curr_value,
216 NULL);
217 } else {
218 ret = 0;
219 }
220 return ret;
221}
222
223/**
224 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
225 * @pqf: Device PM QoS flags set to remove the request from.
226 * @req: Request to remove from the set.
227 */
228static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
229 struct pm_qos_flags_request *req)
230{
231 s32 val = 0;
232
233 list_del(&req->node);
234 list_for_each_entry(req, &pqf->list, node)
235 val |= req->flags;
236
237 pqf->effective_flags = val;
238}
239
240/**
241 * pm_qos_update_flags - Update a set of PM QoS flags.
242 * @pqf: Set of flags to update.
243 * @req: Request to add to the set, to modify, or to remove from the set.
244 * @action: Action to take on the set.
245 * @val: Value of the request to add or modify.
246 *
247 * Update the given set of PM QoS flags and call notifiers if the aggregate
248 * value has changed. Returns 1 if the aggregate constraint value has changed,
249 * 0 otherwise.
250 */
251bool pm_qos_update_flags(struct pm_qos_flags *pqf,
252 struct pm_qos_flags_request *req,
253 enum pm_qos_req_action action, s32 val)
254{
255 unsigned long irqflags;
256 s32 prev_value, curr_value;
257
258 spin_lock_irqsave(&pm_qos_lock, irqflags);
259
260 prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
261
262 switch (action) {
263 case PM_QOS_REMOVE_REQ:
264 pm_qos_flags_remove_req(pqf, req);
265 break;
266 case PM_QOS_UPDATE_REQ:
267 pm_qos_flags_remove_req(pqf, req);
268 case PM_QOS_ADD_REQ:
269 req->flags = val;
270 INIT_LIST_HEAD(&req->node);
271 list_add_tail(&req->node, &pqf->list);
272 pqf->effective_flags |= val;
273 break;
274 default:
275 /* no action */
276 ;
277 }
278
279 curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;
280
281 spin_unlock_irqrestore(&pm_qos_lock, irqflags);
282
283 trace_pm_qos_update_flags(action, prev_value, curr_value);
284 return prev_value != curr_value;
285}
286
/**
 * pm_qos_request - returns current system wide qos expectation
 * @pm_qos_class: identification of which qos value is requested
 *
 * This function returns the current target value.
 *
 * Note: @pm_qos_class is not range-checked; callers must pass a valid class.
 */
int pm_qos_request(int pm_qos_class)
{
	return pm_qos_read_value(pm_qos_array[pm_qos_class]->constraints);
}
EXPORT_SYMBOL_GPL(pm_qos_request);
298
/*
 * pm_qos_request_active - report whether @req is currently on a constraints
 * list.  Relies on req->pm_qos_class being set non-zero by
 * pm_qos_add_request() and zeroed again by pm_qos_remove_request().
 */
int pm_qos_request_active(struct pm_qos_request *req)
{
	return req->pm_qos_class != 0;
}
EXPORT_SYMBOL_GPL(pm_qos_request_active);
304
/*
 * __pm_qos_update_request - apply a new value to an active request; the
 * constraints list is only touched when the value actually changes.
 */
static void __pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value)
{
	trace_pm_qos_update_request(req->pm_qos_class, new_value);

	if (new_value != req->node.prio)
		pm_qos_update_target(
			pm_qos_array[req->pm_qos_class]->constraints,
			&req->node, PM_QOS_UPDATE_REQ, new_value);
}
315
/**
 * pm_qos_work_fn - the timeout handler of pm_qos_update_request_timeout
 * @work: work struct for the delayed work (timeout)
 *
 * This cancels the timeout request by falling back to the default at timeout.
 */
static void pm_qos_work_fn(struct work_struct *work)
{
	struct pm_qos_request *req = container_of(to_delayed_work(work),
						  struct pm_qos_request,
						  work);

	/* Revert the temporary value set by pm_qos_update_request_timeout(). */
	__pm_qos_update_request(req, PM_QOS_DEFAULT_VALUE);
}
330
/**
 * pm_qos_add_request - inserts new qos request into the list
 * @req: pointer to a preallocated handle
 * @pm_qos_class: identifies which list of qos request to use
 * @value: defines the qos request
 *
 * This function inserts a new entry in the pm_qos_class list of requested qos
 * performance characteristics. It recomputes the aggregate QoS expectations
 * for the pm_qos_class of parameters and initializes the pm_qos_request
 * handle. Caller needs to save this handle for later use in updates and
 * removal.
 */

void pm_qos_add_request(struct pm_qos_request *req,
			int pm_qos_class, s32 value)
{
	if (!req) /*guard against callers passing in null */
		return;

	/* A non-zero pm_qos_class marks the handle as already in use. */
	if (pm_qos_request_active(req)) {
		WARN(1, KERN_ERR "pm_qos_add_request() called for already added request\n");
		return;
	}
	req->pm_qos_class = pm_qos_class;
	/* Used by pm_qos_update_request_timeout() to expire the request. */
	INIT_DELAYED_WORK(&req->work, pm_qos_work_fn);
	trace_pm_qos_add_request(pm_qos_class, value);
	pm_qos_update_target(pm_qos_array[pm_qos_class]->constraints,
			     &req->node, PM_QOS_ADD_REQ, value);
}
EXPORT_SYMBOL_GPL(pm_qos_add_request);
361
/**
 * pm_qos_update_request - modifies an existing qos request
 * @req : handle to list element holding a pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing qos request for the pm_qos_class of parameters along
 * with updating the target pm_qos_class value.
 *
 * Attempts are made to make this code callable on hot code paths.
 */
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value)
{
	if (!req) /*guard against callers passing in null */
		return;

	if (!pm_qos_request_active(req)) {
		WARN(1, KERN_ERR "pm_qos_update_request() called for unknown object\n");
		return;
	}

	/* Make sure a pending timeout cannot fire after this update. */
	cancel_delayed_work_sync(&req->work);
	__pm_qos_update_request(req, new_value);
}
EXPORT_SYMBOL_GPL(pm_qos_update_request);
387
/**
 * pm_qos_update_request_timeout - modifies an existing qos request temporarily.
 * @req : handle to list element holding a pm_qos request to use
 * @new_value: defines the temporal qos request
 * @timeout_us: the effective duration of this qos request in usecs.
 *
 * After timeout_us, this qos request is cancelled automatically.
 */
void pm_qos_update_request_timeout(struct pm_qos_request *req, s32 new_value,
				   unsigned long timeout_us)
{
	if (!req)
		return;
	if (WARN(!pm_qos_request_active(req),
		 "%s called for unknown object.", __func__))
		return;

	/* Cancel any timeout already in flight before arming a new one. */
	cancel_delayed_work_sync(&req->work);

	trace_pm_qos_update_request_timeout(req->pm_qos_class,
					    new_value, timeout_us);
	if (new_value != req->node.prio)
		pm_qos_update_target(
			pm_qos_array[req->pm_qos_class]->constraints,
			&req->node, PM_QOS_UPDATE_REQ, new_value);

	/* pm_qos_work_fn() reverts to the default value when this fires. */
	schedule_delayed_work(&req->work, usecs_to_jiffies(timeout_us));
}
416
/**
 * pm_qos_remove_request - modifies an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value for the pm_qos_class. Call this
 * on slow code paths.
 */
void pm_qos_remove_request(struct pm_qos_request *req)
{
	if (!req) /*guard against callers passing in null */
		return;
		/* silent return to keep pcm code cleaner */

	if (!pm_qos_request_active(req)) {
		WARN(1, KERN_ERR "pm_qos_remove_request() called for unknown object\n");
		return;
	}

	/* Make sure a pending timeout cannot touch the request afterwards. */
	cancel_delayed_work_sync(&req->work);

	trace_pm_qos_remove_request(req->pm_qos_class, PM_QOS_DEFAULT_VALUE);
	pm_qos_update_target(pm_qos_array[req->pm_qos_class]->constraints,
			     &req->node, PM_QOS_REMOVE_REQ,
			     PM_QOS_DEFAULT_VALUE);
	/* Zeroing pm_qos_class marks the handle inactive for reuse. */
	memset(req, 0, sizeof(*req));
}
EXPORT_SYMBOL_GPL(pm_qos_remove_request);
445
446/**
447 * pm_qos_add_notifier - sets notification entry for changes to target value
448 * @pm_qos_class: identifies which qos target changes should be notified.
449 * @notifier: notifier block managed by caller.
450 *
451 * will register the notifier into a notification chain that gets called
452 * upon changes to the pm_qos_class target value.
453 */
454int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier)
455{
456 int retval;
457
458 retval = blocking_notifier_chain_register(
459 pm_qos_array[pm_qos_class]->constraints->notifiers,
460 notifier);
461
462 return retval;
463}
464EXPORT_SYMBOL_GPL(pm_qos_add_notifier);
465
466/**
467 * pm_qos_remove_notifier - deletes notification entry from chain.
468 * @pm_qos_class: identifies which qos target changes are notified.
469 * @notifier: notifier block to be removed.
470 *
471 * will remove the notifier from the notification chain that gets called
472 * upon changes to the pm_qos_class target value.
473 */
474int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier)
475{
476 int retval;
477
478 retval = blocking_notifier_chain_unregister(
479 pm_qos_array[pm_qos_class]->constraints->notifiers,
480 notifier);
481
482 return retval;
483}
484EXPORT_SYMBOL_GPL(pm_qos_remove_notifier);
485
/* User space interface to PM QoS classes via misc devices */
static int register_pm_qos_misc(struct pm_qos_object *qos)
{
	/* Expose the class as /dev/<name> with a dynamically assigned minor. */
	qos->pm_qos_power_miscdev.minor = MISC_DYNAMIC_MINOR;
	qos->pm_qos_power_miscdev.name = qos->name;
	qos->pm_qos_power_miscdev.fops = &pm_qos_power_fops;

	return misc_register(&qos->pm_qos_power_miscdev);
}
495
496static int find_pm_qos_object_by_minor(int minor)
497{
498 int pm_qos_class;
499
500 for (pm_qos_class = PM_QOS_CPU_DMA_LATENCY;
501 pm_qos_class < PM_QOS_NUM_CLASSES; pm_qos_class++) {
502 if (minor ==
503 pm_qos_array[pm_qos_class]->pm_qos_power_miscdev.minor)
504 return pm_qos_class;
505 }
506 return -1;
507}
508
509static int pm_qos_power_open(struct inode *inode, struct file *filp)
510{
511 long pm_qos_class;
512
513 pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
514 if (pm_qos_class >= PM_QOS_CPU_DMA_LATENCY) {
515 struct pm_qos_request *req = kzalloc(sizeof(*req), GFP_KERNEL);
516 if (!req)
517 return -ENOMEM;
518
519 pm_qos_add_request(req, pm_qos_class, PM_QOS_DEFAULT_VALUE);
520 filp->private_data = req;
521
522 return 0;
523 }
524 return -EPERM;
525}
526
527static int pm_qos_power_release(struct inode *inode, struct file *filp)
528{
529 struct pm_qos_request *req;
530
531 req = filp->private_data;
532 pm_qos_remove_request(req);
533 kfree(req);
534
535 return 0;
536}
537
538
static ssize_t pm_qos_power_read(struct file *filp, char __user *buf,
		size_t count, loff_t *f_pos)
{
	s32 value;
	unsigned long flags;
	struct pm_qos_request *req = filp->private_data;

	if (!req)
		return -EINVAL;
	if (!pm_qos_request_active(req))
		return -EINVAL;

	/* Take the lock for a consistent snapshot of the aggregate value. */
	spin_lock_irqsave(&pm_qos_lock, flags);
	value = pm_qos_get_value(pm_qos_array[req->pm_qos_class]->constraints);
	spin_unlock_irqrestore(&pm_qos_lock, flags);

	/* The current target value is returned as raw binary s32 data. */
	return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
}
557
static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
		size_t count, loff_t *f_pos)
{
	s32 value;
	struct pm_qos_request *req;

	/* Accept either a raw binary s32 or an ASCII hex string. */
	if (count == sizeof(s32)) {
		if (copy_from_user(&value, buf, sizeof(s32)))
			return -EFAULT;
	} else {
		int ret;

		ret = kstrtos32_from_user(buf, count, 16, &value);
		if (ret)
			return ret;
	}

	req = filp->private_data;
	pm_qos_update_request(req, value);

	return count;
}
580
581
/* Register one misc device per PM QoS class at late boot. */
static int __init pm_qos_power_init(void)
{
	int ret = 0;
	int i;

	/* Catch a class enum / pm_qos_array mismatch at build time. */
	BUILD_BUG_ON(ARRAY_SIZE(pm_qos_array) != PM_QOS_NUM_CLASSES);

	/* Skip slot 0 (the null object), which has no device node. */
	for (i = PM_QOS_CPU_DMA_LATENCY; i < PM_QOS_NUM_CLASSES; i++) {
		ret = register_pm_qos_misc(pm_qos_array[i]);
		if (ret < 0) {
			printk(KERN_ERR "pm_qos_param: %s setup failed\n",
			       pm_qos_array[i]->name);
			return ret;
		}
	}

	return ret;
}

late_initcall(pm_qos_power_init);
1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * Power Management Quality of Service (PM QoS) support base.
4 *
5 * Copyright (C) 2020 Intel Corporation
6 *
7 * Authors:
8 * Mark Gross <mgross@linux.intel.com>
9 * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
10 *
11 * Provided here is an interface for specifying PM QoS dependencies. It allows
12 * entities depending on QoS constraints to register their requests which are
13 * aggregated as appropriate to produce effective constraints (target values)
14 * that can be monitored by entities needing to respect them, either by polling
15 * or through a built-in notification mechanism.
16 *
17 * In addition to the basic functionality, more specific interfaces for managing
18 * global CPU latency QoS requests and frequency QoS requests are provided.
19 */
20
21/*#define DEBUG*/
22
23#include <linux/pm_qos.h>
24#include <linux/sched.h>
25#include <linux/spinlock.h>
26#include <linux/slab.h>
27#include <linux/time.h>
28#include <linux/fs.h>
29#include <linux/device.h>
30#include <linux/miscdevice.h>
31#include <linux/string.h>
32#include <linux/platform_device.h>
33#include <linux/init.h>
34#include <linux/kernel.h>
35#include <linux/debugfs.h>
36#include <linux/seq_file.h>
37
38#include <linux/uaccess.h>
39#include <linux/export.h>
40#include <trace/events/power.h>
41
/*
 * locking rule: all changes to constraints or notifiers lists
 * or pm_qos_object list and pm_qos_objects need to happen with pm_qos_lock
 * held, taken with _irqsave. One lock to rule them all
 *
 * Note: reads of a constraint's target_value are lockless (see
 * pm_qos_read_value()); only updates take the lock.
 */
static DEFINE_SPINLOCK(pm_qos_lock);
48
/**
 * pm_qos_read_value - Return the current effective constraint value.
 * @c: List of PM QoS constraint requests.
 *
 * Lockless; pairs with the WRITE_ONCE() in pm_qos_set_value().
 */
s32 pm_qos_read_value(struct pm_qos_constraints *c)
{
	return READ_ONCE(c->target_value);
}
57
/*
 * pm_qos_get_value - recompute the effective constraint for @c.
 *
 * Callers hold pm_qos_lock.  With no requests on the list, the
 * no-constraint value applies; otherwise the extreme (first or last plist
 * entry, depending on the constraint type) of all request values wins.
 */
static int pm_qos_get_value(struct pm_qos_constraints *c)
{
	if (plist_head_empty(&c->list))
		return c->no_constraint_value;

	switch (c->type) {
	case PM_QOS_MIN:
		return plist_first(&c->list)->prio;

	case PM_QOS_MAX:
		return plist_last(&c->list)->prio;

	default:
		WARN(1, "Unknown PM QoS type in %s\n", __func__);
		return PM_QOS_DEFAULT_VALUE;
	}
}
75
/* Publish a new effective value; pairs with READ_ONCE() in pm_qos_read_value(). */
static void pm_qos_set_value(struct pm_qos_constraints *c, s32 value)
{
	WRITE_ONCE(c->target_value, value);
}
80
/**
 * pm_qos_update_target - Update a list of PM QoS constraint requests.
 * @c: List of PM QoS requests.
 * @node: Target list entry.
 * @action: Action to carry out (add, update or remove).
 * @value: New request value for the target list entry.
 *
 * Update the given list of PM QoS constraint requests, @c, by carrying an
 * @action involving the @node list entry and @value on it.
 *
 * The recognized values of @action are PM_QOS_ADD_REQ (store @value in @node
 * and add it to the list), PM_QOS_UPDATE_REQ (remove @node from the list, store
 * @value in it and add it to the list again), and PM_QOS_REMOVE_REQ (remove
 * @node from the list, ignore @value).
 *
 * Return: 1 if the aggregate constraint value has changed, 0 otherwise.
 */
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value)
{
	int prev_value, curr_value, new_value;
	unsigned long flags;

	spin_lock_irqsave(&pm_qos_lock, flags);

	prev_value = pm_qos_get_value(c);
	if (value == PM_QOS_DEFAULT_VALUE)
		new_value = c->default_value;
	else
		new_value = value;

	switch (action) {
	case PM_QOS_REMOVE_REQ:
		plist_del(node, &c->list);
		break;
	case PM_QOS_UPDATE_REQ:
		/*
		 * To change the list, atomically remove, reinit with new value
		 * and add, then see if the aggregate has changed.
		 */
		plist_del(node, &c->list);
		fallthrough;
	case PM_QOS_ADD_REQ:
		plist_node_init(node, new_value);
		plist_add(node, &c->list);
		break;
	default:
		/* no action */
		;
	}

	curr_value = pm_qos_get_value(c);
	pm_qos_set_value(c, curr_value);

	spin_unlock_irqrestore(&pm_qos_lock, flags);

	trace_pm_qos_update_target(action, prev_value, curr_value);

	if (prev_value == curr_value)
		return 0;

	/* Notify after dropping the lock: the blocking chain may sleep. */
	if (c->notifiers)
		blocking_notifier_call_chain(c->notifiers, curr_value, NULL);

	return 1;
}
147
/**
 * pm_qos_flags_remove_req - Remove device PM QoS flags request.
 * @pqf: Device PM QoS flags set to remove the request from.
 * @req: Request to remove from the set.
 */
static void pm_qos_flags_remove_req(struct pm_qos_flags *pqf,
				    struct pm_qos_flags_request *req)
{
	s32 val = 0;

	list_del(&req->node);
	/* @req is reused as the cursor to re-OR the remaining requests. */
	list_for_each_entry(req, &pqf->list, node)
		val |= req->flags;

	pqf->effective_flags = val;
}
164
/**
 * pm_qos_update_flags - Update a set of PM QoS flags.
 * @pqf: Set of PM QoS flags to update.
 * @req: Request to add to the set, to modify, or to remove from the set.
 * @action: Action to take on the set.
 * @val: Value of the request to add or modify.
 *
 * Return: 1 if the aggregate constraint value has changed, 0 otherwise.
 */
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val)
{
	unsigned long irqflags;
	s32 prev_value, curr_value;

	spin_lock_irqsave(&pm_qos_lock, irqflags);

	/* An empty set contributes no flags at all. */
	prev_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;

	switch (action) {
	case PM_QOS_REMOVE_REQ:
		pm_qos_flags_remove_req(pqf, req);
		break;
	case PM_QOS_UPDATE_REQ:
		pm_qos_flags_remove_req(pqf, req);
		fallthrough;
	case PM_QOS_ADD_REQ:
		req->flags = val;
		INIT_LIST_HEAD(&req->node);
		list_add_tail(&req->node, &pqf->list);
		pqf->effective_flags |= val;
		break;
	default:
		/* no action */
		;
	}

	curr_value = list_empty(&pqf->list) ? 0 : pqf->effective_flags;

	spin_unlock_irqrestore(&pm_qos_lock, irqflags);

	trace_pm_qos_update_flags(action, prev_value, curr_value);

	return prev_value != curr_value;
}
211
#ifdef CONFIG_CPU_IDLE
/* Definitions related to the CPU latency QoS. */

/*
 * No notifier chain is set up here (.notifiers is left NULL); changes are
 * propagated by waking up idle CPUs instead (see cpu_latency_qos_apply()).
 */
static struct pm_qos_constraints cpu_latency_constraints = {
	.list = PLIST_HEAD_INIT(cpu_latency_constraints.list),
	.target_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
	.default_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
	.no_constraint_value = PM_QOS_CPU_LATENCY_DEFAULT_VALUE,
	.type = PM_QOS_MIN,
};
222
223static inline bool cpu_latency_qos_value_invalid(s32 value)
224{
225 return value < 0 && value != PM_QOS_DEFAULT_VALUE;
226}
227
/**
 * cpu_latency_qos_limit - Return current system-wide CPU latency QoS limit.
 *
 * Lockless read of the aggregated (smallest requested) constraint value.
 */
s32 cpu_latency_qos_limit(void)
{
	return pm_qos_read_value(&cpu_latency_constraints);
}
235
/**
 * cpu_latency_qos_request_active - Check the given PM QoS request.
 * @req: PM QoS request to check.
 *
 * Return: 'true' if @req has been added to the CPU latency QoS list, 'false'
 * otherwise.
 */
bool cpu_latency_qos_request_active(struct pm_qos_request *req)
{
	/* req->qos is set by cpu_latency_qos_add_request() and zeroed on removal. */
	return req->qos == &cpu_latency_constraints;
}
EXPORT_SYMBOL_GPL(cpu_latency_qos_request_active);
248
/*
 * cpu_latency_qos_apply - carry out @action on @req and, when the effective
 * constraint actually changed, wake all idle CPUs so they can re-evaluate
 * against the new limit.
 */
static void cpu_latency_qos_apply(struct pm_qos_request *req,
				  enum pm_qos_req_action action, s32 value)
{
	int ret = pm_qos_update_target(req->qos, &req->node, action, value);
	if (ret > 0)
		wake_up_all_idle_cpus();
}
256
/**
 * cpu_latency_qos_add_request - Add new CPU latency QoS request.
 * @req: Pointer to a preallocated handle.
 * @value: Requested constraint value.
 *
 * Use @value to initialize the request handle pointed to by @req, insert it as
 * a new entry to the CPU latency QoS list and recompute the effective QoS
 * constraint for that list.
 *
 * Callers need to save the handle for later use in updates and removal of the
 * QoS request represented by it.
 */
void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value)
{
	if (!req || cpu_latency_qos_value_invalid(value))
		return;

	if (cpu_latency_qos_request_active(req)) {
		WARN(1, KERN_ERR "%s called for already added request\n", __func__);
		return;
	}

	trace_pm_qos_add_request(value);

	/* Setting req->qos marks the handle active for request_active(). */
	req->qos = &cpu_latency_constraints;
	cpu_latency_qos_apply(req, PM_QOS_ADD_REQ, value);
}
EXPORT_SYMBOL_GPL(cpu_latency_qos_add_request);
285
/**
 * cpu_latency_qos_update_request - Modify existing CPU latency QoS request.
 * @req : QoS request to update.
 * @new_value: New requested constraint value.
 *
 * Use @new_value to update the QoS request represented by @req in the CPU
 * latency QoS list along with updating the effective constraint value for that
 * list.
 */
void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value)
{
	if (!req || cpu_latency_qos_value_invalid(new_value))
		return;

	if (!cpu_latency_qos_request_active(req)) {
		WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
		return;
	}

	trace_pm_qos_update_request(new_value);

	/* Avoid touching the list when the requested value is unchanged. */
	if (new_value == req->node.prio)
		return;

	cpu_latency_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
}
EXPORT_SYMBOL_GPL(cpu_latency_qos_update_request);
313
/**
 * cpu_latency_qos_remove_request - Remove existing CPU latency QoS request.
 * @req: QoS request to remove.
 *
 * Remove the CPU latency QoS request represented by @req from the CPU latency
 * QoS list along with updating the effective constraint value for that list.
 */
void cpu_latency_qos_remove_request(struct pm_qos_request *req)
{
	if (!req)
		return;

	if (!cpu_latency_qos_request_active(req)) {
		WARN(1, KERN_ERR "%s called for unknown object\n", __func__);
		return;
	}

	trace_pm_qos_remove_request(PM_QOS_DEFAULT_VALUE);

	cpu_latency_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	/* Zeroing clears req->qos, marking the handle inactive for reuse. */
	memset(req, 0, sizeof(*req));
}
EXPORT_SYMBOL_GPL(cpu_latency_qos_remove_request);
337
/* User space interface to the CPU latency QoS via misc device. */

/*
 * Each open file handle carries its own QoS request, starting at the
 * default (no constraint) value until the owner writes a value.
 */
static int cpu_latency_qos_open(struct inode *inode, struct file *filp)
{
	struct pm_qos_request *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	cpu_latency_qos_add_request(req, PM_QOS_DEFAULT_VALUE);
	filp->private_data = req;

	return 0;
}
353
static int cpu_latency_qos_release(struct inode *inode, struct file *filp)
{
	struct pm_qos_request *req = filp->private_data;

	/* Clear the pointer before the request is freed below. */
	filp->private_data = NULL;

	cpu_latency_qos_remove_request(req);
	kfree(req);

	return 0;
}
365
static ssize_t cpu_latency_qos_read(struct file *filp, char __user *buf,
				    size_t count, loff_t *f_pos)
{
	struct pm_qos_request *req = filp->private_data;
	unsigned long flags;
	s32 value;

	if (!req || !cpu_latency_qos_request_active(req))
		return -EINVAL;

	/* Take the lock for a consistent snapshot of the aggregate value. */
	spin_lock_irqsave(&pm_qos_lock, flags);
	value = pm_qos_get_value(&cpu_latency_constraints);
	spin_unlock_irqrestore(&pm_qos_lock, flags);

	/* The current effective value is returned as raw binary s32 data. */
	return simple_read_from_buffer(buf, count, f_pos, &value, sizeof(s32));
}
382
static ssize_t cpu_latency_qos_write(struct file *filp, const char __user *buf,
				     size_t count, loff_t *f_pos)
{
	s32 value;

	/* Accept either a raw binary s32 or an ASCII hex string. */
	if (count == sizeof(s32)) {
		if (copy_from_user(&value, buf, sizeof(s32)))
			return -EFAULT;
	} else {
		int ret;

		ret = kstrtos32_from_user(buf, count, 16, &value);
		if (ret)
			return ret;
	}

	cpu_latency_qos_update_request(filp->private_data, value);

	return count;
}
403
static const struct file_operations cpu_latency_qos_fops = {
	.write = cpu_latency_qos_write,
	.read = cpu_latency_qos_read,
	.open = cpu_latency_qos_open,
	.release = cpu_latency_qos_release,
	.llseek = noop_llseek,	/* the device is not seekable */
};

static struct miscdevice cpu_latency_qos_miscdev = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "cpu_dma_latency",	/* historical node name, kept for user space */
	.fops = &cpu_latency_qos_fops,
};
417
/* Register the CPU latency QoS misc device at late boot. */
static int __init cpu_latency_qos_init(void)
{
	int ret;

	ret = misc_register(&cpu_latency_qos_miscdev);
	if (ret < 0)
		pr_err("%s: %s setup failed\n", __func__,
		       cpu_latency_qos_miscdev.name);

	return ret;
}
late_initcall(cpu_latency_qos_init);
430#endif /* CONFIG_CPU_IDLE */
431
432/* Definitions related to the frequency QoS below. */
433
434static inline bool freq_qos_value_invalid(s32 value)
435{
436 return value < 0 && value != PM_QOS_DEFAULT_VALUE;
437}
438
/**
 * freq_constraints_init - Initialize frequency QoS constraints.
 * @qos: Frequency QoS constraints to initialize.
 */
void freq_constraints_init(struct freq_constraints *qos)
{
	struct pm_qos_constraints *c;

	/*
	 * Minimum-frequency requests aggregate with PM_QOS_MAX: the
	 * effective floor is the largest minimum anyone requested.
	 */
	c = &qos->min_freq;
	plist_head_init(&c->list);
	c->target_value = FREQ_QOS_MIN_DEFAULT_VALUE;
	c->default_value = FREQ_QOS_MIN_DEFAULT_VALUE;
	c->no_constraint_value = FREQ_QOS_MIN_DEFAULT_VALUE;
	c->type = PM_QOS_MAX;
	c->notifiers = &qos->min_freq_notifiers;
	BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);

	/* Conversely, the effective ceiling is the smallest maximum. */
	c = &qos->max_freq;
	plist_head_init(&c->list);
	c->target_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	c->default_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	c->no_constraint_value = FREQ_QOS_MAX_DEFAULT_VALUE;
	c->type = PM_QOS_MIN;
	c->notifiers = &qos->max_freq_notifiers;
	BLOCKING_INIT_NOTIFIER_HEAD(c->notifiers);
}
465
/**
 * freq_qos_read_value - Get frequency QoS constraint for a given list.
 * @qos: Constraints to evaluate.
 * @type: QoS request type.
 *
 * A NULL or IS_ERR() @qos is tolerated and yields the type's default value.
 */
s32 freq_qos_read_value(struct freq_constraints *qos,
			enum freq_qos_req_type type)
{
	s32 ret;

	switch (type) {
	case FREQ_QOS_MIN:
		ret = IS_ERR_OR_NULL(qos) ?
			FREQ_QOS_MIN_DEFAULT_VALUE :
			pm_qos_read_value(&qos->min_freq);
		break;
	case FREQ_QOS_MAX:
		ret = IS_ERR_OR_NULL(qos) ?
			FREQ_QOS_MAX_DEFAULT_VALUE :
			pm_qos_read_value(&qos->max_freq);
		break;
	default:
		WARN_ON(1);
		ret = 0;
	}

	return ret;
}
494
495/**
496 * freq_qos_apply - Add/modify/remove frequency QoS request.
497 * @req: Constraint request to apply.
498 * @action: Action to perform (add/update/remove).
499 * @value: Value to assign to the QoS request.
500 *
501 * This is only meant to be called from inside pm_qos, not drivers.
502 */
503int freq_qos_apply(struct freq_qos_request *req,
504 enum pm_qos_req_action action, s32 value)
505{
506 int ret;
507
508 switch(req->type) {
509 case FREQ_QOS_MIN:
510 ret = pm_qos_update_target(&req->qos->min_freq, &req->pnode,
511 action, value);
512 break;
513 case FREQ_QOS_MAX:
514 ret = pm_qos_update_target(&req->qos->max_freq, &req->pnode,
515 action, value);
516 break;
517 default:
518 ret = -EINVAL;
519 }
520
521 return ret;
522}
523
524/**
525 * freq_qos_add_request - Insert new frequency QoS request into a given list.
526 * @qos: Constraints to update.
527 * @req: Preallocated request object.
528 * @type: Request type.
529 * @value: Request value.
530 *
531 * Insert a new entry into the @qos list of requests, recompute the effective
532 * QoS constraint value for that list and initialize the @req object. The
533 * caller needs to save that object for later use in updates and removal.
534 *
535 * Return 1 if the effective constraint value has changed, 0 if the effective
536 * constraint value has not changed, or a negative error code on failures.
537 */
538int freq_qos_add_request(struct freq_constraints *qos,
539 struct freq_qos_request *req,
540 enum freq_qos_req_type type, s32 value)
541{
542 int ret;
543
544 if (IS_ERR_OR_NULL(qos) || !req || freq_qos_value_invalid(value))
545 return -EINVAL;
546
547 if (WARN(freq_qos_request_active(req),
548 "%s() called for active request\n", __func__))
549 return -EINVAL;
550
551 req->qos = qos;
552 req->type = type;
553 ret = freq_qos_apply(req, PM_QOS_ADD_REQ, value);
554 if (ret < 0) {
555 req->qos = NULL;
556 req->type = 0;
557 }
558
559 return ret;
560}
561EXPORT_SYMBOL_GPL(freq_qos_add_request);
562
563/**
564 * freq_qos_update_request - Modify existing frequency QoS request.
565 * @req: Request to modify.
566 * @new_value: New request value.
567 *
568 * Update an existing frequency QoS request along with the effective constraint
569 * value for the list of requests it belongs to.
570 *
571 * Return 1 if the effective constraint value has changed, 0 if the effective
572 * constraint value has not changed, or a negative error code on failures.
573 */
574int freq_qos_update_request(struct freq_qos_request *req, s32 new_value)
575{
576 if (!req || freq_qos_value_invalid(new_value))
577 return -EINVAL;
578
579 if (WARN(!freq_qos_request_active(req),
580 "%s() called for unknown object\n", __func__))
581 return -EINVAL;
582
583 if (req->pnode.prio == new_value)
584 return 0;
585
586 return freq_qos_apply(req, PM_QOS_UPDATE_REQ, new_value);
587}
588EXPORT_SYMBOL_GPL(freq_qos_update_request);
589
590/**
591 * freq_qos_remove_request - Remove frequency QoS request from its list.
592 * @req: Request to remove.
593 *
594 * Remove the given frequency QoS request from the list of constraints it
595 * belongs to and recompute the effective constraint value for that list.
596 *
597 * Return 1 if the effective constraint value has changed, 0 if the effective
598 * constraint value has not changed, or a negative error code on failures.
599 */
600int freq_qos_remove_request(struct freq_qos_request *req)
601{
602 int ret;
603
604 if (!req)
605 return -EINVAL;
606
607 if (WARN(!freq_qos_request_active(req),
608 "%s() called for unknown object\n", __func__))
609 return -EINVAL;
610
611 ret = freq_qos_apply(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
612 req->qos = NULL;
613 req->type = 0;
614
615 return ret;
616}
617EXPORT_SYMBOL_GPL(freq_qos_remove_request);
618
619/**
620 * freq_qos_add_notifier - Add frequency QoS change notifier.
621 * @qos: List of requests to add the notifier to.
622 * @type: Request type.
623 * @notifier: Notifier block to add.
624 */
625int freq_qos_add_notifier(struct freq_constraints *qos,
626 enum freq_qos_req_type type,
627 struct notifier_block *notifier)
628{
629 int ret;
630
631 if (IS_ERR_OR_NULL(qos) || !notifier)
632 return -EINVAL;
633
634 switch (type) {
635 case FREQ_QOS_MIN:
636 ret = blocking_notifier_chain_register(qos->min_freq.notifiers,
637 notifier);
638 break;
639 case FREQ_QOS_MAX:
640 ret = blocking_notifier_chain_register(qos->max_freq.notifiers,
641 notifier);
642 break;
643 default:
644 WARN_ON(1);
645 ret = -EINVAL;
646 }
647
648 return ret;
649}
650EXPORT_SYMBOL_GPL(freq_qos_add_notifier);
651
652/**
653 * freq_qos_remove_notifier - Remove frequency QoS change notifier.
654 * @qos: List of requests to remove the notifier from.
655 * @type: Request type.
656 * @notifier: Notifier block to remove.
657 */
658int freq_qos_remove_notifier(struct freq_constraints *qos,
659 enum freq_qos_req_type type,
660 struct notifier_block *notifier)
661{
662 int ret;
663
664 if (IS_ERR_OR_NULL(qos) || !notifier)
665 return -EINVAL;
666
667 switch (type) {
668 case FREQ_QOS_MIN:
669 ret = blocking_notifier_chain_unregister(qos->min_freq.notifiers,
670 notifier);
671 break;
672 case FREQ_QOS_MAX:
673 ret = blocking_notifier_chain_unregister(qos->max_freq.notifiers,
674 notifier);
675 break;
676 default:
677 WARN_ON(1);
678 ret = -EINVAL;
679 }
680
681 return ret;
682}
683EXPORT_SYMBOL_GPL(freq_qos_remove_notifier);