// SPDX-License-Identifier: GPL-2.0
/*
 * Devices PM QoS constraints management
 *
 * Copyright (C) 2011 Texas Instruments, Inc.
 *
 * This module exposes the interface to kernel space for specifying
 * per-device PM QoS dependencies. It provides infrastructure for registration
 * of:
 *
 * Dependents on a QoS value: register requests
 * Watchers of a QoS value: get notified when the target QoS value changes
 *
 * This QoS design is best-effort based. Dependents register their QoS needs.
 * Watchers register to keep track of the current QoS needs of the system.
 * Watchers can register a per-device notification callback using the
 * dev_pm_qos_*_notifier API. The notification chain data is stored in the
 * per-device constraint data struct.
 *
 * Note about the per-device constraint data struct allocation:
 * . The per-device constraints data struct ptr is stored into the device
 *   dev_pm_info.
 * . To minimize the data usage by the per-device constraints, the data struct
 *   is only allocated at the first call to dev_pm_qos_add_request.
 * . The data is later freed when the device is removed from the system.
 * . A global mutex protects the constraints users from the data being
 *   allocated and freed.
 */

#include <linux/pm_qos.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/export.h>
#include <linux/pm_runtime.h>
#include <linux/err.h>
#include <trace/events/power.h>

#include "power.h"

static DEFINE_MUTEX(dev_pm_qos_mtx);
static DEFINE_MUTEX(dev_pm_qos_sysfs_mtx);

/**
 * __dev_pm_qos_flags - Check PM QoS flags for a given device.
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 *
 * This routine must be called with dev->power.lock held.
 */
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask)
{
	struct dev_pm_qos *qos = dev->power.qos;
	struct pm_qos_flags *pqf;
	s32 val;

	lockdep_assert_held(&dev->power.lock);

	if (IS_ERR_OR_NULL(qos))
		return PM_QOS_FLAGS_UNDEFINED;

	pqf = &qos->flags;
	if (list_empty(&pqf->list))
		return PM_QOS_FLAGS_UNDEFINED;

	val = pqf->effective_flags & mask;
	if (val)
		return (val == mask) ? PM_QOS_FLAGS_ALL : PM_QOS_FLAGS_SOME;

	return PM_QOS_FLAGS_NONE;
}

/**
 * dev_pm_qos_flags - Check PM QoS flags for a given device (locked).
 * @dev: Device to check the PM QoS flags for.
 * @mask: Flags to check against.
 */
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask)
{
	unsigned long irqflags;
	enum pm_qos_flags_status ret;

	spin_lock_irqsave(&dev->power.lock, irqflags);
	ret = __dev_pm_qos_flags(dev, mask);
	spin_unlock_irqrestore(&dev->power.lock, irqflags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_flags);
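
/*
 * Example (illustrative sketch, not part of this file): a caller deciding
 * whether a device may be powered off could check the PM_QOS_FLAG_NO_POWER_OFF
 * flag like this (avoid_power_off is a hypothetical policy variable):
 *
 *	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF)
 *			!= PM_QOS_FLAGS_NONE)
 *		avoid_power_off = true;
 *
 * Note that PM_QOS_FLAGS_UNDEFINED (no constraints object, or an empty flags
 * list) also compares unequal to PM_QOS_FLAGS_NONE, so a caller that wants to
 * treat "no constraints" as "no objection" must check for it explicitly.
 */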

/**
 * __dev_pm_qos_resume_latency - Get resume latency constraint for a given device.
 * @dev: Device to get the PM QoS constraint value for.
 *
 * This routine must be called with dev->power.lock held.
 */
s32 __dev_pm_qos_resume_latency(struct device *dev)
{
	lockdep_assert_held(&dev->power.lock);

	return dev_pm_qos_raw_resume_latency(dev);
}

/**
 * dev_pm_qos_read_value - Get PM QoS constraint for a given device (locked).
 * @dev: Device to get the PM QoS constraint value for.
 * @type: QoS request type.
 */
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos *qos = dev->power.qos;
	unsigned long flags;
	s32 ret;

	spin_lock_irqsave(&dev->power.lock, flags);

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_RESUME_LATENCY_NO_CONSTRAINT
			: pm_qos_read_value(&qos->resume_latency);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MIN);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = IS_ERR_OR_NULL(qos) ? PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE
			: freq_qos_read_value(&qos->freq, FREQ_QOS_MAX);
		break;
	default:
		WARN_ON(1);
		ret = 0;
	}

	spin_unlock_irqrestore(&dev->power.lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_read_value);
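
/*
 * Example (illustrative, not part of this file): a driver or governor can ask
 * for the currently aggregated constraint, e.g. the effective resume latency
 * limit in microseconds:
 *
 *	s32 limit = dev_pm_qos_read_value(dev, DEV_PM_QOS_RESUME_LATENCY);
 *
 *	if (limit != PM_QOS_RESUME_LATENCY_NO_CONSTRAINT &&
 *	    limit < state_exit_latency_us)	// hypothetical state data
 *		skip_deep_state = true;		// hypothetical policy flag
 */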

/**
 * apply_constraint - Add/modify/remove device PM QoS request.
 * @req: Constraint request to apply
 * @action: Action to perform (add/update/remove).
 * @value: Value to assign to the QoS request.
 *
 * Internal function to update the constraints list using the PM QoS core
 * code and if needed call the per-device callbacks.
 */
static int apply_constraint(struct dev_pm_qos_request *req,
			    enum pm_qos_req_action action, s32 value)
{
	struct dev_pm_qos *qos = req->dev->power.qos;
	int ret;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		if (WARN_ON(action != PM_QOS_REMOVE_REQ && value < 0))
			value = 0;

		ret = pm_qos_update_target(&qos->resume_latency,
					   &req->data.pnode, action, value);
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		ret = pm_qos_update_target(&qos->latency_tolerance,
					   &req->data.pnode, action, value);
		if (ret) {
			value = pm_qos_read_value(&qos->latency_tolerance);
			req->dev->power.set_latency_tolerance(req->dev, value);
		}
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_apply(&req->data.freq, action, value);
		break;
	case DEV_PM_QOS_FLAGS:
		ret = pm_qos_update_flags(&qos->flags, &req->data.flr,
					  action, value);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

/*
 * dev_pm_qos_constraints_allocate
 * @dev: device to allocate data for
 *
 * Called at the first call to add_request, for constraint data allocation.
 * Must be called with the dev_pm_qos_mtx mutex held.
 */
static int dev_pm_qos_constraints_allocate(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct pm_qos_constraints *c;
	struct blocking_notifier_head *n;

	qos = kzalloc(sizeof(*qos), GFP_KERNEL);
	if (!qos)
		return -ENOMEM;

	n = kcalloc(3, sizeof(*n), GFP_KERNEL);
	if (!n) {
		kfree(qos);
		return -ENOMEM;
	}

	c = &qos->resume_latency;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->default_value = PM_QOS_RESUME_LATENCY_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;
	c->notifiers = n;
	BLOCKING_INIT_NOTIFIER_HEAD(n);

	c = &qos->latency_tolerance;
	plist_head_init(&c->list);
	c->target_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->default_value = PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE;
	c->no_constraint_value = PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT;
	c->type = PM_QOS_MIN;

	freq_constraints_init(&qos->freq);

	INIT_LIST_HEAD(&qos->flags.list);

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = qos;
	spin_unlock_irq(&dev->power.lock);

	return 0;
}

static void __dev_pm_qos_hide_latency_limit(struct device *dev);
static void __dev_pm_qos_hide_flags(struct device *dev);

/**
 * dev_pm_qos_constraints_destroy
 * @dev: target device
 *
 * Called from the device PM subsystem on device removal under device_pm_lock().
 */
void dev_pm_qos_constraints_destroy(struct device *dev)
{
	struct dev_pm_qos *qos;
	struct dev_pm_qos_request *req, *tmp;
	struct pm_qos_constraints *c;
	struct pm_qos_flags *f;

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	/*
	 * If the device's PM QoS resume latency limit or PM QoS flags have been
	 * exposed to user space, they have to be hidden at this point.
	 */
	pm_qos_sysfs_remove_resume_latency(dev);
	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);

	__dev_pm_qos_hide_latency_limit(dev);
	__dev_pm_qos_hide_flags(dev);

	qos = dev->power.qos;
	if (!qos)
		goto out;

	/* Flush the constraints lists for the device. */
	c = &qos->resume_latency;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		/*
		 * Update constraints list and call the notification
		 * callbacks if needed
		 */
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->latency_tolerance;
	plist_for_each_entry_safe(req, tmp, &c->list, data.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.min_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	c = &qos->freq.max_freq;
	plist_for_each_entry_safe(req, tmp, &c->list, data.freq.pnode) {
		apply_constraint(req, PM_QOS_REMOVE_REQ,
				 PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	f = &qos->flags;
	list_for_each_entry_safe(req, tmp, &f->list, data.flr.node) {
		apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
		memset(req, 0, sizeof(*req));
	}

	spin_lock_irq(&dev->power.lock);
	dev->power.qos = ERR_PTR(-ENODEV);
	spin_unlock_irq(&dev->power.lock);

	kfree(qos->resume_latency.notifiers);
	kfree(qos);

 out:
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}

static bool dev_pm_qos_invalid_req_type(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	return type == DEV_PM_QOS_LATENCY_TOLERANCE &&
	       !dev->power.set_latency_tolerance;
}

static int __dev_pm_qos_add_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	int ret = 0;

	if (!dev || !req || dev_pm_qos_invalid_req_type(dev, type))
		return -EINVAL;

	if (WARN(dev_pm_qos_request_active(req),
		 "%s() called for already added request\n", __func__))
		return -EINVAL;

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	trace_dev_pm_qos_add_request(dev_name(dev), type, value);
	if (ret)
		return ret;

	req->dev = dev;
	req->type = type;
	if (req->type == DEV_PM_QOS_MIN_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MIN, value);
	else if (req->type == DEV_PM_QOS_MAX_FREQUENCY)
		ret = freq_qos_add_request(&dev->power.qos->freq,
					   &req->data.freq,
					   FREQ_QOS_MAX, value);
	else
		ret = apply_constraint(req, PM_QOS_ADD_REQ, value);

	return ret;
}

/**
 * dev_pm_qos_add_request - inserts new qos request into the list
 * @dev: target device for the constraint
 * @req: pointer to a preallocated handle
 * @type: type of the request
 * @value: defines the qos request
 *
 * This function inserts a new entry in the device constraints list of
 * requested qos performance characteristics. It recomputes the aggregate
 * QoS expectations of parameters and initializes the dev_pm_qos_request
 * handle.  Caller needs to save this handle for later use in updates and
 * removal.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENOMEM if there's not enough memory
 * to allocate for data structures, -ENODEV if the device has just been removed
 * from the system.
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_add_request(dev, req, type, value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_request);
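
/*
 * Example (illustrative sketch, not part of this file): a driver typically
 * embeds the request handle in its private data and adds the request once,
 * e.g. at probe time:
 *
 *	struct my_data {			// hypothetical driver data
 *		struct dev_pm_qos_request req;
 *	};
 *
 *	ret = dev_pm_qos_add_request(dev, &data->req,
 *				     DEV_PM_QOS_RESUME_LATENCY, 100);
 *	if (ret < 0)
 *		return ret;
 *
 * The handle must remain valid, and must not be passed to another add,
 * until dev_pm_qos_remove_request() is called on it.
 */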

/**
 * __dev_pm_qos_update_request - Modify an existing device PM QoS request.
 * @req: PM QoS request to modify.
 * @new_value: New value to request.
 */
static int __dev_pm_qos_update_request(struct dev_pm_qos_request *req,
				       s32 new_value)
{
	s32 curr_value;
	int ret = 0;

	if (!req) /* guard against callers passing in NULL */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	switch (req->type) {
	case DEV_PM_QOS_RESUME_LATENCY:
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		curr_value = req->data.pnode.prio;
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
	case DEV_PM_QOS_MAX_FREQUENCY:
		curr_value = req->data.freq.pnode.prio;
		break;
	case DEV_PM_QOS_FLAGS:
		curr_value = req->data.flr.flags;
		break;
	default:
		return -EINVAL;
	}

	trace_dev_pm_qos_update_request(dev_name(req->dev), req->type,
					new_value);
	if (curr_value != new_value)
		ret = apply_constraint(req, PM_QOS_UPDATE_REQ, new_value);

	return ret;
}

/**
 * dev_pm_qos_update_request - modifies an existing qos request
 * @req: handle to list element holding a dev_pm_qos request to use
 * @new_value: defines the qos request
 *
 * Updates an existing dev PM qos request along with updating the
 * target value.
 *
 * Attempts are made to make this code callable on hot code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_update_request(req, new_value);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_request);

static int __dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	if (!req) /* guard against callers passing in NULL */
		return -EINVAL;

	if (WARN(!dev_pm_qos_request_active(req),
		 "%s() called for unknown object\n", __func__))
		return -EINVAL;

	if (IS_ERR_OR_NULL(req->dev->power.qos))
		return -ENODEV;

	trace_dev_pm_qos_remove_request(dev_name(req->dev), req->type,
					PM_QOS_DEFAULT_VALUE);
	ret = apply_constraint(req, PM_QOS_REMOVE_REQ, PM_QOS_DEFAULT_VALUE);
	memset(req, 0, sizeof(*req));
	return ret;
}

/**
 * dev_pm_qos_remove_request - removes an existing qos request
 * @req: handle to request list element
 *
 * Will remove pm qos request from the list of constraints and
 * recompute the current target value. Call this on slow code paths.
 *
 * Returns 1 if the aggregated constraint value has changed,
 * 0 if the aggregated constraint value has not changed,
 * -EINVAL in case of wrong parameters, -ENODEV if the device has been
 * removed from the system
 *
 * Callers should ensure that the target device is not RPM_SUSPENDED before
 * using this function for requests of type DEV_PM_QOS_FLAGS.
 */
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = __dev_pm_qos_remove_request(req);
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_request);
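
/*
 * Example (illustrative): continuing the sketch above, the same handle is
 * used for later updates and for removal, e.g. in the driver's teardown path:
 *
 *	dev_pm_qos_update_request(&data->req, 500);	// relax to 500 us
 *	...
 *	dev_pm_qos_remove_request(&data->req);
 *
 * After removal the core zeroes the handle (see __dev_pm_qos_remove_request()
 * above), so it may be reused for a subsequent dev_pm_qos_add_request().
 */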

/**
 * dev_pm_qos_add_notifier - sets notification entry for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block managed by caller.
 * @type: request type.
 *
 * Will register the notifier into a notification chain that gets called
 * upon changes to the target value for the device.
 *
 * If the device's constraints object doesn't exist when this routine is called,
 * it will be created (or an error code will be returned if that fails).
 */
int dev_pm_qos_add_notifier(struct device *dev, struct notifier_block *notifier,
			    enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR(dev->power.qos))
		ret = -ENODEV;
	else if (!dev->power.qos)
		ret = dev_pm_qos_constraints_allocate(dev);

	if (ret)
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_register(dev->power.qos->resume_latency.notifiers,
						       notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_add_notifier(&dev->power.qos->freq,
					    FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_notifier);
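
/*
 * Example (illustrative sketch): a watcher provides a notifier_block whose
 * callback is invoked with the new aggregated target value:
 *
 *	static int my_qos_notify(struct notifier_block *nb,
 *				 unsigned long value, void *unused)
 *	{
 *		// react to the new aggregated constraint in 'value'
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_qos_notify,
 *	};
 *
 *	ret = dev_pm_qos_add_notifier(dev, &my_nb, DEV_PM_QOS_RESUME_LATENCY);
 */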

/**
 * dev_pm_qos_remove_notifier - deletes notification for changes to target value
 * of per-device PM QoS constraints
 *
 * @dev: target device for the constraint
 * @notifier: notifier block to be removed.
 * @type: request type.
 *
 * Will remove the notifier from the notification chain that gets called
 * upon changes to the target value.
 */
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier,
			       enum dev_pm_qos_req_type type)
{
	int ret = 0;

	mutex_lock(&dev_pm_qos_mtx);

	/* Silently return if the constraints object is not present. */
	if (IS_ERR_OR_NULL(dev->power.qos))
		goto unlock;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		ret = blocking_notifier_chain_unregister(dev->power.qos->resume_latency.notifiers,
							 notifier);
		break;
	case DEV_PM_QOS_MIN_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MIN, notifier);
		break;
	case DEV_PM_QOS_MAX_FREQUENCY:
		ret = freq_qos_remove_notifier(&dev->power.qos->freq,
					       FREQ_QOS_MAX, notifier);
		break;
	default:
		WARN_ON(1);
		ret = -EINVAL;
	}

unlock:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_remove_notifier);

/**
 * dev_pm_qos_add_ancestor_request - Add PM QoS request for device's ancestor.
 * @dev: Device whose ancestor to add the request for.
 * @req: Pointer to the preallocated handle.
 * @type: Type of the request.
 * @value: Constraint latency value.
 */
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value)
{
	struct device *ancestor = dev->parent;
	int ret = -ENODEV;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		while (ancestor && !ancestor->power.ignore_children)
			ancestor = ancestor->parent;

		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		while (ancestor && !ancestor->power.set_latency_tolerance)
			ancestor = ancestor->parent;

		break;
	default:
		ancestor = NULL;
	}
	if (ancestor)
		ret = dev_pm_qos_add_request(ancestor, req, type, value);

	if (ret < 0)
		req->dev = NULL;

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_add_ancestor_request);
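
/*
 * Example (illustrative): a driver whose device is resumed via its parent
 * (e.g. an I2C client behind a bus controller) can constrain the relevant
 * ancestor rather than the device itself:
 *
 *	ret = dev_pm_qos_add_ancestor_request(&client->dev, &data->req,
 *					      DEV_PM_QOS_RESUME_LATENCY, 100);
 *
 * As the loops above show, the request is attached to the first ancestor
 * with power.ignore_children set (or, for latency tolerance, with a
 * set_latency_tolerance callback).
 */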

static void __dev_pm_qos_drop_user_request(struct device *dev,
					   enum dev_pm_qos_req_type type)
{
	struct dev_pm_qos_request *req = NULL;

	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		req = dev->power.qos->resume_latency_req;
		dev->power.qos->resume_latency_req = NULL;
		break;
	case DEV_PM_QOS_LATENCY_TOLERANCE:
		req = dev->power.qos->latency_tolerance_req;
		dev->power.qos->latency_tolerance_req = NULL;
		break;
	case DEV_PM_QOS_FLAGS:
		req = dev->power.qos->flags_req;
		dev->power.qos->flags_req = NULL;
		break;
	default:
		WARN_ON(1);
		return;
	}
	__dev_pm_qos_remove_request(req);
	kfree(req);
}

static void dev_pm_qos_drop_user_request(struct device *dev,
					 enum dev_pm_qos_req_type type)
{
	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_drop_user_request(dev, type);
	mutex_unlock(&dev_pm_qos_mtx);
}

/**
 * dev_pm_qos_expose_latency_limit - Expose PM QoS latency limit to user space.
 * @dev: Device whose PM QoS latency limit is to be exposed to user space.
 * @value: Initial value of the latency limit.
 */
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev) || value < 0)
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_RESUME_LATENCY, value);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->resume_latency_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->resume_latency_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_resume_latency(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_limit);
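
/*
 * Example (illustrative): a subsystem that wants user space to be able to cap
 * this device's resume latency can call, typically right after device
 * registration:
 *
 *	ret = dev_pm_qos_expose_latency_limit(dev, initial_limit_us);
 *
 * On success this backs a writable attribute (pm_qos_resume_latency_us in
 * current kernels) in the device's power/ directory in sysfs.
 */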

static void __dev_pm_qos_hide_latency_limit(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->resume_latency_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_RESUME_LATENCY);
}

/**
 * dev_pm_qos_hide_latency_limit - Hide PM QoS latency limit from user space.
 * @dev: Device whose PM QoS latency limit is to be hidden from user space.
 */
void dev_pm_qos_hide_latency_limit(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_resume_latency(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_latency_limit(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_limit);

/**
 * dev_pm_qos_expose_flags - Expose PM QoS flags of a device to user space.
 * @dev: Device whose PM QoS flags are to be exposed to user space.
 * @val: Initial values of the flags.
 */
int dev_pm_qos_expose_flags(struct device *dev, s32 val)
{
	struct dev_pm_qos_request *req;
	int ret;

	if (!device_is_registered(dev))
		return -EINVAL;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	ret = dev_pm_qos_add_request(dev, req, DEV_PM_QOS_FLAGS, val);
	if (ret < 0) {
		kfree(req);
		return ret;
	}

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos))
		ret = -ENODEV;
	else if (dev->power.qos->flags_req)
		ret = -EEXIST;

	if (ret < 0) {
		__dev_pm_qos_remove_request(req);
		kfree(req);
		mutex_unlock(&dev_pm_qos_mtx);
		goto out;
	}
	dev->power.qos->flags_req = req;

	mutex_unlock(&dev_pm_qos_mtx);

	ret = pm_qos_sysfs_add_flags(dev);
	if (ret)
		dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);

 out:
	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_flags);

static void __dev_pm_qos_hide_flags(struct device *dev)
{
	if (!IS_ERR_OR_NULL(dev->power.qos) && dev->power.qos->flags_req)
		__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_FLAGS);
}

/**
 * dev_pm_qos_hide_flags - Hide PM QoS flags of a device from user space.
 * @dev: Device whose PM QoS flags are to be hidden from user space.
 */
void dev_pm_qos_hide_flags(struct device *dev)
{
	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_sysfs_mtx);

	pm_qos_sysfs_remove_flags(dev);

	mutex_lock(&dev_pm_qos_mtx);
	__dev_pm_qos_hide_flags(dev);
	mutex_unlock(&dev_pm_qos_mtx);

	mutex_unlock(&dev_pm_qos_sysfs_mtx);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_flags);

/**
 * dev_pm_qos_update_flags - Update PM QoS flags request owned by user space.
 * @dev: Device to update the PM QoS flags request for.
 * @mask: Flags to set/clear.
 * @set: Whether to set or clear the flags (true means set).
 */
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set)
{
	s32 value;
	int ret;

	pm_runtime_get_sync(dev);
	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos) || !dev->power.qos->flags_req) {
		ret = -EINVAL;
		goto out;
	}

	value = dev_pm_qos_requested_flags(dev);
	if (set)
		value |= mask;
	else
		value &= ~mask;

	ret = __dev_pm_qos_update_request(dev->power.qos->flags_req, value);

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	pm_runtime_put(dev);
	return ret;
}
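
/*
 * Example (illustrative): kernel code can flip a flag in the user-space-owned
 * request created by dev_pm_qos_expose_flags(), e.g.:
 *
 *	ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_NO_POWER_OFF, true);
 *
 * As the check above shows, this fails with -EINVAL if the flags request has
 * not been exposed first.
 */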

/**
 * dev_pm_qos_get_user_latency_tolerance - Get user space latency tolerance.
 * @dev: Device to obtain the user space latency tolerance for.
 */
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
{
	s32 ret;

	mutex_lock(&dev_pm_qos_mtx);
	ret = IS_ERR_OR_NULL(dev->power.qos)
		|| !dev->power.qos->latency_tolerance_req ?
			PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT :
			dev->power.qos->latency_tolerance_req->data.pnode.prio;
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}

/**
 * dev_pm_qos_update_user_latency_tolerance - Update user space latency tolerance.
 * @dev: Device to update the user space latency tolerance for.
 * @val: New user space latency tolerance for @dev (negative values disable).
 */
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
{
	int ret;

	mutex_lock(&dev_pm_qos_mtx);

	if (IS_ERR_OR_NULL(dev->power.qos)
	    || !dev->power.qos->latency_tolerance_req) {
		struct dev_pm_qos_request *req;

		if (val < 0) {
			if (val == PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT)
				ret = 0;
			else
				ret = -EINVAL;
			goto out;
		}
		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto out;
		}
		ret = __dev_pm_qos_add_request(dev, req, DEV_PM_QOS_LATENCY_TOLERANCE, val);
		if (ret < 0) {
			kfree(req);
			goto out;
		}
		dev->power.qos->latency_tolerance_req = req;
	} else {
		if (val < 0) {
			__dev_pm_qos_drop_user_request(dev, DEV_PM_QOS_LATENCY_TOLERANCE);
			ret = 0;
		} else {
			ret = __dev_pm_qos_update_request(dev->power.qos->latency_tolerance_req, val);
		}
	}

 out:
	mutex_unlock(&dev_pm_qos_mtx);
	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_update_user_latency_tolerance);
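
/*
 * Example (illustrative): the sysfs latency tolerance attribute's store
 * callback reduces to a call like:
 *
 *	ret = dev_pm_qos_update_user_latency_tolerance(dev, val);
 *
 * where a negative 'val' (e.g. PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT) drops
 * the user space request again, as handled above.
 */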

/**
 * dev_pm_qos_expose_latency_tolerance - Expose latency tolerance to userspace
 * @dev: Device whose latency tolerance to expose
 */
int dev_pm_qos_expose_latency_tolerance(struct device *dev)
{
	int ret;

	if (!dev->power.set_latency_tolerance)
		return -EINVAL;

	mutex_lock(&dev_pm_qos_sysfs_mtx);
	ret = pm_qos_sysfs_add_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	return ret;
}
EXPORT_SYMBOL_GPL(dev_pm_qos_expose_latency_tolerance);

/**
 * dev_pm_qos_hide_latency_tolerance - Hide latency tolerance from userspace
 * @dev: Device whose latency tolerance to hide
 */
void dev_pm_qos_hide_latency_tolerance(struct device *dev)
{
	mutex_lock(&dev_pm_qos_sysfs_mtx);
	pm_qos_sysfs_remove_latency_tolerance(dev);
	mutex_unlock(&dev_pm_qos_sysfs_mtx);

	/* Remove the request from user space now */
	pm_runtime_get_sync(dev);
	dev_pm_qos_update_user_latency_tolerance(dev,
		PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT);
	pm_runtime_put(dev);
}
EXPORT_SYMBOL_GPL(dev_pm_qos_hide_latency_tolerance);