v5.9
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Definitions related to Power Management Quality of Service (PM QoS).
 *
 * Copyright (C) 2020 Intel Corporation
 *
 * Authors:
 *	Mark Gross <mgross@linux.intel.com>
 *	Rafael J. Wysocki <rafael.j.wysocki@intel.com>
 */

#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H

#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/device.h>

enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,
	PM_QOS_FLAGS_NONE,
	PM_QOS_FLAGS_SOME,
	PM_QOS_FLAGS_ALL,
};

#define PM_QOS_DEFAULT_VALUE	(-1)
#define PM_QOS_LATENCY_ANY	S32_MAX
#define PM_QOS_LATENCY_ANY_NS	((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC)

#define PM_QOS_CPU_LATENCY_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE	PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT	PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS	PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE	0
#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE	0
#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE	FREQ_QOS_MAX_DEFAULT_VALUE
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT	(-1)

#define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)

enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN,		/* return the smallest value */
};

/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically.  Atomic access is only guaranteed on all
 * CPU types Linux supports for 32-bit quantities.
 */
struct pm_qos_constraints {
	struct plist_head list;
	s32 target_value;	/* Do not change to 64 bit */
	s32 default_value;
	s32 no_constraint_value;
	enum pm_qos_type type;
	struct blocking_notifier_head *notifiers;
};

struct pm_qos_request {
	struct plist_node node;
	struct pm_qos_constraints *qos;
};

struct pm_qos_flags_request {
	struct list_head node;
	s32 flags;	/* Do not change to 64 bit */
};

struct pm_qos_flags {
	struct list_head list;
	s32 effective_flags;	/* Do not change to 64 bit */
};


#define FREQ_QOS_MIN_DEFAULT_VALUE	0
#define FREQ_QOS_MAX_DEFAULT_VALUE	S32_MAX

enum freq_qos_req_type {
	FREQ_QOS_MIN = 1,
	FREQ_QOS_MAX,
};

struct freq_constraints {
	struct pm_qos_constraints min_freq;
	struct blocking_notifier_head min_freq_notifiers;
	struct pm_qos_constraints max_freq;
	struct blocking_notifier_head max_freq_notifiers;
};

struct freq_qos_request {
	enum freq_qos_req_type type;
	struct plist_node pnode;
	struct freq_constraints *qos;
};


enum dev_pm_qos_req_type {
	DEV_PM_QOS_RESUME_LATENCY = 1,
	DEV_PM_QOS_LATENCY_TOLERANCE,
	DEV_PM_QOS_MIN_FREQUENCY,
	DEV_PM_QOS_MAX_FREQUENCY,
	DEV_PM_QOS_FLAGS,
};

struct dev_pm_qos_request {
	enum dev_pm_qos_req_type type;
	union {
		struct plist_node pnode;
		struct pm_qos_flags_request flr;
		struct freq_qos_request freq;
	} data;
	struct device *dev;
};
struct dev_pm_qos {
	struct pm_qos_constraints resume_latency;
	struct pm_qos_constraints latency_tolerance;
	struct freq_constraints freq;
	struct pm_qos_flags flags;
	struct dev_pm_qos_request *resume_latency_req;
	struct dev_pm_qos_request *latency_tolerance_req;
	struct dev_pm_qos_request *flags_req;
};

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};

static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
	return req->dev != NULL;
}

s32 pm_qos_read_value(struct pm_qos_constraints *c);
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val);
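The declarations above are the aggregation core on which the wrappers further down are built: each framework keeps its own struct pm_qos_constraints and feeds requests through pm_qos_update_target(). A minimal sketch of that pattern, assuming a PM_QOS_MIN-style latency constraint set (the example_* names are invented for illustration and are not part of this header):

/* Sketch only: a framework-private constraint set and its "add" helper. */
static BLOCKING_NOTIFIER_HEAD(example_notifiers);

static struct pm_qos_constraints example_constraints = {
	.list = PLIST_HEAD_INIT(example_constraints.list),
	.target_value = PM_QOS_LATENCY_ANY,
	.default_value = PM_QOS_LATENCY_ANY,
	.no_constraint_value = PM_QOS_LATENCY_ANY,
	.type = PM_QOS_MIN,	/* effective value is the smallest request */
	.notifiers = &example_notifiers,
};

static void example_add_request(struct pm_qos_request *req, s32 value)
{
	req->qos = &example_constraints;
	/* Links req->node into the plist and recomputes target_value. */
	pm_qos_update_target(&example_constraints, &req->node,
			     PM_QOS_ADD_REQ, value);
}

pm_qos_read_value() then returns the current aggregate without locking, which is why target_value has to stay a 32-bit quantity, as the comment above notes.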
#ifdef CONFIG_CPU_IDLE
s32 cpu_latency_qos_limit(void);
bool cpu_latency_qos_request_active(struct pm_qos_request *req);
void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value);
void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value);
void cpu_latency_qos_remove_request(struct pm_qos_request *req);
#else
static inline s32 cpu_latency_qos_limit(void) { return INT_MAX; }
static inline bool cpu_latency_qos_request_active(struct pm_qos_request *req)
{
	return false;
}
static inline void cpu_latency_qos_add_request(struct pm_qos_request *req,
					       s32 value) {}
static inline void cpu_latency_qos_update_request(struct pm_qos_request *req,
						  s32 new_value) {}
static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {}
#endif
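A driver that cannot tolerate deep C-states during a critical window would use the CPU latency QoS calls above roughly as follows; when CONFIG_CPU_IDLE is disabled the stubs turn these calls into no-ops. Sketch only, with invented example_* names and an arbitrary 10 us bound:

/* Sketch only: cap CPU wakeup latency at 10 us while streaming is active. */
static struct pm_qos_request example_cpu_req;

static void example_start_streaming(void)
{
	cpu_latency_qos_add_request(&example_cpu_req, 10);
}

static void example_stop_streaming(void)
{
	if (cpu_latency_qos_request_active(&example_cpu_req))
		cpu_latency_qos_remove_request(&example_cpu_req);
}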
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_resume_latency(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev, enum dev_pm_qos_req_type type);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
			    struct notifier_block *notifier,
			    enum dev_pm_qos_req_type type);
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier,
			       enum dev_pm_qos_req_type type);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
int dev_pm_qos_expose_flags(struct device *dev, s32 value);
void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);
int dev_pm_qos_expose_latency_tolerance(struct device *dev);
void dev_pm_qos_hide_latency_tolerance(struct device *dev);

static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
	return dev->power.qos->resume_latency_req->data.pnode.prio;
}

static inline s32 dev_pm_qos_requested_flags(struct device *dev)
{
	return dev->power.qos->flags_req->data.flr.flags;
}

static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
{
	return IS_ERR_OR_NULL(dev->power.qos) ?
		PM_QOS_RESUME_LATENCY_NO_CONSTRAINT :
		pm_qos_read_value(&dev->power.qos->resume_latency);
}
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
							  s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
							s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_resume_latency(struct device *dev)
			{ return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT; }
static inline s32 dev_pm_qos_read_value(struct device *dev,
					enum dev_pm_qos_req_type type)
{
	switch (type) {
	case DEV_PM_QOS_RESUME_LATENCY:
		return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
	case DEV_PM_QOS_MIN_FREQUENCY:
		return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
	case DEV_PM_QOS_MAX_FREQUENCY:
		return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
	default:
		WARN_ON(1);
		return 0;
	}
}

static inline int dev_pm_qos_add_request(struct device *dev,
					 struct dev_pm_qos_request *req,
					 enum dev_pm_qos_req_type type,
					 s32 value)
			{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
					    s32 new_value)
			{ return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
			{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
					  struct notifier_block *notifier,
					  enum dev_pm_qos_req_type type)
			{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
					     struct notifier_block *notifier,
					     enum dev_pm_qos_req_type type)
			{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
	dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
						  struct dev_pm_qos_request *req,
						  enum dev_pm_qos_req_type type,
						  s32 value)
			{ return 0; }
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_flags(struct device *dev) {}
static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
			{ return 0; }
static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
			{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
			{ return 0; }
static inline int dev_pm_qos_expose_latency_tolerance(struct device *dev)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_tolerance(struct device *dev) {}

static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
	return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
}
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
{
	return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
}
#endif
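Putting the device PM QoS calls above together, a driver might constrain its device's resume latency along these lines (sketch only; example_probe()/example_remove() and the 100 us figure are invented for illustration):

static struct dev_pm_qos_request example_dev_req;

static int example_probe(struct device *dev)
{
	/* Ask the PM core to keep this device's resume latency under 100 us. */
	int ret = dev_pm_qos_add_request(dev, &example_dev_req,
					 DEV_PM_QOS_RESUME_LATENCY, 100);
	if (ret < 0)
		return ret;

	/* The constraint can be relaxed later with dev_pm_qos_update_request(). */
	return 0;
}

static void example_remove(struct device *dev)
{
	if (dev_pm_qos_request_active(&example_dev_req))
		dev_pm_qos_remove_request(&example_dev_req);
}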
static inline int freq_qos_request_active(struct freq_qos_request *req)
{
	return !IS_ERR_OR_NULL(req->qos);
}

void freq_constraints_init(struct freq_constraints *qos);

s32 freq_qos_read_value(struct freq_constraints *qos,
			enum freq_qos_req_type type);

int freq_qos_add_request(struct freq_constraints *qos,
			 struct freq_qos_request *req,
			 enum freq_qos_req_type type, s32 value);
int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
int freq_qos_remove_request(struct freq_qos_request *req);
int freq_qos_apply(struct freq_qos_request *req,
		   enum pm_qos_req_action action, s32 value);

int freq_qos_add_notifier(struct freq_constraints *qos,
			  enum freq_qos_req_type type,
			  struct notifier_block *notifier);
int freq_qos_remove_notifier(struct freq_constraints *qos,
			     enum freq_qos_req_type type,
			     struct notifier_block *notifier);

#endif
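The frequency QoS interface above aggregates minimum and maximum frequency requests against a struct freq_constraints; consumers add FREQ_QOS_MIN or FREQ_QOS_MAX requests and read the aggregate back with freq_qos_read_value(). A hedged sketch with invented example_* names (the kHz unit is an assumption borrowed from typical cpufreq-style usage):

static struct freq_constraints example_freq_qos;
static struct freq_qos_request example_min_req;

static int example_init(void)
{
	freq_constraints_init(&example_freq_qos);

	/* Request an effective minimum frequency of at least 800000 (kHz assumed). */
	return freq_qos_add_request(&example_freq_qos, &example_min_req,
				    FREQ_QOS_MIN, 800000);
}

static void example_exit(void)
{
	if (freq_qos_request_active(&example_min_req))
		freq_qos_remove_request(&example_min_req);

	/* Aggregate floor; FREQ_QOS_MIN_DEFAULT_VALUE once no requests remain. */
	pr_info("min freq: %d\n",
		freq_qos_read_value(&example_freq_qos, FREQ_QOS_MIN));
}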
v3.15

#ifndef _LINUX_PM_QOS_H
#define _LINUX_PM_QOS_H
/* interface for the pm_qos_power infrastructure of the linux kernel.
 *
 * Mark Gross <mgross@linux.intel.com>
 */
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/miscdevice.h>
#include <linux/device.h>
#include <linux/workqueue.h>

enum {
	PM_QOS_RESERVED = 0,
	PM_QOS_CPU_DMA_LATENCY,
	PM_QOS_NETWORK_LATENCY,
	PM_QOS_NETWORK_THROUGHPUT,

	/* insert new class ID */
	PM_QOS_NUM_CLASSES,
};

enum pm_qos_flags_status {
	PM_QOS_FLAGS_UNDEFINED = -1,
	PM_QOS_FLAGS_NONE,
	PM_QOS_FLAGS_SOME,
	PM_QOS_FLAGS_ALL,
};

#define PM_QOS_DEFAULT_VALUE -1

#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE	(2000 * USEC_PER_SEC)
#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE	0
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE	0
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE	0
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT	(-1)
#define PM_QOS_LATENCY_ANY			((s32)(~(__u32)0 >> 1))

#define PM_QOS_FLAG_NO_POWER_OFF	(1 << 0)
#define PM_QOS_FLAG_REMOTE_WAKEUP	(1 << 1)

struct pm_qos_request {
	struct plist_node node;
	int pm_qos_class;
	struct delayed_work work; /* for pm_qos_update_request_timeout */
};

struct pm_qos_flags_request {
	struct list_head node;
	s32 flags;	/* Do not change to 64 bit */
};

enum dev_pm_qos_req_type {
	DEV_PM_QOS_RESUME_LATENCY = 1,
	DEV_PM_QOS_LATENCY_TOLERANCE,
	DEV_PM_QOS_FLAGS,
};

struct dev_pm_qos_request {
	enum dev_pm_qos_req_type type;
	union {
		struct plist_node pnode;
		struct pm_qos_flags_request flr;
	} data;
	struct device *dev;
};

enum pm_qos_type {
	PM_QOS_UNITIALIZED,
	PM_QOS_MAX,		/* return the largest value */
	PM_QOS_MIN		/* return the smallest value */
};
/*
 * Note: The lockless read path depends on the CPU accessing target_value
 * or effective_flags atomically.  Atomic access is only guaranteed on all
 * CPU types Linux supports for 32-bit quantities.
 */
struct pm_qos_constraints {
	struct plist_head list;
	s32 target_value;	/* Do not change to 64 bit */
	s32 default_value;
	s32 no_constraint_value;
	enum pm_qos_type type;
	struct blocking_notifier_head *notifiers;
};

struct pm_qos_flags {
	struct list_head list;
	s32 effective_flags;	/* Do not change to 64 bit */
};

struct dev_pm_qos {
	struct pm_qos_constraints resume_latency;
	struct pm_qos_constraints latency_tolerance;
	struct pm_qos_flags flags;
	struct dev_pm_qos_request *resume_latency_req;
	struct dev_pm_qos_request *latency_tolerance_req;
	struct dev_pm_qos_request *flags_req;
};

/* Action requested to pm_qos_update_target */
enum pm_qos_req_action {
	PM_QOS_ADD_REQ,		/* Add a new request */
	PM_QOS_UPDATE_REQ,	/* Update an existing request */
	PM_QOS_REMOVE_REQ	/* Remove an existing request */
};

static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
{
	return req->dev != NULL;
}

int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
			 enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
			 struct pm_qos_flags_request *req,
			 enum pm_qos_req_action action, s32 val);
void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
			s32 value);
void pm_qos_update_request(struct pm_qos_request *req,
			   s32 new_value);
void pm_qos_update_request_timeout(struct pm_qos_request *req,
				   s32 new_value, unsigned long timeout_us);
void pm_qos_remove_request(struct pm_qos_request *req);

int pm_qos_request(int pm_qos_class);
int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
int pm_qos_request_active(struct pm_qos_request *req);
s32 pm_qos_read_value(struct pm_qos_constraints *c);
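In this older interface, requests are made against global classes (PM_QOS_CPU_DMA_LATENCY and friends) rather than per-device constraint sets. A hypothetical user of the declarations above, with invented example_* names:

/* Sketch only: bound CPU DMA latency to 20 us while I/O is in flight. */
static struct pm_qos_request example_dma_req;

static void example_io_start(void)
{
	if (!pm_qos_request_active(&example_dma_req))
		pm_qos_add_request(&example_dma_req, PM_QOS_CPU_DMA_LATENCY, 20);
}

static void example_io_done(void)
{
	/*
	 * Alternatively, let the request lapse on its own after 500 ms:
	 * pm_qos_update_request_timeout(&example_dma_req, 20, 500000);
	 */
	pm_qos_remove_request(&example_dma_req);
}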
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev, s32 mask);
s32 __dev_pm_qos_read_value(struct device *dev);
s32 dev_pm_qos_read_value(struct device *dev);
int dev_pm_qos_add_request(struct device *dev, struct dev_pm_qos_request *req,
			   enum dev_pm_qos_req_type type, s32 value);
int dev_pm_qos_update_request(struct dev_pm_qos_request *req, s32 new_value);
int dev_pm_qos_remove_request(struct dev_pm_qos_request *req);
int dev_pm_qos_add_notifier(struct device *dev,
			    struct notifier_block *notifier);
int dev_pm_qos_remove_notifier(struct device *dev,
			       struct notifier_block *notifier);
int dev_pm_qos_add_global_notifier(struct notifier_block *notifier);
int dev_pm_qos_remove_global_notifier(struct notifier_block *notifier);
void dev_pm_qos_constraints_init(struct device *dev);
void dev_pm_qos_constraints_destroy(struct device *dev);
int dev_pm_qos_add_ancestor_request(struct device *dev,
				    struct dev_pm_qos_request *req,
				    enum dev_pm_qos_req_type type, s32 value);
#else
static inline enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev,
							  s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline enum pm_qos_flags_status dev_pm_qos_flags(struct device *dev,
							s32 mask)
			{ return PM_QOS_FLAGS_UNDEFINED; }
static inline s32 __dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline s32 dev_pm_qos_read_value(struct device *dev)
			{ return 0; }
static inline int dev_pm_qos_add_request(struct device *dev,
					 struct dev_pm_qos_request *req,
					 enum dev_pm_qos_req_type type,
					 s32 value)
			{ return 0; }
static inline int dev_pm_qos_update_request(struct dev_pm_qos_request *req,
					    s32 new_value)
			{ return 0; }
static inline int dev_pm_qos_remove_request(struct dev_pm_qos_request *req)
			{ return 0; }
static inline int dev_pm_qos_add_notifier(struct device *dev,
					  struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_notifier(struct device *dev,
					     struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_add_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline int dev_pm_qos_remove_global_notifier(
					struct notifier_block *notifier)
			{ return 0; }
static inline void dev_pm_qos_constraints_init(struct device *dev)
{
	dev->power.power_state = PMSG_ON;
}
static inline void dev_pm_qos_constraints_destroy(struct device *dev)
{
	dev->power.power_state = PMSG_INVALID;
}
static inline int dev_pm_qos_add_ancestor_request(struct device *dev,
						  struct dev_pm_qos_request *req,
						  enum dev_pm_qos_req_type type,
						  s32 value)
			{ return 0; }
#endif

#ifdef CONFIG_PM_RUNTIME
int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value);
void dev_pm_qos_hide_latency_limit(struct device *dev);
int dev_pm_qos_expose_flags(struct device *dev, s32 value);
void dev_pm_qos_hide_flags(struct device *dev);
int dev_pm_qos_update_flags(struct device *dev, s32 mask, bool set);
s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev);
int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val);

static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev)
{
	return dev->power.qos->resume_latency_req->data.pnode.prio;
}

static inline s32 dev_pm_qos_requested_flags(struct device *dev)
{
	return dev->power.qos->flags_req->data.flr.flags;
}
#else
static inline int dev_pm_qos_expose_latency_limit(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_latency_limit(struct device *dev) {}
static inline int dev_pm_qos_expose_flags(struct device *dev, s32 value)
			{ return 0; }
static inline void dev_pm_qos_hide_flags(struct device *dev) {}
static inline int dev_pm_qos_update_flags(struct device *dev, s32 m, bool set)
			{ return 0; }
static inline s32 dev_pm_qos_get_user_latency_tolerance(struct device *dev)
			{ return PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT; }
static inline int dev_pm_qos_update_user_latency_tolerance(struct device *dev, s32 val)
			{ return 0; }

static inline s32 dev_pm_qos_requested_resume_latency(struct device *dev) { return 0; }
static inline s32 dev_pm_qos_requested_flags(struct device *dev) { return 0; }
#endif

#endif
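Finally, the flags interface in this version also carries PM_QOS_FLAG_REMOTE_WAKEUP, which is absent from the v5.9 header above. A sketch of how a driver and platform code might use it, with invented example_* names; the expose/update pairing shown here is an assumption about typical usage rather than something this header mandates:

static int example_probe_flags(struct device *dev)
{
	/* Create the device's flags request (no flags set initially). */
	return dev_pm_qos_expose_flags(dev, 0);
}

static int example_suspend_prepare(struct device *dev)
{
	/* Ask that remote wakeup keeps working while the device is suspended. */
	int ret = dev_pm_qos_update_flags(dev, PM_QOS_FLAG_REMOTE_WAKEUP, true);

	if (ret < 0)
		return ret;

	/*
	 * Platform code can then check whether every request (PM_QOS_FLAGS_ALL)
	 * or only some of them (PM_QOS_FLAGS_SOME) set a given mask bit.
	 */
	if (dev_pm_qos_flags(dev, PM_QOS_FLAG_NO_POWER_OFF) == PM_QOS_FLAGS_ALL)
		dev_warn(dev, "device must stay powered while suspended\n");

	return 0;
}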