kernel/power/power.h, v3.5.6
 
#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

struct swsusp_info {
	struct new_utsname	uts;
	u32			version_code;
	unsigned long		num_physpages;
	int			cpus;
	unsigned long		image_pages;
	unsigned long		pages;
	unsigned long		size;
} __attribute__((aligned(PAGE_SIZE)));

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
extern int arch_hibernation_header_restore(void *addr);

static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)

/* kernel/power/hibernate.c */
extern bool freezer_test_done;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

extern int pfn_is_nosave(unsigned long);

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}
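
/*
 * Illustrative sketch (not part of this header): power_attr() is meant to be
 * used together with a pair of <name>_show()/<name>_store() handlers, roughly
 * the way kernel/power/hibernate.c defines the "image_size" sysfs attribute
 * backed by the image_size variable declared further down in this header.
 * The handler bodies below are a simplified sketch, not the exact kernel code.
 */
#if 0	/* example only */
static ssize_t image_size_show(struct kobject *kobj, struct kobj_attribute *attr,
			       char *buf)
{
	return sprintf(buf, "%lu\n", image_size);
}

static ssize_t image_size_store(struct kobject *kobj, struct kobj_attribute *attr,
				const char *buf, size_t n)
{
	unsigned long size;

	if (sscanf(buf, "%lu", &size) == 1) {
		image_size = size;
		return n;
	}
	return -EINVAL;
}

power_attr(image_size);	/* expands to a struct kobj_attribute named image_size_attr */
#endif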

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

/**
 *	Auxiliary structure used for reading the snapshot image data and
 *	metadata from and writing them to the list of page backup entries
 *	(PBEs) which is the main data structure of swsusp.
 *
 *	Using struct snapshot_handle we can transfer the image, including its
 *	metadata, as a continuous sequence of bytes with the help of
 *	snapshot_read_next() and snapshot_write_next().
 *
 *	The code that writes the image to storage or transfers it to
 *	user space is required to use snapshot_read_next() for this
 *	purpose and it should not make any assumptions regarding the internal
 *	structure of the image.  Similarly, the code that reads the image from
 *	storage or transfers it from user space is required to use
 *	snapshot_write_next().
 *
 *	This may allow us to change the internal structure of the image
 *	in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int	cur;	/* number of the PAGE_SIZE-byte block the
				 * next operation will refer to (i.e. current)
				 */
	void		*buffer;	/* address of the block to read from
					 * or write to
					 */
	int		sync_read;	/* Set to one to notify the caller of
					 * snapshot_write_next() that it may
					 * need to call wait_on_bio_chain()
					 */
};

/* This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 */
#define data_of(handle)	((handle).buffer)

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
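
/*
 * Illustrative sketch (not part of this header): the save path is expected to
 * pump the image out of the snapshot with snapshot_read_next(), consuming one
 * PAGE_SIZE chunk at data_of(handle) per successful call, roughly the way
 * kernel/power/swap.c and kernel/power/user.c do.  write_page() below is a
 * hypothetical sink supplied by the caller.
 */
#if 0	/* example only */
static int save_image_sketch(struct snapshot_handle *handle,
			     int (*write_page)(void *buf))
{
	int ret;

	memset(handle, 0, sizeof(*handle));
	ret = snapshot_read_next(handle);	/* > 0: a page is ready at data_of() */
	while (ret > 0) {
		ret = write_page(data_of(*handle));	/* hand the page to the sink */
		if (ret)
			return ret;
		ret = snapshot_read_next(handle);	/* 0 when the image is complete */
	}
	return ret;	/* 0 on success, negative error code otherwise */
}
#endif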

/* If unset, the snapshot device cannot be opened. */
extern atomic_t snapshot_device_available;

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel in
 * the image header.
 */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2
#define SF_CRC32_MODE		4

/* kernel/power/hibernate.c */
extern int swsusp_check(void);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);

/* kernel/power/block_io.c */
extern struct block_device *hib_resume_bdev;

extern int hib_bio_read_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_bio_write_page(pgoff_t page_off, void *addr,
		struct bio **bio_chain);
extern int hib_wait_on_bio_chain(struct bio **bio_chain);
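
/*
 * Illustrative sketch (not part of this header): hib_bio_write_page() queues
 * asynchronous writes onto a caller-owned bio chain, and hib_wait_on_bio_chain()
 * collects their completion status, roughly the way kernel/power/swap.c submits
 * image pages to the resume device.  The page offsets used here are made up.
 */
#if 0	/* example only */
static int write_two_pages_sketch(void *page_a, void *page_b)
{
	struct bio *bio_chain = NULL;
	int error;

	error = hib_bio_write_page(10, page_a, &bio_chain);	/* queued, not yet done */
	if (!error)
		error = hib_bio_write_page(11, page_b, &bio_chain);
	if (!error)
		error = hib_wait_on_bio_chain(&bio_chain);	/* wait for both writes */
	return error;
}
#endif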

struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
				unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char *const pm_states[];

extern bool valid_state(suspend_state_t state);
extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
static inline bool valid_state(suspend_state_t state) { return false; }
#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain(unsigned long val);
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

extern int pm_test_level;

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
	int error;

	error = freeze_processes();
	/*
	 * freeze_processes() automatically thaws every task if freezing
	 * fails. So we need not do anything extra upon error.
	 */
	if (error)
		return error;

	error = freeze_kernel_threads();
	/*
	 * freeze_kernel_threads() thaws only kernel threads upon freezing
	 * failure. So we have to thaw the userspace tasks ourselves.
	 */
	if (error)
		thaw_processes();

	return error;
}

static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
	return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);

#endif /* !CONFIG_PM_WAKELOCKS */
kernel/power/power.h, v6.8
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>

struct swsusp_info {
	struct new_utsname	uts;
	u32			version_code;
	unsigned long		num_physpages;
	int			cpus;
	unsigned long		image_pages;
	unsigned long		pages;
	unsigned long		size;
} __aligned(PAGE_SIZE);

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE	(sizeof(struct new_utsname) + 4)

static inline int init_header_complete(struct swsusp_info *info)
{
	return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline const char *check_image_kernel(struct swsusp_info *info)
{
	return arch_hibernation_header_restore(info) ?
			"architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO	((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES	((1024 * 1024) >> PAGE_SHIFT)

asmlinkage int swsusp_save(void);

/* kernel/power/hibernate.c */
extern bool freezer_test_done;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#ifdef CONFIG_STRICT_KERNEL_RWX
/* kernel/power/snapshot.c */
extern void enable_restore_image_protection(void);
#else
static inline void enable_restore_image_protection(void) {}
#endif /* CONFIG_STRICT_KERNEL_RWX */

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = 0644,			\
	},					\
	.show	= _name##_show,			\
	.store	= _name##_store,		\
}

#define power_attr_ro(_name) \
static struct kobj_attribute _name##_attr = {	\
	.attr	= {				\
		.name = __stringify(_name),	\
		.mode = S_IRUGO,		\
	},					\
	.show	= _name##_show,			\
}
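
/*
 * Illustrative sketch (not part of this header): power_attr_ro() is the
 * read-only counterpart of power_attr(), so only a <name>_show() handler is
 * needed.  The attribute name "last_wakeup_reason" and its backing string are
 * hypothetical, purely to show the shape of a user.
 */
#if 0	/* example only */
static const char *last_wakeup_reason = "none";

static ssize_t last_wakeup_reason_show(struct kobject *kobj,
				       struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", last_wakeup_reason);
}

power_attr_ro(last_wakeup_reason);	/* mode S_IRUGO: world-readable, not writable */
#endif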

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

extern void clear_or_poison_free_pages(void);

/**
 *	Auxiliary structure used for reading the snapshot image data and
 *	metadata from and writing them to the list of page backup entries
 *	(PBEs) which is the main data structure of swsusp.
 *
 *	Using struct snapshot_handle we can transfer the image, including its
 *	metadata, as a continuous sequence of bytes with the help of
 *	snapshot_read_next() and snapshot_write_next().
 *
 *	The code that writes the image to storage or transfers it to
 *	user space is required to use snapshot_read_next() for this
 *	purpose and it should not make any assumptions regarding the internal
 *	structure of the image.  Similarly, the code that reads the image from
 *	storage or transfers it from user space is required to use
 *	snapshot_write_next().
 *
 *	This may allow us to change the internal structure of the image
 *	in the future with considerably less effort.
 */

struct snapshot_handle {
	unsigned int	cur;	/* number of the PAGE_SIZE-byte block the
				 * next operation will refer to (i.e. current)
				 */
	void		*buffer;	/* address of the block to read from
					 * or write to
					 */
	int		sync_read;	/* Set to one to notify the caller of
					 * snapshot_write_next() that it may
					 * need to call wait_on_bio_chain()
					 */
};

/* This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 */
#define data_of(handle)	((handle).buffer)

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);
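
/*
 * Illustrative sketch (not part of this header): the restore path feeds the
 * image back with snapshot_write_next(), filling the PAGE_SIZE buffer at
 * data_of(handle) after each successful call, roughly the way
 * kernel/power/swap.c and kernel/power/user.c do.  read_page() below is a
 * hypothetical source supplied by the caller.
 */
#if 0	/* example only */
static int load_image_sketch(struct snapshot_handle *handle,
			     int (*read_page)(void *buf))
{
	int ret;

	memset(handle, 0, sizeof(*handle));
	ret = snapshot_write_next(handle);	/* > 0: a buffer is ready at data_of() */
	while (ret > 0) {
		ret = read_page(data_of(*handle));	/* fill the buffer from the source */
		if (ret)
			return ret;
		ret = snapshot_write_next(handle);	/* 0 when no more data is expected */
	}
	if (ret)
		return ret;

	snapshot_write_finalize(handle);
	return snapshot_image_loaded(handle) ? 0 : -ENODATA;
}
#endif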

extern bool hibernate_acquire(void);
extern void hibernate_release(void);

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel in
 * the image header.
 */
#define SF_PLATFORM_MODE	1
#define SF_NOCOMPRESS_MODE	2
#define SF_CRC32_MODE		4
#define SF_HW_SIG		8

/* kernel/power/hibernate.c */
int swsusp_check(bool exclusive);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
void swsusp_close(void);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#else
static inline int swsusp_unmark(void) { return 0; }
#endif

struct __kernel_old_timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char * const pm_labels[];
extern const char *pm_states[];
extern const char *mem_sleep_states[];

extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define mem_sleep_current	PM_SUSPEND_ON

static inline int suspend_devices_and_enter(suspend_state_t state)
{
	return -ENOSYS;
}
#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down);
extern int pm_notifier_call_chain(unsigned long val);
void pm_restrict_gfp_mask(void);
void pm_restore_gfp_mask(void);
#else
static inline void pm_restrict_gfp_mask(void) {}
static inline void pm_restore_gfp_mask(void) {}
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
	/* keep first */
	TEST_NONE,
	TEST_CORE,
	TEST_CPUS,
	TEST_PLATFORM,
	TEST_DEVICES,
	TEST_FREEZER,
	/* keep last */
	__TEST_AFTER_LAST
};

#define TEST_FIRST	TEST_NONE
#define TEST_MAX	(__TEST_AFTER_LAST - 1)

#ifdef CONFIG_PM_SLEEP_DEBUG
extern int pm_test_level;
#else
#define pm_test_level	(TEST_NONE)
#endif

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
	int error;

	error = freeze_processes();
	/*
	 * freeze_processes() automatically thaws every task if freezing
	 * fails. So we need not do anything extra upon error.
	 */
	if (error)
		return error;

	error = freeze_kernel_threads();
	/*
	 * freeze_kernel_threads() thaws only kernel threads upon freezing
	 * failure. So we have to thaw the userspace tasks ourselves.
	 */
	if (error)
		thaw_processes();

	return error;
}

static inline void suspend_thaw_processes(void)
{
	thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
	return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);

#endif /* !CONFIG_PM_WAKELOCKS */

static inline int pm_sleep_disable_secondary_cpus(void)
{
	cpuidle_pause();
	return suspend_disable_secondary_cpus();
}

static inline void pm_sleep_enable_secondary_cpus(void)
{
	suspend_enable_secondary_cpus();
	cpuidle_resume();
}