#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>

struct swsusp_info {
        struct new_utsname uts;
        u32 version_code;
        unsigned long num_physpages;
        int cpus;
        unsigned long image_pages;
        unsigned long pages;
        unsigned long size;
} __attribute__((aligned(PAGE_SIZE)));

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4)

extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
extern int arch_hibernation_header_restore(void *addr);

static inline int init_header_complete(struct swsusp_info *info)
{
        return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline char *check_image_kernel(struct swsusp_info *info)
{
        return arch_hibernation_header_restore(info) ?
                        "architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)
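
/*
 * Illustrative arithmetic, assuming the common PAGE_SHIFT of 12 (4 KiB
 * pages): PAGES_FOR_IO evaluates to (4096 * 1024) >> 12 == 1024 pages
 * (4 MB) and SPARE_PAGES to (1024 * 1024) >> 12 == 256 pages (1 MB).
 * Architectures with larger pages reserve the same number of bytes in
 * fewer pages.
 */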

/* kernel/power/hibernate.c */
extern bool freezer_test_done;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

extern int pfn_is_nosave(unsigned long);

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = { \
        .attr = { \
                .name = __stringify(_name), \
                .mode = 0644, \
        }, \
        .show = _name##_show, \
        .store = _name##_store, \
}
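
/*
 * Usage sketch for power_attr(): a read-write sysfs attribute "foo" is
 * expected to supply foo_show() and foo_store() with the standard
 * kobj_attribute signatures before invoking the macro.  "foo" and
 * foo_value are hypothetical names used only for illustration:
 *
 *      static ssize_t foo_show(struct kobject *kobj,
 *                              struct kobj_attribute *attr, char *buf)
 *      {
 *              return sprintf(buf, "%d\n", foo_value);
 *      }
 *
 *      static ssize_t foo_store(struct kobject *kobj,
 *                               struct kobj_attribute *attr,
 *                               const char *buf, size_t n)
 *      {
 *              return kstrtoint(buf, 10, &foo_value) ?: n;
 *      }
 *
 *      power_attr(foo);
 */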

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern asmlinkage int swsusp_arch_suspend(void);
extern asmlinkage int swsusp_arch_resume(void);

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

/**
 * Auxiliary structure used for reading the snapshot image data and
 * metadata from and writing them to the list of page backup entries
 * (PBEs), which is the main data structure of swsusp.
 *
 * Using struct snapshot_handle we can transfer the image, including its
 * metadata, as a continuous sequence of bytes with the help of
 * snapshot_read_next() and snapshot_write_next().
 *
 * The code that writes the image to storage or transfers it to
 * user space is required to use snapshot_read_next() for this
 * purpose and it should not make any assumptions regarding the internal
 * structure of the image.  Similarly, the code that reads the image from
 * storage or transfers it from user space is required to use
 * snapshot_write_next().
 *
 * This may allow us to change the internal structure of the image
 * in the future with considerably less effort.
 */

struct snapshot_handle {
        unsigned int cur;       /* number of the block of PAGE_SIZE bytes the
                                 * next operation will refer to (i.e. current)
                                 */
        void *buffer;           /* address of the block to read from
                                 * or write to
                                 */
        int sync_read;          /* Set to one to notify the caller of
                                 * snapshot_write_next() that it may
                                 * need to call wait_on_bio_chain()
                                 */
};

/* This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 */
#define data_of(handle) ((handle).buffer)
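
/*
 * Sketch of the intended read-side usage (an illustration, with error
 * handling elided and write_page() standing in as a hypothetical helper
 * for the actual output path): keep calling snapshot_read_next() and
 * consume PAGE_SIZE bytes from data_of(handle) after every positive
 * return, stopping on zero (image complete) or a negative error code.
 *
 *      struct snapshot_handle handle = {};
 *      int ret;
 *
 *      while ((ret = snapshot_read_next(&handle)) > 0)
 *              write_page(data_of(handle));
 */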

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
extern void snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);

/* If unset, the snapshot device cannot be opened. */
extern atomic_t snapshot_device_available;

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel
 * in the image header.
 */
#define SF_PLATFORM_MODE 1
#define SF_NOCOMPRESS_MODE 2
#define SF_CRC32_MODE 4
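
/*
 * Illustrative use of these flags (an assumption about the typical flow,
 * not a requirement stated here): the boot kernel obtains them through
 * swsusp_read() and branches on the individual bits, e.g.
 *
 *      unsigned int flags;
 *
 *      if (!swsusp_read(&flags)) {
 *              bool platform = flags & SF_PLATFORM_MODE;
 *              bool compressed = !(flags & SF_NOCOMPRESS_MODE);
 *              ...
 *      }
 */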

/* kernel/power/hibernate.c */
extern int swsusp_check(void);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
extern void swsusp_close(fmode_t);

/* kernel/power/block_io.c */
extern struct block_device *hib_resume_bdev;

extern int hib_bio_read_page(pgoff_t page_off, void *addr,
                struct bio **bio_chain);
extern int hib_bio_write_page(pgoff_t page_off, void *addr,
                struct bio **bio_chain);
extern int hib_wait_on_bio_chain(struct bio **bio_chain);

struct timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(struct timeval *, struct timeval *,
                unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char *const pm_states[];

extern bool valid_state(suspend_state_t state);
extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
static inline int suspend_devices_and_enter(suspend_state_t state)
{
        return -ENOSYS;
}
static inline bool valid_state(suspend_state_t state) { return false; }
#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain(unsigned long val);
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
        /* keep first */
        TEST_NONE,
        TEST_CORE,
        TEST_CPUS,
        TEST_PLATFORM,
        TEST_DEVICES,
        TEST_FREEZER,
        /* keep last */
        __TEST_AFTER_LAST
};

#define TEST_FIRST TEST_NONE
#define TEST_MAX (__TEST_AFTER_LAST - 1)

extern int pm_test_level;

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
        int error;

        error = freeze_processes();
        /*
         * freeze_processes() automatically thaws every task if freezing
         * fails. So we need not do anything extra upon error.
         */
        if (error)
                return error;

        error = freeze_kernel_threads();
        /*
         * freeze_kernel_threads() thaws only kernel threads upon freezing
         * failure. So we have to thaw the userspace tasks ourselves.
         */
        if (error)
                thaw_processes();

        return error;
}

static inline void suspend_thaw_processes(void)
{
        thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
        return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);

#endif /* CONFIG_PM_WAKELOCKS */
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/suspend.h>
#include <linux/suspend_ioctls.h>
#include <linux/utsname.h>
#include <linux/freezer.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/cpuidle.h>
#include <linux/crypto.h>

struct swsusp_info {
        struct new_utsname uts;
        u32 version_code;
        unsigned long num_physpages;
        int cpus;
        unsigned long image_pages;
        unsigned long pages;
        unsigned long size;
} __aligned(PAGE_SIZE);

#ifdef CONFIG_HIBERNATION
/* kernel/power/snapshot.c */
extern void __init hibernate_reserved_size_init(void);
extern void __init hibernate_image_size_init(void);

#ifdef CONFIG_ARCH_HIBERNATION_HEADER
/* Maximum size of architecture specific data in a hibernation header */
#define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4)

static inline int init_header_complete(struct swsusp_info *info)
{
        return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
}

static inline const char *check_image_kernel(struct swsusp_info *info)
{
        return arch_hibernation_header_restore(info) ?
                        "architecture specific data" : NULL;
}
#endif /* CONFIG_ARCH_HIBERNATION_HEADER */

/*
 * Keep some memory free so that I/O operations can succeed without paging
 * [Might this be more than 4 MB?]
 */
#define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT)

/*
 * Keep 1 MB of memory free so that device drivers can allocate some pages in
 * their .suspend() routines without breaking the suspend to disk.
 */
#define SPARE_PAGES ((1024 * 1024) >> PAGE_SHIFT)

asmlinkage int swsusp_save(void);

/* kernel/power/hibernate.c */
extern bool freezer_test_done;
extern char hib_comp_algo[CRYPTO_MAX_ALG_NAME];

/* kernel/power/swap.c */
extern unsigned int swsusp_header_flags;

extern int hibernation_snapshot(int platform_mode);
extern int hibernation_restore(int platform_mode);
extern int hibernation_platform_enter(void);

#ifdef CONFIG_STRICT_KERNEL_RWX
/* kernel/power/snapshot.c */
extern void enable_restore_image_protection(void);
#else
static inline void enable_restore_image_protection(void) {}
#endif /* CONFIG_STRICT_KERNEL_RWX */

#else /* !CONFIG_HIBERNATION */

static inline void hibernate_reserved_size_init(void) {}
static inline void hibernate_image_size_init(void) {}
#endif /* !CONFIG_HIBERNATION */

#define power_attr(_name) \
static struct kobj_attribute _name##_attr = { \
        .attr = { \
                .name = __stringify(_name), \
                .mode = 0644, \
        }, \
        .show = _name##_show, \
        .store = _name##_store, \
}

#define power_attr_ro(_name) \
static struct kobj_attribute _name##_attr = { \
        .attr = { \
                .name = __stringify(_name), \
                .mode = S_IRUGO, \
        }, \
        .show = _name##_show, \
}
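
/*
 * Usage sketch for power_attr_ro(): only a ->show() callback is wired up,
 * so a read-only attribute "bar" needs just the show routine.  "bar" and
 * bar_value are hypothetical names used only for illustration:
 *
 *      static ssize_t bar_show(struct kobject *kobj,
 *                              struct kobj_attribute *attr, char *buf)
 *      {
 *              return sysfs_emit(buf, "%u\n", bar_value);
 *      }
 *
 *      power_attr_ro(bar);
 */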

/* Preferred image size in bytes (default 500 MB) */
extern unsigned long image_size;
/* Size of memory reserved for drivers (default SPARE_PAGES x PAGE_SIZE) */
extern unsigned long reserved_size;
extern int in_suspend;
extern dev_t swsusp_resume_device;
extern sector_t swsusp_resume_block;

extern int create_basic_memory_bitmaps(void);
extern void free_basic_memory_bitmaps(void);
extern int hibernate_preallocate_memory(void);

extern void clear_or_poison_free_pages(void);

/**
 * Auxiliary structure used for reading the snapshot image data and
 * metadata from and writing them to the list of page backup entries
 * (PBEs), which is the main data structure of swsusp.
 *
 * Using struct snapshot_handle we can transfer the image, including its
 * metadata, as a continuous sequence of bytes with the help of
 * snapshot_read_next() and snapshot_write_next().
 *
 * The code that writes the image to storage or transfers it to
 * user space is required to use snapshot_read_next() for this
 * purpose and it should not make any assumptions regarding the internal
 * structure of the image.  Similarly, the code that reads the image from
 * storage or transfers it from user space is required to use
 * snapshot_write_next().
 *
 * This may allow us to change the internal structure of the image
 * in the future with considerably less effort.
 */

struct snapshot_handle {
        unsigned int cur;       /* number of the block of PAGE_SIZE bytes the
                                 * next operation will refer to (i.e. current)
                                 */
        void *buffer;           /* address of the block to read from
                                 * or write to
                                 */
        int sync_read;          /* Set to one to notify the caller of
                                 * snapshot_write_next() that it may
                                 * need to call wait_on_bio_chain()
                                 */
};

/* This macro returns the address from/to which the caller of
 * snapshot_read_next()/snapshot_write_next() is allowed to
 * read/write data after the function returns
 */
#define data_of(handle) ((handle).buffer)
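
/*
 * Sketch of the intended write-side (restore) usage, with error handling
 * elided and read_page() standing in as a hypothetical helper for the
 * actual input path: fill data_of(handle) with the next PAGE_SIZE bytes
 * of image data after every positive return of snapshot_write_next(),
 * then finish with snapshot_write_finalize().
 *
 *      struct snapshot_handle handle = {};
 *      int ret;
 *
 *      while ((ret = snapshot_write_next(&handle)) > 0)
 *              read_page(data_of(handle));
 *      if (!ret)
 *              ret = snapshot_write_finalize(&handle);
 */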

extern unsigned int snapshot_additional_pages(struct zone *zone);
extern unsigned long snapshot_get_image_size(void);
extern int snapshot_read_next(struct snapshot_handle *handle);
extern int snapshot_write_next(struct snapshot_handle *handle);
int snapshot_write_finalize(struct snapshot_handle *handle);
extern int snapshot_image_loaded(struct snapshot_handle *handle);

extern bool hibernate_acquire(void);
extern void hibernate_release(void);

extern sector_t alloc_swapdev_block(int swap);
extern void free_all_swap_pages(int swap);
extern int swsusp_swap_in_use(void);

/*
 * Flags that can be passed from the hibernating kernel to the "boot" kernel
 * in the image header.
 */
#define SF_COMPRESSION_ALG_LZO 0 /* dummy, details given below */
#define SF_PLATFORM_MODE 1
#define SF_NOCOMPRESS_MODE 2
#define SF_CRC32_MODE 4
#define SF_HW_SIG 8

/*
 * Bit to indicate the compression algorithm to be used (for LZ4).  The same
 * bit can be checked while saving/loading the image to/from disk to select
 * the corresponding algorithm.
 *
 * By default, LZO compression is enabled if SF_CRC32_MODE is set.  Use
 * SF_COMPRESSION_ALG_LZ4 to override this behaviour and use LZ4.
 *
 * SF_CRC32_MODE, SF_COMPRESSION_ALG_LZO (dummy) -> compression with LZO
 * SF_CRC32_MODE, SF_COMPRESSION_ALG_LZ4         -> compression with LZ4
 */
#define SF_COMPRESSION_ALG_LZ4 16
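
/*
 * Illustrative decoding of the compression bits above (an assumption about
 * how a loader may use them, not a contract stated here): with "flags"
 * taken from the image header,
 *
 *      const char *algo = "lzo";
 *
 *      if ((flags & SF_CRC32_MODE) && (flags & SF_COMPRESSION_ALG_LZ4))
 *              algo = "lz4";
 *
 * the resulting name could then be used to select the (de)compression
 * backend, e.g. the one recorded in hib_comp_algo earlier in this header.
 */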

/* kernel/power/hibernate.c */
int swsusp_check(bool exclusive);
extern void swsusp_free(void);
extern int swsusp_read(unsigned int *flags_p);
extern int swsusp_write(unsigned int flags);
void swsusp_close(void);
#ifdef CONFIG_SUSPEND
extern int swsusp_unmark(void);
#else
static inline int swsusp_unmark(void) { return 0; }
#endif

struct __kernel_old_timeval;
/* kernel/power/swsusp.c */
extern void swsusp_show_speed(ktime_t, ktime_t, unsigned int, char *);

#ifdef CONFIG_SUSPEND
/* kernel/power/suspend.c */
extern const char * const pm_labels[];
extern const char *pm_states[];
extern const char *mem_sleep_states[];

extern int suspend_devices_and_enter(suspend_state_t state);
#else /* !CONFIG_SUSPEND */
#define mem_sleep_current PM_SUSPEND_ON

static inline int suspend_devices_and_enter(suspend_state_t state)
{
        return -ENOSYS;
}
#endif /* !CONFIG_SUSPEND */

#ifdef CONFIG_PM_TEST_SUSPEND
/* kernel/power/suspend_test.c */
extern void suspend_test_start(void);
extern void suspend_test_finish(const char *label);
#else /* !CONFIG_PM_TEST_SUSPEND */
static inline void suspend_test_start(void) {}
static inline void suspend_test_finish(const char *label) {}
#endif /* !CONFIG_PM_TEST_SUSPEND */

#ifdef CONFIG_PM_SLEEP
/* kernel/power/main.c */
extern int pm_notifier_call_chain_robust(unsigned long val_up, unsigned long val_down);
extern int pm_notifier_call_chain(unsigned long val);
void pm_restrict_gfp_mask(void);
void pm_restore_gfp_mask(void);
#else
static inline void pm_restrict_gfp_mask(void) {}
static inline void pm_restore_gfp_mask(void) {}
#endif

#ifdef CONFIG_HIGHMEM
int restore_highmem(void);
#else
static inline unsigned int count_highmem_pages(void) { return 0; }
static inline int restore_highmem(void) { return 0; }
#endif

/*
 * Suspend test levels
 */
enum {
        /* keep first */
        TEST_NONE,
        TEST_CORE,
        TEST_CPUS,
        TEST_PLATFORM,
        TEST_DEVICES,
        TEST_FREEZER,
        /* keep last */
        __TEST_AFTER_LAST
};

#define TEST_FIRST TEST_NONE
#define TEST_MAX (__TEST_AFTER_LAST - 1)

#ifdef CONFIG_PM_SLEEP_DEBUG
extern int pm_test_level;
#else
#define pm_test_level (TEST_NONE)
#endif

#ifdef CONFIG_SUSPEND_FREEZER
static inline int suspend_freeze_processes(void)
{
        int error;

        error = freeze_processes();
        /*
         * freeze_processes() automatically thaws every task if freezing
         * fails. So we need not do anything extra upon error.
         */
        if (error)
                return error;

        error = freeze_kernel_threads();
        /*
         * freeze_kernel_threads() thaws only kernel threads upon freezing
         * failure. So we have to thaw the userspace tasks ourselves.
         */
        if (error)
                thaw_processes();

        return error;
}

static inline void suspend_thaw_processes(void)
{
        thaw_processes();
}
#else
static inline int suspend_freeze_processes(void)
{
        return 0;
}

static inline void suspend_thaw_processes(void)
{
}
#endif

#ifdef CONFIG_PM_AUTOSLEEP

/* kernel/power/autosleep.c */
extern int pm_autosleep_init(void);
extern int pm_autosleep_lock(void);
extern void pm_autosleep_unlock(void);
extern suspend_state_t pm_autosleep_state(void);
extern int pm_autosleep_set_state(suspend_state_t state);

#else /* !CONFIG_PM_AUTOSLEEP */

static inline int pm_autosleep_init(void) { return 0; }
static inline int pm_autosleep_lock(void) { return 0; }
static inline void pm_autosleep_unlock(void) {}
static inline suspend_state_t pm_autosleep_state(void) { return PM_SUSPEND_ON; }

#endif /* !CONFIG_PM_AUTOSLEEP */

#ifdef CONFIG_PM_WAKELOCKS

/* kernel/power/wakelock.c */
extern ssize_t pm_show_wakelocks(char *buf, bool show_active);
extern int pm_wake_lock(const char *buf);
extern int pm_wake_unlock(const char *buf);

#endif /* CONFIG_PM_WAKELOCKS */

static inline int pm_sleep_disable_secondary_cpus(void)
{
        cpuidle_pause();
        return suspend_disable_secondary_cpus();
}

static inline void pm_sleep_enable_secondary_cpus(void)
{
        suspend_enable_secondary_cpus();
        cpuidle_resume();
}
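
/*
 * Typical pairing of the two helpers above (an illustration of the
 * expected call pattern, not a definition given here): pause cpuidle and
 * take the nonboot CPUs down before entering the sleep state on the boot
 * CPU, then bring everything back in reverse order.
 *
 *      error = pm_sleep_disable_secondary_cpus();
 *      if (!error) {
 *              error = do_enter_sleep_state();
 *              pm_sleep_enable_secondary_cpus();
 *      }
 *
 * do_enter_sleep_state() is a hypothetical stand-in for the actual
 * platform entry path.
 */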

void dpm_save_errno(int err);