// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
#include <linux/kasan.h>

static bool want_page_poisoning __read_mostly;

static int __init early_page_poison_param(char *buf)
{
	if (!buf)
		return -EINVAL;
	return strtobool(buf, &want_page_poisoning);
}
early_param("page_poison", early_page_poison_param);
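
/*
 * "page_poison" is parsed from the kernel command line early in boot, so
 * booting with e.g. "page_poison=on" requests poisoning; strtobool() also
 * accepts "1"/"0" and "y"/"n".
 */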

/**
 * page_poisoning_enabled - check if page poisoning is enabled
 *
 * Return true if page poisoning is enabled, or false if not.
 */
bool page_poisoning_enabled(void)
{
	/*
	 * Assumes that debug_pagealloc_enabled is set before
	 * memblock_free_all.
	 * Page poisoning serves as debug page alloc for some arches. If
	 * either of those options is enabled, enable poisoning.
	 */
	return (want_page_poisoning ||
		(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
		 debug_pagealloc_enabled()));
}
EXPORT_SYMBOL_GPL(page_poisoning_enabled);

static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	/* KASAN still thinks the page is in-use, so skip it. */
	kasan_disable_current();
	memset(addr, PAGE_POISON, PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

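/*
 * A single-bit flip means a ^ b has exactly one bit set, i.e. the XOR is a
 * non-zero power of two (e.g. 0xaa ^ 0xab == 0x01 for PAGE_POISON 0xaa);
 * (error & (error - 1)) clears the lowest set bit, so it is zero only in
 * that case.
 */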
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

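/*
 * check_poison_mem() scans for bytes that differ from PAGE_POISON:
 * memchr_inv() finds the first mismatch, the loop finds the last, and the
 * corrupted range is hex-dumped, rate-limited to avoid flooding the log.
 */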
static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
		return;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}

static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page);
	/*
	 * When page poisoning is enabled, each and every page freed to
	 * the buddy allocator is poisoned, so no extra check is needed
	 * to see whether this page was actually poisoned.
	 */
	check_poison_mem(addr, PAGE_SIZE);
	kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (!page_poisoning_enabled())
		return;

	if (enable)
		unpoison_pages(page, numpages);
	else
		poison_pages(page, numpages);
}
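
/*
 * Illustrative callers (the real call sites live in mm/page_alloc.c and may
 * differ between kernel versions):
 *
 *	free path:  kernel_poison_pages(page, 1 << order, 0);
 *	alloc path: kernel_poison_pages(page, 1 << order, 1);
 *
 * enable != 0 means the pages are being allocated, so the poison pattern is
 * checked for corruption; enable == 0 means the pages are being freed, so
 * they are filled with PAGE_POISON.
 */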

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing; all work is done via poison pages. */
}
#endif
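
/*
 * Older variant of this file follows: instead of unconditionally poisoning
 * every freed page, it tracks poisoned pages with a PAGE_EXT_DEBUG_POISON
 * bit in struct page_ext and only checks pages that carry that bit.
 */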
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>

static bool __page_poisoning_enabled __read_mostly;
static bool want_page_poisoning __read_mostly;

static int early_page_poison_param(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (strcmp(buf, "on") == 0)
		want_page_poisoning = true;
	else if (strcmp(buf, "off") == 0)
		want_page_poisoning = false;

	return 0;
}
early_param("page_poison", early_page_poison_param);

bool page_poisoning_enabled(void)
{
	return __page_poisoning_enabled;
}

static bool need_page_poisoning(void)
{
	return want_page_poisoning;
}

static void init_page_poisoning(void)
{
	/*
	 * Page poisoning serves as debug page alloc for some arches. If
	 * either of those options is enabled, enable poisoning.
	 */
	if (!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC)) {
		if (!want_page_poisoning && !debug_pagealloc_enabled())
			return;
	} else {
		if (!want_page_poisoning)
			return;
	}

	__page_poisoning_enabled = true;
}

struct page_ext_operations page_poisoning_ops = {
	.need = need_page_poisoning,
	.init = init_page_poisoning,
};
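
/*
 * The page_ext core is assumed to hold a reference to page_poisoning_ops in
 * its operations table (mm/page_ext.c): it calls .need() during boot to
 * decide whether extension space is required and .init() afterwards, which
 * latches __page_poisoning_enabled.
 */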

static inline void set_page_poison(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}

static inline void clear_page_poison(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	if (unlikely(!page_ext))
		return;

	__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}

bool page_is_poisoned(struct page *page)
{
	struct page_ext *page_ext;

	page_ext = lookup_page_ext(page);
	if (!page_ext)
		return false;

	return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
}

static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	set_page_poison(page);
	memset(addr, PAGE_POISON, PAGE_SIZE);
	kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
		return;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}

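/*
 * Unlike the newer variant above, only pages whose PAGE_EXT_DEBUG_POISON
 * bit is set are checked here; the bit is cleared once the page is handed
 * back out.
 */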
static void unpoison_page(struct page *page)
{
	void *addr;

	if (!page_is_poisoned(page))
		return;

	addr = kmap_atomic(page);
	check_poison_mem(addr, PAGE_SIZE);
	clear_page_poison(page);
	kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (!page_poisoning_enabled())
		return;

	if (enable)
		unpoison_pages(page, numpages);
	else
		poison_pages(page, numpages);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing; all work is done via poison pages. */
}
#endif