v5.9
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
#include <linux/kasan.h>

static bool want_page_poisoning __read_mostly;

static int __init early_page_poison_param(char *buf)
{
	if (!buf)
		return -EINVAL;
	return strtobool(buf, &want_page_poisoning);
}
early_param("page_poison", early_page_poison_param);

/**
 * page_poisoning_enabled - check if page poisoning is enabled
 *
 * Return true if page poisoning is enabled, or false if not.
 */
bool page_poisoning_enabled(void)
{
	/*
	 * Assumes that debug_pagealloc_enabled is set before
	 * memblock_free_all.
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options is enabled, enable poisoning.
	 */
	return (want_page_poisoning ||
		(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
		debug_pagealloc_enabled()));
}
EXPORT_SYMBOL_GPL(page_poisoning_enabled);

static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	/* KASAN still thinks the page is in-use, so skip it. */
	kasan_disable_current();
	memset(addr, PAGE_POISON, PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

/* True when a and b differ in exactly one bit: a ^ b is a power of two. */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
		return;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}

static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page);
	/*
	 * Page poisoning when enabled poisons each and every page
	 * that is freed to buddy. Thus no extra check is done to
	 * see if a page was poisoned.
	 */
	check_poison_mem(addr, PAGE_SIZE);
	kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (!page_poisoning_enabled())
		return;

	if (enable)
		unpoison_pages(page, numpages);
	else
		poison_pages(page, numpages);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif
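
The listing above (mm/page_poison.c as of v5.9) only defines the poisoning hooks; the calls come from the page allocator's free and allocation paths, and poisoning is switched on with the page_poison=on boot parameter parsed by early_page_poison_param(), or implicitly via debug_pagealloc on architectures without CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC. Below is a minimal sketch of the caller side; my_free_hook() and my_alloc_hook() are hypothetical names used purely for illustration (in the real kernel the call sites live in the buddy allocator, mm/page_alloc.c).

/*
 * Hedged sketch of how an allocator-side caller would drive the hooks
 * above. my_free_hook() and my_alloc_hook() are hypothetical names,
 * not real mm/page_alloc.c functions.
 */
#include <linux/mm.h>

static void my_free_hook(struct page *page, unsigned int order)
{
	/* Pages returning to the free lists get filled with PAGE_POISON. */
	kernel_poison_pages(page, 1 << order, 0);
}

static void my_alloc_hook(struct page *page, unsigned int order)
{
	/*
	 * Pages being handed out again are checked for intact poison,
	 * which catches writes made while the pages sat on the free lists.
	 */
	kernel_poison_pages(page, 1 << order, 1);
}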
v4.17
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>

static bool want_page_poisoning __read_mostly;

static int __init early_page_poison_param(char *buf)
{
	if (!buf)
		return -EINVAL;
	return strtobool(buf, &want_page_poisoning);
}
early_param("page_poison", early_page_poison_param);

bool page_poisoning_enabled(void)
{
	/*
	 * Assumes that debug_pagealloc_enabled is set before
	 * free_all_bootmem.
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options is enabled, enable poisoning.
	 */
	return (want_page_poisoning ||
		(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
		debug_pagealloc_enabled()));
}

static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	memset(addr, PAGE_POISON, PAGE_SIZE);
	kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
		return;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}

static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page);
	/*
	 * Page poisoning when enabled poisons each and every page
	 * that is freed to buddy. Thus no extra check is done to
	 * see if a page was poisoned.
	 */
	check_poison_mem(addr, PAGE_SIZE);
	kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (!page_poisoning_enabled())
		return;

	if (enable)
		unpoison_pages(page, numpages);
	else
		poison_pages(page, numpages);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif
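
Both versions use the same single_bit_flip() heuristic in check_poison_mem(): when exactly one byte deviates from PAGE_POISON and that byte differs from the poison value in exactly one bit position, the report says "single bit error" (suggesting a hardware bit flip) rather than "memory corruption" (suggesting a software overwrite). A standalone userspace sketch of that test, assuming PAGE_POISON is 0xaa as in include/linux/poison.h without CONFIG_PAGE_POISONING_ZERO; the main() harness is purely illustrative.

#include <stdbool.h>
#include <stdio.h>

#define PAGE_POISON 0xaa	/* assumed value, see include/linux/poison.h */

/* Same test as in the kernel sources above: a ^ b must be a power of two. */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

int main(void)
{
	/* 0xab differs from 0xaa in bit 0 only: reported as a single bit error */
	printf("0xab vs poison: %d\n", single_bit_flip(0xab, PAGE_POISON));
	/* 0x00 differs from 0xaa in four bit positions: reported as corruption */
	printf("0x00 vs poison: %d\n", single_bit_flip(0x00, PAGE_POISON));
	return 0;
}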