mm/page_poison.c (v5.9)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
#include <linux/kasan.h>

static bool want_page_poisoning __read_mostly;

static int __init early_page_poison_param(char *buf)
{
	if (!buf)
		return -EINVAL;
	return strtobool(buf, &want_page_poisoning);
}
early_param("page_poison", early_page_poison_param);

/**
 * page_poisoning_enabled - check if page poisoning is enabled
 *
 * Return true if page poisoning is enabled, or false if not.
 */
bool page_poisoning_enabled(void)
{
	/*
	 * Assumes that debug_pagealloc_enabled is set before
	 * memblock_free_all.
	 * Page poisoning is debug page alloc for some arches. If
	 * either of those options is enabled, enable poisoning.
	 */
	return (want_page_poisoning ||
		(!IS_ENABLED(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) &&
		debug_pagealloc_enabled()));
}
EXPORT_SYMBOL_GPL(page_poisoning_enabled);

static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	/* KASAN still thinks the page is in-use, so skip it. */
	kasan_disable_current();
	memset(addr, PAGE_POISON, PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

static void poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

/* True only when a and b differ in exactly one bit. */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

static void check_poison_mem(unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	if (IS_ENABLED(CONFIG_PAGE_POISONING_NO_SANITY))
		return;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
}

static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page);
	/*
	 * Page poisoning when enabled poisons each and every page
	 * that is freed to buddy. Thus no extra check is done to
	 * see if a page was poisoned.
	 */
	check_poison_mem(addr, PAGE_SIZE);
	kunmap_atomic(addr);
}

static void unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

void kernel_poison_pages(struct page *page, int numpages, int enable)
{
	if (!page_poisoning_enabled())
		return;

	if (enable)
		unpoison_pages(page, numpages);
	else
		poison_pages(page, numpages);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif
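
For context, here is a minimal caller-side sketch (hypothetical, not part of this file) of how the v5.9 interface is used: the page allocator in mm/page_alloc.c poisons pages as they are freed to the buddy allocator (enable == 0) and verifies the pattern again when pages are allocated (enable == 1). The function names below are illustrative only.

/* Hypothetical v5.9 call sites; the real ones live in mm/page_alloc.c. */
static void free_pages_example(struct page *page, unsigned int order)
{
	/* On free: fill each of the 2^order pages with PAGE_POISON. */
	kernel_poison_pages(page, 1 << order, 0);
}

static void prep_new_page_example(struct page *page, unsigned int order)
{
	/* On allocation: check the poison pattern and report any corruption. */
	kernel_poison_pages(page, 1 << order, 1);
}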
mm/page_poison.c (v6.2)
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/mmdebug.h>
#include <linux/highmem.h>
#include <linux/page_ext.h>
#include <linux/poison.h>
#include <linux/ratelimit.h>
#include <linux/kasan.h>

/*
 * Set from the "page_poison" early parameter; the corresponding static
 * key is enabled later in boot so the hot-path check is a patched branch.
 */
bool _page_poisoning_enabled_early;
EXPORT_SYMBOL(_page_poisoning_enabled_early);
DEFINE_STATIC_KEY_FALSE(_page_poisoning_enabled);
EXPORT_SYMBOL(_page_poisoning_enabled);

static int __init early_page_poison_param(char *buf)
{
	return kstrtobool(buf, &_page_poisoning_enabled_early);
}
early_param("page_poison", early_page_poison_param);

static void poison_page(struct page *page)
{
	void *addr = kmap_atomic(page);

	/* KASAN still thinks the page is in-use, so skip it. */
	kasan_disable_current();
	memset(kasan_reset_tag(addr), PAGE_POISON, PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

void __kernel_poison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		poison_page(page + i);
}

/* True only when a and b differ in exactly one bit. */
static bool single_bit_flip(unsigned char a, unsigned char b)
{
	unsigned char error = a ^ b;

	return error && !(error & (error - 1));
}

static void check_poison_mem(struct page *page, unsigned char *mem, size_t bytes)
{
	static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 10);
	unsigned char *start;
	unsigned char *end;

	start = memchr_inv(mem, PAGE_POISON, bytes);
	if (!start)
		return;

	for (end = mem + bytes - 1; end > start; end--) {
		if (*end != PAGE_POISON)
			break;
	}

	if (!__ratelimit(&ratelimit))
		return;
	else if (start == end && single_bit_flip(*start, PAGE_POISON))
		pr_err("pagealloc: single bit error\n");
	else
		pr_err("pagealloc: memory corruption\n");

	print_hex_dump(KERN_ERR, "", DUMP_PREFIX_ADDRESS, 16, 1, start,
			end - start + 1, 1);
	dump_stack();
	dump_page(page, "pagealloc: corrupted page details");
}

static void unpoison_page(struct page *page)
{
	void *addr;

	addr = kmap_atomic(page);
	kasan_disable_current();
	/*
	 * Page poisoning when enabled poisons each and every page
	 * that is freed to buddy. Thus no extra check is done to
	 * see if a page was poisoned.
	 */
	check_poison_mem(page, kasan_reset_tag(addr), PAGE_SIZE);
	kasan_enable_current();
	kunmap_atomic(addr);
}

void __kernel_unpoison_pages(struct page *page, int n)
{
	int i;

	for (i = 0; i < n; i++)
		unpoison_page(page + i);
}

#ifndef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
void __kernel_map_pages(struct page *page, int numpages, int enable)
{
	/* This function does nothing, all work is done via poison pages */
}
#endif
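
In v6.2 the page_poisoning_enabled()/kernel_poison_pages() wrappers are no longer defined in this file; callers reach __kernel_poison_pages()/__kernel_unpoison_pages() through inline helpers gated by the static key declared above. The sketch below reconstructs the shape of that companion layer (roughly what include/linux/mm.h provides); it is an approximation for orientation, not verbatim kernel code.

/* Approximate caller-facing wrappers for v6.2 (sketch, not verbatim). */
extern void __kernel_poison_pages(struct page *page, int numpages);
extern void __kernel_unpoison_pages(struct page *page, int numpages);
extern bool _page_poisoning_enabled_early;
DECLARE_STATIC_KEY_FALSE(_page_poisoning_enabled);

/* Early-boot check: usable before the static key has been switched. */
static inline bool page_poisoning_enabled(void)
{
	return _page_poisoning_enabled_early;
}

/* Fast-path check: compiles down to a patched branch once the key is on. */
static inline bool page_poisoning_enabled_static(void)
{
	return static_branch_unlikely(&_page_poisoning_enabled);
}

/* Poison on free ... */
static inline void kernel_poison_pages(struct page *page, int numpages)
{
	if (page_poisoning_enabled_static())
		__kernel_poison_pages(page, numpages);
}

/* ... and verify on allocation. */
static inline void kernel_unpoison_pages(struct page *page, int numpages)
{
	if (page_poisoning_enabled_static())
		__kernel_unpoison_pages(page, numpages);
}

During boot, generic mm init code is expected to check _page_poisoning_enabled_early and enable the static key with static_branch_enable(&_page_poisoning_enabled), after which the wrappers above start calling into this file.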