Note: File does not exist in v5.4.
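This is mm/kmemcheck.c, the shadow-memory management code of kmemcheck, the kernel's early detector of reads from uninitialized memory. kmemcheck was removed in v4.15 in favor of KASAN, which is why the file no longer exists in v5.4. The scheme: each tracked data page is paired with a shadow page that records the initialization state of its bytes, and the data pages themselves are unmapped so that every access faults into kmemcheck for analysis.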
#include <linux/gfp.h>
#include <linux/mm_types.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/kmemcheck.h>

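/*
 * Allocate a shadow page for each of the 2^order data pages, link each
 * data page to its shadow via struct page, and unmap the data pages so
 * that every access faults into kmemcheck.
 */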
void kmemcheck_alloc_shadow(struct page *page, int order, gfp_t flags, int node)
{
	struct page *shadow;
	int pages;
	int i;

	pages = 1 << order;

	/*
	 * With kmemcheck enabled, we need to allocate a memory area for the
	 * shadow bits as well.
	 */
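	/* __GFP_NOTRACK keeps kmemcheck from tracking the shadow allocation itself. */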
	shadow = alloc_pages_node(node, flags | __GFP_NOTRACK, order);
	if (!shadow) {
		if (printk_ratelimit())
			pr_err("kmemcheck: failed to allocate shadow bitmap\n");
		return;
	}

	for (i = 0; i < pages; ++i)
		page[i].shadow = page_address(&shadow[i]);

	/*
	 * Mark it as non-present for the MMU so that our accesses to
	 * this memory will trigger a page fault and let us analyze
	 * the memory accesses.
	 */
	kmemcheck_hide_pages(page, pages);
}

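/*
 * Tear down the pairing made by kmemcheck_alloc_shadow(): map the data
 * pages back in, unlink the shadow pages and free them.
 */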
void kmemcheck_free_shadow(struct page *page, int order)
{
	struct page *shadow;
	int pages;
	int i;

	if (!kmemcheck_page_is_tracked(page))
		return;

	pages = 1 << order;

	kmemcheck_show_pages(page, pages);

	shadow = virt_to_page(page[0].shadow);

	for (i = 0; i < pages; ++i)
		page[i].shadow = NULL;

	__free_pages(shadow, order);
}

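/*
 * Slab allocation hook: decide the initial shadow state of a freshly
 * allocated object (initialized, uninitialized, or left alone for
 * objects that a constructor will fill in).
 */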
void kmemcheck_slab_alloc(struct kmem_cache *s, gfp_t gfpflags, void *object,
			  size_t size)
{
	if (unlikely(!object)) /* Skip object if allocation failed */
		return;

	/*
	 * The object has already been memset(), which initializes the
	 * shadow for us as well.
	 */
	if (gfpflags & __GFP_ZERO)
		return;

	/* No need to initialize the shadow of a non-tracked slab. */
	if (s->flags & SLAB_NOTRACK)
		return;

	if (!kmemcheck_enabled || gfpflags & __GFP_NOTRACK) {
		/*
		 * Allow notracked objects to be allocated from
		 * tracked caches. Note however that these objects
		 * will still get page faults on access, they just
		 * won't ever be flagged as uninitialized. If page
		 * faults are not acceptable, the slab cache itself
		 * should be marked NOTRACK.
		 */
		kmemcheck_mark_initialized(object, size);
	} else if (!s->ctor) {
		/*
		 * New objects should be marked uninitialized before
		 * they're returned to the caller.
		 */
		kmemcheck_mark_uninitialized(object, size);
	}
}

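/*
 * Slab free hook: mark the object's shadow as freed so that later
 * reads of the stale object are flagged.
 */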
void kmemcheck_slab_free(struct kmem_cache *s, void *object, size_t size)
{
	/* TODO: RCU freeing is unsupported for now; hide false positives. */
	if (!s->ctor && !(s->flags & SLAB_DESTROY_BY_RCU))
		kmemcheck_mark_freed(object, size);
}

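/*
 * Page allocator hook: skip highmem and explicitly untracked
 * allocations, otherwise set up shadow pages for the block and seed
 * their state according to __GFP_ZERO.
 */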
void kmemcheck_pagealloc_alloc(struct page *page, unsigned int order,
			       gfp_t gfpflags)
{
	int pages;

	if (gfpflags & (__GFP_HIGHMEM | __GFP_NOTRACK))
		return;

	pages = 1 << order;

	/*
	 * NOTE: We choose to track GFP_ZERO pages too; in fact, they
	 * can become uninitialized by copying uninitialized memory
	 * into them.
	 */

	/* XXX: Can use zone->node for node? */
	kmemcheck_alloc_shadow(page, order, gfpflags, -1);

	if (gfpflags & __GFP_ZERO)
		kmemcheck_mark_initialized_pages(page, pages);
	else
		kmemcheck_mark_uninitialized_pages(page, pages);
}
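
For context, these hooks were driven from the allocators themselves. A minimal sketch of the page-allocator side follows; the function name is hypothetical and the historical call site (the page-preparation path in mm/page_alloc.c) is not reproduced verbatim:

/*
 * Hypothetical sketch, not verbatim kernel code: how the page allocator
 * would invoke the kmemcheck hook when handing out a fresh block. Only
 * identifiers used elsewhere in this file are assumed to exist.
 */
static void sketch_prep_new_page(struct page *page, unsigned int order,
				 gfp_t gfp_flags)
{
	/* Only hook in when kmemcheck is enabled at runtime. */
	if (kmemcheck_enabled)
		kmemcheck_pagealloc_alloc(page, order, gfp_flags);
}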