mm/mm_init.c (v5.4)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT	0
#endif

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

static void __meminit mm_compute_batch(void)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);

	/* batch size set to 0.4% of (total memory/#cpus), or max int32 */
	memsized_batch = min_t(u64, (totalram_pages()/nr)/256, 0x7fffffff);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch();
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch();
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);
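
The BUG_ON(or_mask != add_mask) check in mminit_verify_pageflags_layout() relies on a simple property: adding bit fields that occupy disjoint bits gives the same result as OR-ing them, while any overlap produces a carry and makes the two results differ. Below is a minimal user-space sketch of the same idea; FIELD_MASK and the field widths/shifts are invented for illustration, not the kernel's real SECTIONS/NODES/ZONES layout.

/* Sketch of the "OR vs ADD" overlap check; the layout here is made up. */
#include <assert.h>
#include <stdio.h>

#define FIELD_MASK(width)	((1UL << (width)) - 1)

int main(void)
{
	/* Hypothetical layout: a 2-bit zone field at bit 30, a 6-bit node field at bit 24. */
	unsigned long zones_mask = FIELD_MASK(2), zones_pgshift = 30;
	unsigned long nodes_mask = FIELD_MASK(6), nodes_pgshift = 24;

	unsigned long or_mask  = (zones_mask << zones_pgshift) |
				 (nodes_mask << nodes_pgshift);
	unsigned long add_mask = (zones_mask << zones_pgshift) +
				 (nodes_mask << nodes_pgshift);

	/* Equal only when the fields are disjoint, mirroring BUG_ON(or_mask != add_mask). */
	assert(or_mask == add_mask);
	printf("or=%#lx add=%#lx\n", or_mask, add_mask);
	return 0;
}

Shifting the node field up so that it overlaps the zone field would make the sum carry past the OR and trip the assert, which is exactly the condition the boot-time BUG_ON catches.
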
mm/mm_init.c (v6.2)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		LRU_GEN_WIDTH,
		LRU_REFS_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX
	 */
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);
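
To get a feel for the batch sizes the v6.2 mm_compute_batch() above produces, here is a stand-alone sketch of the same arithmetic with made-up inputs (16 GiB of 4 KiB pages, 8 present CPUs); in the kernel the real values come from totalram_pages(), num_present_cpus() and sysctl_overcommit_memory, recomputed at boot and on memory hotplug.

/* Stand-alone sketch of the v6.2 batch sizing; all inputs are hypothetical. */
#include <stdio.h>
#include <stdint.h>
#include <limits.h>

#define OVERCOMMIT_NEVER	2	/* same numeric value as the kernel's policy constant */

static int64_t compute_batch(uint64_t ram_pages, int32_t nr_cpus, int policy)
{
	int64_t batch = nr_cpus * 2 > 32 ? nr_cpus * 2 : 32;
	uint64_t memsized_batch;

	/* 0.4% of the per-CPU share for OVERCOMMIT_NEVER, 25% otherwise, capped at INT_MAX */
	if (policy == OVERCOMMIT_NEVER)
		memsized_batch = ram_pages / nr_cpus / 256;
	else
		memsized_batch = ram_pages / nr_cpus / 4;
	if (memsized_batch > INT_MAX)
		memsized_batch = INT_MAX;

	return (int64_t)memsized_batch > batch ? (int64_t)memsized_batch : batch;
}

int main(void)
{
	uint64_t ram_pages = 4 * 1024 * 1024;	/* 16 GiB worth of 4 KiB pages */
	int32_t nr_cpus = 8;

	/* 4194304/8/256 = 2048 pages vs 4194304/8/4 = 131072 pages */
	printf("OVERCOMMIT_NEVER batch: %lld\n",
	       (long long)compute_batch(ram_pages, nr_cpus, OVERCOMMIT_NEVER));
	printf("other policies batch:   %lld\n",
	       (long long)compute_batch(ram_pages, nr_cpus, 0));
	return 0;
}

The v5.4 version above always used the 0.4% figure; v6.2 lifts it to 25% for the non-OVERCOMMIT_NEVER policies so the per-CPU vm_committed_as deltas are folded into the global counter less often, easing lock contention, as the comment in the function notes.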