v4.6
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT	0
#endif

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid) {
#ifdef CONFIG_NUMA
				pr_cont("%d:%s ", zone->node, zone->name);
#else
				pr_cont("0:%s ", zone->name);
#endif /* CONFIG_NUMA */
			}
			pr_cont("\n");
		}
	}
}
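When mminit_loglevel is at least MMINIT_VERIFY, each populated zone produces one KERN_DEBUG line per zonelist. A hypothetical example of the output shape on a single-node NUMA-enabled x86-64 machine (node and zone names illustrative only):

	mminit::zonelist general 0:Normal = 0:Normal 0:DMA32 0:DMA
	mminit::zonelist thisnode 0:Normal = 0:Normal 0:DMA32 0:DMA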

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}
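The or_mask/add_mask comparison at the end is a compact disjointness check: for operands with no set bits in common, bitwise OR and integer addition produce the same value, while any overlap makes the addition carry into higher bits and the two results diverge. A minimal standalone sketch of the same idea (userspace C, not kernel code; the masks are made up for illustration):

#include <assert.h>

int main(void)
{
	unsigned long a = 0x3UL << 4;	/* bits 4-5 */
	unsigned long b = 0x7UL << 8;	/* bits 8-10 */
	unsigned long c = 0x3UL << 5;	/* bits 5-6: overlaps a */

	assert((a | b) == (a + b));	/* disjoint fields: OR == ADD */
	assert((a | c) != (a + c));	/* overlapping fields: the add carries */
	return 0;
}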

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */
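Thanks to the early_param() above, mminit_loglevel can be set on the kernel command line; per Documentation/kernel-parameters.txt, 0 disables mminit logging and 4 logs everything, so booting with e.g. mminit_loglevel=4 enables both the zonelist verification and the MMINIT_TRACE output shown here.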

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

static void __meminit mm_compute_batch(void)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);

	/* batch size set to 0.4% of (total memory/#cpus), or max int32 */
	memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}
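A worked example for a hypothetical machine with 16 GiB of RAM, 4 KiB pages and 4 present CPUs: totalram_pages = 4,194,304, so (4194304/4)/256 = 4096 and vm_committed_as_batch becomes 4096 pages. The 1/256 divisor is where the 0.4% in the comment comes from (1/256 ≈ 0.39%); the max_t(s32, nr*2, 32) floor only wins on machines with very little memory per CPU.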

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch();
		/* fall through to the default break */
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch();
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);
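kobject_create_and_add("mm", kernel_kobj) creates the /sys/kernel/mm directory and publishes it via mm_kobj, so other mm subsystems (hugepages, ksm and friends) can attach their sysfs groups under it; running at postcore_initcall time makes the kobject available before those users initialise.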
v5.14.15
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}
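Differences from the v4.6 version above: the MAX_ZONELISTS sanity check is now a compile-time BUILD_BUG_ON() rather than a runtime BUG_ON(), and the open-coded CONFIG_NUMA #ifdef around pr_cont() has been replaced by zone_to_nid(), which returns 0 on !NUMA builds. The SECTIONS_SHIFT fallback #define is gone as well.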

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}
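Relative to v4.6, the layout accounting and the width/shift/pgshift reports now include KASAN_TAG_WIDTH, since tag-based KASAN stores its tag in page->flags; the or_mask/add_mask disjointness check at the end is unchanged (see the sketch after the v4.6 version).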

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX
	 */
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}
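Using the same hypothetical 16 GiB / 4-CPU machine as before (ram_pages = 4,194,304): OVERCOMMIT_NEVER yields 4194304/4/256 = 4096 pages, while OVERCOMMIT_ALWAYS and OVERCOMMIT_GUESS yield 4194304/4/4 = 262,144 pages, a 64 times larger batch that trades per-CPU accounting slack for less contention on the vm_committed_as percpu_counter.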

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);