mm/mm_init.c (v4.17)
 
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT	0
#endif

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid) {
#ifdef CONFIG_NUMA
				pr_cont("%d:%s ", zone->node, zone->name);
#else
				pr_cont("0:%s ", zone->name);
#endif /* CONFIG_NUMA */
			}
			pr_cont("\n");
		}
	}
}

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH - LAST_CPUPID_SHIFT;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

static void __meminit mm_compute_batch(void)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);

	/* batch size set to 0.4% of (total memory/#cpus), or max int32 */
	memsized_batch = min_t(u64, (totalram_pages/nr)/256, 0x7fffffff);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch();
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch();
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);
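
The "check for bitmask overlaps" at the end of mminit_verify_pageflags_layout() rests on a small arithmetic identity: if the SECTIONS, NODES and ZONES fields occupy disjoint bit ranges of page->flags, OR-ing their shifted masks and adding them produce the same value, while any shared bit generates a carry and makes the sum differ, which is exactly what BUG_ON(or_mask != add_mask) rejects. The following is a minimal user-space sketch of that identity, not kernel code; the field widths and shifts are made-up example values, and it assumes a 64-bit unsigned long.

/*
 * Standalone illustration of the or_mask/add_mask overlap check.
 * The widths and shifts are hypothetical, not the kernel's real layout.
 * Assumes a 64-bit unsigned long.
 */
#include <stdio.h>

static unsigned long field_mask(int width, int shift)
{
	return ((1UL << width) - 1) << shift;
}

static void report(const char *label, unsigned long zones,
		   unsigned long nodes, unsigned long sections)
{
	unsigned long or_mask = zones | nodes | sections;
	unsigned long add_mask = zones + nodes + sections;

	printf("%s: or=%#lx add=%#lx %s\n", label, or_mask, add_mask,
	       or_mask == add_mask ? "no overlap" : "OVERLAP");
}

int main(void)
{
	/* Disjoint layout: zones in bits 62-63, nodes 56-61, sections 48-55 */
	report("disjoint", field_mask(2, 62), field_mask(6, 56), field_mask(8, 48));

	/* Broken layout: nodes shifted up so they collide with the zone bits */
	report("overlap ", field_mask(2, 62), field_mask(6, 58), field_mask(8, 48));

	return 0;
}

In the kernel the same identity is applied to the real SECTIONS/NODES/ZONES masks whenever CONFIG_DEBUG_MEMORY_INIT is enabled; the mminit_dprintk() trace lines above additionally require a sufficiently high mminit_loglevel= value on the boot command line.
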
mm/mm_init.c (v5.9)

// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT	0
#endif

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX
	 */
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block compute_batch_nb __meminitdata = {
	.notifier_call = mm_compute_batch_notifier,
	.priority = IPC_CALLBACK_PRI, /* use lowest priority */
};

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	register_hotmemory_notifier(&compute_batch_nb);

	return 0;
}

__initcall(mm_compute_batch_init);

#endif

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);
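
Compared with the v4.17 version, the v5.9 mm_compute_batch() takes the overcommit policy into account when sizing the percpu_counter batch for vm_committed_as: roughly 0.4% of the per-CPU share of RAM (ram_pages/nr/256) under OVERCOMMIT_NEVER, where the committed count has to stay accurate, and 25% (ram_pages/nr/4) under the other policies, capped at INT_MAX and never below max(2 * nr_cpus, 32). The user-space sketch below just replays that arithmetic; the machine parameters and the compute_batch() helper are hypothetical, not kernel code.

/*
 * Standalone sketch of the v5.9 batch-size arithmetic above; not kernel
 * code.  The machine parameters (16 GiB RAM, 4 KiB pages, 8 CPUs) and the
 * compute_batch() helper are hypothetical.  The OVERCOMMIT_* values mirror
 * the kernel's constants.
 */
#include <stdint.h>
#include <stdio.h>

#define OVERCOMMIT_GUESS	0
#define OVERCOMMIT_NEVER	2

static int32_t compute_batch(uint64_t ram_pages, int32_t nr_cpus, int policy)
{
	/* Never drop below 2 updates per CPU or 32, whichever is larger. */
	int32_t batch = nr_cpus * 2 > 32 ? nr_cpus * 2 : 32;
	/* ~0.4% of the per-CPU share of RAM for OVERCOMMIT_NEVER, ~25% otherwise. */
	uint64_t divisor = (policy == OVERCOMMIT_NEVER) ? 256 : 4;
	uint64_t memsized = ram_pages / nr_cpus / divisor;

	if (memsized > INT32_MAX)	/* the kernel caps at INT_MAX */
		memsized = INT32_MAX;
	return memsized > (uint64_t)batch ? (int32_t)memsized : batch;
}

int main(void)
{
	uint64_t ram_pages = (16ULL << 30) / 4096;	/* 16 GiB of 4 KiB pages */
	int32_t cpus = 8;

	printf("OVERCOMMIT_NEVER: batch = %d pages\n",
	       (int)compute_batch(ram_pages, cpus, OVERCOMMIT_NEVER));
	printf("other policies:   batch = %d pages\n",
	       (int)compute_batch(ram_pages, cpus, OVERCOMMIT_GUESS));
	return 0;
}

With these example numbers the batch works out to 2048 pages under OVERCOMMIT_NEVER and 131072 pages otherwise, against the floor of 32; the v4.17 version above always used the 1/256 divisor with a hard 0x7fffffff cap.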