/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int mminit_loglevel;

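/*
 * SECTIONS_SHIFT is presumably only provided by sparsemem configurations;
 * the fallback below keeps the layout report compiling on other memory
 * models.
 */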
#ifndef SECTIONS_SHIFT
#define SECTIONS_SHIFT 0
#endif

/* The zonelists are simply reported, validation is manual. */
void mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid) {
#ifdef CONFIG_NUMA
				printk(KERN_CONT "%d:%s ",
					zone->node, zone->name);
#else
				printk(KERN_CONT "0:%s ", zone->name);
#endif /* CONFIG_NUMA */
			}
			printk(KERN_CONT "\n");
		}
	}
}

void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_offsets",
		"Section %lu Node %lu Zone %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_zoneid",
		"Zone ID: %lu -> %lu\n",
		(unsigned long)ZONEID_PGOFF,
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT));
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d unused %d -> %d flags %d -> %d\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

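/*
 * Sanity-check that a page's flags and position encode the node, zone and
 * pfn it was initialised for.
 */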
void __meminit mminit_verify_page_links(struct page *page, enum zone_type zone,
			unsigned long nid, unsigned long pfn)
{
	BUG_ON(page_to_nid(page) != nid);
	BUG_ON(page_zonenum(page) != zone);
	BUG_ON(page_to_pfn(page) != pfn);
}

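/* Set the verbosity from the mminit_loglevel= early kernel parameter. */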
static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}

__initcall(mm_sysfs_init);
// SPDX-License-Identifier: GPL-2.0-only
/*
 * mm_init.c - Memory initialisation verification and debugging
 *
 * Copyright 2008 IBM Corporation, 2008
 * Author Mel Gorman <mel@csn.ul.ie>
 *
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/kobject.h>
#include <linux/export.h>
#include <linux/memory.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include "internal.h"

#ifdef CONFIG_DEBUG_MEMORY_INIT
int __meminitdata mminit_loglevel;

/* The zonelists are simply reported, validation is manual. */
void __init mminit_verify_zonelist(void)
{
	int nid;

	if (mminit_loglevel < MMINIT_VERIFY)
		return;

	for_each_online_node(nid) {
		pg_data_t *pgdat = NODE_DATA(nid);
		struct zone *zone;
		struct zoneref *z;
		struct zonelist *zonelist;
		int i, listid, zoneid;

		BUILD_BUG_ON(MAX_ZONELISTS > 2);
		for (i = 0; i < MAX_ZONELISTS * MAX_NR_ZONES; i++) {

			/* Identify the zone and nodelist */
			zoneid = i % MAX_NR_ZONES;
			listid = i / MAX_NR_ZONES;
			zonelist = &pgdat->node_zonelists[listid];
			zone = &pgdat->node_zones[zoneid];
			if (!populated_zone(zone))
				continue;

			/* Print information about the zonelist */
			printk(KERN_DEBUG "mminit::zonelist %s %d:%s = ",
				listid > 0 ? "thisnode" : "general", nid,
				zone->name);

			/* Iterate the zonelist */
			for_each_zone_zonelist(zone, z, zonelist, zoneid)
				pr_cont("%d:%s ", zone_to_nid(zone), zone->name);
			pr_cont("\n");
		}
	}
}

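/*
 * Report how page->flags is divided between the section, node, zone,
 * last_cpupid, KASAN tag and LRU gen/tier fields, then sanity-check the
 * section/node/zone shifts and verify their masks do not overlap.
 */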
void __init mminit_verify_pageflags_layout(void)
{
	int shift, width;
	unsigned long or_mask, add_mask;

	shift = 8 * sizeof(unsigned long);
	width = shift - SECTIONS_WIDTH - NODES_WIDTH - ZONES_WIDTH
		- LAST_CPUPID_SHIFT - KASAN_TAG_WIDTH - LRU_GEN_WIDTH - LRU_REFS_WIDTH;
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_widths",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d Gen %d Tier %d Flags %d\n",
		SECTIONS_WIDTH,
		NODES_WIDTH,
		ZONES_WIDTH,
		LAST_CPUPID_WIDTH,
		KASAN_TAG_WIDTH,
		LRU_GEN_WIDTH,
		LRU_REFS_WIDTH,
		NR_PAGEFLAGS);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_shifts",
		"Section %d Node %d Zone %d Lastcpupid %d Kasantag %d\n",
		SECTIONS_SHIFT,
		NODES_SHIFT,
		ZONES_SHIFT,
		LAST_CPUPID_SHIFT,
		KASAN_TAG_WIDTH);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_pgshifts",
		"Section %lu Node %lu Zone %lu Lastcpupid %lu Kasantag %lu\n",
		(unsigned long)SECTIONS_PGSHIFT,
		(unsigned long)NODES_PGSHIFT,
		(unsigned long)ZONES_PGSHIFT,
		(unsigned long)LAST_CPUPID_PGSHIFT,
		(unsigned long)KASAN_TAG_PGSHIFT);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodezoneid",
		"Node/Zone ID: %lu -> %lu\n",
		(unsigned long)(ZONEID_PGOFF + ZONEID_SHIFT),
		(unsigned long)ZONEID_PGOFF);
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_usage",
		"location: %d -> %d layout %d -> %d unused %d -> %d page-flags\n",
		shift, width, width, NR_PAGEFLAGS, NR_PAGEFLAGS, 0);
#ifdef NODE_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Node not in page flags");
#endif
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
	mminit_dprintk(MMINIT_TRACE, "pageflags_layout_nodeflags",
		"Last cpupid not in page flags");
#endif

	if (SECTIONS_WIDTH) {
		shift -= SECTIONS_WIDTH;
		BUG_ON(shift != SECTIONS_PGSHIFT);
	}
	if (NODES_WIDTH) {
		shift -= NODES_WIDTH;
		BUG_ON(shift != NODES_PGSHIFT);
	}
	if (ZONES_WIDTH) {
		shift -= ZONES_WIDTH;
		BUG_ON(shift != ZONES_PGSHIFT);
	}

	/* Check for bitmask overlaps */
	or_mask = (ZONES_MASK << ZONES_PGSHIFT) |
			(NODES_MASK << NODES_PGSHIFT) |
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	add_mask = (ZONES_MASK << ZONES_PGSHIFT) +
			(NODES_MASK << NODES_PGSHIFT) +
			(SECTIONS_MASK << SECTIONS_PGSHIFT);
	BUG_ON(or_mask != add_mask);
}

static __init int set_mminit_loglevel(char *str)
{
	get_option(&str, &mminit_loglevel);
	return 0;
}
early_param("mminit_loglevel", set_mminit_loglevel);
#endif /* CONFIG_DEBUG_MEMORY_INIT */

struct kobject *mm_kobj;
EXPORT_SYMBOL_GPL(mm_kobj);

#ifdef CONFIG_SMP
s32 vm_committed_as_batch = 32;

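/*
 * vm_committed_as_batch is the percpu_counter batch used for the
 * vm_committed_as counter; scale it with the number of CPUs and the
 * amount of memory so small systems stay accurate and large ones avoid
 * contention.
 */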
void mm_compute_batch(int overcommit_policy)
{
	u64 memsized_batch;
	s32 nr = num_present_cpus();
	s32 batch = max_t(s32, nr*2, 32);
	unsigned long ram_pages = totalram_pages();

	/*
	 * For policy OVERCOMMIT_NEVER, set batch size to 0.4% of
	 * (total memory/#cpus), and lift it to 25% for other policies
	 * to ease the possible lock contention for percpu_counter
	 * vm_committed_as, while the max limit is INT_MAX.
	 */
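	/*
	 * Worked example with illustrative figures: 16 CPUs and 64GiB of
	 * 4KiB pages is 16M pages, so OVERCOMMIT_NEVER yields
	 * 16M/16/256 = 4096 pages per batch and the other policies yield
	 * 16M/16/4 = 262144 pages; the result is never smaller than
	 * max(2 * num_present_cpus(), 32).
	 */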
	if (overcommit_policy == OVERCOMMIT_NEVER)
		memsized_batch = min_t(u64, ram_pages/nr/256, INT_MAX);
	else
		memsized_batch = min_t(u64, ram_pages/nr/4, INT_MAX);

	vm_committed_as_batch = max_t(s32, memsized_batch, batch);
}

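/*
 * Recompute the batch when memory is onlined or offlined, since
 * totalram_pages() has changed.
 */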
static int __meminit mm_compute_batch_notifier(struct notifier_block *self,
					unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		mm_compute_batch(sysctl_overcommit_memory);
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static int __init mm_compute_batch_init(void)
{
	mm_compute_batch(sysctl_overcommit_memory);
	hotplug_memory_notifier(mm_compute_batch_notifier, MM_COMPUTE_BATCH_PRI);
	return 0;
}

__initcall(mm_compute_batch_init);

#endif

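/*
 * Create the /sys/kernel/mm kobject that other mm sysfs attributes are
 * added under.
 */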
static int __init mm_sysfs_init(void)
{
	mm_kobj = kobject_create_and_add("mm", kernel_kobj);
	if (!mm_kobj)
		return -ENOMEM;

	return 0;
}
postcore_initcall(mm_sysfs_init);