// SPDX-License-Identifier: GPL-2.0
/*
 * Bootmem core functions.
 *
 * Copyright (c) 2020, Bytedance.
 *
 * Author: Muchun Song <songmuchun@bytedance.com>
 *
 */
#include <linux/mm.h>
#include <linux/compiler.h>
#include <linux/memblock.h>
#include <linux/bootmem_info.h>
#include <linux/memory_hotplug.h>
#include <linux/kmemleak.h>

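/**
 * get_page_bootmem - mark a boot-time allocated page as bootmem metadata
 * @info: payload to record (e.g. a section number or node id)
 * @page: page backing the early allocation
 * @type: what the page backs (see enum bootmem_type)
 *
 * The type is packed into the low four bits of page->private and @info
 * into the remaining bits, so @type must fit in a nibble and @info in
 * BITS_PER_LONG - 4 bits. An extra page reference is taken so the page
 * stays around until put_page_bootmem() drops it.
 */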
void get_page_bootmem(unsigned long info, struct page *page,
                      enum bootmem_type type)
{
        BUG_ON(type > 0xf);
        BUG_ON(info > (ULONG_MAX >> 4));
        SetPagePrivate(page);
        set_page_private(page, info << 4 | type);
        page_ref_inc(page);
}

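/**
 * put_page_bootmem - drop a reference taken by get_page_bootmem()
 * @page: page previously marked as bootmem metadata
 *
 * When the last bootmem reference is dropped, the private data is
 * cleared and the reserved page is handed back to the buddy allocator
 * via free_reserved_page(). Code that tears down boot-time metadata
 * (e.g. on memory hot-remove) is expected to call this once for every
 * page that was registered with get_page_bootmem().
 */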
void put_page_bootmem(struct page *page)
{
        enum bootmem_type type = bootmem_type(page);

        BUG_ON(type < MEMORY_HOTPLUG_MIN_BOOTMEM_TYPE ||
               type > MEMORY_HOTPLUG_MAX_BOOTMEM_TYPE);

        if (page_ref_dec_return(page) == 1) {
                ClearPagePrivate(page);
                set_page_private(page, 0);
                INIT_LIST_HEAD(&page->lru);
                kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
                free_reserved_page(page);
        }
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
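/*
 * Classic sparsemem: the section memmap is allocated from directly
 * mapped memory, so virt_to_page() can be used to find the pages
 * backing it. Both the memmap pages and the mem_section_usage pages
 * are marked here.
 *
 * Rough size example (assuming a 64-byte struct page and 32768 pages
 * per section): the memmap spans 2 MiB, i.e. 512 marked pages, plus
 * typically a single page for the usage data.
 */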
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;
        struct mem_section_usage *usage;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        /* Get section's memmap address */
        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        /*
         * Get page for the memmap's phys address
         * XXX: need more consideration for sparse_vmemmap...
         */
        page = virt_to_page(memmap);
        mapsize = sizeof(struct page) * PAGES_PER_SECTION;
        mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;

        /* remember memmap's page */
        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, SECTION_INFO);

        usage = ms->usage;
        page = virt_to_page(usage);

        mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#else /* CONFIG_SPARSEMEM_VMEMMAP */
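/*
 * With a virtually mapped memmap the struct pages live in the vmemmap
 * area, so virt_to_page() cannot be used on them. Marking the pages
 * that back the memmap is therefore delegated to the architecture's
 * register_page_bootmem_memmap(), which walks the vmemmap page tables;
 * only the mem_section_usage pages are handled here directly.
 */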
static void __init register_page_bootmem_info_section(unsigned long start_pfn)
{
        unsigned long mapsize, section_nr, i;
        struct mem_section *ms;
        struct page *page, *memmap;
        struct mem_section_usage *usage;

        section_nr = pfn_to_section_nr(start_pfn);
        ms = __nr_to_section(section_nr);

        memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);

        register_page_bootmem_memmap(section_nr, memmap, PAGES_PER_SECTION);

        usage = ms->usage;
        page = virt_to_page(usage);

        mapsize = PAGE_ALIGN(mem_section_usage_size()) >> PAGE_SHIFT;

        for (i = 0; i < mapsize; i++, page++)
                get_page_bootmem(section_nr, page, MIX_SECTION_INFO);
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

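/**
 * register_page_bootmem_info_node - register bootmem info for one node
 * @pgdat: node whose boot-time metadata should be marked
 *
 * Marks the pages backing the pglist_data itself, then walks the node
 * section by section and marks each section's memmap and usage pages.
 * Architectures that support this (via CONFIG_HAVE_BOOTMEM_INFO_NODE)
 * are expected to call it for every online node early during boot, so
 * that memory hot-remove can later tell boot-time metadata pages apart
 * from ordinary ones.
 */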
void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
{
        unsigned long i, pfn, end_pfn, nr_pages;
        int node = pgdat->node_id;
        struct page *page;

        nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
        page = virt_to_page(pgdat);

        for (i = 0; i < nr_pages; i++, page++)
                get_page_bootmem(node, page, NODE_INFO);

        pfn = pgdat->node_start_pfn;
        end_pfn = pgdat_end_pfn(pgdat);

        /* register section info */
        for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                /*
                 * Some platforms can assign the same pfn to multiple nodes - on
                 * node0 as well as nodeN. To avoid registering a pfn against
                 * multiple nodes, we check that this pfn does not already
                 * reside in some other node.
                 */
                if (pfn_valid(pfn) && (early_pfn_to_nid(pfn) == node))
                        register_page_bootmem_info_section(pfn);
        }
}