// SPDX-License-Identifier: GPL-2.0
/*
 * S390 version
 * Copyright IBM Corp. 1999
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 * Derived from "arch/i386/mm/init.c"
 * Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

bool initmem_freed;

static void __init setup_zero_pages(void)
{
        unsigned int order;
        struct page *page;
        int i;

        /* Latest machines require a mapping granularity of 512KB */
        order = 7;

        /* Limit number of empty zero pages for small memory sizes */
        while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
                order--;

        empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
        if (!empty_zero_page)
                panic("Out of memory in setup_zero_pages");

        page = virt_to_page((void *) empty_zero_page);
        split_page(page, order);
        for (i = 1 << order; i > 0; i--) {
                mark_page_reserved(page);
                page++;
        }

        /* Page-aligned mask covering the whole replicated zero page area */
        zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
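
/*
 * Illustrative sketch, not part of the original file: this is how the
 * replicated zero pages above are meant to be consumed. The s390
 * ZERO_PAGE() macro folds a virtual address into the zero page area via
 * zero_page_mask, so read faults on different cache colors hit
 * different physical zero pages. The helper name below is hypothetical.
 */
static inline unsigned long zero_page_for(unsigned long vaddr)
{
        return empty_zero_page + (vaddr & zero_page_mask);
}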

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        unsigned long pgd_type, asce_bits;
        psw_t psw;

        init_mm.pgd = swapper_pg_dir;
        if (VMALLOC_END > _REGION2_SIZE) {
                asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION2_ENTRY_EMPTY;
        } else {
                asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
                pgd_type = _REGION3_ENTRY_EMPTY;
        }
        init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
        S390_lowcore.kernel_asce = init_mm.context.asce;
        S390_lowcore.user_asce = S390_lowcore.kernel_asce;
        crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
        vmem_map_init();
        kasan_copy_shadow(init_mm.pgd);

        /* enable virtual mapping in kernel mode */
        /* CR1: primary, CR7: secondary, CR13: home space ASCE */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
        __ctl_load(S390_lowcore.kernel_asce, 13, 13);
        psw.mask = __extract_psw();
        psw_bits(psw).dat = 1;
        psw_bits(psw).as = PSW_BITS_AS_HOME;
        __load_psw_mask(psw.mask);
        kasan_free_early_identity();

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
        free_area_init_nodes(max_zone_pfns);
}
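
/*
 * Illustrative sketch, not part of the original file: the address space
 * control element (ASCE) built in paging_init() packs the physical
 * origin of the top-level region table together with its designation
 * type and table-length bits in the low 12 bits, which is why the
 * origin must be page aligned. Hypothetical helper restating the same
 * composition:
 */
static inline unsigned long make_asce(unsigned long table_origin,
                                      unsigned long type_bits)
{
        /* table origin in the high bits, control bits in the low 12 bits */
        return (table_origin & PAGE_MASK) | type_bits | _ASCE_TABLE_LENGTH;
}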

void mark_rodata_ro(void)
{
        unsigned long size = __end_ro_after_init - __start_ro_after_init;

        set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
        pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
        int i;

        /* make specified pages unshared (swiotlb, dma_free) */
        for (i = 0; i < numpages; ++i) {
                uv_remove_shared(addr);
                addr += PAGE_SIZE;
        }
        return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
        int i;

        /* make specified pages shared (swiotlb, dma_alloc) */
        for (i = 0; i < numpages; ++i) {
                uv_set_shared(addr);
                addr += PAGE_SIZE;
        }
        return 0;
}
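
/*
 * Illustrative sketch, not part of the original file: how a buffer is
 * typically cycled through the two helpers above under protected
 * virtualization. Memory handed to the host (e.g. swiotlb bounce
 * buffers) must be shared first and unshared before the guest reuses
 * it. The function name is hypothetical.
 */
static int pv_share_buffer_cycle(unsigned long vaddr, int numpages)
{
        int rc;

        rc = set_memory_decrypted(vaddr, numpages);     /* share with host */
        if (rc)
                return rc;
        /* ... the device may now DMA into the buffer ... */
        return set_memory_encrypted(vaddr, numpages);   /* unshare again */
}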

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
        return is_prot_virt_guest();
}

/* protected virtualization */
static void pv_init(void)
{
        if (!is_prot_virt_guest())
                return;

        /* make sure bounce buffers are shared */
        swiotlb_init(1);
        swiotlb_update_mem_attributes();
        /* force all DMA through the shared bounce buffers */
        swiotlb_force = SWIOTLB_FORCE;
}

void __init mem_init(void)
{
        cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
        cpumask_set_cpu(0, mm_cpumask(&init_mm));

        set_max_mapnr(max_low_pfn);
        high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

        pv_init();

        /* Setup guest page hinting */
        cmma_init();

        /* this will put all low memory onto the freelists */
        memblock_free_all();
        setup_zero_pages();     /* Setup zeroed pages. */

        cmma_init_nodat();

        mem_init_print_info(NULL);
}

void free_initmem(void)
{
        initmem_freed = true;
        __set_memory((unsigned long)_sinittext,
                     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
                     SET_MEMORY_RW | SET_MEMORY_NX);
        free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
        /*
         * Make sure the memory block size is always greater than or
         * equal to the memory increment size.
         */
        return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
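
/*
 * Worked example, not part of the original file: if SCLP reports a
 * storage increment size (sclp.rzm) of 512MB while the generic
 * MIN_MEMORY_BLOCK_SIZE is 256MB, memory blocks are sized at 512MB, so
 * no hotplug memory block is smaller than one storage increment.
 */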

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
        unsigned long start;
        unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
        struct s390_cma_mem_data *mem_data;
        unsigned long start, end;

        mem_data = data;
        start = cma_get_base(cma);
        end = start + cma_get_size(cma);
        if (end < mem_data->start)
                return 0;
        if (start >= mem_data->end)
                return 0;
        return -EBUSY;
}
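
/*
 * Illustrative sketch, not part of the original file: the two early
 * returns above are essentially the textbook interval overlap test for
 * the CMA area [start, end) against the range going offline. The
 * hypothetical helper below shows the standard predicate; the check
 * above is marginally more conservative, also treating a CMA area that
 * ends exactly at mem_data->start as busy.
 */
static inline bool ranges_overlap(unsigned long a_start, unsigned long a_end,
                                  unsigned long b_start, unsigned long b_end)
{
        return a_start < b_end && b_start < a_end;
}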

static int s390_cma_mem_notifier(struct notifier_block *nb,
                                 unsigned long action, void *data)
{
        struct s390_cma_mem_data mem_data;
        struct memory_notify *arg;
        int rc = 0;

        arg = data;
        mem_data.start = arg->start_pfn << PAGE_SHIFT;
        mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
        if (action == MEM_GOING_OFFLINE)
                rc = cma_for_each_area(s390_cma_check_range, &mem_data);
        return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
        .notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
        return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

int arch_add_memory(int nid, u64 start, u64 size,
                    struct mhp_restrictions *restrictions)
{
        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long size_pages = PFN_DOWN(size);
        int rc;

        if (WARN_ON_ONCE(restrictions->altmap))
                return -EINVAL;

        rc = vmem_add_mapping(start, size);
        if (rc)
                return rc;

        rc = __add_pages(nid, start_pfn, size_pages, restrictions);
        if (rc)
                vmem_remove_mapping(start, size);
        return rc;
}
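
/*
 * Note, not part of the original file: arch_add_memory() follows the
 * usual two-step pattern, first creating the kernel mapping with
 * vmem_add_mapping() and only then handing the pages to the core mm via
 * __add_pages(); on failure the mapping is torn down again, keeping the
 * operation all-or-nothing.
 */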

void arch_remove_memory(int nid, u64 start, u64 size,
                        struct vmem_altmap *altmap)
{
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        struct zone *zone;

        zone = page_zone(pfn_to_page(start_pfn));
        __remove_pages(zone, start_pfn, nr_pages, altmap);
        vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */