// SPDX-License-Identifier: GPL-2.0
/*
 * S390 version
 *   Copyright IBM Corp. 1999
 *   Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 * Derived from "arch/i386/mm/init.c"
 *   Copyright (C) 1995 Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/ctlreg.h>
#include <asm/kfence.h>
#include <asm/dma.h>
#include <asm/abs_lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio_config.h>
#include <linux/execmem.h>

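/*
 * swapper_pg_dir is the kernel's top-level page table. invalid_pg_dir
 * contains no valid entries; it backs s390_invalid_asce (set up during
 * early boot), which is loaded whenever no address space should be
 * reachable, so that any stray access faults immediately.
 */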
pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

struct ctlreg __bootdata_preserved(s390_invalid_asce);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

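/*
 * Allocate and reserve a contiguous block of 2^order zero pages.
 * ZERO_PAGE(vaddr) selects one of them by masking the virtual address
 * with zero_page_mask (in asm/pgtable.h, roughly:
 * virt_to_page(empty_zero_page + ((unsigned long)(vaddr) & zero_page_mask))),
 * so heavy read access to the zero page is spread across several
 * physical pages instead of all hitting the same one.
 */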
static void __init setup_zero_pages(void)
{
	unsigned long total_pages = memblock_estimated_nr_free_pages();
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (total_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the kernel page tables and initializes the
 * memory zones
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	vmem_map_init();
	sparse_init();
	zone_dma_limit = DMA_BIT_MASK(31);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

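/*
 * Called by the core kernel once all __ro_after_init data has been
 * written. If the machine has the NX facility, instruction-execution
 * protection is enabled in control register 0 at the same time.
 */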
void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	if (MACHINE_HAS_NX)
		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
	__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

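/*
 * Under protected virtualization guest memory is unshared ("encrypted")
 * by default; pages used for I/O must be explicitly shared with the
 * hypervisor. Encryption state is therefore toggled one 4K page at a
 * time via Ultravisor calls.
 */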
int set_memory_encrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make the specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make the specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

/* set up protected virtualization: force I/O through shared bounce buffers */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);

	/* make sure bounce buffers are shared */
	swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
	swiotlb_update_mem_attributes();
}

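/*
 * Release all memblock-managed memory to the buddy allocator, then
 * carve the zero pages out of the freshly populated freelists.
 * pv_init() runs before memblock_free_all() so the swiotlb pool can
 * still be allocated from memblock.
 */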
void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than or
	 * equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

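/*
 * s390 is not NUMA: every CPU is at LOCAL_DISTANCE from every other and
 * all per-cpu units map to node 0, so the embed first-chunk allocator
 * only needs these trivial callbacks.
 */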
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return LOCAL_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return 0;
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables. That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain CMA regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

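/*
 * Report -EBUSY if the CMA area [start, end) intersects the range that
 * is about to go offline; cma_for_each_area() stops at the first
 * non-zero return value.
 */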
static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

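/*
 * Adding memory is a two-step operation: create the direct mapping for
 * the new range first, then hand the pages to the core mm. If the
 * second step fails, the mapping is torn down again.
 */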
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

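/*
 * Place module/executable allocations at a KASLR offset of 1..1024
 * pages into the modules area. EXECMEM_KASAN_SHADOW asks the allocator
 * to populate KASAN shadow for the range when KASAN is enabled.
 */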
struct execmem_info __init *execmem_arch_setup(void)
{
	unsigned long module_load_offset = 0;
	unsigned long start;

	if (kaslr_enabled())
		module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;

	start = MODULES_VADDR + module_load_offset;

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.flags = EXECMEM_KASAN_SHADOW,
				.start = start,
				.end = MODULES_END,
				.pgprot = PAGE_KERNEL,
				.alignment = MODULE_ALIGN,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */