arch/s390/mm/init.c (v6.13.7)

// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/ctlreg.h>
#include <asm/kfence.h>
#include <asm/dma.h>
#include <asm/abs_lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio_config.h>
#include <linux/execmem.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

struct ctlreg __bootdata_preserved(s390_invalid_asce);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

static void __init setup_zero_pages(void)
{
	unsigned long total_pages = memblock_estimated_nr_free_pages();
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (total_pages >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}
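
The mask computed in the last line selects one of the 2^order zero pages by the faulting virtual address, so zero-page read faults are spread across the whole 512 KB region. A minimal sketch of how the two exported variables are consumed, modeled on s390's ZERO_PAGE() macro in arch/s390/include/asm/pgtable.h (the helper name here is ours, for illustration):

/* Illustrative: pick the colored zero page for a given user address. */
static inline struct page *pick_zero_page(unsigned long vaddr)
{
	return virt_to_page((void *)(empty_zero_page +
				     (vaddr & zero_page_mask)));
}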

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	vmem_map_init();
	sparse_init();
	zone_dma_limit = DMA_BIT_MASK(31);
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = virt_to_pfn(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}
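
For orientation: DMA_BIT_MASK(31) is 0x7fffffff, so ZONE_DMA here covers the page frames below the 2 GB line (31-bit addressable memory) and ZONE_NORMAL takes everything up to max_low_pfn. A small arithmetic sketch, assuming 4 KB pages:

/* Assumes PAGE_SHIFT == 12; shows the PFN bound implied by DMA_BIT_MASK(31). */
unsigned long long dma_limit = (1ULL << 31) - 1;	/* 0x7fffffff */
unsigned long dma_pfn_bound = (dma_limit + 1) >> 12;	/* 0x80000 page frames */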

void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	if (MACHINE_HAS_NX)
		system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT);
	__set_memory_ro(__start_ro_after_init, __end_ro_after_init);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

int set_memory_encrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}
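
These two hooks are how a protected virtualization guest hands memory to the untrusted host: the DMA layers (swiotlb bounce buffers, dma-direct allocations) call set_memory_decrypted() before a buffer may be seen by the host and set_memory_encrypted() when taking it back. A hedged usage sketch; the surrounding allocation flow is illustrative, not taken from this file:

/* Illustrative flow: share one page with the host, then make it private again. */
unsigned long buf = __get_free_page(GFP_KERNEL | __GFP_ZERO);

if (!buf)
	return -ENOMEM;
set_memory_decrypted(buf, 1);	/* uv_set_shared() on the backing page */
/* ... the host/device may now access the buffer ... */
set_memory_encrypted(buf, 1);	/* uv_remove_shared(): guest-private again */
free_page(buf);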

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);

	/* make sure bounce buffers are shared */
	swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
	swiotlb_update_mem_attributes();
}

void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater
	 * than or equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
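
A quick worked example of the clamp above, with hypothetical sizes: if the SCLP-reported storage increment (sclp.rzm) is 512 MB and the memory-block floor (MIN_MEMORY_BLOCK_SIZE) is 256 MB, hotplug memory blocks come out at 512 MB; were the increment only 128 MB, the 256 MB floor would win:

/* Hypothetical values, only to show the max_t() behavior. */
unsigned long min_block = 256UL << 20;	/* stand-in for MIN_MEMORY_BLOCK_SIZE */
unsigned long rzm = 512UL << 20;	/* stand-in for sclp.rzm */
unsigned long block_size = max(min_block, rzm);	/* -> 512 MB */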

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return LOCAL_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return 0;
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}
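
The per-CPU offsets stored here are what generic code later adds to a static percpu symbol's link-time address to reach a given CPU's copy; per_cpu_ptr() reduces to exactly this addition. A simplified sketch (the macro name is ours, not the kernel's):

/* Simplified form of the pointer arithmetic behind per_cpu_ptr(). */
#define my_per_cpu_ptr(ptr, cpu) \
	((typeof(ptr))((unsigned long)(ptr) + __per_cpu_offset[cpu]))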

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}
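
The two early returns implement a half-open interval overlap test: the callback reports -EBUSY when the CMA area [start, end) intersects the memory block [mem_data->start, mem_data->end). Written as a single predicate it would look like the sketch below; note the original is slightly more conservative, also refusing the touching case end == mem_data->start:

/* Nearly equivalent formulation of the overlap check, for illustration only. */
static bool ranges_overlap(unsigned long a_start, unsigned long a_end,
			   unsigned long b_start, unsigned long b_end)
{
	return a_start < b_end && b_start < a_end;
}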

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_EXECMEM
static struct execmem_info execmem_info __ro_after_init;

struct execmem_info __init *execmem_arch_setup(void)
{
	unsigned long module_load_offset = 0;
	unsigned long start;

	if (kaslr_enabled())
		module_load_offset = get_random_u32_inclusive(1, 1024) * PAGE_SIZE;

	start = MODULES_VADDR + module_load_offset;

	execmem_info = (struct execmem_info){
		.ranges = {
			[EXECMEM_DEFAULT] = {
				.flags	= EXECMEM_KASAN_SHADOW,
				.start	= start,
				.end	= MODULES_END,
				.pgprot	= PAGE_KERNEL,
				.alignment = MODULE_ALIGN,
			},
		},
	};

	return &execmem_info;
}
#endif /* CONFIG_EXECMEM */
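
With KASLR active, the module area start is shifted by a uniformly random whole number of pages from 1 to 1024, i.e. about 10 bits of entropy. Assuming 4 KB pages, that is an offset between 4 KB and 4 MB:

/* Assumes PAGE_SIZE == 4096; range of module_load_offset above. */
unsigned long min_off = 1UL * 4096;	/* 4 KB */
unsigned long max_off = 1024UL * 4096;	/* 4 MB, 1024 possible slots */
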
arch/s390/mm/init.c (v5.14.15)

// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_config.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
static pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

unsigned long s390_invalid_asce;

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

bool initmem_freed;

static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	s390_invalid_asce  = (unsigned long)invalid_pg_dir;
	s390_invalid_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = s390_invalid_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow_mapping();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.user_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
	zone_dma_bits = 31;
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}
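
Unlike the v6.13.7 version, this older paging_init() still builds and loads the address-space-control elements (ASCEs) by hand: an ASCE packs the physical origin of the top-level region table together with designation-type and table-length bits, and __ctl_load() installs it into control registers CR1 (primary space), CR7 (secondary/user space) and CR13 (home space) before DAT is switched on in the PSW. A schematic sketch of the composition done for init_mm.context.asce above:

/* Schematic only: how the ASCE word is assembled above. */
static unsigned long make_asce(unsigned long pgd_pa, unsigned long asce_bits)
{
	return (pgd_pa & PAGE_MASK) | asce_bits;	/* origin | type | length */
}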

void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
	debug_checkwx();
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

#ifdef CONFIG_ARCH_HAS_RESTRICTED_VIRTIO_MEMORY_ACCESS

int arch_has_restricted_virtio_memory_access(void)
{
	return is_prot_virt_guest();
}
EXPORT_SYMBOL(arch_has_restricted_virtio_memory_access);

#endif

/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_force = SWIOTLB_FORCE;
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
}

void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */

	cmma_init_nodat();
}

void free_initmem(void)
{
	initmem_freed = true;
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater
	 * than or equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */