v6.2 (arch/s390/mm/init.c)
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <linux/percpu.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/kfence.h>
#include <asm/ptdump.h>
#include <asm/dma.h>
#include <asm/abs_lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>
#include <linux/virtio_anchor.h>
#include <linux/virtio_config.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
static pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir");

unsigned long s390_invalid_asce;

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

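/*
 * Allocate a block of 2^order zeroed pages and reserve them; together
 * they back the shared empty zero page. zero_page_mask is used by
 * ZERO_PAGE() to pick one of the copies based on the faulting virtual
 * address, which spreads read accesses across cache colors.
 */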
static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the kernel page tables: it initializes the
 * invalid ASCE used as user_asce, builds the swapper page directory,
 * maps kernel memory via vmem_map_init(), enables DAT in home space
 * and tells the core mm about the zone layout.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	s390_invalid_asce  = (unsigned long)invalid_pg_dir;
	s390_invalid_asce |= _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	crst_table_init((unsigned long *)invalid_pg_dir, _REGION3_ENTRY_EMPTY);
	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = s390_invalid_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow_mapping();

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);	/* CR1: primary ASCE */
	__ctl_load(S390_lowcore.user_asce, 7, 7);	/* CR7: secondary ASCE */
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);	/* CR13: home ASCE */
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_init();
	zone_dma_bits = 31;	/* DMA addressing limited to 31 bits (2GB) */
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init(max_zone_pfns);
}

void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
	debug_checkwx();
}

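/*
 * In a protected virtualization guest, "decrypted" means shared with the
 * hypervisor and "encrypted" means unshared again; both helpers walk the
 * range in 4K steps and talk to the ultravisor for each page.
 */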
int set_memory_encrypted(unsigned long vaddr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long vaddr, int numpages)
{
	int i;
	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(virt_to_phys((void *)vaddr));
		vaddr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

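/*
 * All I/O of a protected guest has to go through swiotlb bounce buffers
 * that were explicitly shared with the hypervisor, hence the forced and
 * pre-shared swiotlb pool below.
 */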
/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);

	/* make sure bounce buffers are shared */
	swiotlb_init(true, SWIOTLB_FORCE | SWIOTLB_VERBOSE);
	swiotlb_update_mem_attributes();
}

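/*
 * Note the ordering below: memblock_free_all() hands memory to the buddy
 * allocator first, so that setup_zero_pages() can size its allocation
 * from totalram_pages().
 */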
void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();
	kfence_split_mapping();
	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */

	cmma_init_nodat();
}

void free_initmem(void)
{
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than
	 * or equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}

unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);

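/*
 * No NUMA topology is reported here: every CPU is LOCAL_DISTANCE from
 * every other and maps to node 0, so pcpu_embed_first_chunk() ends up
 * with a single flat CPU group.
 */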
static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
{
	return LOCAL_DISTANCE;
}

static int __init pcpu_cpu_to_node(int cpu)
{
	return 0;
}

void __init setup_per_cpu_areas(void)
{
	unsigned long delta;
	unsigned int cpu;
	int rc;

	/*
	 * Always reserve area for module percpu variables.  That's
	 * what the legacy allocator did.
	 */
	rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
				    PERCPU_DYNAMIC_RESERVE, PAGE_SIZE,
				    pcpu_cpu_distance,
				    pcpu_cpu_to_node);
	if (rc < 0)
		panic("Failed to initialize percpu areas.");

	delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
	for_each_possible_cpu(cpu)
		__per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
}

#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

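/*
 * Returns -EBUSY if the CMA area overlaps the [start, end) range that is
 * about to go offline, 0 otherwise; cma_for_each_area() stops on the
 * first non-zero return.
 */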
static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

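/*
 * Memory hotplug: the range first gets a kernel mapping via
 * vmem_add_mapping(), then struct pages via __add_pages(); on failure
 * the mapping is removed again. altmap-backed requests and page
 * protections other than PAGE_KERNEL are rejected on s390.
 */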
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(params->altmap))
		return -EINVAL;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	VM_BUG_ON(!mhp_range_allowed(start, size, true));
	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, params);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */
v5.4 (arch/s390/mm/init.c)
// SPDX-License-Identifier: GPL-2.0
/*
 *  S390 version
 *    Copyright IBM Corp. 1999
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *
 *  Derived from "arch/i386/mm/init.c"
 *    Copyright (C) 1995  Linus Torvalds
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swiotlb.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/initrd.h>
#include <linux/export.h>
#include <linux/cma.h>
#include <linux/gfp.h>
#include <linux/dma-direct.h>
#include <asm/processor.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/lowcore.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/ctl_reg.h>
#include <asm/sclp.h>
#include <asm/set_memory.h>
#include <asm/kasan.h>
#include <asm/dma-mapping.h>
#include <asm/uv.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(.bss..swapper_pg_dir);

unsigned long empty_zero_page, zero_page_mask;
EXPORT_SYMBOL(empty_zero_page);
EXPORT_SYMBOL(zero_page_mask);

bool initmem_freed;

static void __init setup_zero_pages(void)
{
	unsigned int order;
	struct page *page;
	int i;

	/* Latest machines require a mapping granularity of 512KB */
	order = 7;

	/* Limit number of empty zero pages for small memory sizes */
	while (order > 2 && (totalram_pages() >> 10) < (1UL << order))
		order--;

	empty_zero_page = __get_free_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!empty_zero_page)
		panic("Out of memory in setup_zero_pages");

	page = virt_to_page((void *) empty_zero_page);
	split_page(page, order);
	for (i = 1 << order; i > 0; i--) {
		mark_page_reserved(page);
		page++;
	}

	zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
}

/*
 * paging_init() sets up the page tables
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long pgd_type, asce_bits;
	psw_t psw;

	init_mm.pgd = swapper_pg_dir;
	if (VMALLOC_END > _REGION2_SIZE) {
		asce_bits = _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION2_ENTRY_EMPTY;
	} else {
		asce_bits = _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
		pgd_type = _REGION3_ENTRY_EMPTY;
	}
	init_mm.context.asce = (__pa(init_mm.pgd) & PAGE_MASK) | asce_bits;
	S390_lowcore.kernel_asce = init_mm.context.asce;
	S390_lowcore.user_asce = S390_lowcore.kernel_asce;
	crst_table_init((unsigned long *) init_mm.pgd, pgd_type);
	vmem_map_init();
	kasan_copy_shadow(init_mm.pgd);

	/* enable virtual mapping in kernel mode */
	__ctl_load(S390_lowcore.kernel_asce, 1, 1);
	__ctl_load(S390_lowcore.kernel_asce, 7, 7);
	__ctl_load(S390_lowcore.kernel_asce, 13, 13);
	psw.mask = __extract_psw();
	psw_bits(psw).dat = 1;
	psw_bits(psw).as = PSW_BITS_AS_HOME;
	__load_psw_mask(psw.mask);
	kasan_free_early_identity();

	sparse_memory_present_with_active_regions(MAX_NUMNODES);
	sparse_init();
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS);
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}

void mark_rodata_ro(void)
{
	unsigned long size = __end_ro_after_init - __start_ro_after_init;

	set_memory_ro((unsigned long)__start_ro_after_init, size >> PAGE_SHIFT);
	pr_info("Write protected read-only-after-init data: %luk\n", size >> 10);
}

int set_memory_encrypted(unsigned long addr, int numpages)
{
	int i;

	/* make specified pages unshared (swiotlb, dma_free) */
	for (i = 0; i < numpages; ++i) {
		uv_remove_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

int set_memory_decrypted(unsigned long addr, int numpages)
{
	int i;
	/* make specified pages shared (swiotlb, dma_alloc) */
	for (i = 0; i < numpages; ++i) {
		uv_set_shared(addr);
		addr += PAGE_SIZE;
	}
	return 0;
}

/* are we a protected virtualization guest? */
bool force_dma_unencrypted(struct device *dev)
{
	return is_prot_virt_guest();
}

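/*
 * Older swiotlb API than in the v6.2 copy above: the pool is created
 * with swiotlb_init(1), shared with the ultravisor, and then forced on
 * via the swiotlb_force global.
 */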
/* protected virtualization */
static void pv_init(void)
{
	if (!is_prot_virt_guest())
		return;

	/* make sure bounce buffers are shared */
	swiotlb_init(1);
	swiotlb_update_mem_attributes();
	swiotlb_force = SWIOTLB_FORCE;
}

void __init mem_init(void)
{
	cpumask_set_cpu(0, &init_mm.context.cpu_attach_mask);
	cpumask_set_cpu(0, mm_cpumask(&init_mm));

	set_max_mapnr(max_low_pfn);
	high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

	pv_init();

	/* Setup guest page hinting */
	cmma_init();

	/* this will put all low memory onto the freelists */
	memblock_free_all();
	setup_zero_pages();	/* Setup zeroed pages. */

	cmma_init_nodat();

	mem_init_print_info(NULL);
}

void free_initmem(void)
{
	initmem_freed = true;
	__set_memory((unsigned long)_sinittext,
		     (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT,
		     SET_MEMORY_RW | SET_MEMORY_NX);
	free_initmem_default(POISON_FREE_INITMEM);
}

unsigned long memory_block_size_bytes(void)
{
	/*
	 * Make sure the memory block size is always greater than
	 * or equal to the memory increment size.
	 */
	return max_t(unsigned long, MIN_MEMORY_BLOCK_SIZE, sclp.rzm);
}
#ifdef CONFIG_MEMORY_HOTPLUG

#ifdef CONFIG_CMA

/* Prevent memory blocks which contain cma regions from going offline */

struct s390_cma_mem_data {
	unsigned long start;
	unsigned long end;
};

static int s390_cma_check_range(struct cma *cma, void *data)
{
	struct s390_cma_mem_data *mem_data;
	unsigned long start, end;

	mem_data = data;
	start = cma_get_base(cma);
	end = start + cma_get_size(cma);
	if (end < mem_data->start)
		return 0;
	if (start >= mem_data->end)
		return 0;
	return -EBUSY;
}

static int s390_cma_mem_notifier(struct notifier_block *nb,
				 unsigned long action, void *data)
{
	struct s390_cma_mem_data mem_data;
	struct memory_notify *arg;
	int rc = 0;

	arg = data;
	mem_data.start = arg->start_pfn << PAGE_SHIFT;
	mem_data.end = mem_data.start + (arg->nr_pages << PAGE_SHIFT);
	if (action == MEM_GOING_OFFLINE)
		rc = cma_for_each_area(s390_cma_check_range, &mem_data);
	return notifier_from_errno(rc);
}

static struct notifier_block s390_cma_mem_nb = {
	.notifier_call = s390_cma_mem_notifier,
};

static int __init s390_cma_mem_init(void)
{
	return register_memory_notifier(&s390_cma_mem_nb);
}
device_initcall(s390_cma_mem_init);

#endif /* CONFIG_CMA */

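/*
 * Pre-mhp_params hotplug interface: callers pass struct mhp_restrictions,
 * and only the altmap restriction is checked here; the pgprot check seen
 * in the v6.2 copy above does not exist yet.
 */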
int arch_add_memory(int nid, u64 start, u64 size,
		struct mhp_restrictions *restrictions)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long size_pages = PFN_DOWN(size);
	int rc;

	if (WARN_ON_ONCE(restrictions->altmap))
		return -EINVAL;

	rc = vmem_add_mapping(start, size);
	if (rc)
		return rc;

	rc = __add_pages(nid, start_pfn, size_pages, restrictions);
	if (rc)
		vmem_remove_mapping(start, size);
	return rc;
}

void arch_remove_memory(int nid, u64 start, u64 size,
			struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	zone = page_zone(pfn_to_page(start_pfn));
	__remove_pages(zone, start_pfn, nr_pages, altmap);
	vmem_remove_mapping(start, size);
}
#endif /* CONFIG_MEMORY_HOTPLUG */