// SPDX-License-Identifier: GPL-2.0
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/bootmem.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>

#include <asm/dma.h>
#include <asm/io.h>
#include <asm/machvec.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

#ifdef CONFIG_VIRTUAL_MEM_MAP
unsigned long VMALLOC_END = VMALLOC_END_INIT;
EXPORT_SYMBOL(VMALLOC_END);
struct page *vmem_map;
EXPORT_SYMBOL(vmem_map);
#endif

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + (PAGE_SIZE << compound_order(page)));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void
dma_mark_clean(void *addr, size_t size)
{
	unsigned long pg_addr, end;

	pg_addr = PAGE_ALIGN((unsigned long) addr);
	end = (unsigned long) addr + size;
	while (pg_addr + PAGE_SIZE <= end) {
		struct page *page = virt_to_page(pg_addr);
		set_bit(PG_arch_1, &page->flags);
		pg_addr += PAGE_SIZE;
	}
}

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
	if (vma) {
		INIT_LIST_HEAD(&vma->anon_vma_chain);
		vma->vm_mm = current->mm;
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		down_write(&current->mm->mmap_sem);
		if (insert_vm_struct(current->mm, vma)) {
			up_write(&current->mm->mmap_sem);
			kmem_cache_free(vm_area_cachep, vma);
			return;
		}
		up_write(&current->mm->mmap_sem);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
		if (vma) {
			INIT_LIST_HEAD(&vma->anon_vma_chain);
			vma->vm_mm = current->mm;
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			down_write(&current->mm->mmap_sem);
			if (insert_vm_struct(current->mm, vma)) {
				up_write(&current->mm->mmap_sem);
				kmem_cache_free(vm_area_cachep, vma);
				return;
			}
			up_write(&current->mm->mmap_sem);
		}
	}
}

void
free_initmem (void)
{
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */

	{
		pud = pud_alloc(&init_mm, pgd, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only page to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	gate_vma.vm_mm = NULL;
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __P101;

	return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");


	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);
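
	/*
	 * Worked example of the value written above (editor's sketch, assuming
	 * PAGE_SHIFT = 14 and impl_va_bits = 51, so vmlpt_bits = 40):
	 *
	 *	pta                            = 2^61 - 2^40 = 0x1fffff0000000000
	 *	pta | (0 << 8) | (40 << 2) | 1 = 0x1fffff00000000a1
	 *
	 * i.e. a short-format table of 2^40 bytes placed at the top of the
	 * region, with the hardware walker enabled.
	 */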

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

#ifdef CONFIG_VIRTUAL_MEM_MAP
int vmemmap_find_next_valid_pfn(int node, int i)
{
	unsigned long end_address, hole_next_pfn;
	unsigned long stop_address;
	pg_data_t *pgdat = NODE_DATA(node);

	end_address = (unsigned long) &vmem_map[pgdat->node_start_pfn + i];
	end_address = PAGE_ALIGN(end_address);
	stop_address = (unsigned long) &vmem_map[pgdat_end_pfn(pgdat)];

	do {
		pgd_t *pgd;
		pud_t *pud;
		pmd_t *pmd;
		pte_t *pte;

		pgd = pgd_offset_k(end_address);
		if (pgd_none(*pgd)) {
			end_address += PGDIR_SIZE;
			continue;
		}

		pud = pud_offset(pgd, end_address);
		if (pud_none(*pud)) {
			end_address += PUD_SIZE;
			continue;
		}

		pmd = pmd_offset(pud, end_address);
		if (pmd_none(*pmd)) {
			end_address += PMD_SIZE;
			continue;
		}

		pte = pte_offset_kernel(pmd, end_address);
retry_pte:
		if (pte_none(*pte)) {
			end_address += PAGE_SIZE;
			pte++;
			if ((end_address < stop_address) &&
			    (end_address != ALIGN(end_address, 1UL << PMD_SHIFT)))
				goto retry_pte;
			continue;
		}
		/* Found next valid vmem_map page */
		break;
	} while (end_address < stop_address);

	end_address = min(end_address, stop_address);
	end_address = end_address - (unsigned long) vmem_map + sizeof(struct page) - 1;
	hole_next_pfn = end_address / sizeof(struct page);
	return hole_next_pfn - pgdat->node_start_pfn;
}

int __init create_mem_map_page_table(u64 start, u64 end, void *arg)
{
	unsigned long address, start_page, end_page;
	struct page *map_start, *map_end;
	int node;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	start_page = (unsigned long) map_start & PAGE_MASK;
	end_page = PAGE_ALIGN((unsigned long) map_end);
	node = paddr_to_nid(__pa(start));

	for (address = start_page; address < end_page; address += PAGE_SIZE) {
		pgd = pgd_offset_k(address);
		if (pgd_none(*pgd))
			pgd_populate(&init_mm, pgd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pud = pud_offset(pgd, address);

		if (pud_none(*pud))
			pud_populate(&init_mm, pud, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pmd = pmd_offset(pud, address);

		if (pmd_none(*pmd))
			pmd_populate_kernel(&init_mm, pmd, alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE));
		pte = pte_offset_kernel(pmd, address);

		if (pte_none(*pte))
			set_pte(pte, pfn_pte(__pa(alloc_bootmem_pages_node(NODE_DATA(node), PAGE_SIZE)) >> PAGE_SHIFT,
					     PAGE_KERNEL));
	}
	return 0;
}

struct memmap_init_callback_data {
	struct page *start;
	struct page *end;
	int nid;
	unsigned long zone;
};

static int __meminit
virtual_memmap_init(u64 start, u64 end, void *arg)
{
	struct memmap_init_callback_data *args;
	struct page *map_start, *map_end;

	args = (struct memmap_init_callback_data *) arg;
	map_start = vmem_map + (__pa(start) >> PAGE_SHIFT);
	map_end   = vmem_map + (__pa(end) >> PAGE_SHIFT);

	if (map_start < args->start)
		map_start = args->start;
	if (map_end > args->end)
		map_end = args->end;

	/*
	 * We have to initialize "out of bounds" struct page elements that fit completely
	 * on the same pages that were allocated for the "in bounds" elements because they
	 * may be referenced later (and found to be "reserved").
	 */
	map_start -= ((unsigned long) map_start & (PAGE_SIZE - 1)) / sizeof(struct page);
	map_end += ((PAGE_ALIGN((unsigned long) map_end) - (unsigned long) map_end)
		    / sizeof(struct page));
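
	/*
	 * Editor's illustration of the adjustment above (assuming 16KB pages
	 * and a 64-byte struct page, i.e. 256 entries per memmap page):
	 * map_start is pulled back to the first entry that still fits entirely
	 * on its memmap page and map_end pushed forward to the last such
	 * entry, so every element sharing a page with the "in bounds" range
	 * gets initialized below.
	 */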

	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY, NULL);
	return 0;
}

void __meminit
memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map) {
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
				 NULL);
	} else {
		struct page *start;
		struct memmap_init_callback_data args;

		start = pfn_to_page(start_pfn);
		args.start = start;
		args.end = start + size;
		args.nid = nid;
		args.zone = zone;

		efi_memmap_walk(virtual_memmap_init, &args);
	}
}

int
ia64_pfn_valid (unsigned long pfn)
{
	char byte;
	struct page *pg = pfn_to_page(pfn);

	return     (__get_user(byte, (char __user *) pg) == 0)
		&& ((((u64)pg & PAGE_MASK) == (((u64)(pg + 1) - 1) & PAGE_MASK))
			|| (__get_user(byte, (char __user *) (pg + 1) - 1) == 0));
}
EXPORT_SYMBOL(ia64_pfn_valid);

int __init find_largest_hole(u64 start, u64 end, void *arg)
{
	u64 *max_gap = arg;

	static u64 last_end = PAGE_OFFSET;

	/* NOTE: this algorithm assumes efi memmap table is ordered */

	if (*max_gap < (start - last_end))
		*max_gap = start - last_end;
	last_end = end;
	return 0;
}

#endif /* CONFIG_VIRTUAL_MEM_MAP */

int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid);
	return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

#ifdef CONFIG_PCI
	/*
	 * This needs to be called _after_ the command line has been parsed but _before_
	 * any drivers that may need the PCI DMA interface are initialized or bootmem has
	 * been freed.
	 */
	platform_dma_init();
#endif

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);
	free_all_bootmem();
	mem_init_print_info(NULL);

	/*
	 * For fsyscall entrypoints with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
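	/*
	 * Editor's note, purely illustrative (the address below is made up):
	 * if sys_call_table[i] held 0xa000000100012340, the loop would store
	 * 0xa000000100012341, and the fsyscall entry code would see bit 0 set
	 * and fall back to the heavy-weight syscall path.
	 */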
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
		bool want_memblock)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock);
	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__,  ret);

	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
	if (ret)
		pr_warn("%s: Problem encountered in __remove_pages() as"
			" ret=%d\n", __func__,  ret);

	return ret;
}
#endif
#endif
// SPDX-License-Identifier: GPL-2.0
/*
 * Initialize MMU support.
 *
 * Copyright (C) 1998-2003 Hewlett-Packard Co
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
#include <linux/kernel.h>
#include <linux/init.h>

#include <linux/dma-map-ops.h>
#include <linux/dmar.h>
#include <linux/efi.h>
#include <linux/elf.h>
#include <linux/memblock.h>
#include <linux/mm.h>
#include <linux/sched/signal.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/personality.h>
#include <linux/reboot.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/bitops.h>
#include <linux/kexec.h>
#include <linux/swiotlb.h>

#include <asm/dma.h>
#include <asm/efi.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/patch.h>
#include <asm/pgalloc.h>
#include <asm/sal.h>
#include <asm/sections.h>
#include <asm/tlb.h>
#include <linux/uaccess.h>
#include <asm/unistd.h>
#include <asm/mca.h>

extern void ia64_tlb_init (void);

unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;

struct page *zero_page_memmap_ptr;	/* map entry for zero page */
EXPORT_SYMBOL(zero_page_memmap_ptr);

void
__ia64_sync_icache_dcache (pte_t pte)
{
	unsigned long addr;
	struct page *page;

	page = pte_page(pte);
	addr = (unsigned long) page_address(page);

	if (test_bit(PG_arch_1, &page->flags))
		return;				/* i-cache is already coherent with d-cache */

	flush_icache_range(addr, addr + page_size(page));
	set_bit(PG_arch_1, &page->flags);	/* mark page as clean */
}

/*
 * Since DMA is i-cache coherent, any (complete) pages that were written via
 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
 * flush them when they get mapped into an executable vm-area.
 */
void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
	unsigned long pfn = PHYS_PFN(paddr);

	do {
		set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
	} while (++pfn <= PHYS_PFN(paddr + size - 1));
}
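
/*
 * Editor's illustration of the loop above (assuming 16KB pages, i.e.
 * PAGE_SHIFT = 14): for paddr = 0x4000 and size = 0x8000 it runs from
 * PHYS_PFN(0x4000) = 1 to PHYS_PFN(0xbfff) = 2, so both pages touched by
 * the buffer get PG_arch_1 set.
 */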

inline void
ia64_set_rbs_bot (void)
{
	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;

	if (stack_size > MAX_USER_STACK_SIZE)
		stack_size = MAX_USER_STACK_SIZE;
	current->thread.rbs_bot = PAGE_ALIGN(current->mm->start_stack - stack_size);
}

/*
 * This performs some platform-dependent address space initialization.
 * On IA-64, we want to setup the VM area for the register backing
 * store (which grows upwards) and install the gateway page which is
 * used for signal trampolines, etc.
 */
void
ia64_init_addr_space (void)
{
	struct vm_area_struct *vma;

	ia64_set_rbs_bot();

	/*
	 * If we're out of memory and kmem_cache_alloc() returns NULL, we simply ignore
	 * the problem.  When the process attempts to write to the register backing store
	 * for the first time, it will get a SEGFAULT in this case.
	 */
	vma = vm_area_alloc(current->mm);
	if (vma) {
		vma_set_anonymous(vma);
		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
		vma->vm_end = vma->vm_start + PAGE_SIZE;
		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
		mmap_write_lock(current->mm);
		if (insert_vm_struct(current->mm, vma)) {
			mmap_write_unlock(current->mm);
			vm_area_free(vma);
			return;
		}
		mmap_write_unlock(current->mm);
	}

	/* map NaT-page at address zero to speed up speculative dereferencing of NULL: */
	if (!(current->personality & MMAP_PAGE_ZERO)) {
		vma = vm_area_alloc(current->mm);
		if (vma) {
			vma_set_anonymous(vma);
			vma->vm_end = PAGE_SIZE;
			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
			vma->vm_flags = VM_READ | VM_MAYREAD | VM_IO |
					VM_DONTEXPAND | VM_DONTDUMP;
			mmap_write_lock(current->mm);
			if (insert_vm_struct(current->mm, vma)) {
				mmap_write_unlock(current->mm);
				vm_area_free(vma);
				return;
			}
			mmap_write_unlock(current->mm);
		}
	}
}

void
free_initmem (void)
{
	free_reserved_area(ia64_imva(__init_begin), ia64_imva(__init_end),
			   -1, "unused kernel");
}

void __init
free_initrd_mem (unsigned long start, unsigned long end)
{
	/*
	 * EFI uses 4KB pages while the kernel can use 4KB or bigger.
	 * Thus EFI and the kernel may have different page sizes. It is
	 * therefore possible to have the initrd share the same page as
	 * the end of the kernel (given current setup).
	 *
	 * To avoid freeing/using the wrong page (kernel sized) we:
	 *	- align up the beginning of initrd
	 *	- align down the end of initrd
	 *
	 *  |             |
	 *  |=============| a000
	 *  |             |
	 *  |             |
	 *  |             | 9000
	 *  |/////////////|
	 *  |/////////////|
	 *  |=============| 8000
	 *  |///INITRD////|
	 *  |/////////////|
	 *  |/////////////| 7000
	 *  |             |
	 *  |KKKKKKKKKKKKK|
	 *  |=============| 6000
	 *  |KKKKKKKKKKKKK|
	 *  |KKKKKKKKKKKKK|
	 *  K=kernel using 8KB pages
	 *
	 * In this example, we must free page 8000 ONLY. So we must align up
	 * initrd_start and keep initrd_end as is.
	 */
	start = PAGE_ALIGN(start);
	end = end & PAGE_MASK;

	if (start < end)
		printk(KERN_INFO "Freeing initrd memory: %ldkB freed\n", (end - start) >> 10);

	for (; start < end; start += PAGE_SIZE) {
		if (!virt_addr_valid(start))
			continue;
		free_reserved_page(virt_to_page(start));
	}
}

/*
 * This installs a clean page in the kernel's page table.
 */
static struct page * __init
put_kernel_page (struct page *page, unsigned long address, pgprot_t pgprot)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgd_offset_k(address);	/* note: this is NOT pgd_offset()! */

	{
		p4d = p4d_alloc(&init_mm, pgd, address);
		if (!p4d)
			goto out;
		pud = pud_alloc(&init_mm, p4d, address);
		if (!pud)
			goto out;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			goto out;
		pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			goto out;
		if (!pte_none(*pte))
			goto out;
		set_pte(pte, mk_pte(page, pgprot));
	}
  out:
	/* no need for flush_tlb */
	return page;
}

static void __init
setup_gate (void)
{
	struct page *page;

	/*
	 * Map the gate page twice: once read-only to export the ELF
	 * headers etc. and once execute-only page to enable
	 * privilege-promotion via "epc":
	 */
	page = virt_to_page(ia64_imva(__start_gate_section));
	put_kernel_page(page, GATE_ADDR, PAGE_READONLY);
#ifdef HAVE_BUGGY_SEGREL
	page = virt_to_page(ia64_imva(__start_gate_section + PAGE_SIZE));
	put_kernel_page(page, GATE_ADDR + PAGE_SIZE, PAGE_GATE);
#else
	put_kernel_page(page, GATE_ADDR + PERCPU_PAGE_SIZE, PAGE_GATE);
	/* Fill in the holes (if any) with read-only zero pages: */
	{
		unsigned long addr;

		for (addr = GATE_ADDR + PAGE_SIZE;
		     addr < GATE_ADDR + PERCPU_PAGE_SIZE;
		     addr += PAGE_SIZE)
		{
			put_kernel_page(ZERO_PAGE(0), addr,
					PAGE_READONLY);
			put_kernel_page(ZERO_PAGE(0), addr + PERCPU_PAGE_SIZE,
					PAGE_READONLY);
		}
	}
#endif
	ia64_patch_gate();
}

static struct vm_area_struct gate_vma;

static int __init gate_vma_init(void)
{
	vma_init(&gate_vma, NULL);
	gate_vma.vm_start = FIXADDR_USER_START;
	gate_vma.vm_end = FIXADDR_USER_END;
	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
	gate_vma.vm_page_prot = __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX);

	return 0;
}
__initcall(gate_vma_init);

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
	return &gate_vma;
}

int in_gate_area_no_mm(unsigned long addr)
{
	if ((addr >= FIXADDR_USER_START) && (addr < FIXADDR_USER_END))
		return 1;
	return 0;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
	return in_gate_area_no_mm(addr);
}

void ia64_mmu_init(void *my_cpu_data)
{
	unsigned long pta, impl_va_bits;
	extern void tlb_init(void);

#ifdef CONFIG_DISABLE_VHPT
#	define VHPT_ENABLE_BIT	0
#else
#	define VHPT_ENABLE_BIT	1
#endif

	/*
	 * Check if the virtually mapped linear page table (VMLPT) overlaps with a mapped
	 * address space.  The IA-64 architecture guarantees that at least 50 bits of
	 * virtual address space are implemented but if we pick a large enough page size
	 * (e.g., 64KB), the mapped address space is big enough that it will overlap with
	 * VMLPT.  I assume that once we run on machines big enough to warrant 64KB pages,
	 * IMPL_VA_MSB will be significantly bigger, so this is unlikely to become a
	 * problem in practice.  Alternatively, we could truncate the top of the mapped
	 * address space to not permit mappings that would overlap with the VMLPT.
	 * --davidm 00/12/06
	 */
#	define pte_bits			3
#	define mapped_space_bits	(3*(PAGE_SHIFT - pte_bits) + PAGE_SHIFT)
	/*
	 * The virtual page table has to cover the entire implemented address space within
	 * a region even though not all of this space may be mappable.  The reason for
	 * this is that the Access bit and Dirty bit fault handlers perform
	 * non-speculative accesses to the virtual page table, so the address range of the
	 * virtual page table itself needs to be covered by virtual page table.
	 */
#	define vmlpt_bits		(impl_va_bits - PAGE_SHIFT + pte_bits)
#	define POW2(n)			(1ULL << (n))

	impl_va_bits = ffz(~(local_cpu_data->unimpl_va_mask | (7UL << 61)));

	if (impl_va_bits < 51 || impl_va_bits > 61)
		panic("CPU has bogus IMPL_VA_MSB value of %lu!\n", impl_va_bits - 1);
	/*
	 * mapped_space_bits - PAGE_SHIFT is the total number of ptes we need,
	 * which must fit into "vmlpt_bits - pte_bits" slots. Second half of
	 * the test makes sure that our mapped space doesn't overlap the
	 * unimplemented hole in the middle of the region.
	 */
	if ((mapped_space_bits - PAGE_SHIFT > vmlpt_bits - pte_bits) ||
	    (mapped_space_bits > impl_va_bits - 1))
		panic("Cannot build a big enough virtual-linear page table"
		      " to cover mapped address space.\n"
		      " Try using a smaller page size.\n");


	/* place the VMLPT at the end of each page-table mapped region: */
	pta = POW2(61) - POW2(vmlpt_bits);

	/*
	 * Set the (virtually mapped linear) page table address.  Bit
	 * 8 selects between the short and long format, bits 2-7 the
	 * size of the table, and bit 0 whether the VHPT walker is
	 * enabled.
	 */
	ia64_set_pta(pta | (0 << 8) | (vmlpt_bits << 2) | VHPT_ENABLE_BIT);

	ia64_tlb_init();

#ifdef	CONFIG_HUGETLB_PAGE
	ia64_set_rr(HPAGE_REGION_BASE, HPAGE_SHIFT << 2);
	ia64_srlz_d();
#endif
}

int __init register_active_ranges(u64 start, u64 len, int nid)
{
	u64 end = start + len;

#ifdef CONFIG_KEXEC
	if (start > crashk_res.start && start < crashk_res.end)
		start = crashk_res.end;
	if (end > crashk_res.start && end < crashk_res.end)
		end = crashk_res.start;
#endif

	if (start < end)
		memblock_add_node(__pa(start), end - start, nid, MEMBLOCK_NONE);
	return 0;
}

int
find_max_min_low_pfn (u64 start, u64 end, void *arg)
{
	unsigned long pfn_start, pfn_end;
#ifdef CONFIG_FLATMEM
	pfn_start = (PAGE_ALIGN(__pa(start))) >> PAGE_SHIFT;
	pfn_end = (PAGE_ALIGN(__pa(end - 1))) >> PAGE_SHIFT;
#else
	pfn_start = GRANULEROUNDDOWN(__pa(start)) >> PAGE_SHIFT;
	pfn_end = GRANULEROUNDUP(__pa(end - 1)) >> PAGE_SHIFT;
#endif
	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}

/*
 * Boot command-line option "nolwsys" can be used to disable the use of any light-weight
 * system call handler.  When this option is in effect, all fsyscalls will end up bubbling
 * down into the kernel and calling the normal (heavy-weight) syscall handler.  This is
 * useful for performance testing, but conceivably could also come in handy for debugging
 * purposes.
 */

static int nolwsys __initdata;

static int __init
nolwsys_setup (char *s)
{
	nolwsys = 1;
	return 1;
}

__setup("nolwsys", nolwsys_setup);

void __init
mem_init (void)
{
	int i;

	BUG_ON(PTRS_PER_PGD * sizeof(pgd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PMD * sizeof(pmd_t) != PAGE_SIZE);
	BUG_ON(PTRS_PER_PTE * sizeof(pte_t) != PAGE_SIZE);

	/*
	 * This needs to be called _after_ the command line has been parsed but
	 * _before_ any drivers that may need the PCI DMA interface are
	 * initialized or bootmem has been freed.
	 */
	do {
#ifdef CONFIG_INTEL_IOMMU
		detect_intel_iommu();
		if (iommu_detected)
			break;
#endif
		swiotlb_init(true, SWIOTLB_VERBOSE);
	} while (0);

#ifdef CONFIG_FLATMEM
	BUG_ON(!mem_map);
#endif

	set_max_mapnr(max_low_pfn);
	high_memory = __va(max_low_pfn * PAGE_SIZE);
	memblock_free_all();

	/*
	 * For fsyscall entrypoints with no light-weight handler, use the ordinary
	 * (heavy-weight) handler, but mark it by setting bit 0, so the fsyscall entry
	 * code can tell them apart.
	 */
	for (i = 0; i < NR_syscalls; ++i) {
		extern unsigned long fsyscall_table[NR_syscalls];
		extern unsigned long sys_call_table[NR_syscalls];

		if (!fsyscall_table[i] || nolwsys)
			fsyscall_table[i] = sys_call_table[i] | 1;
	}
	setup_gate();
}

#ifdef CONFIG_MEMORY_HOTPLUG
int arch_add_memory(int nid, u64 start, u64 size,
		    struct mhp_params *params)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	int ret;

	if (WARN_ON_ONCE(params->pgprot.pgprot != PAGE_KERNEL.pgprot))
		return -EINVAL;

	ret = __add_pages(nid, start_pfn, nr_pages, params);
	if (ret)
		printk("%s: Problem encountered in __add_pages() as ret=%d\n",
		       __func__,  ret);

	return ret;
}

void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;

	__remove_pages(start_pfn, nr_pages, altmap);
}
#endif

static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_READONLY,
	[VM_WRITE]					= PAGE_READONLY,
	[VM_WRITE | VM_READ]				= PAGE_READONLY,
	[VM_EXEC]					= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
								   _PAGE_AR_X_RX),
	[VM_EXEC | VM_READ]				= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
								   _PAGE_AR_RX),
	[VM_EXEC | VM_WRITE]				= PAGE_COPY_EXEC,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_COPY_EXEC,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_READONLY,
	[VM_SHARED | VM_WRITE]				= PAGE_SHARED,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_SHARED,
	[VM_SHARED | VM_EXEC]				= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
								   _PAGE_AR_X_RX),
	[VM_SHARED | VM_EXEC | VM_READ]			= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
								   _PAGE_AR_RX),
	[VM_SHARED | VM_EXEC | VM_WRITE]		= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
								   _PAGE_AR_RWX),
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= __pgprot(__ACCESS_BITS | _PAGE_PL_3 |
								   _PAGE_AR_RWX)
};
DECLARE_VM_GET_PAGE_PROT
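
/*
 * Example of how the table above is consumed (editor's note): the generic
 * vm_get_page_prot() generated by DECLARE_VM_GET_PAGE_PROT indexes
 * protection_map with vm_flags & (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED),
 * so e.g. a MAP_SHARED PROT_READ|PROT_WRITE mapping resolves to PAGE_SHARED,
 * while the equivalent MAP_PRIVATE mapping resolves to PAGE_READONLY
 * (write faults then go through copy-on-write).
 */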