/*
 * Copyright 2005, Paul Mackerras, IBM Corporation.
 * Copyright 2009, Benjamin Herrenschmidt, IBM Corporation.
 * Copyright 2015-2016, Aneesh Kumar K.V, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/dma.h>

#include "mmu_decl.h"

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * On Book3E CPUs, the vmemmap is currently mapped in the top half of
 * the vmalloc space using normal page tables, though the size of
 * pages encoded in the PTEs can be different
 */
int __meminit vmemmap_create_mapping(unsigned long start,
				     unsigned long page_size,
				     unsigned long phys)
{
	/* Create a PTE encoding without page size */
	unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED |
		_PAGE_KERNEL_RW;

	/* PTEs only contain page size encodings up to 32M */
	BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf);

	/* Encode the size in the PTE */
	flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8;
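	/*
	 * Since enc was just checked to fit in 4 bits, the size encoding
	 * above lands in PTE bits 8-11.
	 */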

	/* Install a PTE for each PAGE_SIZE step of the area. Note that we
	 * don't increment phys because all PTEs are of the large size and
	 * thus must have the low bits clear
	 */
	for (i = 0; i < page_size; i += PAGE_SIZE)
		BUG_ON(map_kernel_page(start + i, phys, flags));

	return 0;
}
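/*
 * Illustrative example (values assumed, not taken from this file): with
 * a 16M vmemmap page size, a call such as
 *
 *	vmemmap_create_mapping(start, 0x1000000, phys);
 *
 * installs 0x1000000 >> PAGE_SHIFT identical PTEs, each pointing at the
 * same 16M-aligned phys; the hardware picks the real mapping size from
 * the encoding in the PTE rather than from the low address bits.
 */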

#ifdef CONFIG_MEMORY_HOTPLUG
void vmemmap_remove_mapping(unsigned long start,
			    unsigned long page_size)
{
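	/* Nothing to tear down here; the vmemmap PTEs are left in place. */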
}
#endif
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static __ref void *early_alloc_pgtable(unsigned long size)
{
	void *pt;

	pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
	memset(pt, 0, size);

	return pt;
}
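/*
 * Note: in this kernel generation, memblock_alloc_base() panics rather
 * than returning 0 on failure, so pt is always a valid, size-aligned
 * allocation below MAX_DMA_ADDRESS; the BUG_ON(... == NULL) checks at
 * the call sites below are therefore belt-and-braces.
 */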

/*
 * map_kernel_page is currently only called by __ioremap;
 * map_kernel_page adds an entry to the ioremap page table
 * and adds an entry to the HPT, possibly bolting it
 */
int map_kernel_page(unsigned long ea, unsigned long pa, unsigned long flags)
{
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;

	BUILD_BUG_ON(TASK_SIZE_USER64 > PGTABLE_RANGE);
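	/* Compile-time check: the 64-bit user address range must fit
	 * within what the kernel page tables can map. */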
	if (slab_is_available()) {
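		/*
		 * Normal (post-boot) path: the slab allocator is up, so
		 * use the generic allocation helpers, which may sleep.
		 */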
		pgdp = pgd_offset_k(ea);
		pudp = pud_alloc(&init_mm, pgdp, ea);
		if (!pudp)
			return -ENOMEM;
		pmdp = pmd_alloc(&init_mm, pudp, ea);
		if (!pmdp)
			return -ENOMEM;
		ptep = pte_alloc_kernel(pmdp, ea);
		if (!ptep)
			return -ENOMEM;
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
	} else {
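		/*
		 * Early boot: the slab allocator is not up yet, so take
		 * page-table pages straight from memblock and populate
		 * each level of the tree by hand.
		 */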
		pgdp = pgd_offset_k(ea);
#ifndef __PAGETABLE_PUD_FOLDED
		if (pgd_none(*pgdp)) {
			pudp = early_alloc_pgtable(PUD_TABLE_SIZE);
			BUG_ON(pudp == NULL);
			pgd_populate(&init_mm, pgdp, pudp);
		}
#endif /* !__PAGETABLE_PUD_FOLDED */
		pudp = pud_offset(pgdp, ea);
		if (pud_none(*pudp)) {
			pmdp = early_alloc_pgtable(PMD_TABLE_SIZE);
			BUG_ON(pmdp == NULL);
			pud_populate(&init_mm, pudp, pmdp);
		}
		pmdp = pmd_offset(pudp, ea);
		if (!pmd_present(*pmdp)) {
			ptep = early_alloc_pgtable(PAGE_SIZE);
			BUG_ON(ptep == NULL);
			pmd_populate_kernel(&init_mm, pmdp, ptep);
		}
		ptep = pte_offset_kernel(pmdp, ea);
		set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT,
							  __pgprot(flags)));
	}

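	/*
	 * Order the page-table stores above before any subsequent store
	 * that publishes the mapping; this is a write barrier only, not
	 * a TLB flush.
	 */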
	smp_wmb();
	return 0;
}
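/*
 * Usage sketch (editorial addition; names and flags are assumed): an
 * __ioremap-style caller maps a physical range one PAGE_SIZE chunk at
 * a time, roughly:
 *
 *	for (i = 0; i < size; i += PAGE_SIZE)
 *		if (map_kernel_page(ea + i, pa + i, flags))
 *			return NULL;
 *
 * Unlike vmemmap_create_mapping() above, phys advances with each step
 * here because these are normal-size PTEs.
 */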