/* SPDX-License-Identifier: GPL-2.0-or-later */
#ifndef _ASM_POWERPC_BOOK3S_64_PGALLOC_H
#define _ASM_POWERPC_BOOK3S_64_PGALLOC_H

#include <linux/slab.h>
#include <linux/cpumask.h>
#include <linux/kmemleak.h>
#include <linux/percpu.h>

struct vmemmap_backing {
	struct vmemmap_backing *list;
	unsigned long phys;
	unsigned long virt_addr;
};
extern struct vmemmap_backing *vmemmap_list;

extern pmd_t *pmd_fragment_alloc(struct mm_struct *, unsigned long);
extern void pmd_fragment_free(unsigned long *);
extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift);
extern void __tlb_remove_table(void *_table);

void pte_frag_destroy(void *pte_frag);

static inline pgd_t *radix__pgd_alloc(struct mm_struct *mm)
{
#ifdef CONFIG_PPC_64K_PAGES
	return (pgd_t *)__get_free_page(pgtable_gfp_flags(mm, PGALLOC_GFP));
#else
	struct page *page;
	page = alloc_pages(pgtable_gfp_flags(mm, PGALLOC_GFP | __GFP_RETRY_MAYFAIL),
			   4);
	if (!page)
		return NULL;
	return (pgd_t *)page_address(page);
#endif
}

static inline void radix__pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_PPC_64K_PAGES
	free_page((unsigned long)pgd);
#else
	free_pages((unsigned long)pgd, 4);
#endif
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;

	if (radix_enabled())
		return radix__pgd_alloc(mm);

	pgd = kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	if (unlikely(!pgd))
		return pgd;

	/*
	 * Don't scan the PGD for pointers, it contains references to PUDs
	 * but those references are not full pointers and so can't be
	 * recognised by kmemleak.
	 */
	kmemleak_no_scan(pgd);

	/*
	 * With hugetlb, we don't clear the second half of the page table.
	 * If we share the same slab cache with the pmd or pud level table,
	 * we need to make sure we zero out the full table on alloc.
	 * With 4K pages we don't store slots in the second half, hence we
	 * don't need to do this for 4K.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PPC_64K_PAGES) && \
	(H_PGD_INDEX_SIZE == H_PUD_CACHE_INDEX)
	memset(pgd, 0, PGD_TABLE_SIZE);
#endif
	return pgd;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (radix_enabled())
		return radix__pgd_free(mm, pgd);
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *pgd, pud_t *pud)
{
	*pgd = __p4d(__pgtable_ptr_val(pud) | PGD_VAL_BITS);
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	pud_t *pud;

	pud = kmem_cache_alloc(PGT_CACHE(PUD_CACHE_INDEX),
			       pgtable_gfp_flags(mm, GFP_KERNEL));
	/*
	 * Tell kmemleak to ignore the PUD, that means don't scan it for
	 * pointers and don't consider it a leak. PUDs are typically only
	 * referred to by their PGD, but kmemleak is not able to recognise
	 * those as pointers, leading to false leak reports.
	 */
	kmemleak_ignore(pud);
	return pud;
}

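/*
 * Note on the free paths below: PUDs come from a dedicated kmem_cache
 * (see pud_alloc_one() above), while PMDs are sub-page fragments handed
 * out by pmd_fragment_alloc(), so pud_free() and pmd_free() must return
 * memory to different allocators.
 */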
static inline void __pud_free(pud_t *pud)
{
	struct page *page = virt_to_page(pud);

	/*
	 * Early pud pages allocated via the memblock allocator can't be
	 * freed directly to slab. KFENCE pages have both the reserved and
	 * slab flags set, so they need to be freed via kmem_cache_free().
	 */
	if (PageReserved(page) && !PageSlab(page))
		free_reserved_page(page);
	else
		kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), pud);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	return __pud_free(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	*pud = __pud(__pgtable_ptr_val(pmd) | PUD_VAL_BITS);
}

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, pud, PUD_INDEX);
}

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return pmd_fragment_alloc(mm, addr);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pmd_fragment_free((unsigned long *)pmd);
}

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	return pgtable_free_tlb(tlb, pmd, PMD_INDEX);
}

static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
				       pte_t *pte)
{
	*pmd = __pmd(__pgtable_ptr_val(pte) | PMD_VAL_BITS);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				pgtable_t pte_page)
{
	*pmd = __pmd(__pgtable_ptr_val(pte_page) | PMD_VAL_BITS);
}

static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	pgtable_free_tlb(tlb, table, PTE_INDEX);
}

extern atomic_long_t direct_pages_count[MMU_PAGE_COUNT];

static inline void update_page_count(int psize, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[psize]);
}

#endif /* _ASM_POWERPC_BOOK3S_64_PGALLOC_H */
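/*
 * Typical usage, for illustration only: the generic mm code pairs these
 * helpers, e.g. a new mm gets its PGD from pgd_alloc() and the final
 * mmdrop() returns it via pgd_free(), while page tables torn down during
 * munmap()/exit go through the __p*_free_tlb() variants so the mmu_gather
 * machinery can defer the actual free until after the TLB flush.
 */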