/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))
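
/* The exclusion window above covers the unimplemented "hole" in the middle
 * of the SPARC64 virtual address space (the hardware does not provide the
 * full 64 bits of VA).  The window is presumably padded by 4GB on each side
 * so that no mapping can be placed right up against the hole itself.
 */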

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len >= VA_EXCLUDE_START))
		return -ENOMEM;

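	/* free_area_cache remembers where the previous search stopped and
	 * cached_hole_size the largest free gap seen below that point.  If
	 * none of the gaps seen so far could hold this request, continue
	 * from the cached address; otherwise rescan from TASK_UNMAPPED_BASE,
	 * since one of the earlier gaps may be large enough.
	 */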
	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	addr = ALIGN(addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point: (!vma || addr < vma->vm_end). */
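		/* Never let a mapping run into the VA hole: if the candidate
		 * range would touch it, jump to the first address above the
		 * hole and continue the search from there.
		 */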
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* This should only ever run for 32-bit processes. */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache & HPAGE_MASK;

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = (mm->mmap_base-len) & HPAGE_MASK;

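	/* Walk downwards from just below mmap_base: each time an existing
	 * vma is in the way, retry immediately below that vma's start,
	 * keeping the candidate address HPAGE aligned.
	 */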
	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start-len) & HPAGE_MASK;
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
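	/* Fall through to the allocator matching this mm's mmap layout:
	 * the legacy layout searches bottom-up from TASK_UNMAPPED_BASE,
	 * the modern layout top-down from below mmap_base.
	 */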
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range. So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
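	/* In this implementation a huge page is backed by a run of
	 * (1 << HUGETLB_PAGE_ORDER) ordinary PTEs, so write every sub-pte,
	 * advancing the physical address by PAGE_SIZE at each step.
	 */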
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

	if (likely(tp->tsb != NULL))
		return;

	tsb_grow(mm, MM_TSB_HUGE, 0);
	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
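		/* Keep the base page size in the PGSZ0 field and select the
		 * huge page size in PGSZ1, so the second D-TLB half matches
		 * huge page TTEs.
		 */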
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match. This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}
// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>


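/* sun4u and sun4v encode the page size differently in the TTE, so each
 * gets its own conversion helper.  For sun4u the TTE produced by
 * pte_mkhuge() presumably already carries the correct size bits, so there
 * is nothing to adjust; sun4v sets the size field explicitly below.
 */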
static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t pte;

	entry = pte_mkhuge(entry);
	pte = hugepage_shift_to_tte(entry, shift);

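	/* ADI (Application Data Integrity, SPARC M7 and later) tags memory
	 * with version numbers that the hardware checks on access; the
	 * TTE.mcd (memory corruption detection) bit must track the VMA's
	 * VM_SPARC_ADI flag for those checks to take effect.
	 */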
#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd
	 */
	if (flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);
	else
		return pte_mknotmcd(pte);
#else
	return pte;
#endif
}

static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned long tte_to_shift(pte_t entry)
{
	if (tlb_type == hypervisor)
		return sun4v_huge_tte_to_shift(entry);

	return sun4u_huge_tte_to_shift(entry);
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

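	/* A REAL_HPAGE_SIZE (4MB) TTE only ever backs one half of an
	 * HPAGE_SIZE (8MB) huge page, so report the full huge page size.
	 */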
	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

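/* Report the mapping size encoded in a (huge) TTE at each page table
 * level.  These are generic page table walker hooks (presumably used for
 * things like perf's page size reporting) rather than hugetlb-internal
 * helpers.
 */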
unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }

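/* Huge page table entries live at different levels depending on the page
 * size: 16GB pages are installed directly in the PUD, 8MB/256MB/2GB pages
 * in the PMD, and 64K pages at the normal PTE level, so huge_pte_alloc()
 * and huge_pte_offset() return a pointer into whichever level the given
 * size maps at.
 */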
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_huge(mm, pmd, addr);
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_huge(pmd, addr);
}

void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		       pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	shift = PAGE_SHIFT;
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

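	/* nptes is the number of entries at the chosen page table level
	 * needed to cover the whole huge page; sizes spanning more than one
	 * PMD/PUD entry (256MB, 2GB, 16GB) are written as a run of identical
	 * entries with the physical address advanced by (1 << shift) each
	 * step.
	 */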
	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, unsigned long sz)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	shift = PAGE_SHIFT;
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
				   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

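/* Free the page table pages that backed a huge mapping, mirroring the
 * generic free_pgd_range(): floor and ceiling bound how far up the tree
 * may be trimmed, so directory pages that might still be shared with a
 * neighbouring VMA are left in place.
 */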
void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}