v3.1
 
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct * vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len >= VA_EXCLUDE_START))
		return -ENOMEM;

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

full_search:
	addr = ALIGN(addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache & HPAGE_MASK;

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = (mm->mmap_base-len) & HPAGE_MASK;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start-len) & HPAGE_MASK;
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

	if (likely(tp->tsb != NULL))
		return;

	tsb_grow(mm, MM_TSB_HUGE, 0);
	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}
v6.13.7
// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t pte;

	entry = pte_mkhuge(entry);
	pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd
	 */
	if (flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);
	else
		return pte_mknotmcd(pte);
#else
	return pte;
#endif
}

static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned long tte_to_shift(pte_t entry)
{
	if (tlb_type == hypervisor)
		return sun4v_huge_tte_to_shift(entry);

	return sun4u_huge_tte_to_shift(entry);
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "tto_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_huge(mm, pmd, addr);
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_huge(pmd, addr);
}

void __set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

	shift = PAGE_SHIFT;
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry, unsigned long sz)
{
	__set_huge_pte_at(mm, addr, ptep, entry);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, unsigned long sz)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	shift = PAGE_SHIFT;
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}