v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	unsigned long task_size = TASK_SIZE;
	struct vm_unmapped_area_info info;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	info.flags = 0;
	info.length = len;
	info.low_limit = TASK_UNMAPPED_BASE;
	info.high_limit = min(task_size, VA_EXCLUDE_START);
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

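	/*
	 * sparc64 has an unmappable hole in the middle of the address
	 * space; if the search below the hole failed and this task can
	 * address the range above it, retry there.
	 */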
	if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
		VM_BUG_ON(addr != -ENOMEM);
		info.low_limit = VA_EXCLUDE_END;
		info.high_limit = task_size;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct hstate *h = hstate_file(filp);
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

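	/* Search top-down from mmap_base toward PAGE_SIZE, keeping
	 * hugepage alignment.
	 */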
	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = STACK_TOP32;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
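		/* Honor the caller's hint if the aligned range is free. */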
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

static pte_t sun4u_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	return entry;
}

static pte_t sun4v_hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	unsigned long hugepage_size = _PAGE_SZ4MB_4V;

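	/* Clear any old size bits, then encode the size field for this
	 * shift.
	 */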
	pte_val(entry) = pte_val(entry) & ~_PAGE_SZALL_4V;

	switch (shift) {
	case HPAGE_16GB_SHIFT:
		hugepage_size = _PAGE_SZ16GB_4V;
		pte_val(entry) |= _PAGE_PUD_HUGE;
		break;
	case HPAGE_2GB_SHIFT:
		hugepage_size = _PAGE_SZ2GB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_256MB_SHIFT:
		hugepage_size = _PAGE_SZ256MB_4V;
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_SHIFT:
		pte_val(entry) |= _PAGE_PMD_HUGE;
		break;
	case HPAGE_64K_SHIFT:
		hugepage_size = _PAGE_SZ64K_4V;
		break;
	default:
		WARN_ONCE(1, "unsupported hugepage shift=%u\n", shift);
	}

	pte_val(entry) = pte_val(entry) | hugepage_size;
	return entry;
}

static pte_t hugepage_shift_to_tte(pte_t entry, unsigned int shift)
{
	if (tlb_type == hypervisor)
		return sun4v_hugepage_shift_to_tte(entry, shift);
	else
		return sun4u_hugepage_shift_to_tte(entry, shift);
}

pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
{
	pte_t pte;

	entry = pte_mkhuge(entry);
	pte = hugepage_shift_to_tte(entry, shift);

#ifdef CONFIG_SPARC64
	/* If this vma has ADI enabled on it, turn on TTE.mcd */
	if (flags & VM_SPARC_ADI)
		return pte_mkmcd(pte);
	else
		return pte_mknotmcd(pte);
#else
	return pte;
#endif
}

static unsigned int sun4v_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4V;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ16GB_4V:
		shift = HPAGE_16GB_SHIFT;
		break;
	case _PAGE_SZ2GB_4V:
		shift = HPAGE_2GB_SHIFT;
		break;
	case _PAGE_SZ256MB_4V:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4V:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4V:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned int sun4u_huge_tte_to_shift(pte_t entry)
{
	unsigned long tte_szbits = pte_val(entry) & _PAGE_SZALL_4U;
	unsigned int shift;

	switch (tte_szbits) {
	case _PAGE_SZ256MB_4U:
		shift = HPAGE_256MB_SHIFT;
		break;
	case _PAGE_SZ4MB_4U:
		shift = REAL_HPAGE_SHIFT;
		break;
	case _PAGE_SZ64K_4U:
		shift = HPAGE_64K_SHIFT;
		break;
	default:
		shift = PAGE_SHIFT;
		break;
	}
	return shift;
}

static unsigned long tte_to_shift(pte_t entry)
{
	if (tlb_type == hypervisor)
		return sun4v_huge_tte_to_shift(entry);

	return sun4u_huge_tte_to_shift(entry);
}

static unsigned int huge_tte_to_shift(pte_t entry)
{
	unsigned long shift = tte_to_shift(entry);

	if (shift == PAGE_SHIFT)
		WARN_ONCE(1, "tte_to_shift: invalid hugepage tte=0x%lx\n",
			  pte_val(entry));

	return shift;
}

static unsigned long huge_tte_to_size(pte_t pte)
{
	unsigned long size = 1UL << huge_tte_to_shift(pte);

	if (size == REAL_HPAGE_SIZE)
		size = HPAGE_SIZE;
	return size;
}

unsigned long pud_leaf_size(pud_t pud) { return 1UL << tte_to_shift(*(pte_t *)&pud); }
unsigned long pmd_leaf_size(pmd_t pmd) { return 1UL << tte_to_shift(*(pte_t *)&pmd); }
unsigned long pte_leaf_size(pte_t pte) { return 1UL << tte_to_shift(pte); }

pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

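	/* For PUD- or PMD-sized pages the huge TTE is stored directly in
	 * that level's entry, so no lower tables are allocated.
	 */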
	pgd = pgd_offset(mm, addr);
	p4d = p4d_offset(pgd, addr);
	pud = pud_alloc(mm, p4d, addr);
	if (!pud)
		return NULL;
	if (sz >= PUD_SIZE)
		return (pte_t *)pud;
	pmd = pmd_alloc(mm, pud, addr);
	if (!pmd)
		return NULL;
	if (sz >= PMD_SIZE)
		return (pte_t *)pmd;
	return pte_alloc_map(mm, pmd, addr);
}

pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(mm, addr);
	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return NULL;
	if (is_hugetlb_pud(*pud))
		return (pte_t *)pud;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return NULL;
	if (is_hugetlb_pmd(*pmd))
		return (pte_t *)pmd;
	return pte_offset_map(pmd, addr);
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	unsigned int nptes, orig_shift, shift;
	unsigned long i, size;
	pte_t orig;

	size = huge_tte_to_size(entry);

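	/* A huge mapping is written as nptes consecutive entries at the
	 * page-table level that matches its size.
	 */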
	shift = PAGE_SHIFT;
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.hugetlb_pte_count += nptes;

	addr &= ~(size - 1);
	orig = *ptep;
	orig_shift = pte_none(orig) ? PAGE_SHIFT : huge_tte_to_shift(orig);

	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(pte_val(entry) + (i << shift));

	maybe_tlb_batch_add(mm, addr, ptep, orig, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, orig, 0,
				    orig_shift);
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	unsigned int i, nptes, orig_shift, shift;
	unsigned long size;
	pte_t entry;

	entry = *ptep;
	size = huge_tte_to_size(entry);

	shift = PAGE_SHIFT;
	if (size >= PUD_SIZE)
		shift = PUD_SHIFT;
	else if (size >= PMD_SIZE)
		shift = PMD_SHIFT;
	else
		shift = PAGE_SHIFT;

	nptes = size >> shift;
	orig_shift = pte_none(entry) ? PAGE_SHIFT : huge_tte_to_shift(entry);

	if (pte_present(entry))
		mm->context.hugetlb_pte_count -= nptes;

	addr &= ~(size - 1);
	for (i = 0; i < nptes; i++)
		ptep[i] = __pte(0UL);

	maybe_tlb_batch_add(mm, addr, ptep, entry, 0, orig_shift);
	/* An HPAGE_SIZE'ed page is composed of two REAL_HPAGE_SIZE'ed pages */
	if (size == HPAGE_SIZE)
		maybe_tlb_batch_add(mm, addr + REAL_HPAGE_SIZE, ptep, entry, 0,
				    orig_shift);

	return entry;
}

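/* A huge entry is one with its HUGE bit set, or a non-none entry that
 * is not a valid TTE (e.g. a migration entry).
 */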
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_VALID|_PAGE_PMD_HUGE)) != _PAGE_VALID;
}

int pud_huge(pud_t pud)
{
	return !pud_none(pud) &&
		(pud_val(pud) & (_PAGE_VALID|_PAGE_PUD_HUGE)) != _PAGE_VALID;
}

static void hugetlb_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
			   unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);
	pte_free_tlb(tlb, token, addr);
	mm_dec_nr_ptes(tlb->mm);
}

static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pmd_t *pmd;
	unsigned long next;
	unsigned long start;

	start = addr;
	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (pmd_none(*pmd))
			continue;
		if (is_hugetlb_pmd(*pmd))
			pmd_clear(pmd);
		else
			hugetlb_free_pte_range(tlb, pmd, addr);
	} while (pmd++, addr = next, addr != end);

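	/* Only free the PMD table if the whole PUD span fits inside the
	 * [floor, ceiling) range.
	 */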
	start &= PUD_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PUD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pmd = pmd_offset(pud, start);
	pud_clear(pud);
	pmd_free_tlb(tlb, pmd, start);
	mm_dec_nr_pmds(tlb->mm);
}

static void hugetlb_free_pud_range(struct mmu_gather *tlb, p4d_t *p4d,
				   unsigned long addr, unsigned long end,
				   unsigned long floor, unsigned long ceiling)
{
	pud_t *pud;
	unsigned long next;
	unsigned long start;

	start = addr;
	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		if (is_hugetlb_pud(*pud))
			pud_clear(pud);
		else
			hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
					       ceiling);
	} while (pud++, addr = next, addr != end);

	start &= PGDIR_MASK;
	if (start < floor)
		return;
	if (ceiling) {
		ceiling &= PGDIR_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		return;

	pud = pud_offset(p4d, start);
	p4d_clear(p4d);
	pud_free_tlb(tlb, pud, start);
	mm_dec_nr_puds(tlb->mm);
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			    unsigned long addr, unsigned long end,
			    unsigned long floor, unsigned long ceiling)
{
	pgd_t *pgd;
	p4d_t *p4d;
	unsigned long next;

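	/* Trim the range so we never free page tables that floor or
	 * ceiling say must be preserved.
	 */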
	addr &= PMD_MASK;
	if (addr < floor) {
		addr += PMD_SIZE;
		if (!addr)
			return;
	}
	if (ceiling) {
		ceiling &= PMD_MASK;
		if (!ceiling)
			return;
	}
	if (end - 1 > ceiling - 1)
		end -= PMD_SIZE;
	if (addr > end - 1)
		return;

	pgd = pgd_offset(tlb->mm, addr);
	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		if (p4d_none_or_clear_bad(p4d))
			continue;
		hugetlb_free_pud_range(tlb, p4d, addr, next, floor, ceiling);
	} while (p4d++, addr = next, addr != end);
}
v3.5.6

/*
 * SPARC64 Huge TLB page support.
 *
 * Copyright (C) 2002, 2003, 2006 David S. Miller (davem@davemloft.net)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>

/* Slightly simplified from the non-hugepage variant because by
 * definition we don't have to worry about any page coloring stuff
 */
#define VA_EXCLUDE_START (0x0000080000000000UL - (1UL << 32UL))
#define VA_EXCLUDE_END   (0xfffff80000000000UL + (1UL << 32UL))

static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
							unsigned long addr,
							unsigned long len,
							unsigned long pgoff,
							unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;
	unsigned long start_addr;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;
	if (unlikely(len >= VA_EXCLUDE_START))
		return -ENOMEM;

	if (len > mm->cached_hole_size) {
		start_addr = addr = mm->free_area_cache;
	} else {
		start_addr = addr = TASK_UNMAPPED_BASE;
		mm->cached_hole_size = 0;
	}

	task_size -= len;

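	/* Walk the VMA list upward from start_addr, hopping over the
	 * sparc64 VA hole, until a large enough gap is found.
	 */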
full_search:
	addr = ALIGN(addr, HPAGE_SIZE);

	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
		/* At this point:  (!vma || addr < vma->vm_end). */
		if (addr < VA_EXCLUDE_START &&
		    (addr + len) >= VA_EXCLUDE_START) {
			addr = VA_EXCLUDE_END;
			vma = find_vma(mm, VA_EXCLUDE_END);
		}
		if (unlikely(task_size < addr)) {
			if (start_addr != TASK_UNMAPPED_BASE) {
				start_addr = addr = TASK_UNMAPPED_BASE;
				mm->cached_hole_size = 0;
				goto full_search;
			}
			return -ENOMEM;
		}
		if (likely(!vma || addr + len <= vma->vm_start)) {
			/*
			 * Remember the place where we stopped the search:
			 */
			mm->free_area_cache = addr + len;
			return addr;
		}
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		addr = ALIGN(vma->vm_end, HPAGE_SIZE);
	}
}

static unsigned long
hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
				  const unsigned long len,
				  const unsigned long pgoff,
				  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;

	/* This should only ever run for 32-bit processes.  */
	BUG_ON(!test_thread_flag(TIF_32BIT));

	/* check if free_area_cache is useful for us */
	if (len <= mm->cached_hole_size) {
		mm->cached_hole_size = 0;
		mm->free_area_cache = mm->mmap_base;
	}

	/* either no address requested or can't fit in requested address hole */
	addr = mm->free_area_cache & HPAGE_MASK;

	/* make sure it can fit in the remaining address space */
	if (likely(addr > len)) {
		vma = find_vma(mm, addr-len);
		if (!vma || addr <= vma->vm_start) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr-len);
		}
	}

	if (unlikely(mm->mmap_base < len))
		goto bottomup;

	addr = (mm->mmap_base-len) & HPAGE_MASK;

	do {
		/*
		 * Lookup failure means no vma is above this address,
		 * else if new region fits below vma->vm_start,
		 * return with success:
		 */
		vma = find_vma(mm, addr);
		if (likely(!vma || addr+len <= vma->vm_start)) {
			/* remember the address as a hint for next time */
			return (mm->free_area_cache = addr);
		}

		/* remember the largest hole we saw so far */
		if (addr + mm->cached_hole_size < vma->vm_start)
			mm->cached_hole_size = vma->vm_start - addr;

		/* try just below the current vma->vm_start */
		addr = (vma->vm_start-len) & HPAGE_MASK;
	} while (likely(len < vma->vm_start));

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	mm->cached_hole_size = ~0UL;
	mm->free_area_cache = TASK_UNMAPPED_BASE;
	addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
	/*
	 * Restore the topdown base:
	 */
	mm->free_area_cache = mm->mmap_base;
	mm->cached_hole_size = ~0UL;

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long task_size = TASK_SIZE;

	if (test_thread_flag(TIF_32BIT))
		task_size = STACK_TOP32;

	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (len > task_size)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, HPAGE_SIZE);
		vma = find_vma(mm, addr);
		if (task_size - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	/* We must align the address, because our caller will run
	 * set_huge_pte_at() on whatever we return, which writes out
	 * all of the sub-ptes for the hugepage range.  So we have
	 * to give it the first such sub-pte.
	 */
	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	pud = pud_alloc(mm, pgd, addr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, addr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, addr);
	}
	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	addr &= HPAGE_MASK;

	pgd = pgd_offset(mm, addr);
	if (!pgd_none(*pgd)) {
		pud = pud_offset(pgd, addr);
		if (!pud_none(*pud)) {
			pmd = pmd_offset(pud, addr);
			if (!pmd_none(*pmd))
				pte = pte_offset_map(pmd, addr);
		}
	}
	return pte;
}

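/* sparc64 never shares hugepage PMDs, so there is nothing to unshare. */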
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry)
{
	int i;

	if (!pte_present(*ptep) && pte_present(entry))
		mm->context.huge_pte_count++;

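	/* Write one PAGE_SIZE pte for every sub-page of the huge page. */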
	addr &= HPAGE_MASK;
	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		set_pte_at(mm, addr, ptep, entry);
		ptep++;
		addr += PAGE_SIZE;
		pte_val(entry) += PAGE_SIZE;
	}
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep)
{
	pte_t entry;
	int i;

	entry = *ptep;
	if (pte_present(entry))
		mm->context.huge_pte_count--;

	addr &= HPAGE_MASK;

	for (i = 0; i < (1 << HUGETLB_PAGE_ORDER); i++) {
		pte_clear(mm, addr, ptep);
		addr += PAGE_SIZE;
		ptep++;
	}

	return entry;
}

struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}

static void context_reload(void *__data)
{
	struct mm_struct *mm = __data;

	if (mm == current->mm)
		load_secondary_context(mm);
}

void hugetlb_prefault_arch_hook(struct mm_struct *mm)
{
	struct tsb_config *tp = &mm->context.tsb_block[MM_TSB_HUGE];

	if (likely(tp->tsb != NULL))
		return;

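	/* First huge mapping in this mm: allocate the huge-page TSB and
	 * make it visible to all CPUs using this address space.
	 */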
	tsb_grow(mm, MM_TSB_HUGE, 0);
	tsb_context_switch(mm);
	smp_tsb_sync(mm);

	/* On UltraSPARC-III+ and later, configure the second half of
	 * the Data-TLB for huge pages.
	 */
	if (tlb_type == cheetah_plus) {
		unsigned long ctx;

		spin_lock(&ctx_alloc_lock);
		ctx = mm->context.sparc64_ctx_val;
		ctx &= ~CTX_PGSZ_MASK;
		ctx |= CTX_PGSZ_BASE << CTX_PGSZ0_SHIFT;
		ctx |= CTX_PGSZ_HUGE << CTX_PGSZ1_SHIFT;

		if (ctx != mm->context.sparc64_ctx_val) {
			/* When changing the page size fields, we
			 * must perform a context flush so that no
			 * stale entries match.  This flush must
			 * occur with the original context register
			 * settings.
			 */
			do_flush_tlb_mm(mm);

			/* Reload the context register of all processors
			 * also executing in this address space.
			 */
			mm->context.sparc64_ctx_val = ctx;
			on_each_cpu(context_reload, mm, 0);
		}
		spin_unlock(&ctx_alloc_lock);
	}
}