v6.2
// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry. Otherwise, returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

/*
 * pud_huge() returns 1 if @pud is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry. Otherwise, returns 0.
 */
int pud_huge(pud_t pud)
{
#if CONFIG_PGTABLE_LEVELS > 2
	return !pud_none(pud) &&
		(pud_val(pud) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
#else
	return 0;
#endif
}

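/*
 * Illustrative note (editorial sketch, not part of the kernel file): the
 * (_PAGE_PRESENT|_PAGE_PSE) != _PAGE_PRESENT test above catches exactly
 * the entries hugetlb cares about. Working through the three cases:
 *
 *   present table entry:   PRESENT=1, PSE=0 -> masked value == _PAGE_PRESENT -> 0
 *   present huge mapping:  PRESENT=1, PSE=1 -> masked value != _PAGE_PRESENT -> 1
 *   migration/hwpoison:    PRESENT=0       -> masked value != _PAGE_PRESENT -> 1
 *
 * Only a completely empty entry, filtered out by pmd_none()/pud_none(),
 * is reported as non-huge without looking at the bits.
 */
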
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

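/*
 * Worked example (illustrative, not from the kernel source): for 2 MiB
 * huge pages on x86-64, PAGE_MASK is ~0xfff and huge_page_mask(h) is
 * ~0x1fffff, so align_mask = PAGE_MASK & ~huge_page_mask(h) = 0x1ff000.
 * vm_unmapped_area() then only returns addresses whose low 21 bits are
 * clear, i.e. addresses aligned to the 2 MiB huge page size.
 */
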
static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif
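
The hugetlb_get_unmapped_area() path above is what ultimately services an anonymous mmap() with MAP_HUGETLB. A minimal userspace sketch of that path, assuming a kernel with CONFIG_HUGETLB_PAGE and huge pages reserved via /proc/sys/vm/nr_hugepages (the 2 MiB length matches the x86-64 default huge page size; it is an illustrative choice, not a requirement of the code above):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 2 * 1024 * 1024;	/* one default x86-64 huge page */

	/* NULL hint: placement falls to hugetlb_get_unmapped_area() above */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap(MAP_HUGETLB)");	/* e.g. no huge pages reserved */
		return 1;
	}

	printf("huge mapping at %p\n", p);	/* 2 MiB-aligned, per align_mask */
	memset(p, 0, len);			/* touch it so it is faulted in */
	munmap(p, len);
	return 0;
}

Passing a non-NULL hint instead exercises the mmap_address_hint_valid() and find_vma() checks in hugetlb_get_unmapped_area() before the kernel falls back to a fresh bottom-up or top-down search.
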
v5.4
// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/elf.h>
#include <asm/mpx.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry. Otherwise, returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	addr = mpx_unmapped_area_check(addr, len, flags);
	if (IS_ERR_VALUE(addr))
		return addr;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES)) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		hugetlb_bad_size();
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
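
/*
 * Usage note (illustrative, not part of the kernel file): this handler
 * runs once per "hugepagesz=" token on the kernel command line, e.g.
 * booting with "hugepagesz=1G hugepages=4" registers the 1 GiB hstate
 * (requires the GBPAGES CPU feature) and reserves four such pages.
 */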

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES) && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif
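
Both setup_hugepagesz() (v5.4) and arch_hugetlb_valid_size() (v6.2) feed hugetlb_add_hstate(), and every registered hstate appears as a hugepages-<size>kB directory under /sys/kernel/mm/hugepages, the stable hugetlb sysfs interface. A small userspace sketch that lists the registered sizes (error handling kept minimal):

#include <stdio.h>
#include <dirent.h>

int main(void)
{
	/* one hugepages-<size>kB directory per registered hstate */
	DIR *d = opendir("/sys/kernel/mm/hugepages");
	struct dirent *e;

	if (!d) {
		perror("opendir");
		return 1;
	}
	while ((e = readdir(d)) != NULL) {
		unsigned long kb;

		/* entry names look like "hugepages-2048kB" */
		if (sscanf(e->d_name, "hugepages-%lukB", &kb) == 1)
			printf("huge page size: %lu kB\n", kb);
	}
	closedir(d);
	return 0;
}

On a stock x86-64 box this prints 2048 kB, plus 1048576 kB when the GBPAGES feature is present and the 1 GiB hstate has been registered.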