v5.9
// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address, vma_mmu_pagesize(vma));

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned)
 * hugetlb entry. Otherwise, it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

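/*
 * Illustrative sketch (not from this file): one way a page-table walk
 * can use pud_huge()/pmd_huge() to stop descending when it reaches a
 * huge (PSE) entry, since such an entry maps memory directly and does
 * not point to a lower-level table. The function name is hypothetical;
 * the offset helpers are the standard kernel page-table accessors.
 */
static pmd_t *example_find_pmd(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d;
	pud_t *pud;

	if (pgd_none(*pgd))
		return NULL;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return NULL;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud) || pud_huge(*pud))
		return NULL;	/* hole, or a 1 GiB mapping: no PMD level */
	/* callers must still test pmd_huge() before walking below the PMD */
	return pmd_offset(pud, addr);
}
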
#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

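	/*
	 * The alignment mask asks vm_unmapped_area() for a huge-page-aligned
	 * address: with 2 MiB pages, huge_page_mask(h) is ~0x1fffff, so
	 * PAGE_MASK & ~huge_page_mask(h) is 0x1ff000, the page-order bits
	 * that must be zero in the returned address.
	 */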
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
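	/*
	 * vm_unmapped_area() returns either a page-aligned address or a
	 * negative errno, which is never page-aligned, so the low-bits
	 * test below is a failure check.
	 */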
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */
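
/*
 * Illustrative user-space sketch (not from this file): the functions
 * above choose the address for hugetlb mappings created without
 * MAP_FIXED, e.g. an anonymous MAP_HUGETLB mapping. A minimal caller,
 * assuming a 2 MiB default huge page size and pre-reserved huge pages:
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 2UL << 20;	// one 2 MiB huge page
 *		void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB,
 *			       -1, 0);
 *		if (p == MAP_FAILED) {
 *			perror("mmap");
 *			return 1;
 *		}
 *		munmap(p, len);
 *		return 0;
 *	}
 */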

#ifdef CONFIG_X86_64
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif
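
/*
 * Illustrative user-space sketch (not from this file): on a CPU with
 * X86_FEATURE_GBPAGES, PUD_SIZE (1 GiB) passes arch_hugetlb_valid_size(),
 * so gigantic pages can be reserved (e.g. hugepagesz=1G hugepages=1 on
 * the kernel command line) and then mapped:
 *
 *	#include <sys/mman.h>
 *	#include <linux/mman.h>		// MAP_HUGE_1GB
 *
 *	void *p = mmap(NULL, 1UL << 30, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB |
 *		       MAP_HUGE_1GB, -1, 0);
 */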
v4.6
 
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#if 0	/* This is just for testing */
struct page *
follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
{
	unsigned long vpfn = address / PAGE_SIZE;
	struct page *page;
	struct vm_area_struct *vma;
	pte_t *pte;

	vma = find_vma(mm, address);
	if (!vma || !is_vm_hugetlb_page(vma))
		return ERR_PTR(-EINVAL);

	pte = huge_pte_offset(mm, address);

	/* hugetlb should be locked, and hence, prefaulted */
	WARN_ON(!pte || pte_none(*pte));

	page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)];

	WARN_ON(!PageHead(page));

	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

#else

/*
 * pmd_huge() returns 1 if @pmd is a hugetlb-related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned)
 * hugetlb entry. Otherwise, it returns 0.
 */
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

int pud_huge(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PSE);
}
#endif

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_legacy_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
static __init int setup_hugepagesz(char *opt)
{
	unsigned long ps = memparse(opt, &opt);
	if (ps == PMD_SIZE) {
		hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT);
	} else if (ps == PUD_SIZE && cpu_has_gbpages) {
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	} else {
		printk(KERN_ERR "hugepagesz: Unsupported page size %lu M\n",
			ps >> 20);
		return 0;
	}
	return 1;
}
__setup("hugepagesz=", setup_hugepagesz);
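
/*
 * Illustrative usage note (not from this file): memparse() accepts a
 * size with an optional K/M/G suffix, so the handler above sees
 * ps == PMD_SIZE or PUD_SIZE for kernel command lines such as:
 *
 *	hugepagesz=2M hugepages=64
 *	hugepagesz=1G hugepages=2
 *
 * Any other size, e.g. hugepagesz=4M, takes the error branch and is
 * rejected.
 */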

#if (defined(CONFIG_MEMORY_ISOLATION) && defined(CONFIG_COMPACTION)) || defined(CONFIG_CMA)
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
	if (cpu_has_gbpages && !size_to_hstate(1UL << PUD_SHIFT))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif
#endif