v5.9: arch/ia64/mm/hugetlbpage.c
// SPDX-License-Identifier: GPL-2.0
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);

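/*
 * Find, or create, the leaf PTE mapping the huge page that contains
 * user address @addr.  The address is first rescaled into the normal
 * page-table address space by htlbpage_to_page(); the walk below then
 * allocates any missing intermediate levels on the way down.
 */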
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	p4d = p4d_offset(pgd, taddr);
	pud = pud_alloc(mm, p4d, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

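/*
 * Lookup-only counterpart of huge_pte_alloc(): walk the rescaled
 * address down to the leaf PTE, returning NULL as soon as any level
 * is absent.  The p4d level is folded away on ia64, so p4d_offset()
 * is effectively a pass-through here.
 */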
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		p4d = p4d_offset(pgd, addr);
		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, taddr);
			if (pud_present(*pud)) {
				pmd = pmd_offset(pud, taddr);
				if (pmd_present(*pmd))
					pte = pte_offset_map(pmd, taddr);
			}
		}
	}

	return pte;
}

#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

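/*
 * Translate a user huge-page address into its struct page.  The PTE
 * covers the whole huge page, so index from its first base page by
 * the offset of @addr within the huge page.
 */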
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}
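
/*
 * ia64 does not map huge pages with pmd- or pud-level leaf entries;
 * hugetlb lives in its own region with a scaled page-table walk, so
 * both predicates always report false.
 */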
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

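	/*
	 * Per its usual ia64 definition, htlbpage_to_page() keeps the
	 * region number and shifts the region offset right by
	 * HPAGE_SHIFT - PAGE_SHIFT, i.e. divides it by
	 * HPAGE_SIZE/PAGE_SIZE.
	 */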
	addr = htlbpage_to_page(addr);
	end  = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

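/*
 * Pick an unmapped, HPAGE_SIZE-aligned range inside the dedicated
 * hugetlb region.  A hint outside RGN_HPAGE, or one that is not
 * huge-page aligned, is discarded and the search restarts at the
 * base of the region.
 */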
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;

	info.flags = 0;
	info.length = len;
	info.low_limit = addr;
	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

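/*
 * Parse the "hugepagesz=" boot parameter (e.g. hugepagesz=256M on the
 * kernel command line).  The size must be a power of two that PAL
 * reports as a supported TLB page size, larger than the base page and
 * below the page allocator's MAX_ORDER limit; the hugetlb region
 * register is then reprogrammed with the new page-size shift.
 */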
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU already executed ia64_mmu_init() using
	 * HPAGE_SHIFT_DEFAULT; override the hugetlb region register
	 * here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);
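
For context, here is a minimal userspace sketch of the path these hooks
serve: an anonymous MAP_HUGETLB mapping (or a mapping of a file on a
hugetlbfs mount) goes through hugetlb_get_unmapped_area() above, and the
first touch of each huge page reaches huge_pte_alloc().  The 256 MB length
is an assumed example, not something this file defines; it must match a
huge page size configured on the running system, and enough huge pages
must be reserved for the mmap() to succeed.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* Must be a multiple of the huge page size; compare the
	 * (len & ~HPAGE_MASK) check in prepare_hugepage_range(). */
	size_t len = 256UL << 20;	/* 256 MB, assumed for illustration */

	/* With no hint, the kernel picks the placement through the
	 * architecture's hugetlb_get_unmapped_area(). */
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");		/* e.g. no huge pages reserved */
		return EXIT_FAILURE;
	}

	memset(p, 0, len);	/* touching faults in the huge PTEs */
	munmap(p, len);
	return EXIT_SUCCESS;
}
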
v3.1: arch/ia64/mm/hugetlbpage.c
 
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);

pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	pud = pud_alloc(mm, pgd, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, NULL, pmd, taddr);
	}
	return pte;
}

pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		pud = pud_offset(pgd, taddr);
		if (pud_present(*pud)) {
			pmd = pmd_offset(pud, taddr);
			if (pmd_present(*pmd))
				pte = pte_offset_map(pmd, taddr);
		}
	}

	return pte;
}

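/*
 * Huge pmd sharing is an optimization some architectures implement;
 * this stub opts ia64 out, so callers always see nothing to unshare.
 */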
int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

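/*
 * With pmd_huge() always false above, generic code never finds a
 * pmd-level huge page to follow; this stub only satisfies the
 * interface.
 */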
struct page *
follow_huge_pmd(struct mm_struct *mm, unsigned long address, pmd_t *pmd, int write)
{
	return NULL;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end  = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

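/*
 * Older variant of the v5.9 allocator earlier on this page: it
 * predates vm_unmapped_area(), so it walks the VMA list by hand
 * looking for an HPAGE_SIZE-aligned hole inside the hugetlb region.
 */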
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_area_struct *vmm;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;
	else
		addr = ALIGN(addr, HPAGE_SIZE);
	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
		/* At this point:  (!vmm || addr < vmm->vm_end). */
		if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
			return -ENOMEM;
		if (!vmm || (addr + len) <= vmm->vm_start)
			return addr;
		addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
	}
}

static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * shouldn't happen, but just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU already executed ia64_mmu_init() using
	 * HPAGE_SHIFT_DEFAULT; override the hugetlb region register
	 * here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);