// SPDX-License-Identifier: GPL-2.0
/*
 * IA-64 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002-2004 Rohit Seth <rohit.seth@intel.com>
 * Copyright (C) 2003-2004 Ken Chen <kenneth.w.chen@intel.com>
 *
 * Sep, 2003: add numa support
 * Feb, 2004: dynamic hugetlb page size via boot parameter
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/log2.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

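/*
 * Log2 of the active huge page size.  Defaults to HPAGE_SHIFT_DEFAULT and
 * may be overridden at boot via the "hugepagesz=" parameter handled below.
 */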
unsigned int hpage_shift = HPAGE_SHIFT_DEFAULT;
EXPORT_SYMBOL(hpage_shift);

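/*
 * Huge pages live in the dedicated RGN_HPAGE region.  htlbpage_to_page()
 * scales an address in that region down by HPAGE_SIZE/PAGE_SIZE, so each
 * huge page ends up backed by a single PTE found through an ordinary
 * page-table walk.
 */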
pte_t *
huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	p4d = p4d_offset(pgd, taddr);
	pud = pud_alloc(mm, p4d, taddr);
	if (pud) {
		pmd = pmd_alloc(mm, pud, taddr);
		if (pmd)
			pte = pte_alloc_map(mm, pmd, taddr);
	}
	return pte;
}

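/*
 * Look up the PTE for a huge-page address without allocating any
 * intermediate levels; returns NULL if any level of the walk is absent.
 */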
pte_t *
huge_pte_offset (struct mm_struct *mm, unsigned long addr, unsigned long sz)
{
	unsigned long taddr = htlbpage_to_page(addr);
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, taddr);
	if (pgd_present(*pgd)) {
		p4d = p4d_offset(pgd, taddr);
		if (p4d_present(*p4d)) {
			pud = pud_offset(p4d, taddr);
			if (pud_present(*pud)) {
				pmd = pmd_offset(pud, taddr);
				if (pmd_present(*pmd))
					pte = pte_offset_map(pmd, taddr);
			}
		}
	}

	return pte;
}

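/* Mark a huge PTE as present (_PAGE_P is the ia64 present bit). */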
#define mk_pte_huge(entry) { pte_val(entry) |= _PAGE_P; }

/*
 * Don't actually need to do any preparation, but need to make sure
 * the address is in the right region.
 */
int prepare_hugepage_range(struct file *file,
			unsigned long addr, unsigned long len)
{
	if (len & ~HPAGE_MASK)
		return -EINVAL;
	if (addr & ~HPAGE_MASK)
		return -EINVAL;
	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return -EINVAL;

	return 0;
}

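/*
 * Resolve a huge-page virtual address to its struct page.  Only addresses
 * in RGN_HPAGE are accepted; the sub-page offset within the huge page is
 * added back so the returned page matches the exact address.
 */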
struct page *follow_huge_addr(struct mm_struct *mm, unsigned long addr, int write)
{
	struct page *page;
	pte_t *ptep;

	if (REGION_NUMBER(addr) != RGN_HPAGE)
		return ERR_PTR(-EINVAL);

	ptep = huge_pte_offset(mm, addr, HPAGE_SIZE);
	if (!ptep || pte_none(*ptep))
		return NULL;
	page = pte_page(*ptep);
	page += ((addr & ~HPAGE_MASK) >> PAGE_SHIFT);
	return page;
}

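/*
 * Huge pages are never represented at the PMD or PUD level on ia64; they
 * are ordinary PTEs in the scaled-down RGN_HPAGE page tables, so both
 * helpers always report "not huge".
 */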
int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

void hugetlb_free_pgd_range(struct mmu_gather *tlb,
			unsigned long addr, unsigned long end,
			unsigned long floor, unsigned long ceiling)
{
	/*
	 * This is called to free hugetlb page tables.
	 *
	 * The offset of these addresses from the base of the hugetlb
	 * region must be scaled down by HPAGE_SIZE/PAGE_SIZE so that
	 * the standard free_pgd_range will free the right page tables.
	 *
	 * If floor and ceiling are also in the hugetlb region, they
	 * must likewise be scaled down; but if outside, left unchanged.
	 */

	addr = htlbpage_to_page(addr);
	end = htlbpage_to_page(end);
	if (REGION_NUMBER(floor) == RGN_HPAGE)
		floor = htlbpage_to_page(floor);
	if (REGION_NUMBER(ceiling) == RGN_HPAGE)
		ceiling = htlbpage_to_page(ceiling);

	free_pgd_range(tlb, addr, end, floor, ceiling);
}

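/*
 * Find an unmapped, HPAGE_SIZE-aligned range inside the dedicated
 * huge-page region (RGN_HPAGE), searching bottom-up from the hint
 * address, or from the start of the region if the hint is unusable.
 */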
unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct vm_unmapped_area_info info;

	if (len > RGN_MAP_LIMIT)
		return -ENOMEM;
	if (len & ~HPAGE_MASK)
		return -EINVAL;

	/* Handle MAP_FIXED */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	/* This code assumes that RGN_HPAGE != 0. */
	if ((REGION_NUMBER(addr) != RGN_HPAGE) || (addr & (HPAGE_SIZE - 1)))
		addr = HPAGE_REGION_BASE;

	info.flags = 0;
	info.length = len;
	info.low_limit = addr;
	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

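/*
 * Parse the "hugepagesz=" boot parameter.  The requested size must be a
 * power of two larger than PAGE_SIZE, small enough for the buddy allocator
 * (below MAX_ORDER), and present in the mask of TLB-insertable page sizes
 * reported by PAL.  On success the huge-page region register is
 * reprogrammed with the new preferred page size.
 */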
static int __init hugetlb_setup_sz(char *str)
{
	u64 tr_pages;
	unsigned long long size;

	if (ia64_pal_vm_page_size(&tr_pages, NULL) != 0)
		/*
		 * PAL call failed; shouldn't happen, but fall back to a
		 * default mask of supported page sizes just in case.
		 */
		tr_pages = 0x15557000UL;

	size = memparse(str, &str);
	if (*str || !is_power_of_2(size) || !(tr_pages & size) ||
		size <= PAGE_SIZE ||
		size >= (1UL << PAGE_SHIFT << MAX_ORDER)) {
		printk(KERN_WARNING "Invalid huge page size specified\n");
		return 1;
	}

	hpage_shift = __ffs(size);
	/*
	 * The boot CPU has already executed ia64_mmu_init and programmed the
	 * huge-page region register with HPAGE_SHIFT_DEFAULT; override it
	 * here with the new page shift.
	 */
	ia64_set_rr(HPAGE_REGION_BASE, hpage_shift << 2);
	return 0;
}
early_param("hugepagesz", hugetlb_setup_sz);