arch/s390/mm/hugetlbpage.c (Linux v6.2)
// SPDX-License-Identifier: GPL-2.0
/*
 *  IBM System z Huge TLB Page Support for Kernel.
 *
 *    Copyright IBM Corp. 2007,2020
 *    Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#define KMSG_COMPONENT "hugetlb"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <asm/pgalloc.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/sched/mm.h>
#include <linux/security.h>

/*
 * If the bit selected by single-bit bitmask "a" is set within "x", move
 * it to the position indicated by single-bit bitmask "b".
 */
#define move_set_bit(x, a, b)	(((x) & (a)) >> ilog2(a) << ilog2(b))
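/*
 * Worked example with illustrative mask values: for a = 0x002 (bit 1)
 * and b = 0x400 (bit 10), move_set_bit(x, a, b) expands to
 * ((x & 0x002) >> 1) << 10, so bit 1 of x, if set, reappears at
 * bit 10 and every other bit of x is discarded.
 */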

static inline unsigned long __pte_to_rste(pte_t pte)
{
	unsigned long rste;

	/*
	 * Convert encoding		  pte bits	pmd / pud bits
	 *				lIR.uswrdy.p	dy..R...I...wr
	 * empty			010.000000.0 -> 00..0...1...00
	 * prot-none, clean, old	111.000000.1 -> 00..1...1...00
	 * prot-none, clean, young	111.000001.1 -> 01..1...1...00
	 * prot-none, dirty, old	111.000010.1 -> 10..1...1...00
	 * prot-none, dirty, young	111.000011.1 -> 11..1...1...00
	 * read-only, clean, old	111.000100.1 -> 00..1...1...01
	 * read-only, clean, young	101.000101.1 -> 01..1...0...01
	 * read-only, dirty, old	111.000110.1 -> 10..1...1...01
	 * read-only, dirty, young	101.000111.1 -> 11..1...0...01
	 * read-write, clean, old	111.001100.1 -> 00..1...1...11
	 * read-write, clean, young	101.001101.1 -> 01..1...0...11
	 * read-write, dirty, old	110.001110.1 -> 10..0...1...11
	 * read-write, dirty, young	100.001111.1 -> 11..0...0...11
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (pte_present(pte)) {
		rste = pte_val(pte) & PAGE_MASK;
		rste |= move_set_bit(pte_val(pte), _PAGE_READ,
				     _SEGMENT_ENTRY_READ);
		rste |= move_set_bit(pte_val(pte), _PAGE_WRITE,
				     _SEGMENT_ENTRY_WRITE);
		rste |= move_set_bit(pte_val(pte), _PAGE_INVALID,
				     _SEGMENT_ENTRY_INVALID);
		rste |= move_set_bit(pte_val(pte), _PAGE_PROTECT,
				     _SEGMENT_ENTRY_PROTECT);
		rste |= move_set_bit(pte_val(pte), _PAGE_DIRTY,
				     _SEGMENT_ENTRY_DIRTY);
		rste |= move_set_bit(pte_val(pte), _PAGE_YOUNG,
				     _SEGMENT_ENTRY_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		rste |= move_set_bit(pte_val(pte), _PAGE_SOFT_DIRTY,
				     _SEGMENT_ENTRY_SOFT_DIRTY);
#endif
		rste |= move_set_bit(pte_val(pte), _PAGE_NOEXEC,
				     _SEGMENT_ENTRY_NOEXEC);
	} else
		rste = _SEGMENT_ENTRY_EMPTY;
	return rste;
}

static inline pte_t __rste_to_pte(unsigned long rste)
{
	unsigned long pteval;
	int present;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		present = pud_present(__pud(rste));
	else
		present = pmd_present(__pmd(rste));

	/*
	 * Convert encoding		pmd / pud bits	    pte bits
	 *				dy..R...I...wr	  lIR.uswrdy.p
	 * empty			00..0...1...00 -> 010.000000.0
	 * prot-none, clean, old	00..1...1...00 -> 111.000000.1
	 * prot-none, clean, young	01..1...1...00 -> 111.000001.1
	 * prot-none, dirty, old	10..1...1...00 -> 111.000010.1
	 * prot-none, dirty, young	11..1...1...00 -> 111.000011.1
	 * read-only, clean, old	00..1...1...01 -> 111.000100.1
	 * read-only, clean, young	01..1...0...01 -> 101.000101.1
	 * read-only, dirty, old	10..1...1...01 -> 111.000110.1
	 * read-only, dirty, young	11..1...0...01 -> 101.000111.1
	 * read-write, clean, old	00..1...1...11 -> 111.001100.1
	 * read-write, clean, young	01..1...0...11 -> 101.001101.1
	 * read-write, dirty, old	10..0...1...11 -> 110.001110.1
	 * read-write, dirty, young	11..0...0...11 -> 100.001111.1
	 * HW-bits: R read-only, I invalid
	 * SW-bits: p present, y young, d dirty, r read, w write, s special,
	 *	    u unused, l large
	 */
	if (present) {
		pteval = rste & _SEGMENT_ENTRY_ORIGIN_LARGE;
		pteval |= _PAGE_LARGE | _PAGE_PRESENT;
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_READ, _PAGE_READ);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_WRITE, _PAGE_WRITE);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_INVALID, _PAGE_INVALID);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_PROTECT, _PAGE_PROTECT);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_DIRTY, _PAGE_DIRTY);
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_YOUNG, _PAGE_YOUNG);
#ifdef CONFIG_MEM_SOFT_DIRTY
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_SOFT_DIRTY, _PAGE_SOFT_DIRTY);
#endif
		pteval |= move_set_bit(rste, _SEGMENT_ENTRY_NOEXEC, _PAGE_NOEXEC);
	} else
		pteval = _PAGE_INVALID;
	return __pte(pteval);
}

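/*
 * Storage keys for a huge page are initialized lazily: the first time
 * a valid huge mapping is created for an mm that uses storage keys,
 * initialize the keys of the whole range and remember this via
 * PG_arch_1 on the head page.
 */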
static void clear_huge_pte_skeys(struct mm_struct *mm, unsigned long rste)
{
	struct page *page;
	unsigned long size, paddr;

	if (!mm_uses_skeys(mm) ||
	    rste & _SEGMENT_ENTRY_INVALID)
		return;

	if ((rste & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		page = pud_page(__pud(rste));
		size = PUD_SIZE;
		paddr = rste & PUD_MASK;
	} else {
		page = pmd_page(__pmd(rste));
		size = PMD_SIZE;
		paddr = rste & PMD_MASK;
	}

	if (!test_and_set_bit(PG_arch_1, &page->flags))
		__storage_key_init_range(paddr, paddr + size - 1);
}

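/*
 * Install a huge mapping: convert the pte-style value to a segment or
 * region-third table entry, drop the no-exec bit on machines without
 * NX support, and tag 2G mappings with the region-third table type.
 */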
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte)
{
	unsigned long rste;

	rste = __pte_to_rste(pte);
	if (!MACHINE_HAS_NX)
		rste &= ~_SEGMENT_ENTRY_NOEXEC;

	/* Set correct table type for 2G hugepages */
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) {
		if (likely(pte_present(pte)))
			rste |= _REGION3_ENTRY_LARGE;
		rste |= _REGION_ENTRY_TYPE_R3;
	} else if (likely(pte_present(pte)))
		rste |= _SEGMENT_ENTRY_LARGE;

	clear_huge_pte_skeys(mm, rste);
	set_pte(ptep, __pte(rste));
}

pte_t huge_ptep_get(pte_t *ptep)
{
	return __rste_to_pte(pte_val(*ptep));
}

pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get(ptep);
	pmd_t *pmdp = (pmd_t *) ptep;
	pud_t *pudp = (pud_t *) ptep;

	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pudp_xchg_direct(mm, addr, pudp, __pud(_REGION3_ENTRY_EMPTY));
	else
		pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
	return pte;
}

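/*
 * Allocate the table entry that will back a huge mapping: the pud
 * itself for 2G (PUD_SIZE) pages, or a pmd for 1M (PMD_SIZE) pages.
 */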
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	p4dp = p4d_alloc(mm, pgdp, addr);
	if (p4dp) {
		pudp = pud_alloc(mm, p4dp, addr);
		if (pudp) {
			if (sz == PUD_SIZE)
				return (pte_t *) pudp;
			else if (sz == PMD_SIZE)
				pmdp = pmd_alloc(mm, pudp, addr);
		}
	}
	return (pte_t *) pmdp;
}

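/*
 * Walk the page tables without allocating: return the pud entry for a
 * large (2G) mapping, the pmd entry otherwise, or NULL when an upper
 * table level is not present.
 */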
pte_t *huge_pte_offset(struct mm_struct *mm,
		       unsigned long addr, unsigned long sz)
{
	pgd_t *pgdp;
	p4d_t *p4dp;
	pud_t *pudp;
	pmd_t *pmdp = NULL;

	pgdp = pgd_offset(mm, addr);
	if (pgd_present(*pgdp)) {
		p4dp = p4d_offset(pgdp, addr);
		if (p4d_present(*p4dp)) {
			pudp = pud_offset(p4dp, addr);
			if (pud_present(*pudp)) {
				if (pud_large(*pudp))
					return (pte_t *) pudp;
				pmdp = pmd_offset(pudp, addr);
			}
		}
	}
	return (pte_t *) pmdp;
}

int pmd_huge(pmd_t pmd)
{
	return pmd_large(pmd);
}

int pud_huge(pud_t pud)
{
	return pud_large(pud);
}

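/*
 * Supported huge page sizes follow the hardware facilities: EDAT1
 * provides 1M segment-level pages (PMD_SIZE), EDAT2 provides 2G
 * region-third-level pages (PUD_SIZE).
 */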
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (MACHINE_HAS_EDAT1 && size == PMD_SIZE)
		return true;
	else if (MACHINE_HAS_EDAT2 && size == PUD_SIZE)
		return true;
	else
		return false;
}

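/*
 * The unmapped-area searches below use PAGE_MASK & ~huge_page_mask(h)
 * as align_mask: exactly the page-offset bits inside one huge page,
 * which vm_unmapped_area() forces to zero (align_offset is 0) so that
 * the returned address is huge-page aligned.
 */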
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = current->mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr0, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;
	unsigned long addr;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
	info.high_limit = current->mm->mmap_base;
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		goto check_asce_limit;
	}

	if (addr) {
		addr = ALIGN(addr, huge_page_size(h));
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	if (mm->get_unmapped_area == arch_get_unmapped_area)
		addr = hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		addr = hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}