/* SPDX-License-Identifier: GPL-2.0 */
/*
 * IBM System z Huge TLB Page Support for Kernel.
 *
 * Copyright IBM Corp. 2008
 * Author(s): Gerald Schaefer <gerald.schaefer@de.ibm.com>
 */

#ifndef _ASM_S390_HUGETLB_H
#define _ASM_S390_HUGETLB_H

#include <linux/pgtable.h>
#include <asm/page.h>

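/*
 * Huge pages are backed by hardware large pages: the EDAT1 facility
 * provides 1 MB pages at segment-table level, EDAT2 adds 2 GB pages at
 * region-third-table level. Base hugetlb support only needs EDAT1.
 */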
#define hugetlb_free_pgd_range		free_pgd_range
#define hugepages_supported()		(MACHINE_HAS_EDAT1)

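/*
 * Implemented in arch/s390/mm/hugetlbpage.c: a hugetlb "pte" is really
 * a segment or region-third table entry, so its bit layout has to be
 * converted on every store and load.
 */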
void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t pte);
pte_t huge_ptep_get(pte_t *ptep);
pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
			      unsigned long addr, pte_t *ptep);

/*
 * If the arch doesn't supply something else, assume that hugepage
 * size aligned regions are ok without further preparation.
 */
static inline int prepare_hugepage_range(struct file *file,
					 unsigned long addr, unsigned long len)
{
	struct hstate *h = hstate_file(file);

	if (len & ~huge_page_mask(h))
		return -EINVAL;
	if (addr & ~huge_page_mask(h))
		return -EINVAL;
	return 0;
}

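/*
 * PG_arch_1 is used on s390 to record that the storage keys of a page
 * have been initialized; clear it when the page leaves the pool so the
 * keys are set up again on next use.
 */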
static inline void arch_clear_hugepage_flags(struct page *page)
{
	clear_bit(PG_arch_1, &page->flags);
}
#define arch_clear_hugepage_flags arch_clear_hugepage_flags

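/*
 * The entry may sit in a region-third table (2 GB page) or a segment
 * table (1 MB page); choose the matching empty entry value based on
 * the entry type bits.
 */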
static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
				  pte_t *ptep, unsigned long sz)
{
	if ((pte_val(*ptep) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		set_pte(ptep, __pte(_REGION3_ENTRY_EMPTY));
	else
		set_pte(ptep, __pte(_SEGMENT_ENTRY_EMPTY));
}

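/*
 * No separate flush is needed: the out-of-line
 * huge_ptep_get_and_clear() already invalidates the TLB entry when it
 * clears the table entry.
 */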
static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
					  unsigned long address, pte_t *ptep)
{
	return huge_ptep_get_and_clear(vma->vm_mm, address, ptep);
}

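/*
 * Flag updates are done as clear + re-set so that concurrent hardware
 * table walks never see a half-updated entry; the same pattern is used
 * by huge_ptep_set_wrprotect() below.
 */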
static inline int huge_ptep_set_access_flags(struct vm_area_struct *vma,
					     unsigned long addr, pte_t *ptep,
					     pte_t pte, int dirty)
{
	int changed = !pte_same(huge_ptep_get(ptep), pte);

	if (changed) {
		huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
		set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
	}
	return changed;
}

static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
					   unsigned long addr, pte_t *ptep)
{
	pte_t pte = huge_ptep_get_and_clear(mm, addr, ptep);

	set_huge_pte_at(mm, addr, ptep, pte_wrprotect(pte));
}

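/*
 * mk_huge_pte() can use mk_pte() directly; the conversion to a
 * segment/region entry only happens later in set_huge_pte_at().
 */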
static inline pte_t mk_huge_pte(struct page *page, pgprot_t pgprot)
{
	return mk_pte(page, pgprot);
}

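/*
 * The remaining huge_pte_* helpers simply reuse the normal pte
 * operations: since set_huge_pte_at()/huge_ptep_get() translate
 * between the formats, a huge pte looks like an ordinary pte at this
 * level.
 */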
static inline int huge_pte_none(pte_t pte)
{
	return pte_none(pte);
}

static inline int huge_pte_none_mostly(pte_t pte)
{
	return huge_pte_none(pte);
}

static inline int huge_pte_write(pte_t pte)
{
	return pte_write(pte);
}

static inline int huge_pte_dirty(pte_t pte)
{
	return pte_dirty(pte);
}

static inline pte_t huge_pte_mkwrite(pte_t pte)
{
	return pte_mkwrite(pte);
}

static inline pte_t huge_pte_mkdirty(pte_t pte)
{
	return pte_mkdirty(pte);
}

static inline pte_t huge_pte_wrprotect(pte_t pte)
{
	return pte_wrprotect(pte);
}

static inline pte_t huge_pte_modify(pte_t pte, pgprot_t newprot)
{
	return pte_modify(pte, newprot);
}

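/*
 * userfaultfd write-protect is not supported on s390; these stubs only
 * exist so that common code compiles.
 */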
static inline pte_t huge_pte_mkuffd_wp(pte_t pte)
{
	return pte;
}

static inline pte_t huge_pte_clear_uffd_wp(pte_t pte)
{
	return pte;
}

static inline int huge_pte_uffd_wp(pte_t pte)
{
	return 0;
}

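/*
 * Gigantic (2 GB) pages can be allocated and freed at runtime; no
 * boot-time reservation is required.
 */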
static inline bool gigantic_page_runtime_supported(void)
{
	return true;
}

#endif /* _ASM_S390_HUGETLB_H */