/* SPDX-License-Identifier: GPL-2.0 */
/*
 * S390 version
 * Copyright IBM Corp. 1999, 2000
 * Author(s): Hartmut Penner (hp@de.ibm.com)
 */

#ifndef _S390_PAGE_H
#define _S390_PAGE_H

#include <linux/const.h>
#include <asm/types.h>

#define _PAGE_SHIFT	12
#define _PAGE_SIZE	(_AC(1, UL) << _PAGE_SHIFT)
#define _PAGE_MASK	(~(_PAGE_SIZE - 1))

/* PAGE_SHIFT determines the page size */
#define PAGE_SHIFT	_PAGE_SHIFT
#define PAGE_SIZE	_PAGE_SIZE
#define PAGE_MASK	_PAGE_MASK
#define PAGE_DEFAULT_ACC	0
/* storage-protection override */
#define PAGE_SPO_ACC		9
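/*
 * The access-control bits occupy the four most significant bits of the
 * storage key (see _PAGE_ACC_BITS below), hence the shift by four when
 * deriving the default key from the default access-control value.
 */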
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)

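/* Huge pages are 1 MB (2^20 bytes), i.e. 256 consecutive 4 KB pages. */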
#define HPAGE_SHIFT	20
#define HPAGE_SIZE	(1UL << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
#define HUGE_MAX_HSTATE		2

#define ARCH_HAS_SETCLEAR_HUGE_PTE
#define ARCH_HAS_HUGE_PTE_TYPE
#define ARCH_HAS_PREPARE_HUGEPAGE
#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH

#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA

#include <asm/setup.h>
#ifndef __ASSEMBLY__

void __storage_key_init_range(unsigned long start, unsigned long end);

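/*
 * Initialize the storage keys for a memory range only if a non-zero
 * default key is configured; with a default key of zero the call to
 * __storage_key_init_range() is optimized away.
 */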
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
	if (PAGE_DEFAULT_KEY != 0)
		__storage_key_init_range(start, end);
}

#define clear_page(page)	memset((page), 0, PAGE_SIZE)

/*
 * copy_page uses the mvcl instruction with a 0xb0 padding byte in order to
 * bypass caches when copying a page. Especially when copying huge pages
 * this keeps the L1 and L2 data caches alive.
 */
static inline void copy_page(void *to, void *from)
{
	union register_pair dst, src;

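	/*
	 * MVCL takes the operand addresses in the even registers and the
	 * lengths in the odd registers of each pair; the source length word
	 * additionally carries the 0xb0 padding byte in the bits above the
	 * page-sized length.
	 */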
	dst.even = (unsigned long) to;
	dst.odd = 0x1000;
	src.even = (unsigned long) from;
	src.odd = 0xb0001000;

	asm volatile(
		"	mvcl	%[dst],%[src]"
		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
		: : "memory", "cc");
}

#define clear_user_page(page, vaddr, pg)	clear_page(page)
#define copy_user_page(to, from, vaddr, pg)	copy_page(to, from)

#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE

/*
 * These are used to make use of C type-checking..
 */

typedef struct { unsigned long pgprot; } pgprot_t;
typedef struct { unsigned long pgste; } pgste_t;
typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pud; } pud_t;
typedef struct { unsigned long p4d; } p4d_t;
typedef struct { unsigned long pgd; } pgd_t;
typedef pte_t *pgtable_t;

#define pgprot_val(x)	((x).pgprot)
#define pgste_val(x)	((x).pgste)

static inline unsigned long pte_val(pte_t pte)
{
	return pte.pte;
}

static inline unsigned long pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}

static inline unsigned long pud_val(pud_t pud)
{
	return pud.pud;
}

static inline unsigned long p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}

static inline unsigned long pgd_val(pgd_t pgd)
{
	return pgd.pgd;
}

#define __pgste(x)	((pgste_t) { (x) } )
#define __pte(x)	((pte_t) { (x) } )
#define __pmd(x)	((pmd_t) { (x) } )
#define __pud(x)	((pud_t) { (x) } )
#define __p4d(x)	((p4d_t) { (x) } )
#define __pgd(x)	((pgd_t) { (x) } )
#define __pgprot(x)	((pgprot_t) { (x) } )

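/*
 * Set the storage key for the page at addr. For pages that are not
 * mapped the ".insn" form encodes SSKE with the non-quiescing control
 * set, which avoids quiescing all CPUs.
 */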
static inline void page_set_storage_key(unsigned long addr,
					unsigned char skey, int mapped)
{
	if (!mapped)
		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
			     : : "d" (skey), "a" (addr));
	else
		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
}

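/* Read the storage key for the page at addr with the ISKE instruction. */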
static inline unsigned char page_get_storage_key(unsigned long addr)
{
	unsigned char skey;

	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
	return skey;
}

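/*
 * Reset the referenced bit in the storage key of the page at addr (RRBE)
 * and return the condition code, which reflects the previous state of the
 * referenced and changed bits.
 */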
static inline int page_reset_referenced(unsigned long addr)
{
	int cc;

	asm volatile(
		"	rrbe	0,%1\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc) : "a" (addr) : "cc");
	return cc;
}

/* Bits in the storage key */
#define _PAGE_CHANGED		0x02	/* HW changed bit */
#define _PAGE_REFERENCED	0x04	/* HW referenced bit */
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit */
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits */

struct page;
void arch_free_page(struct page *page, int order);
void arch_alloc_page(struct page *page, int order);
void arch_set_page_dat(struct page *page, int order);

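/* Access to physical memory via /dev/mem is never allowed on s390. */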
static inline int devmem_is_allowed(unsigned long pfn)
{
	return 0;
}

#define HAVE_ARCH_FREE_PAGE
#define HAVE_ARCH_ALLOC_PAGE

#if IS_ENABLED(CONFIG_PGSTE)
int arch_make_page_accessible(struct page *page);
#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
#endif

#endif /* !__ASSEMBLY__ */

#define __PAGE_OFFSET		0x0UL
#define PAGE_OFFSET		0x0UL

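/*
 * With PAGE_OFFSET at 0 the kernel address space is a 1:1 mapping of
 * physical memory, so converting between physical and virtual addresses
 * is a simple cast.
 */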
#define __pa(x)			((unsigned long)(x))
#define __va(x)			((void *)(unsigned long)(x))

#define phys_to_pfn(phys)	((phys) >> PAGE_SHIFT)
#define pfn_to_phys(pfn)	((pfn) << PAGE_SHIFT)

#define phys_to_page(phys)	pfn_to_page(phys_to_pfn(phys))
#define page_to_phys(page)	pfn_to_phys(page_to_pfn(page))

#define pfn_to_virt(pfn)	__va(pfn_to_phys(pfn))
#define virt_to_pfn(kaddr)	(phys_to_pfn(__pa(kaddr)))
#define pfn_to_kaddr(pfn)	pfn_to_virt(pfn)

#define virt_to_page(kaddr)	pfn_to_page(virt_to_pfn(kaddr))
#define page_to_virt(page)	pfn_to_virt(page_to_pfn(page))

#define virt_addr_valid(kaddr)	pfn_valid(virt_to_pfn(kaddr))

#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_NON_EXEC

#include <asm-generic/memory_model.h>
#include <asm-generic/getorder.h>

#endif /* _S390_PAGE_H */