/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <linux/radix-tree.h>
#include <linux/atomic.h>
#include <asm/sections.h>
#include <asm/bug.h>
#include <asm/page.h>
#include <asm/uv.h>

extern pgd_t swapper_pg_dir[];
extern void paging_init(void);
extern unsigned long s390_invalid_asce;

enum {
	PG_DIRECT_MAP_4K = 0,
	PG_DIRECT_MAP_1M,
	PG_DIRECT_MAP_2G,
	PG_DIRECT_MAP_MAX
};

extern atomic_long_t direct_pages_count[PG_DIRECT_MAP_MAX];

static inline void update_page_count(int level, long count)
{
	if (IS_ENABLED(CONFIG_PROC_FS))
		atomic_long_add(count, &direct_pages_count[level]);
}

struct seq_file;
void arch_report_meminfo(struct seq_file *m);

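/*
 * A sketch of how these counters are consumed (the real implementation
 * lives in arch/s390/mm, not in this header): arch_report_meminfo() can
 * simply dump the direct_pages_count[] array maintained by
 * update_page_count(), e.g.:
 *
 *	void arch_report_meminfo(struct seq_file *m)
 *	{
 *		seq_printf(m, "DirectMap4k:    %8lu kB\n",
 *			   atomic_long_read(&direct_pages_count[PG_DIRECT_MAP_4K]) << 2);
 *	}
 */
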
/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)		do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep)	do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc.
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */

#define pte_ERROR(e) \
	pr_err("%s:%d: bad pte %016lx.\n", __FILE__, __LINE__, pte_val(e))
#define pmd_ERROR(e) \
	pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
#define pud_ERROR(e) \
	pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
#define p4d_ERROR(e) \
	pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
#define pgd_ERROR(e) \
	pr_err("%s:%d: bad pgd %016lx.\n", __FILE__, __LINE__, pgd_val(e))

/*
 * The vmalloc and module area will always be on the topmost area of the
 * kernel mapping. 512GB are reserved for vmalloc by default.
 * At the top of the vmalloc area a 2GB area is reserved where modules
 * will reside. That makes sure that inter-module branches always
 * happen without trampolines and in addition the placement within a
 * 2GB frame is branch prediction unit friendly.
 */
extern unsigned long __bootdata_preserved(VMALLOC_START);
extern unsigned long __bootdata_preserved(VMALLOC_END);
#define VMALLOC_DEFAULT_SIZE	((512UL << 30) - MODULES_LEN)
extern struct page *__bootdata_preserved(vmemmap);
extern unsigned long __bootdata_preserved(vmemmap_size);

#define VMEM_MAX_PHYS	((unsigned long) vmemmap)

extern unsigned long __bootdata_preserved(MODULES_VADDR);
extern unsigned long __bootdata_preserved(MODULES_END);
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)

static inline int is_module_addr(void *addr)
{
	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
	if (addr < (void *)MODULES_VADDR)
		return 0;
	if (addr > (void *)MODULES_END)
		return 0;
	return 1;
}

/*
 * A 64 bit page table entry of S390 has the following format:
 * |			 PFRA			      |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segment table entry of S390 has the following format:
 * |	P-table origin				      |	 TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has the following format:
 * |	S-table origin				 |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF Table offset
 * TL Table length
 *
 * The 64 bit region table origin of S390 has the following format:
 * |	region table origin			      |	DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_NOEXEC	0x100		/* HW no-execute bit */
#define _PAGE_PROTECT	0x200		/* HW read-only bit */
#define _PAGE_INVALID	0x400		/* HW invalid bit */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY 0x002		/* SW pte soft dirty bit */
#else
#define _PAGE_SOFT_DIRTY 0x000
#endif

#define _PAGE_SWP_EXCLUSIVE _PAGE_LARGE	/* SW pte exclusive swap bit */

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_DIRTY | \
				 _PAGE_YOUNG | _PAGE_SOFT_DIRTY)

/*
 * handle_pte_fault uses pte_present and pte_none to find out the pte type
 * WITHOUT holding the page table lock. The _PAGE_PRESENT bit is used to
 * distinguish present from not-present ptes. It is changed only with the page
 * table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte
 * (. unassigned bit, x don't care, t swap type):
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR.uswrdy.p
 * empty			.10.00000000
 * swap				.11..ttttt.0
 * prot-none, clean, old	.11.xx0000.1
 * prot-none, clean, young	.11.xx0001.1
 * prot-none, dirty, old	.11.xx0010.1
 * prot-none, dirty, young	.11.xx0011.1
 * read-only, clean, old	.11.xx0100.1
 * read-only, clean, young	.01.xx0101.1
 * read-only, dirty, old	.11.xx0110.1
 * read-only, dirty, young	.01.xx0111.1
 * read-write, clean, old	.11.xx1100.1
 * read-write, clean, young	.01.xx1101.1
 * read-write, dirty, old	.10.xx1110.1
 * read-write, dirty, young	.00.xx1111.1
 * HW-bits: R read-only, I invalid
 * SW-bits: p present, y young, d dirty, r read, w write, s special,
 *	    u unused, l large
 *
 * pte_none is true for the bit pattern .10.00000000, pte == 0x400
 * pte_swap is true for the bit pattern .11..ttttt.0, (pte & 0x201) == 0x200
 * pte_present is true for the bit pattern .xx.xxxxxx.1, (pte & 0x001) == 0x001
 */

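/*
 * Worked example for one row of the table above: "read-only, clean, young"
 * is .01.xx0101.1, i.e. _PAGE_PRESENT, _PAGE_READ and _PAGE_YOUNG are set
 * in software, while in hardware _PAGE_PROTECT is set and _PAGE_INVALID is
 * clear, so the page is addressable but any store raises a protection
 * exception:
 *
 *	pte_t pte = __pte(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG |
 *			  _PAGE_PROTECT);
 *
 * For this pte, pte_present() and pte_young() return 1 and pte_write()
 * returns 0 (see the query functions further below).
 */
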
/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL /* region/segment table origin */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event */
#define _ASCE_REAL_SPACE	0x20	/* real space control */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL /* region/segment table origin */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit */
#define _REGION_ENTRY_NOEXEC	0x100	/* region no-execute bit */
#define _REGION_ENTRY_OFFSET	0xc0	/* region table offset */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region table type mask */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type */
#define _REGION_ENTRY_LENGTH	0x03	/* region table length */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_ORIGIN_LARGE ~0x7fffffffUL /* large page address */
#define _REGION3_ENTRY_DIRTY	0x2000	/* SW region dirty bit */
#define _REGION3_ENTRY_YOUNG	0x1000	/* SW region young bit */
#define _REGION3_ENTRY_LARGE	0x0400	/* RTTE-format control, large page */
#define _REGION3_ENTRY_READ	0x0002	/* SW region read bit */
#define _REGION3_ENTRY_WRITE	0x0001	/* SW region write bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _REGION3_ENTRY_SOFT_DIRTY 0x4000 /* SW region soft dirty bit */
#else
#define _REGION3_ENTRY_SOFT_DIRTY 0x0000 /* SW region soft dirty bit */
#endif

#define _REGION_ENTRY_BITS	0xfffffffffffff22fUL

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS			0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_HARDWARE_BITS		0xfffffffffffffe30UL
#define _SEGMENT_ENTRY_HARDWARE_BITS_LARGE	0xfffffffffff00730UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL /* page table origin */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* segment protection bit */
#define _SEGMENT_ENTRY_NOEXEC	0x100	/* segment no-execute bit */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry */
#define _SEGMENT_ENTRY_TYPE_MASK 0x0c	/* segment table type mask */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_DIRTY	0x2000	/* SW segment dirty bit */
#define _SEGMENT_ENTRY_YOUNG	0x1000	/* SW segment young bit */
#define _SEGMENT_ENTRY_LARGE	0x0400	/* STE-format control, large page */
#define _SEGMENT_ENTRY_WRITE	0x0002	/* SW segment write bit */
#define _SEGMENT_ENTRY_READ	0x0001	/* SW segment read bit */

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x4000 /* SW segment soft dirty bit */
#else
#define _SEGMENT_ENTRY_SOFT_DIRTY 0x0000 /* SW segment soft dirty bit */
#endif

#define _CRST_ENTRIES	2048	/* number of region/segment table entries */
#define _PAGE_ENTRIES	256	/* number of page table entries */

#define _CRST_TABLE_SIZE (_CRST_ENTRIES * 8)
#define _PAGE_TABLE_SIZE (_PAGE_ENTRIES * 8)

#define _REGION1_SHIFT	53
#define _REGION2_SHIFT	42
#define _REGION3_SHIFT	31
#define _SEGMENT_SHIFT	20

#define _REGION1_INDEX	(0x7ffUL << _REGION1_SHIFT)
#define _REGION2_INDEX	(0x7ffUL << _REGION2_SHIFT)
#define _REGION3_INDEX	(0x7ffUL << _REGION3_SHIFT)
#define _SEGMENT_INDEX	(0x7ffUL << _SEGMENT_SHIFT)
#define _PAGE_INDEX	(0xffUL  << _PAGE_SHIFT)

#define _REGION1_SIZE	(1UL << _REGION1_SHIFT)
#define _REGION2_SIZE	(1UL << _REGION2_SHIFT)
#define _REGION3_SIZE	(1UL << _REGION3_SHIFT)
#define _SEGMENT_SIZE	(1UL << _SEGMENT_SHIFT)

#define _REGION1_MASK	(~(_REGION1_SIZE - 1))
#define _REGION2_MASK	(~(_REGION2_SIZE - 1))
#define _REGION3_MASK	(~(_REGION3_SIZE - 1))
#define _SEGMENT_MASK	(~(_SEGMENT_SIZE - 1))

#define PMD_SHIFT	_SEGMENT_SHIFT
#define PUD_SHIFT	_REGION3_SHIFT
#define P4D_SHIFT	_REGION2_SHIFT
#define PGDIR_SHIFT	_REGION1_SHIFT

#define PMD_SIZE	_SEGMENT_SIZE
#define PUD_SIZE	_REGION3_SIZE
#define P4D_SIZE	_REGION2_SIZE
#define PGDIR_SIZE	_REGION1_SIZE

#define PMD_MASK	_SEGMENT_MASK
#define PUD_MASK	_REGION3_MASK
#define P4D_MASK	_REGION2_MASK
#define PGDIR_MASK	_REGION1_MASK

#define PTRS_PER_PTE	_PAGE_ENTRIES
#define PTRS_PER_PMD	_CRST_ENTRIES
#define PTRS_PER_PUD	_CRST_ENTRIES
#define PTRS_PER_P4D	_CRST_ENTRIES
#define PTRS_PER_PGD	_CRST_ENTRIES

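/*
 * With these definitions a 64 bit virtual address decomposes as follows
 * (a worked example; the index macros themselves are defined further
 * below):
 *
 *	bits 63..53 -> pgd_index()  (PGDIR_SHIFT = 53, 2048 entries)
 *	bits 52..42 -> p4d_index()  (P4D_SHIFT   = 42, 2048 entries)
 *	bits 41..31 -> pud_index()  (PUD_SHIFT   = 31, 2048 entries)
 *	bits 30..20 -> pmd_index()  (PMD_SHIFT   = 20, 2048 entries)
 *	bits 19..12 -> pte index    (_PAGE_INDEX, 256 entries)
 *	bits 11..0  -> byte offset within the 4K page
 */
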
/*
 * Segment table and region3 table entry encoding
 * (R = read-only, I = invalid, y = young bit):
 *				dy..R...I...wr
 * prot-none, clean, old	00..1...1...00
 * prot-none, clean, young	01..1...1...00
 * prot-none, dirty, old	10..1...1...00
 * prot-none, dirty, young	11..1...1...00
 * read-only, clean, old	00..1...1...01
 * read-only, clean, young	01..1...0...01
 * read-only, dirty, old	10..1...1...01
 * read-only, dirty, young	11..1...0...01
 * read-write, clean, old	00..1...1...11
 * read-write, clean, young	01..1...0...11
 * read-write, dirty, old	10..0...1...11
 * read-write, dirty, young	11..0...0...11
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0).
 * HW-bits: R read-only, I invalid
 * SW-bits: y young, d dirty, r read, w write
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_UC_BIT	0x0000800000000000UL	/* user dirty (migration) */
#define PGSTE_IN_BIT	0x0000400000000000UL	/* IPTE notify bit */
#define PGSTE_VSIE_BIT	0x0000200000000000UL	/* ref'd in a shadow table */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO			0x0000000080000000UL
#define _PGSTE_GPS_NODAT		0x0000000040000000UL
#define _PGSTE_GPS_USAGE_MASK		0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE		0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED		0x0000000001000000UL
#define _PGSTE_GPS_USAGE_POT_VOLATILE	0x0000000002000000UL
#define _PGSTE_GPS_USAGE_VOLATILE	_PGSTE_GPS_USAGE_MASK

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

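/*
 * A sketch of how a user ASCE is composed from these bits (hypothetical,
 * assuming a fully expanded five-level layout; the real code in
 * asm/mmu_context.h picks the table type according to
 * mm->context.asce_limit):
 *
 *	asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
 *	       _ASCE_TYPE_REGION1 | _ASCE_USER_BITS;
 */
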
/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RO		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RX		__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RW		__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_NOEXEC | _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_RWX	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY | _PAGE_NOEXEC)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT | _PAGE_NOEXEC)
#define PAGE_KERNEL_EXEC __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				  _PAGE_YOUNG | _PAGE_DIRTY)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_RO	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RX	__pgprot(_SEGMENT_ENTRY_PROTECT | \
				 _SEGMENT_ENTRY_READ)
#define SEGMENT_RW	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_RWX	__pgprot(_SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE)
#define SEGMENT_KERNEL	__pgprot(_SEGMENT_ENTRY | \
				 _SEGMENT_ENTRY_LARGE | \
				 _SEGMENT_ENTRY_READ | \
				 _SEGMENT_ENTRY_WRITE | \
				 _SEGMENT_ENTRY_YOUNG | \
				 _SEGMENT_ENTRY_DIRTY | \
				 _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_RO __pgprot(_SEGMENT_ENTRY | \
				   _SEGMENT_ENTRY_LARGE | \
				   _SEGMENT_ENTRY_READ | \
				   _SEGMENT_ENTRY_YOUNG | \
				   _SEGMENT_ENTRY_PROTECT | \
				   _SEGMENT_ENTRY_NOEXEC)
#define SEGMENT_KERNEL_EXEC __pgprot(_SEGMENT_ENTRY | \
				     _SEGMENT_ENTRY_LARGE | \
				     _SEGMENT_ENTRY_READ | \
				     _SEGMENT_ENTRY_WRITE | \
				     _SEGMENT_ENTRY_YOUNG | \
				     _SEGMENT_ENTRY_DIRTY)

/*
 * Region3 entry (large page) protection definitions.
 */

#define REGION3_KERNEL	__pgprot(_REGION_ENTRY_TYPE_R3 | \
				 _REGION3_ENTRY_LARGE | \
				 _REGION3_ENTRY_READ | \
				 _REGION3_ENTRY_WRITE | \
				 _REGION3_ENTRY_YOUNG | \
				 _REGION3_ENTRY_DIRTY | \
				 _REGION_ENTRY_NOEXEC)
#define REGION3_KERNEL_RO __pgprot(_REGION_ENTRY_TYPE_R3 | \
				   _REGION3_ENTRY_LARGE | \
				   _REGION3_ENTRY_READ | \
				   _REGION3_ENTRY_YOUNG | \
				   _REGION_ENTRY_PROTECT | \
				   _REGION_ENTRY_NOEXEC)

static inline bool mm_p4d_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION1_SIZE;
}
#define mm_p4d_folded(mm) mm_p4d_folded(mm)

static inline bool mm_pud_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION2_SIZE;
}
#define mm_pud_folded(mm) mm_pud_folded(mm)

static inline bool mm_pmd_folded(struct mm_struct *mm)
{
	return mm->context.asce_limit <= _REGION3_SIZE;
}
#define mm_pmd_folded(mm) mm_pmd_folded(mm)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

static inline int mm_is_protected(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(atomic_read(&mm->context.protected_count)))
		return 1;
#endif
	return 0;
}

static inline int mm_alloc_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.alloc_pgste))
		return 1;
#endif
	return 0;
}

static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) & ~pgprot_val(prot));
}

static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
{
	return __pte(pte_val(pte) | pgprot_val(prot));
}

static inline pmd_t clear_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) & ~pgprot_val(prot));
}

static inline pmd_t set_pmd_bit(pmd_t pmd, pgprot_t prot)
{
	return __pmd(pmd_val(pmd) | pgprot_val(prot));
}

static inline pud_t clear_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) & ~pgprot_val(prot));
}

static inline pud_t set_pud_bit(pud_t pud, pgprot_t prot)
{
	return __pud(pud_val(pud) | pgprot_val(prot));
}

/*
 * In the case that a guest uses storage keys,
 * faults should no longer be backed by zero pages.
 */
#define mm_forbids_zeropage mm_has_pgste
static inline int mm_uses_skeys(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (mm->context.uses_skeys)
		return 1;
#endif
	return 0;
}

static inline void csp(unsigned int *ptr, unsigned int old, unsigned int new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	csp	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

static inline void cspg(unsigned long *ptr, unsigned long old, unsigned long new)
{
	union register_pair r1 = { .even = old, .odd = new, };
	unsigned long address = (unsigned long)ptr | 1;

	asm volatile(
		"	cspg	%[r1],%[address]"
		: [r1] "+&d" (r1.pair), "+m" (*ptr)
		: [address] "d" (address)
		: "cc");
}

#define CRDTE_DTT_PAGE		0x00UL
#define CRDTE_DTT_SEGMENT	0x10UL
#define CRDTE_DTT_REGION3	0x14UL
#define CRDTE_DTT_REGION2	0x18UL
#define CRDTE_DTT_REGION1	0x1cUL

static inline void crdte(unsigned long old, unsigned long new,
			 unsigned long *table, unsigned long dtt,
			 unsigned long address, unsigned long asce)
{
	union register_pair r1 = { .even = old, .odd = new, };
	union register_pair r2 = { .even = __pa(table) | dtt, .odd = address, };

	asm volatile(".insn rrf,0xb98f0000,%[r1],%[r2],%[asce],0"
		     : [r1] "+&d" (r1.pair)
		     : [r2] "d" (r2.pair), [asce] "a" (asce)
		     : "memory", "cc");
}

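/*
 * A minimal usage sketch with hypothetical values (the real callers live
 * in arch/s390/mm/pgtable.c): CRDTE compares and replaces a table entry
 * and flushes the matching TLB entries in one operation, e.g. to mark a
 * segment table entry invalid:
 *
 *	crdte(old, old | _SEGMENT_ENTRY_INVALID, table,
 *	      CRDTE_DTT_SEGMENT, addr, mm->context.asce);
 */
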
/*
 * pgd/p4d/pud/pmd/pte query functions
 */
static inline int pgd_folded(pgd_t pgd)
{
	return (pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1;
}

static inline int pgd_present(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if (pgd_folded(pgd))
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R1)
		return 0;
	return (pgd_val(pgd) & ~_REGION_ENTRY_BITS) != 0;
}

static inline unsigned long pgd_pfn(pgd_t pgd)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (pgd_val(pgd) & origin_mask) >> PAGE_SHIFT;
}

static inline int p4d_folded(p4d_t p4d)
{
	return (p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2;
}

static inline int p4d_present(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 1;
	return (p4d_val(p4d) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int p4d_none(p4d_t p4d)
{
	if (p4d_folded(p4d))
		return 0;
	return p4d_val(p4d) == _REGION2_ENTRY_EMPTY;
}

static inline unsigned long p4d_pfn(p4d_t p4d)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	return (p4d_val(p4d) & origin_mask) >> PAGE_SHIFT;
}

static inline int pud_folded(pud_t pud)
{
	return (pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3;
}

static inline int pud_present(pud_t pud)
{
	if (pud_folded(pud))
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if (pud_folded(pud))
		return 0;
	return pud_val(pud) == _REGION3_ENTRY_EMPTY;
}

#define pud_leaf	pud_large
static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

#define pmd_leaf	pmd_large
static inline int pmd_large(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
}

static inline int pmd_bad(pmd_t pmd)
{
	if ((pmd_val(pmd) & _SEGMENT_ENTRY_TYPE_MASK) > 0 || pmd_large(pmd))
		return 1;
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

static inline int pud_bad(pud_t pud)
{
	unsigned long type = pud_val(pud) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R3 || pud_large(pud))
		return 1;
	if (type < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int p4d_bad(p4d_t p4d)
{
	unsigned long type = p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK;

	if (type > _REGION_ENTRY_TYPE_R2)
		return 1;
	if (type < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (p4d_val(p4d) & ~_REGION_ENTRY_BITS) != 0;
}

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_EMPTY;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_EMPTY;
}

#define pmd_write pmd_write
static inline int pmd_write(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}

#define pud_write pud_write
static inline int pud_write(pud_t pud)
{
	return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
}

static inline int pmd_dirty(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
}

#define pmd_young pmd_young
static inline int pmd_young(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x201) == 0x200 */
	return (pte_val(pte) & (_PAGE_PROTECT | _PAGE_PRESENT))
		== _PAGE_PROTECT;
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	return pte_present(pte) && !(pte_val(pte) & _PAGE_READ);
}

static inline int pmd_protnone(pmd_t pmd)
{
	/* pmd_large(pmd) implies pmd_present(pmd) */
	return pmd_large(pmd) && !(pmd_val(pmd) & _SEGMENT_ENTRY_READ);
}
#endif

#define __HAVE_ARCH_PTE_SWP_EXCLUSIVE
static inline int pte_swp_exclusive(pte_t pte)
{
	return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
}

static inline pte_t pte_swp_mkexclusive(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline pte_t pte_swp_clear_exclusive(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SWP_EXCLUSIVE));
}

static inline int pte_soft_dirty(pte_t pte)
{
	return pte_val(pte) & _PAGE_SOFT_DIRTY;
}
#define pte_swp_soft_dirty pte_soft_dirty

static inline pte_t pte_mksoft_dirty(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_mksoft_dirty pte_mksoft_dirty

static inline pte_t pte_clear_soft_dirty(pte_t pte)
{
	return clear_pte_bit(pte, __pgprot(_PAGE_SOFT_DIRTY));
}
#define pte_swp_clear_soft_dirty pte_clear_soft_dirty

static inline int pmd_soft_dirty(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SOFT_DIRTY;
}

static inline pmd_t pmd_mksoft_dirty(pmd_t pmd)
{
	return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

static inline pmd_t pmd_clear_soft_dirty(pmd_t pmd)
{
	return clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_SOFT_DIRTY));
}

/*
 * The query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * Extract the pgprot value from the given pte while at the same time making it
 * usable for kernel address space mappings where fault driven dirty and
 * young/old accounting is not supported, i.e. _PAGE_PROTECT and _PAGE_INVALID
 * must not be set.
 */
static inline pgprot_t pte_pgprot(pte_t pte)
{
	unsigned long pte_flags = pte_val(pte) & _PAGE_CHG_MASK;

	if (pte_write(pte))
		pte_flags |= pgprot_val(PAGE_KERNEL);
	else
		pte_flags |= pgprot_val(PAGE_KERNEL_RO);
	pte_flags |= pte_val(pte) & mio_wb_bit_mask;

	return __pgprot(pte_flags);
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
{
	WRITE_ONCE(*pgdp, pgd);
}

static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
{
	WRITE_ONCE(*p4dp, p4d);
}

static inline void set_pud(pud_t *pudp, pud_t pud)
{
	WRITE_ONCE(*pudp, pud);
}

static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
{
	WRITE_ONCE(*pmdp, pmd);
}

static inline void set_pte(pte_t *ptep, pte_t pte)
{
	WRITE_ONCE(*ptep, pte);
}

static inline void pgd_clear(pgd_t *pgd)
{
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R1)
		set_pgd(pgd, __pgd(_REGION1_ENTRY_EMPTY));
}

static inline void p4d_clear(p4d_t *p4d)
{
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		set_p4d(p4d, __p4d(_REGION2_ENTRY_EMPTY));
}

static inline void pud_clear(pud_t *pud)
{
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		set_pud(pud, __pud(_REGION3_ENTRY_EMPTY));
}

static inline void pmd_clear(pmd_t *pmdp)
{
	set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, __pte(_PAGE_INVALID));
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not.
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte = clear_pte_bit(pte, __pgprot(~_PAGE_CHG_MASK));
	pte = set_pte_bit(pte, newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX
	 * has the invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	/*
	 * newprot for PAGE_RO, PAGE_RX, PAGE_RW and PAGE_RWX has the page
	 * protection bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

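/*
 * Example: generic code such as change_pte_range() applies a new
 * protection with
 *
 *	pte = pte_modify(pte, PAGE_RO);
 *
 * Since PAGE_RO carries _PAGE_INVALID and _PAGE_PROTECT, the two fixups
 * in pte_modify() re-clear _PAGE_INVALID for young, readable pages (and
 * would re-clear _PAGE_PROTECT for dirty, writable ones), keeping the
 * hardware bits consistent with the software state.
 */
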
static inline pte_t pte_wrprotect(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_WRITE));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_WRITE));
	if (pte_val(pte) & _PAGE_DIRTY)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_DIRTY));
	return set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_DIRTY | _PAGE_SOFT_DIRTY));
	if (pte_val(pte) & _PAGE_WRITE)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_PROTECT));
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte = clear_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	return set_pte_bit(pte, __pgprot(_PAGE_INVALID));
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte = set_pte_bit(pte, __pgprot(_PAGE_YOUNG));
	if (pte_val(pte) & _PAGE_READ)
		pte = clear_pte_bit(pte, __pgprot(_PAGE_INVALID));
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_SPECIAL));
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	return set_pte_bit(pte, __pgprot(_PAGE_LARGE));
}
#endif

#define IPTE_GLOBAL	0
#define IPTE_LOCAL	1

#define IPTE_NODAT	0x400
#define IPTE_GUEST_ASCE	0x800

static __always_inline void __ptep_ipte(unsigned long address, pte_t *ptep,
					unsigned long opt, unsigned long asce,
					int local)
{
	unsigned long pto = __pa(ptep);

	if (__builtin_constant_p(opt) && opt == 0) {
		/* Invalidation + TLB flush for the pte */
		asm volatile(
			"	ipte	%[r1],%[r2],0,%[m4]"
			: "+m" (*ptep) : [r1] "a" (pto), [r2] "a" (address),
			  [m4] "i" (local));
		return;
	}

	/* Invalidate ptes with options + TLB flush of the ptes */
	opt = opt | (asce & _ASCE_ORIGIN);
	asm volatile(
		"	ipte	%[r1],%[r2],%[r3],%[m4]"
		: [r2] "+a" (address), [r3] "+a" (opt)
		: [r1] "a" (pto), [m4] "i" (local) : "memory");
}

static __always_inline void __ptep_ipte_range(unsigned long address, int nr,
					      pte_t *ptep, int local)
{
	unsigned long pto = __pa(ptep);

	/* Invalidate a range of ptes + TLB flush of the ptes */
	do {
		asm volatile(
			"	ipte	%[r1],%[r2],%[r3],%[m4]"
			: [r2] "+a" (address), [r3] "+a" (nr)
			: [r1] "a" (pto), [m4] "i" (local) : "memory");
	} while (nr != 255);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
pte_t ptep_xchg_direct(struct mm_struct *, unsigned long, pte_t *, pte_t);
pte_t ptep_xchg_lazy(struct mm_struct *, unsigned long, pte_t *, pte_t);

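/*
 * The sequence from the comment above, written out as a short sketch of
 * the generic mm code usage:
 *
 *	old_pte = ptep_get_and_clear(mm, addr, ptep);	(flushes the TLB)
 *	set_pte_at(mm, addr, ptep, new_pte);
 *	flush_tlb_range(vma, start, end);		(a nop on s390)
 */
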
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	pte = ptep_xchg_direct(vma->vm_mm, addr, ptep, pte_mkold(pte));
	return pte_young(pte);
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
pte_t ptep_modify_prot_start(struct vm_area_struct *, unsigned long, pte_t *);
void ptep_modify_prot_commit(struct vm_area_struct *, unsigned long,
			     pte_t *, pte_t, pte_t);

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long addr, pte_t *ptep)
{
	pte_t res;

	res = ptep_xchg_direct(vma->vm_mm, addr, ptep, __pte(_PAGE_INVALID));
	/* At this point the reference through the mapping is still present */
	if (mm_is_protected(vma->vm_mm) && pte_present(res))
		uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long addr,
					    pte_t *ptep, int full)
{
	pte_t res;

	if (full) {
		res = *ptep;
		set_pte(ptep, __pte(_PAGE_INVALID));
	} else {
		res = ptep_xchg_lazy(mm, addr, ptep, __pte(_PAGE_INVALID));
	}
	/* Nothing to do */
	if (!mm_is_protected(mm) || !pte_present(res))
		return res;
	/*
	 * At this point the reference through the mapping is still present.
	 * The notifier should have destroyed all protected vCPUs at this
	 * point, so the destroy should be successful.
	 */
	if (full && !uv_destroy_owned_page(pte_val(res) & PAGE_MASK))
		return res;
	/*
	 * If something went wrong and the page could not be destroyed, or
	 * if this is not a mm teardown, the slower export is used as
	 * fallback instead.
	 */
	uv_convert_owned_from_secure(pte_val(res) & PAGE_MASK);
	return res;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline void ptep_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pte_t *ptep)
{
	pte_t pte = *ptep;

	if (pte_write(pte))
		ptep_xchg_lazy(mm, addr, ptep, pte_wrprotect(pte));
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long addr, pte_t *ptep,
					pte_t entry, int dirty)
{
	if (pte_same(*ptep, entry))
		return 0;
	ptep_xchg_direct(vma->vm_mm, addr, ptep, entry);
	return 1;
}

/*
 * Additional functions to handle KVM guest page tables
 */
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, pte_t entry);
void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
void ptep_notify(struct mm_struct *mm, unsigned long addr,
		 pte_t *ptep, unsigned long bits);
int ptep_force_prot(struct mm_struct *mm, unsigned long gaddr,
		    pte_t *ptep, int prot, unsigned long bit);
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
		     pte_t *ptep, int reset);
void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
		    pte_t *sptep, pte_t *tptep, pte_t pte);
void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep);

bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long address,
			    pte_t *ptep);
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char key, bool nq);
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			       unsigned char key, unsigned char *oldkey,
			       bool nq, bool mr, bool mc);
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr);
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned char *key);

int set_pgste_bits(struct mm_struct *mm, unsigned long addr,
		   unsigned long bits, unsigned long value);
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep);
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
		       unsigned long *oldpte, unsigned long *oldpgste);
void gmap_pmdp_csp(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_invalidate(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_local(struct mm_struct *mm, unsigned long vmaddr);
void gmap_pmdp_idte_global(struct mm_struct *mm, unsigned long vmaddr);

#define pgprot_writecombine	pgprot_writecombine
pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough	pgprot_writethrough
pgprot_t pgprot_writethrough(pgprot_t prot);

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	if (pte_present(entry))
		entry = clear_pte_bit(entry, __pgprot(_PAGE_UNUSED));
	if (mm_has_pgste(mm))
		ptep_set_pte_at(mm, addr, ptep, entry);
	else
		set_pte(ptep, entry);
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;

	__pte = __pte(physpage | pgprot_val(pgprot));
	if (!MACHINE_HAS_NX)
		__pte = clear_pte_bit(__pte, __pgprot(_PAGE_NOEXEC));
	return pte_mkyoung(__pte);
}

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define p4d_index(address) (((address) >> P4D_SHIFT) & (PTRS_PER_P4D-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))

#define p4d_deref(pud) ((unsigned long)__va(p4d_val(pud) & _REGION_ENTRY_ORIGIN))
#define pgd_deref(pgd) ((unsigned long)__va(pgd_val(pgd) & _REGION_ENTRY_ORIGIN))

static inline unsigned long pmd_deref(pmd_t pmd)
{
	unsigned long origin_mask;

	origin_mask = _SEGMENT_ENTRY_ORIGIN;
	if (pmd_large(pmd))
		origin_mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pmd_val(pmd) & origin_mask);
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return __pa(pmd_deref(pmd)) >> PAGE_SHIFT;
}

static inline unsigned long pud_deref(pud_t pud)
{
	unsigned long origin_mask;

	origin_mask = _REGION_ENTRY_ORIGIN;
	if (pud_large(pud))
		origin_mask = _REGION3_ENTRY_ORIGIN_LARGE;
	return (unsigned long)__va(pud_val(pud) & origin_mask);
}

static inline unsigned long pud_pfn(pud_t pud)
{
	return __pa(pud_deref(pud)) >> PAGE_SHIFT;
}

/*
 * The pgd_offset function *always* adds the index for the top-level
 * region/segment table. This is done to get a sequence like the
 * following to work:
 *	pgdp = pgd_offset(current->mm, addr);
 *	pgd = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset(&pgd, addr);
 *	...
 * The subsequent p4d_offset, pud_offset and pmd_offset functions
 * only add an index if they dereferenced the pointer.
 */
static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)
{
	unsigned long rste;
	unsigned int shift;

	/* Get the first entry of the top level table */
	rste = pgd_val(*pgd);
	/* Pick up the shift from the table type of the first entry */
	shift = ((rste & _REGION_ENTRY_TYPE_MASK) >> 2) * 11 + 20;
	return pgd + ((address >> shift) & (PTRS_PER_PGD - 1));
}

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

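/*
 * A sketch of a complete lockless walk down to the pmd level with the
 * helpers above and below (checks for *_none()/*_bad() entries and for
 * large mappings are omitted; a real walker needs them at every level):
 *
 *	pgdp = pgd_offset(mm, addr);
 *	pgd  = READ_ONCE(*pgdp);
 *	p4dp = p4d_offset_lockless(pgdp, pgd, addr);
 *	p4d  = READ_ONCE(*p4dp);
 *	pudp = pud_offset_lockless(p4dp, p4d, addr);
 *	pud  = READ_ONCE(*pudp);
 *	pmdp = pmd_offset_lockless(pudp, pud, addr);
 */
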
1356static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
1357{
1358 if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
1359 return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
1360 return (p4d_t *) pgdp;
1361}
1362#define p4d_offset_lockless p4d_offset_lockless
1363
1364static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
1365{
1366 return p4d_offset_lockless(pgdp, *pgdp, address);
1367}
1368
1369static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
1370{
1371 if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
1372 return (pud_t *) p4d_deref(p4d) + pud_index(address);
1373 return (pud_t *) p4dp;
1374}
1375#define pud_offset_lockless pud_offset_lockless
1376
1377static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
1378{
1379 return pud_offset_lockless(p4dp, *p4dp, address);
1380}
1381#define pud_offset pud_offset
1382
1383static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
1384{
1385 if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
1386 return (pmd_t *) pud_deref(pud) + pmd_index(address);
1387 return (pmd_t *) pudp;
1388}
1389#define pmd_offset_lockless pmd_offset_lockless
1390
1391static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
1392{
1393 return pmd_offset_lockless(pudp, *pudp, address);
1394}
1395#define pmd_offset pmd_offset
1396
1397static inline unsigned long pmd_page_vaddr(pmd_t pmd)
1398{
1399 return (unsigned long) pmd_deref(pmd);
1400}
1401
1402static inline bool gup_fast_permitted(unsigned long start, unsigned long end)
1403{
1404 return end <= current->mm->context.asce_limit;
1405}
1406#define gup_fast_permitted gup_fast_permitted
1407
1408#define pfn_pte(pfn, pgprot) mk_pte_phys(((pfn) << PAGE_SHIFT), (pgprot))
1409#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
1410#define pte_page(x) pfn_to_page(pte_pfn(x))
1411
1412#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd))
1413#define pud_page(pud) pfn_to_page(pud_pfn(pud))
1414#define p4d_page(p4d) pfn_to_page(p4d_pfn(p4d))
1415#define pgd_page(pgd) pfn_to_page(pgd_pfn(pgd))
1416
1417static inline pmd_t pmd_wrprotect(pmd_t pmd)
1418{
1419 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1420 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1421}
1422
1423static inline pmd_t pmd_mkwrite(pmd_t pmd)
1424{
1425 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_WRITE));
1426 if (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY)
1427 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1428 return pmd;
1429}
1430
1431static inline pmd_t pmd_mkclean(pmd_t pmd)
1432{
1433 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY));
1434 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1435}
1436
1437static inline pmd_t pmd_mkdirty(pmd_t pmd)
1438{
1439 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_DIRTY | _SEGMENT_ENTRY_SOFT_DIRTY));
1440 if (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE)
1441 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1442 return pmd;
1443}
1444
1445static inline pud_t pud_wrprotect(pud_t pud)
1446{
1447 pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1448 return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1449}
1450
1451static inline pud_t pud_mkwrite(pud_t pud)
1452{
1453 pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_WRITE));
1454 if (pud_val(pud) & _REGION3_ENTRY_DIRTY)
1455 pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1456 return pud;
1457}
1458
1459static inline pud_t pud_mkclean(pud_t pud)
1460{
1461 pud = clear_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY));
1462 return set_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1463}
1464
1465static inline pud_t pud_mkdirty(pud_t pud)
1466{
1467 pud = set_pud_bit(pud, __pgprot(_REGION3_ENTRY_DIRTY | _REGION3_ENTRY_SOFT_DIRTY));
1468 if (pud_val(pud) & _REGION3_ENTRY_WRITE)
1469 pud = clear_pud_bit(pud, __pgprot(_REGION_ENTRY_PROTECT));
1470 return pud;
1471}
1472
1473#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
1474static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
1475{
1476 /*
1477 * pgprot is PAGE_NONE, PAGE_RO, PAGE_RX, PAGE_RW or PAGE_RWX
1478 * (see __Pxxx / __Sxxx). Convert to segment table entry format.
1479 */
1480 if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
1481 return pgprot_val(SEGMENT_NONE);
1482 if (pgprot_val(pgprot) == pgprot_val(PAGE_RO))
1483 return pgprot_val(SEGMENT_RO);
1484 if (pgprot_val(pgprot) == pgprot_val(PAGE_RX))
1485 return pgprot_val(SEGMENT_RX);
1486 if (pgprot_val(pgprot) == pgprot_val(PAGE_RW))
1487 return pgprot_val(SEGMENT_RW);
1488 return pgprot_val(SEGMENT_RWX);
1489}
1490
1491static inline pmd_t pmd_mkyoung(pmd_t pmd)
1492{
1493 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1494 if (pmd_val(pmd) & _SEGMENT_ENTRY_READ)
1495 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1496 return pmd;
1497}
1498
1499static inline pmd_t pmd_mkold(pmd_t pmd)
1500{
1501 pmd = clear_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1502 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1503}
1504
1505static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
1506{
1507 unsigned long mask;
1508
1509 mask = _SEGMENT_ENTRY_ORIGIN_LARGE;
1510 mask |= _SEGMENT_ENTRY_DIRTY;
1511 mask |= _SEGMENT_ENTRY_YOUNG;
1512 mask |= _SEGMENT_ENTRY_LARGE;
1513 mask |= _SEGMENT_ENTRY_SOFT_DIRTY;
1514 pmd = __pmd(pmd_val(pmd) & mask);
1515 pmd = set_pmd_bit(pmd, __pgprot(massage_pgprot_pmd(newprot)));
1516 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY))
1517 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1518 if (!(pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG))
1519 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_INVALID));
1520 return pmd;
1521}
1522
1523static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
1524{
1525 return __pmd(physpage + massage_pgprot_pmd(pgprot));
1526}
1527
1528#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */
1529
1530static inline void __pmdp_csp(pmd_t *pmdp)
1531{
1532 csp((unsigned int *)pmdp + 1, pmd_val(*pmdp),
1533 pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);
1534}
1535
1536#define IDTE_GLOBAL 0
1537#define IDTE_LOCAL 1
1538
1539#define IDTE_PTOA 0x0800
1540#define IDTE_NODAT 0x1000
1541#define IDTE_GUEST_ASCE 0x2000
1542
1543static __always_inline void __pmdp_idte(unsigned long addr, pmd_t *pmdp,
1544 unsigned long opt, unsigned long asce,
1545 int local)
1546{
1547 unsigned long sto;
1548
1549 sto = __pa(pmdp) - pmd_index(addr) * sizeof(pmd_t);
1550 if (__builtin_constant_p(opt) && opt == 0) {
1551 /* flush without guest asce */
1552 asm volatile(
1553 " idte %[r1],0,%[r2],%[m4]"
1554 : "+m" (*pmdp)
1555 : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK)),
1556 [m4] "i" (local)
1557 : "cc" );
1558 } else {
1559 /* flush with guest asce */
1560 asm volatile(
1561 " idte %[r1],%[r3],%[r2],%[m4]"
1562 : "+m" (*pmdp)
1563 : [r1] "a" (sto), [r2] "a" ((addr & HPAGE_MASK) | opt),
1564 [r3] "a" (asce), [m4] "i" (local)
1565 : "cc" );
1566 }
1567}
1568
1569static __always_inline void __pudp_idte(unsigned long addr, pud_t *pudp,
1570 unsigned long opt, unsigned long asce,
1571 int local)
1572{
1573 unsigned long r3o;
1574
1575 r3o = __pa(pudp) - pud_index(addr) * sizeof(pud_t);
1576 r3o |= _ASCE_TYPE_REGION3;
1577 if (__builtin_constant_p(opt) && opt == 0) {
1578 /* flush without guest asce */
1579 asm volatile(
1580 " idte %[r1],0,%[r2],%[m4]"
1581 : "+m" (*pudp)
1582 : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK)),
1583 [m4] "i" (local)
1584 : "cc");
1585 } else {
1586 /* flush with guest asce */
1587 asm volatile(
1588 " idte %[r1],%[r3],%[r2],%[m4]"
1589 : "+m" (*pudp)
1590 : [r1] "a" (r3o), [r2] "a" ((addr & PUD_MASK) | opt),
1591 [r3] "a" (asce), [m4] "i" (local)
1592 : "cc" );
1593 }
1594}
1595
1596pmd_t pmdp_xchg_direct(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1597pmd_t pmdp_xchg_lazy(struct mm_struct *, unsigned long, pmd_t *, pmd_t);
1598pud_t pudp_xchg_direct(struct mm_struct *, unsigned long, pud_t *, pud_t);
1599
1600#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1601
1602#define __HAVE_ARCH_PGTABLE_DEPOSIT
1603void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1604 pgtable_t pgtable);
1605
1606#define __HAVE_ARCH_PGTABLE_WITHDRAW
1607pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
1608
1609#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
1610static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
1611 unsigned long addr, pmd_t *pmdp,
1612 pmd_t entry, int dirty)
1613{
1614 VM_BUG_ON(addr & ~HPAGE_MASK);
1615
1616 entry = pmd_mkyoung(entry);
1617 if (dirty)
1618 entry = pmd_mkdirty(entry);
1619 if (pmd_val(*pmdp) == pmd_val(entry))
1620 return 0;
1621 pmdp_xchg_direct(vma->vm_mm, addr, pmdp, entry);
1622 return 1;
1623}
1624
1625#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
1626static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
1627 unsigned long addr, pmd_t *pmdp)
1628{
1629 pmd_t pmd = *pmdp;
1630
1631 pmd = pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd_mkold(pmd));
1632 return pmd_young(pmd);
1633}
1634
1635#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
1636static inline int pmdp_clear_flush_young(struct vm_area_struct *vma,
1637 unsigned long addr, pmd_t *pmdp)
1638{
1639 VM_BUG_ON(addr & ~HPAGE_MASK);
1640 return pmdp_test_and_clear_young(vma, addr, pmdp);
1641}
1642
1643static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
1644 pmd_t *pmdp, pmd_t entry)
1645{
1646 if (!MACHINE_HAS_NX)
1647 entry = clear_pmd_bit(entry, __pgprot(_SEGMENT_ENTRY_NOEXEC));
1648 set_pmd(pmdp, entry);
1649}
1650
1651static inline pmd_t pmd_mkhuge(pmd_t pmd)
1652{
1653 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_LARGE));
1654 pmd = set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_YOUNG));
1655 return set_pmd_bit(pmd, __pgprot(_SEGMENT_ENTRY_PROTECT));
1656}
1657
1658#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
1659static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
1660 unsigned long addr, pmd_t *pmdp)
1661{
1662 return pmdp_xchg_direct(mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
1663}
1664
#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR_FULL
static inline pmd_t pmdp_huge_get_and_clear_full(struct vm_area_struct *vma,
						 unsigned long addr,
						 pmd_t *pmdp, int full)
{
	if (full) {
		pmd_t pmd = *pmdp;
		set_pmd(pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
		return pmd;
	}
	return pmdp_xchg_lazy(vma->vm_mm, addr, pmdp, __pmd(_SEGMENT_ENTRY_EMPTY));
}

#define __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
static inline pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma,
					  unsigned long addr, pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, addr, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline pmd_t pmdp_invalidate(struct vm_area_struct *vma,
				    unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = __pmd(pmd_val(*pmdp) | _SEGMENT_ENTRY_INVALID);

	return pmdp_xchg_direct(vma->vm_mm, addr, pmdp, pmd);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long addr, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd))
		pmd = pmdp_xchg_lazy(mm, addr, pmdp, pmd_wrprotect(pmd));
}

static inline pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
					unsigned long address,
					pmd_t *pmdp)
{
	return pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
}
#define pmdp_collapse_flush pmdp_collapse_flush

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

#define has_transparent_hugepage has_transparent_hugepage
static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_EDAT1 ? 1 : 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 54 and 63 are used to indicate the page type. Bit 53 marks the pte
 * as invalid.
 * A swap pte is indicated by bit pattern (pte & 0x201) == 0x200
 * |                        offset                      |E11XX|type |S0|
 * |0000000000111111111122222222223333333333444444444455|55555|55566|66|
 * |0123456789012345678901234567890123456789012345678901|23456|78901|23|
 *
 * Bits 0-51 store the offset.
 * Bit 52 (E) is used to remember PG_anon_exclusive.
 * Bits 57-61 store the type.
 * Bit 62 (S) is used for softdirty tracking.
 * Bits 55 and 56 (X) are unused.
 */

#define __SWP_OFFSET_MASK	((1UL << 52) - 1)
#define __SWP_OFFSET_SHIFT	12
#define __SWP_TYPE_MASK		((1UL << 5) - 1)
#define __SWP_TYPE_SHIFT	2

static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	unsigned long pteval;

	pteval = _PAGE_INVALID | _PAGE_PROTECT;
	pteval |= (offset & __SWP_OFFSET_MASK) << __SWP_OFFSET_SHIFT;
	pteval |= (type & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT;
	return __pte(pteval);
}
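
/*
 * Worked example (editor's addition, not part of the original source):
 * for type = 3 and offset = 0x1234 the encoding above yields
 *
 *	pteval = 0x600 | (0x1234 << 12) | (3 << 2) = 0x000000000123460c
 *
 * which satisfies the swap pattern (pteval & 0x201) == 0x200, and the
 * accessors below recover type 3 and offset 0x1234 again.
 */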

static inline unsigned long __swp_type(swp_entry_t entry)
{
	return (entry.val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK;
}

static inline unsigned long __swp_offset(swp_entry_t entry)
{
	return (entry.val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK;
}

static inline swp_entry_t __swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) { pte_val(mk_swap_pte(type, offset)) };
}

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern void vmem_remove_mapping(unsigned long start, unsigned long size);
extern int __vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot, bool alloc);
extern int vmem_map_4k_page(unsigned long addr, unsigned long phys, pgprot_t prot);
extern void vmem_unmap_4k_page(unsigned long addr);
extern pte_t *vmem_get_alloc_pte(unsigned long addr, bool alloc);
extern int s390_enable_sie(void);
extern int s390_enable_skey(void);
extern void s390_reset_cmma(struct mm_struct *mm);

/* s390 has a private copy of get unmapped area to deal with cache synonyms */
#define HAVE_ARCH_UNMAPPED_AREA
#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN

#define pmd_pgtable(pmd) \
	((pgtable_t)__va(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE))
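/*
 * Editor's note: sizeof(pte_t) * PTRS_PER_PTE is 8 * 256 = 0x800, so the
 * negated value acts as the mask ~0x7ffUL and strips the status bits
 * below the 2 KB-aligned page-table origin stored in the segment entry.
 */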

#endif /* _ASM_S390_PGTABLE_H */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Ulrich Weigand (weigand@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgtable.h"
 */

#ifndef _ASM_S390_PGTABLE_H
#define _ASM_S390_PGTABLE_H

/*
 * The Linux memory management assumes a three-level page table setup. For
 * s390 31 bit we "fold" the mid level into the top-level page table, so
 * that we physically have the same two-level page table as the s390 mmu
 * expects in 31 bit mode. For s390 64 bit we use three of the five levels
 * the hardware provides (region first and region second tables are not
 * used).
 *
 * The "pgd_xxx()" functions are trivial for a folded two-level
 * setup: the pgd is never bad, and a pmd always exists (as it's folded
 * into the pgd entry)
 *
 * This file contains the functions and defines necessary to modify and use
 * the S390 page table tree.
 */
#ifndef __ASSEMBLY__
#include <linux/sched.h>
#include <linux/mm_types.h>
#include <linux/page-flags.h>
#include <asm/bug.h>
#include <asm/page.h>

extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
extern void vmem_map_init(void);

/*
 * The S390 doesn't have any external MMU info: the kernel page
 * tables contain all the necessary information.
 */
#define update_mmu_cache(vma, address, ptep)     do { } while (0)
#define update_mmu_cache_pmd(vma, address, ptep) do { } while (0)

/*
 * ZERO_PAGE is a global shared page that is always zero; used
 * for zero-mapped memory areas etc..
 */

extern unsigned long empty_zero_page;
extern unsigned long zero_page_mask;

#define ZERO_PAGE(vaddr) \
	(virt_to_page((void *)(empty_zero_page + \
	 (((unsigned long)(vaddr)) & zero_page_mask))))
#define __HAVE_COLOR_ZERO_PAGE

/* TODO: s390 cannot support io_remap_pfn_range... */
#endif /* !__ASSEMBLY__ */

/*
 * PMD_SHIFT determines the size of the area a second-level page
 * table can map
 * PGDIR_SHIFT determines what a third-level page table entry can map
 */
#ifndef CONFIG_64BIT
# define PMD_SHIFT	20
# define PUD_SHIFT	20
# define PGDIR_SHIFT	20
#else /* CONFIG_64BIT */
# define PMD_SHIFT	20
# define PUD_SHIFT	31
# define PGDIR_SHIFT	42
#endif /* CONFIG_64BIT */

#define PMD_SIZE	(1UL << PMD_SHIFT)
#define PMD_MASK	(~(PMD_SIZE-1))
#define PUD_SIZE	(1UL << PUD_SHIFT)
#define PUD_MASK	(~(PUD_SIZE-1))
#define PGDIR_SIZE	(1UL << PGDIR_SHIFT)
#define PGDIR_MASK	(~(PGDIR_SIZE-1))

/*
 * entries per page directory level: the S390 is two-level, so
 * we don't really have any PMD directory physically.
 * for S390 segment-table entries are combined into one PGD entry,
 * which leads to 1024 ptes per pgd.
 */
#define PTRS_PER_PTE	256
#ifndef CONFIG_64BIT
#define PTRS_PER_PMD	1
#define PTRS_PER_PUD	1
#else /* CONFIG_64BIT */
#define PTRS_PER_PMD	2048
#define PTRS_PER_PUD	2048
#endif /* CONFIG_64BIT */
#define PTRS_PER_PGD	2048
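
/*
 * Illustrative note (editor's addition): with the 64 bit shifts above,
 * a virtual address is split by the pXd_index() macros defined further
 * down as
 *
 *	pgd_index(addr) = (addr >> 42) & (PTRS_PER_PGD - 1)	11 bits
 *	pud_index(addr) = (addr >> 31) & (PTRS_PER_PUD - 1)	11 bits
 *	pmd_index(addr) = (addr >> 20) & (PTRS_PER_PMD - 1)	11 bits
 *	pte_index(addr) = (addr >> 12) & (PTRS_PER_PTE - 1)	 8 bits
 *
 * so one segment (pmd) entry covers 1 MB and one region-third (pud)
 * entry covers 2 GB.
 */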

#define FIRST_USER_ADDRESS  0

#define pte_ERROR(e) \
	printk("%s:%d: bad pte %p.\n", __FILE__, __LINE__, (void *) pte_val(e))
#define pmd_ERROR(e) \
	printk("%s:%d: bad pmd %p.\n", __FILE__, __LINE__, (void *) pmd_val(e))
#define pud_ERROR(e) \
	printk("%s:%d: bad pud %p.\n", __FILE__, __LINE__, (void *) pud_val(e))
#define pgd_ERROR(e) \
	printk("%s:%d: bad pgd %p.\n", __FILE__, __LINE__, (void *) pgd_val(e))

#ifndef __ASSEMBLY__
/*
 * The vmalloc and module area will always be on the topmost area of the kernel
 * mapping. We reserve 96MB (31bit) / 128GB (64bit) for vmalloc and modules.
 * On 64 bit kernels we have a 2GB area at the top of the vmalloc area where
 * modules will reside. That makes sure that inter module branches always
 * happen without trampolines and in addition the placement within a 2GB frame
 * is branch prediction unit friendly.
 */
extern unsigned long VMALLOC_START;
extern unsigned long VMALLOC_END;
extern struct page *vmemmap;

#define VMEM_MAX_PHYS ((unsigned long) vmemmap)

#ifdef CONFIG_64BIT
extern unsigned long MODULES_VADDR;
extern unsigned long MODULES_END;
#define MODULES_VADDR	MODULES_VADDR
#define MODULES_END	MODULES_END
#define MODULES_LEN	(1UL << 31)
#endif

/*
 * A 31 bit pagetable entry of S390 has following format:
 *  |   PFRA          |    |  OS  |
 * 0                   0IP0
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 *
 * A 31 bit segmenttable entry of S390 has following format:
 *  |   P-table origin      |  |PTL
 * 0                         IC
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * PTL Page-Table-Length:    Page-table length ((PTL+1)*16 entries -> up to 256)
 *
 * The 31 bit segmenttable origin of S390 has following format:
 *
 *  |S-table origin   |     | STL |
 * X                   **GPS
 * 00000000001111111111222222222233
 * 01234567890123456789012345678901
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:  *
 * P Private-Space Bit:    Segment is not private (PoP 3-30)
 * S Storage-Alteration:
 * STL Segment-Table-Length: Segment-table length ((STL+1)*16 entries -> up to 2048)
 *
 * A 64 bit pagetable entry of S390 has following format:
 * |                     PFRA                         |0IPC|  OS  |
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Page-Invalid Bit:    Page is not available for address-translation
 * P Page-Protection Bit: Store access not possible for page
 * C Change-bit override: HW is not required to set change bit
 *
 * A 64 bit segmenttable entry of S390 has following format:
 * |        P-table origin                              |      TT
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * C Common-Segment Bit:     Segment is not private (PoP 3-30)
 * P Page-Protection Bit:    Store access not possible for page
 * TT Type 00
 *
 * A 64 bit region table entry of S390 has following format:
 * |        S-table origin                             |   TF  TTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * I Segment-Invalid Bit:    Segment is not available for address-translation
 * TT Type 01
 * TF
 * TL Table length
 *
 * The 64 bit regiontable origin of S390 has following format:
 * |      region table origin                          |       DTTL
 * 0000000000111111111122222222223333333333444444444455555555556666
 * 0123456789012345678901234567890123456789012345678901234567890123
 *
 * X Space-Switch event:
 * G Segment-Invalid Bit:
 * P Private-Space Bit:
 * S Storage-Alteration:
 * R Real space
 * TL Table-Length:
 *
 * A storage key has the following format:
 * | ACC |F|R|C|0|
 *  0   3 4 5 6 7
 * ACC: access key
 * F  : fetch protection bit
 * R  : referenced bit
 * C  : changed bit
 */

/* Hardware bits in the page table entry */
#define _PAGE_CO	0x100		/* HW Change-bit override */
#define _PAGE_PROTECT	0x200		/* HW read-only bit  */
#define _PAGE_INVALID	0x400		/* HW invalid bit    */
#define _PAGE_LARGE	0x800		/* Bit to mark a large pte */

/* Software bits in the page table entry */
#define _PAGE_PRESENT	0x001		/* SW pte present bit */
#define _PAGE_TYPE	0x002		/* SW pte type bit */
#define _PAGE_YOUNG	0x004		/* SW pte young bit */
#define _PAGE_DIRTY	0x008		/* SW pte dirty bit */
#define _PAGE_READ	0x010		/* SW pte read bit */
#define _PAGE_WRITE	0x020		/* SW pte write bit */
#define _PAGE_SPECIAL	0x040		/* SW associated with special page */
#define _PAGE_UNUSED	0x080		/* SW bit for pgste usage state */
#define __HAVE_ARCH_PTE_SPECIAL

/* Set of bits not changed in pte_modify */
#define _PAGE_CHG_MASK		(PAGE_MASK | _PAGE_SPECIAL | _PAGE_CO | \
				 _PAGE_DIRTY | _PAGE_YOUNG)

/*
 * handle_pte_fault uses pte_present, pte_none and pte_file to find out the
 * pte type WITHOUT holding the page table lock. The _PAGE_PRESENT bit
 * is used to distinguish present from not-present ptes. It is changed only
 * with the page table lock held.
 *
 * The following table gives the different possible bit combinations for
 * the pte hardware and software bits in the last 12 bits of a pte:
 *
 *				842100000000
 *				000084210000
 *				000000008421
 *				.IR...wrdytp
 * empty			.10...000000
 * swap				.10...xxxx10
 * file				.11...xxxxx0
 * prot-none, clean, old	.11...000001
 * prot-none, clean, young	.11...000101
 * prot-none, dirty, old	.10...001001
 * prot-none, dirty, young	.10...001101
 * read-only, clean, old	.11...010001
 * read-only, clean, young	.01...010101
 * read-only, dirty, old	.11...011001
 * read-only, dirty, young	.01...011101
 * read-write, clean, old	.11...110001
 * read-write, clean, young	.01...110101
 * read-write, dirty, old	.10...111001
 * read-write, dirty, young	.00...111101
 *
 * pte_present is true for the bit pattern .xx...xxxxx1, (pte & 0x001) == 0x001
 * pte_none    is true for the bit pattern .10...xxxx00, (pte & 0x603) == 0x400
 * pte_file    is true for the bit pattern .11...xxxxx0, (pte & 0x601) == 0x600
 * pte_swap    is true for the bit pattern .10...xxxx10, (pte & 0x603) == 0x402
 */
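
/*
 * Worked check of one row above (editor's addition): "read-only, clean,
 * old" is .11...010001, i.e. _PAGE_INVALID | _PAGE_PROTECT | _PAGE_READ |
 * _PAGE_PRESENT = 0x611. Then (0x611 & 0x001) == 0x001, so pte_present
 * is true, while (0x611 & 0x603) == 0x601 and (0x611 & 0x601) == 0x601,
 * so pte_none, pte_swap and pte_file are all false, as expected.
 */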

#ifndef CONFIG_64BIT

/* Bits in the segment table address-space-control-element */
#define _ASCE_SPACE_SWITCH	0x80000000UL	/* space switch event	    */
#define _ASCE_ORIGIN_MASK	0x7ffff000UL	/* segment table origin	    */
#define _ASCE_PRIVATE_SPACE	0x100		/* private space control    */
#define _ASCE_ALT_EVENT		0x80		/* storage alteration event control */
#define _ASCE_TABLE_LENGTH	0x7f		/* 128 x 64 entries = 8k    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0x7fffffffUL	/* Valid segment table bits */
#define _SEGMENT_ENTRY_ORIGIN	0x7fffffc0UL	/* page table origin	    */
#define _SEGMENT_ENTRY_PROTECT	0x200		/* page protection bit	    */
#define _SEGMENT_ENTRY_INVALID	0x20		/* invalid segment table entry */
#define _SEGMENT_ENTRY_COMMON	0x10		/* common segment bit	    */
#define _SEGMENT_ENTRY_PTL	0x0f		/* page table length	    */
#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_PROTECT

#define _SEGMENT_ENTRY		(_SEGMENT_ENTRY_PTL)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

/*
 * Segment table entry encoding (I = invalid, R = read-only bit):
 *		..R...I.....
 * prot-none	..1...1.....
 * read-only	..1...0.....
 * read-write	..0...0.....
 * empty	..0...1.....
 */

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf0000000UL
#define PGSTE_FP_BIT	0x08000000UL
#define PGSTE_PCL_BIT	0x00800000UL
#define PGSTE_HR_BIT	0x00400000UL
#define PGSTE_HC_BIT	0x00200000UL
#define PGSTE_GR_BIT	0x00040000UL
#define PGSTE_GC_BIT	0x00020000UL
#define PGSTE_IN_BIT	0x00008000UL	/* IPTE notify bit */

#else /* CONFIG_64BIT */

/* Bits in the segment/region table address-space-control-element */
#define _ASCE_ORIGIN		~0xfffUL/* segment table origin		    */
#define _ASCE_PRIVATE_SPACE	0x100	/* private space control	    */
#define _ASCE_ALT_EVENT		0x80	/* storage alteration event control */
#define _ASCE_SPACE_SWITCH	0x40	/* space switch event		    */
#define _ASCE_REAL_SPACE	0x20	/* real space control		    */
#define _ASCE_TYPE_MASK		0x0c	/* asce table type mask		    */
#define _ASCE_TYPE_REGION1	0x0c	/* region first table type	    */
#define _ASCE_TYPE_REGION2	0x08	/* region second table type	    */
#define _ASCE_TYPE_REGION3	0x04	/* region third table type	    */
#define _ASCE_TYPE_SEGMENT	0x00	/* segment table type		    */
#define _ASCE_TABLE_LENGTH	0x03	/* region table length		    */

/* Bits in the region table entry */
#define _REGION_ENTRY_ORIGIN	~0xfffUL/* region/segment table origin	    */
#define _REGION_ENTRY_PROTECT	0x200	/* region protection bit	    */
#define _REGION_ENTRY_INVALID	0x20	/* invalid region table entry	    */
#define _REGION_ENTRY_TYPE_MASK	0x0c	/* region/segment table type mask   */
#define _REGION_ENTRY_TYPE_R1	0x0c	/* region first table type	    */
#define _REGION_ENTRY_TYPE_R2	0x08	/* region second table type	    */
#define _REGION_ENTRY_TYPE_R3	0x04	/* region third table type	    */
#define _REGION_ENTRY_LENGTH	0x03	/* region third length		    */

#define _REGION1_ENTRY		(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_LENGTH)
#define _REGION1_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID)
#define _REGION2_ENTRY		(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_LENGTH)
#define _REGION2_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID)
#define _REGION3_ENTRY		(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_LENGTH)
#define _REGION3_ENTRY_EMPTY	(_REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID)

#define _REGION3_ENTRY_LARGE	0x400	/* RTTE-format control, large page  */
#define _REGION3_ENTRY_RO	0x200	/* page protection bit		    */
#define _REGION3_ENTRY_CO	0x100	/* change-recording override	    */

/* Bits in the segment table entry */
#define _SEGMENT_ENTRY_BITS	0xfffffffffffffe33UL
#define _SEGMENT_ENTRY_BITS_LARGE 0xfffffffffff1ff33UL
#define _SEGMENT_ENTRY_ORIGIN_LARGE ~0xfffffUL /* large page address	    */
#define _SEGMENT_ENTRY_ORIGIN	~0x7ffUL/* segment table origin		    */
#define _SEGMENT_ENTRY_PROTECT	0x200	/* page protection bit		    */
#define _SEGMENT_ENTRY_INVALID	0x20	/* invalid segment table entry	    */

#define _SEGMENT_ENTRY		(0)
#define _SEGMENT_ENTRY_EMPTY	(_SEGMENT_ENTRY_INVALID)

#define _SEGMENT_ENTRY_LARGE	0x400	/* STE-format control, large page   */
#define _SEGMENT_ENTRY_CO	0x100	/* change-recording override	    */
#define _SEGMENT_ENTRY_SPLIT	0x001	/* THP splitting bit		    */
#define _SEGMENT_ENTRY_YOUNG	0x002	/* SW segment young bit		    */
#define _SEGMENT_ENTRY_NONE	_SEGMENT_ENTRY_YOUNG

/*
 * Segment table entry encoding (R = read-only, I = invalid, y = young bit):
 *			..R...I...y.
 * prot-none, old	..0...1...1.
 * prot-none, young	..1...1...1.
 * read-only, old	..1...1...0.
 * read-only, young	..1...0...1.
 * read-write, old	..0...1...0.
 * read-write, young	..0...0...1.
 * The segment table origin is used to distinguish empty (origin==0) from
 * read-write, old segment table entries (origin!=0)
 */

#define _SEGMENT_ENTRY_SPLIT_BIT 0	/* THP splitting bit number */

/* Set of bits not changed in pmd_modify */
#define _SEGMENT_CHG_MASK	(_SEGMENT_ENTRY_ORIGIN | _SEGMENT_ENTRY_LARGE \
				 | _SEGMENT_ENTRY_SPLIT | _SEGMENT_ENTRY_CO)

/* Page status table bits for virtualization */
#define PGSTE_ACC_BITS	0xf000000000000000UL
#define PGSTE_FP_BIT	0x0800000000000000UL
#define PGSTE_PCL_BIT	0x0080000000000000UL
#define PGSTE_HR_BIT	0x0040000000000000UL
#define PGSTE_HC_BIT	0x0020000000000000UL
#define PGSTE_GR_BIT	0x0004000000000000UL
#define PGSTE_GC_BIT	0x0002000000000000UL
#define PGSTE_IN_BIT	0x0000800000000000UL	/* IPTE notify bit */

#endif /* CONFIG_64BIT */

/* Guest Page State used for virtualization */
#define _PGSTE_GPS_ZERO		0x0000000080000000UL
#define _PGSTE_GPS_USAGE_MASK	0x0000000003000000UL
#define _PGSTE_GPS_USAGE_STABLE 0x0000000000000000UL
#define _PGSTE_GPS_USAGE_UNUSED 0x0000000001000000UL

/*
 * A user page table pointer has the space-switch-event bit, the
 * private-space-control bit and the storage-alteration-event-control
 * bit set. A kernel page table pointer doesn't need them.
 */
#define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
				 _ASCE_ALT_EVENT)

/*
 * Page protection definitions.
 */
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_READ	__pgprot(_PAGE_PRESENT | _PAGE_READ | \
				 _PAGE_INVALID | _PAGE_PROTECT)
#define PAGE_WRITE	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_INVALID | _PAGE_PROTECT)

#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
				 _PAGE_YOUNG | _PAGE_DIRTY)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_YOUNG | \
				 _PAGE_PROTECT)

/*
 * On s390 the page table entry has an invalid bit and a read-only bit.
 * Read permission implies execute permission and write permission
 * implies read permission.
 */
         /*xwr*/
#define __P000	PAGE_NONE
#define __P001	PAGE_READ
#define __P010	PAGE_READ
#define __P011	PAGE_READ
#define __P100	PAGE_READ
#define __P101	PAGE_READ
#define __P110	PAGE_READ
#define __P111	PAGE_READ

#define __S000	PAGE_NONE
#define __S001	PAGE_READ
#define __S010	PAGE_WRITE
#define __S011	PAGE_WRITE
#define __S100	PAGE_READ
#define __S101	PAGE_READ
#define __S110	PAGE_WRITE
#define __S111	PAGE_WRITE

/*
 * Segment entry (large page) protection definitions.
 */
#define SEGMENT_NONE	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_NONE)
#define SEGMENT_READ	__pgprot(_SEGMENT_ENTRY_INVALID | \
				 _SEGMENT_ENTRY_PROTECT)
#define SEGMENT_WRITE	__pgprot(_SEGMENT_ENTRY_INVALID)

static inline int mm_has_pgste(struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
	if (unlikely(mm->context.has_pgste))
		return 1;
#endif
	return 0;
}

/*
 * pgd/pmd/pte query functions
 */
#ifndef CONFIG_64BIT

static inline int pgd_present(pgd_t pgd) { return 1; }
static inline int pgd_none(pgd_t pgd)	 { return 0; }
static inline int pgd_bad(pgd_t pgd)	 { return 0; }

static inline int pud_present(pud_t pud) { return 1; }
static inline int pud_none(pud_t pud)	 { return 0; }
static inline int pud_large(pud_t pud)	 { return 0; }
static inline int pud_bad(pud_t pud)	 { return 0; }

#else /* CONFIG_64BIT */

static inline int pgd_present(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 1;
	return (pgd_val(pgd) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pgd_none(pgd_t pgd)
{
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R2)
		return 0;
	return (pgd_val(pgd) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pgd_bad(pgd_t pgd)
{
	/*
	 * With dynamic page table levels the pgd can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pgd_val(pgd) & mask) != 0;
}

static inline int pud_present(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 1;
	return (pud_val(pud) & _REGION_ENTRY_ORIGIN) != 0UL;
}

static inline int pud_none(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) < _REGION_ENTRY_TYPE_R3)
		return 0;
	return (pud_val(pud) & _REGION_ENTRY_INVALID) != 0UL;
}

static inline int pud_large(pud_t pud)
{
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) != _REGION_ENTRY_TYPE_R3)
		return 0;
	return !!(pud_val(pud) & _REGION3_ENTRY_LARGE);
}

static inline int pud_bad(pud_t pud)
{
	/*
	 * With dynamic page table levels the pud can be a region table
	 * entry or a segment table entry. Check for the bits that are
	 * invalid for either table entry.
	 */
	unsigned long mask =
		~_SEGMENT_ENTRY_ORIGIN & ~_REGION_ENTRY_INVALID &
		~_REGION_ENTRY_TYPE_MASK & ~_REGION_ENTRY_LENGTH;
	return (pud_val(pud) & mask) != 0;
}

#endif /* CONFIG_64BIT */

static inline int pmd_present(pmd_t pmd)
{
	return pmd_val(pmd) != _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_none(pmd_t pmd)
{
	return pmd_val(pmd) == _SEGMENT_ENTRY_INVALID;
}

static inline int pmd_large(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
#else
	return 0;
#endif
}

static inline int pmd_prot_none(pmd_t pmd)
{
	return (pmd_val(pmd) & _SEGMENT_ENTRY_INVALID) &&
		(pmd_val(pmd) & _SEGMENT_ENTRY_NONE);
}

static inline int pmd_bad(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	if (pmd_large(pmd))
		return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS_LARGE) != 0;
#endif
	return (pmd_val(pmd) & ~_SEGMENT_ENTRY_BITS) != 0;
}

#define __HAVE_ARCH_PMDP_SPLITTING_FLUSH
extern void pmdp_splitting_flush(struct vm_area_struct *vma,
				 unsigned long addr, pmd_t *pmdp);

#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
extern int pmdp_set_access_flags(struct vm_area_struct *vma,
				 unsigned long address, pmd_t *pmdp,
				 pmd_t entry, int dirty);

#define __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
extern int pmdp_clear_flush_young(struct vm_area_struct *vma,
				  unsigned long address, pmd_t *pmdp);

#define __HAVE_ARCH_PMD_WRITE
static inline int pmd_write(pmd_t pmd)
{
	if (pmd_prot_none(pmd))
		return 0;
	return (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) == 0;
}

static inline int pmd_young(pmd_t pmd)
{
	int young = 0;
#ifdef CONFIG_64BIT
	if (pmd_prot_none(pmd))
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_PROTECT) != 0;
	else
		young = (pmd_val(pmd) & _SEGMENT_ENTRY_YOUNG) != 0;
#endif
	return young;
}

static inline int pte_present(pte_t pte)
{
	/* Bit pattern: (pte & 0x001) == 0x001 */
	return (pte_val(pte) & _PAGE_PRESENT) != 0;
}

static inline int pte_none(pte_t pte)
{
	/* Bit pattern: pte == 0x400 */
	return pte_val(pte) == _PAGE_INVALID;
}

static inline int pte_swap(pte_t pte)
{
	/* Bit pattern: (pte & 0x603) == 0x402 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT |
				_PAGE_TYPE | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_TYPE);
}

static inline int pte_file(pte_t pte)
{
	/* Bit pattern: (pte & 0x601) == 0x600 */
	return (pte_val(pte) & (_PAGE_INVALID | _PAGE_PROTECT | _PAGE_PRESENT))
		== (_PAGE_INVALID | _PAGE_PROTECT);
}

static inline int pte_special(pte_t pte)
{
	return (pte_val(pte) & _PAGE_SPECIAL);
}

#define __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t a, pte_t b)
{
	return pte_val(a) == pte_val(b);
}

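/*
 * Editor's note: the PGSTEs live in the second half of the 4K page that
 * holds the 256 page table entries, which is why the helpers below
 * address them as ptep[PTRS_PER_PTE]. pgste_get_lock() implements a bit
 * spinlock on PGSTE_PCL_BIT with a compare-and-swap (csg) retry loop;
 * pgste_set_unlock() stores the new value with the lock bit cleared.
 */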
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
	unsigned long new = 0;
#ifdef CONFIG_PGSTE
	unsigned long old;

	preempt_disable();
	asm(
		"	lg	%0,%2\n"
		"0:	lgr	%1,%0\n"
		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
		"	csg	%0,%1,%2\n"
		"	jl	0b\n"
		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
	return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	asm(
		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
		"	stg	%1,%0\n"
		: "=Q" (ptep[PTRS_PER_PTE])
		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
		: "cc", "memory");
	preempt_enable();
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
	unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
	return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

static inline pgste_t pgste_update_all(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	unsigned long address, bits, skey;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	address = pte_val(*ptep) & PAGE_MASK;
	skey = (unsigned long) page_get_storage_key(address);
	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
	if (!(pgste_val(pgste) & PGSTE_HC_BIT) && (bits & _PAGE_CHANGED)) {
		/* Transfer dirty + referenced bit to host bits in pgste */
		pgste_val(pgste) |= bits << 52;
		page_set_storage_key(address, skey ^ bits, 0);
	} else if (!(pgste_val(pgste) & PGSTE_HR_BIT) &&
		   (bits & _PAGE_REFERENCED)) {
		/* Transfer referenced bit to host bit in pgste */
		pgste_val(pgste) |= PGSTE_HR_BIT;
		page_reset_referenced(address);
	}
	/* Transfer page changed & referenced bit to guest bits in pgste */
	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
	/* Copy page access key and fetch protection bit to pgste */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
	return pgste;
}

static inline pgste_t pgste_update_young(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pte_val(*ptep) & _PAGE_INVALID)
		return pgste;
	/* Get referenced bit from storage key */
	if (page_reset_referenced(pte_val(*ptep) & PAGE_MASK))
		pgste_val(pgste) |= PGSTE_HR_BIT | PGSTE_GR_BIT;
#endif
	return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
	unsigned long address;
	unsigned long nkey;

	if (pte_val(entry) & _PAGE_INVALID)
		return;
	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
	address = pte_val(entry) & PAGE_MASK;
	/*
	 * Set page access key and fetch protection bit from pgste.
	 * The guest C/R information is still in the PGSTE, set real
	 * key C/R to 0.
	 */
	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
	page_set_storage_key(address, nkey, 0);
#endif
}

static inline void pgste_set_pte(pte_t *ptep, pte_t entry)
{
	if (!MACHINE_HAS_ESOP &&
	    (pte_val(entry) & _PAGE_PRESENT) &&
	    (pte_val(entry) & _PAGE_WRITE)) {
		/*
		 * Without enhanced suppression-on-protection force
		 * the dirty bit on for all writable ptes.
		 */
		pte_val(entry) |= _PAGE_DIRTY;
		pte_val(entry) &= ~_PAGE_PROTECT;
	}
	*ptep = entry;
}

/**
 * struct gmap - guest address space
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the page directory
 * @asce: address space control element for gmap page table
 * @crst_list: list of all crst tables used in the guest address space
 * @pfault_enabled: defines if pfaults are applicable for the guest
 */
struct gmap {
	struct list_head list;
	struct mm_struct *mm;
	unsigned long *table;
	unsigned long asce;
	void *private;
	struct list_head crst_list;
	bool pfault_enabled;
};

/**
 * struct gmap_rmap - reverse mapping for segment table entries
 * @gmap: pointer to the gmap structure
 * @entry: pointer to a segment table entry
 * @vmaddr: virtual address in the guest address space
 */
struct gmap_rmap {
	struct list_head list;
	struct gmap *gmap;
	unsigned long *entry;
	unsigned long vmaddr;
};

/**
 * struct gmap_pgtable - gmap information attached to a page table
 * @vmaddr: address of the 1MB segment in the process virtual memory
 * @mapper: list of segment table entries mapping a page table
 */
struct gmap_pgtable {
	unsigned long vmaddr;
	struct list_head mapper;
};

/**
 * struct gmap_notifier - notify function block for page invalidation
 * @notifier_call: address of callback function
 */
struct gmap_notifier {
	struct list_head list;
	void (*notifier_call)(struct gmap *gmap, unsigned long address);
};

struct gmap *gmap_alloc(struct mm_struct *mm);
void gmap_free(struct gmap *gmap);
void gmap_enable(struct gmap *gmap);
void gmap_disable(struct gmap *gmap);
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len);
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len);
unsigned long __gmap_translate(unsigned long address, struct gmap *);
unsigned long gmap_translate(unsigned long address, struct gmap *);
unsigned long __gmap_fault(unsigned long address, struct gmap *);
unsigned long gmap_fault(unsigned long address, struct gmap *);
void gmap_discard(unsigned long from, unsigned long to, struct gmap *);
void __gmap_zap(unsigned long address, struct gmap *);

void gmap_register_ipte_notifier(struct gmap_notifier *);
void gmap_unregister_ipte_notifier(struct gmap_notifier *);
int gmap_ipte_notify(struct gmap *, unsigned long start, unsigned long len);
void gmap_do_ipte_notify(struct mm_struct *, pte_t *);

static inline pgste_t pgste_ipte_notify(struct mm_struct *mm,
					pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
	if (pgste_val(pgste) & PGSTE_IN_BIT) {
		pgste_val(pgste) &= ~PGSTE_IN_BIT;
		gmap_do_ipte_notify(mm, ptep);
	}
#endif
	return pgste;
}

/*
 * Certain architectures need to do special things when PTEs
 * within a page table are directly modified. Thus, the following
 * hook is made available.
 */
static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
			      pte_t *ptep, pte_t entry)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
		pgste_set_key(ptep, pgste, entry);
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else {
		if (!(pte_val(entry) & _PAGE_INVALID) && MACHINE_HAS_EDAT1)
			pte_val(entry) |= _PAGE_CO;
		*ptep = entry;
	}
}

/*
 * query functions pte_write/pte_dirty/pte_young only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline int pte_write(pte_t pte)
{
	return (pte_val(pte) & _PAGE_WRITE) != 0;
}

static inline int pte_dirty(pte_t pte)
{
	return (pte_val(pte) & _PAGE_DIRTY) != 0;
}

static inline int pte_young(pte_t pte)
{
	return (pte_val(pte) & _PAGE_YOUNG) != 0;
}

#define __HAVE_ARCH_PTE_UNUSED
static inline int pte_unused(pte_t pte)
{
	return pte_val(pte) & _PAGE_UNUSED;
}

/*
 * pgd/pmd/pte modification functions
 */

static inline void pgd_clear(pgd_t *pgd)
{
#ifdef CONFIG_64BIT
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
#endif
}

static inline void pud_clear(pud_t *pud)
{
#ifdef CONFIG_64BIT
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pud_val(*pud) = _REGION3_ENTRY_EMPTY;
#endif
}

static inline void pmd_clear(pmd_t *pmdp)
{
	pmd_val(*pmdp) = _SEGMENT_ENTRY_INVALID;
}

static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	pte_val(*ptep) = _PAGE_INVALID;
}

/*
 * The following pte modification functions only work if
 * pte_present() is true. Undefined behaviour if not..
 */
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
	pte_val(pte) &= _PAGE_CHG_MASK;
	pte_val(pte) |= pgprot_val(newprot);
	/*
	 * newprot for PAGE_NONE, PAGE_READ and PAGE_WRITE has the
	 * invalid bit set, clear it again for readable, young pages
	 */
	if ((pte_val(pte) & _PAGE_YOUNG) && (pte_val(pte) & _PAGE_READ))
		pte_val(pte) &= ~_PAGE_INVALID;
	/*
	 * newprot for PAGE_READ and PAGE_WRITE has the page protection
	 * bit set, clear it again for writable, dirty pages
	 */
	if ((pte_val(pte) & _PAGE_DIRTY) && (pte_val(pte) & _PAGE_WRITE))
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_wrprotect(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_WRITE;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkwrite(pte_t pte)
{
	pte_val(pte) |= _PAGE_WRITE;
	if (pte_val(pte) & _PAGE_DIRTY)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkclean(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_DIRTY;
	pte_val(pte) |= _PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkdirty(pte_t pte)
{
	pte_val(pte) |= _PAGE_DIRTY;
	if (pte_val(pte) & _PAGE_WRITE)
		pte_val(pte) &= ~_PAGE_PROTECT;
	return pte;
}

static inline pte_t pte_mkold(pte_t pte)
{
	pte_val(pte) &= ~_PAGE_YOUNG;
	pte_val(pte) |= _PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkyoung(pte_t pte)
{
	pte_val(pte) |= _PAGE_YOUNG;
	if (pte_val(pte) & _PAGE_READ)
		pte_val(pte) &= ~_PAGE_INVALID;
	return pte;
}

static inline pte_t pte_mkspecial(pte_t pte)
{
	pte_val(pte) |= _PAGE_SPECIAL;
	return pte;
}

#ifdef CONFIG_HUGETLB_PAGE
static inline pte_t pte_mkhuge(pte_t pte)
{
	pte_val(pte) |= _PAGE_LARGE;
	return pte;
}
#endif

/*
 * Get (and clear) the user dirty bit for a pte.
 */
static inline int ptep_test_and_clear_user_dirty(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int dirty = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_all(ptep, pgste);
		dirty = !!(pgste_val(pgste) & PGSTE_HC_BIT);
		pgste_val(pgste) &= ~PGSTE_HC_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return dirty;
}

/*
 * Get (and clear) the user referenced bit for a pte.
 */
static inline int ptep_test_and_clear_user_young(struct mm_struct *mm,
						 pte_t *ptep)
{
	pgste_t pgste;
	int young = 0;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_update_young(ptep, pgste);
		young = !!(pgste_val(pgste) & PGSTE_HR_BIT);
		pgste_val(pgste) &= ~PGSTE_HR_BIT;
		pgste_set_unlock(ptep, pgste);
	}
	return young;
}

static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidation + global TLB flush for the pte */
	asm volatile(
		"	ipte	%2,%3"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

static inline void __ptep_ipte_local(unsigned long address, pte_t *ptep)
{
	unsigned long pto = (unsigned long) ptep;

#ifndef CONFIG_64BIT
	/* pto in ESA mode must point to the start of the segment table */
	pto &= 0x7ffffc00;
#endif
	/* Invalidation + local TLB flush for the pte */
	asm volatile(
		"	.insn rrf,0xb2210000,%2,%3,0,1"
		: "=m" (*ptep) : "m" (*ptep), "a" (pto), "a" (address));
}

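/*
 * Editor's note on the flush helpers below: the lower 16 bits of
 * mm->context.attach_count count the CPUs the address space is attached
 * to, and a flusher announces itself by adding 0x10000 before looking at
 * that count. "(count & 0xffff) <= active" therefore means that no CPU
 * other than (possibly) the current one has the mm attached, in which
 * case a local flush (or, for the lazy variant, just marking the entry
 * invalid and setting flush_mm) is sufficient.
 */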
static inline void ptep_flush_direct(struct mm_struct *mm,
				     unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__ptep_ipte_local(address, ptep);
	else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void ptep_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pte_t *ptep)
{
	int active, count;

	if (pte_val(*ptep) & _PAGE_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pte_val(*ptep) |= _PAGE_INVALID;
		mm->context.flush_mm = 1;
	} else
		__ptep_ipte(address, ptep);
	atomic_sub(0x10000, &mm->context.attach_count);
}

#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long addr, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;
	int young;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_direct(vma->vm_mm, addr, ptep);
	young = pte_young(pte);
	pte = pte_mkold(pte);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_pte(ptep, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;

	return young;
}

#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	return ptep_test_and_clear_young(vma, address, ptep);
}

/*
 * This is hard to understand. ptep_get_and_clear and ptep_clear_flush
 * both clear the TLB for the unmapped pte. The reason is that
 * ptep_get_and_clear is used in common code (e.g. change_pte_range)
 * to modify an active pte. The sequence is
 *   1) ptep_get_and_clear
 *   2) set_pte_at
 *   3) flush_tlb_range
 * On s390 the tlb needs to get flushed with the modification of the pte
 * if the pte is active. The only way this can be implemented is to
 * have ptep_get_and_clear do the tlb flush. In exchange flush_tlb_range
 * is a nop.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_MODIFY_PROT_TRANSACTION
static inline pte_t ptep_modify_prot_start(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste_ipte_notify(mm, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_lazy(mm, address, ptep);

	if (mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set(ptep, pgste);
	}
	return pte;
}

static inline void ptep_modify_prot_commit(struct mm_struct *mm,
					   unsigned long address,
					   pte_t *ptep, pte_t pte)
{
	pgste_t pgste;

	if (mm_has_pgste(mm)) {
		pgste = pgste_get(ptep);
		pgste_set_key(ptep, pgste, pte);
		pgste_set_pte(ptep, pte);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = pte;
}

#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
static inline pte_t ptep_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte;

	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
	}

	pte = *ptep;
	ptep_flush_direct(vma->vm_mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (mm_has_pgste(vma->vm_mm)) {
		if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
		    _PGSTE_GPS_USAGE_UNUSED)
			pte_val(pte) |= _PAGE_UNUSED;
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

/*
 * The batched pte unmap code uses ptep_get_and_clear_full to clear the
 * ptes. Here an optimization is possible. tlb_gather_mmu flushes all
 * tlbs of an mm if it can guarantee that the ptes of the mm_struct
 * cannot be accessed while the batched unmap is running. In this case
 * full==1 and a simple pte_clear is enough. See tlb.h.
 */
#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
					    unsigned long address,
					    pte_t *ptep, int full)
{
	pgste_t pgste;
	pte_t pte;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(mm, ptep, pgste);
	}

	pte = *ptep;
	if (!full)
		ptep_flush_lazy(mm, address, ptep);
	pte_val(*ptep) = _PAGE_INVALID;

	if (!full && mm_has_pgste(mm)) {
		pgste = pgste_update_all(&pte, pgste);
		pgste_set_unlock(ptep, pgste);
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_WRPROTECT
static inline pte_t ptep_set_wrprotect(struct mm_struct *mm,
				       unsigned long address, pte_t *ptep)
{
	pgste_t pgste;
	pte_t pte = *ptep;

	if (pte_write(pte)) {
		if (mm_has_pgste(mm)) {
			pgste = pgste_get_lock(ptep);
			pgste = pgste_ipte_notify(mm, ptep, pgste);
		}

		ptep_flush_lazy(mm, address, ptep);
		pte = pte_wrprotect(pte);

		if (mm_has_pgste(mm)) {
			pgste_set_pte(ptep, pte);
			pgste_set_unlock(ptep, pgste);
		} else
			*ptep = pte;
	}
	return pte;
}

#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
static inline int ptep_set_access_flags(struct vm_area_struct *vma,
					unsigned long address, pte_t *ptep,
					pte_t entry, int dirty)
{
	pgste_t pgste;

	if (pte_same(*ptep, entry))
		return 0;
	if (mm_has_pgste(vma->vm_mm)) {
		pgste = pgste_get_lock(ptep);
		pgste = pgste_ipte_notify(vma->vm_mm, ptep, pgste);
	}

	ptep_flush_direct(vma->vm_mm, address, ptep);

	if (mm_has_pgste(vma->vm_mm)) {
		pgste_set_pte(ptep, entry);
		pgste_set_unlock(ptep, pgste);
	} else
		*ptep = entry;
	return 1;
}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
	pte_t __pte;
	pte_val(__pte) = physpage + pgprot_val(pgprot);
	return pte_mkyoung(__pte);
}
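
/*
 * Worked example (editor's addition): mk_pte_phys(0x2000, PAGE_READ)
 * starts with 0x2000 | 0x611 = 0x2611, and pte_mkyoung() then sets
 * _PAGE_YOUNG and, because _PAGE_READ is set, clears _PAGE_INVALID,
 * giving 0x2215: a young, readable, hardware-valid (but write-protected)
 * pte for the page frame at physical address 0x2000.
 */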

static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{
	unsigned long physpage = page_to_phys(page);
	pte_t __pte = mk_pte_phys(physpage, pgprot);

	if (pte_write(__pte) && PageDirty(page))
		__pte = pte_mkdirty(__pte);
	return __pte;
}

#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pte_index(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
#define pgd_offset_k(address) pgd_offset(&init_mm, address)

#ifndef CONFIG_64BIT

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pmd) ({ BUG(); 0UL; })
#define pgd_deref(pmd) ({ BUG(); 0UL; })

#define pud_offset(pgd, address) ((pud_t *) pgd)
#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))

#else /* CONFIG_64BIT */

#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
#define pgd_deref(pgd) (pgd_val(pgd) & _REGION_ENTRY_ORIGIN)

static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
{
	pud_t *pud = (pud_t *) pgd;
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
		pud = (pud_t *) pgd_deref(*pgd);
	return pud + pud_index(address);
}

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
{
	pmd_t *pmd = (pmd_t *) pud;
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
		pmd = (pmd_t *) pud_deref(*pud);
	return pmd + pmd_index(address);
}

#endif /* CONFIG_64BIT */

#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))

#define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)

/* Find an entry in the lowest level page table. */
#define pte_offset(pmd, addr) ((pte_t *) pmd_deref(*(pmd)) + pte_index(addr))
#define pte_offset_kernel(pmd, address) pte_offset(pmd, address)
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
static inline unsigned long massage_pgprot_pmd(pgprot_t pgprot)
{
	/*
	 * pgprot is PAGE_NONE, PAGE_READ, or PAGE_WRITE (see __Pxxx / __Sxxx)
	 * Convert to segment table entry format.
	 */
	if (pgprot_val(pgprot) == pgprot_val(PAGE_NONE))
		return pgprot_val(SEGMENT_NONE);
	if (pgprot_val(pgprot) == pgprot_val(PAGE_READ))
		return pgprot_val(SEGMENT_READ);
	return pgprot_val(SEGMENT_WRITE);
}

static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	if (pmd_prot_none(pmd)) {
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	} else {
		pmd_val(pmd) |= _SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_INVALID;
	}
#endif
	return pmd;
}

static inline pmd_t pmd_mkold(pmd_t pmd)
{
#ifdef CONFIG_64BIT
	if (pmd_prot_none(pmd)) {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	} else {
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_YOUNG;
		pmd_val(pmd) |= _SEGMENT_ENTRY_INVALID;
	}
#endif
	return pmd;
}

static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
	int young;

	young = pmd_young(pmd);
	pmd_val(pmd) &= _SEGMENT_CHG_MASK;
	pmd_val(pmd) |= massage_pgprot_pmd(newprot);
	if (young)
		pmd = pmd_mkyoung(pmd);
	return pmd;
}

static inline pmd_t mk_pmd_phys(unsigned long physpage, pgprot_t pgprot)
{
	pmd_t __pmd;
	pmd_val(__pmd) = physpage + massage_pgprot_pmd(pgprot);
	return pmd_mkyoung(__pmd);
}

static inline pmd_t pmd_mkwrite(pmd_t pmd)
{
	/* Do not clobber PROT_NONE segments! */
	if (!pmd_prot_none(pmd))
		pmd_val(pmd) &= ~_SEGMENT_ENTRY_PROTECT;
	return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLB_PAGE */

static inline void __pmdp_csp(pmd_t *pmdp)
{
	register unsigned long reg2 asm("2") = pmd_val(*pmdp);
	register unsigned long reg3 asm("3") = pmd_val(*pmdp) |
					       _SEGMENT_ENTRY_INVALID;
	register unsigned long reg4 asm("4") = ((unsigned long) pmdp) + 5;

	asm volatile(
		"	csp %1,%3"
		: "=m" (*pmdp)
		: "d" (reg2), "d" (reg3), "d" (reg4), "m" (*pmdp) : "cc");
}

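/*
 * Editor's note: the two helpers below emit IDTE (invalidate DAT table
 * entry, opcode 0xb98e) via .insn. The segment-table origin that IDTE
 * needs is recomputed from the entry address by subtracting the entry's
 * byte offset within the table; the final operand selects global (0) or
 * local (1) TLB-clearing scope, matching the _local suffix.
 */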
static inline void __pmdp_idte(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,0"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void __pmdp_idte_local(unsigned long address, pmd_t *pmdp)
{
	unsigned long sto;

	sto = (unsigned long) pmdp - pmd_index(address) * sizeof(pmd_t);
	asm volatile(
		"	.insn	rrf,0xb98e0000,%2,%3,0,1"
		: "=m" (*pmdp)
		: "m" (*pmdp), "a" (sto), "a" ((address & HPAGE_MASK))
		: "cc" );
}

static inline void pmdp_flush_direct(struct mm_struct *mm,
				     unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	if (!MACHINE_HAS_IDTE) {
		__pmdp_csp(pmdp);
		return;
	}
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if (MACHINE_HAS_TLB_LC && (count & 0xffff) <= active &&
	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		__pmdp_idte_local(address, pmdp);
	else
		__pmdp_idte(address, pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

static inline void pmdp_flush_lazy(struct mm_struct *mm,
				   unsigned long address, pmd_t *pmdp)
{
	int active, count;

	if (pmd_val(*pmdp) & _SEGMENT_ENTRY_INVALID)
		return;
	active = (mm == current->active_mm) ? 1 : 0;
	count = atomic_add_return(0x10000, &mm->context.attach_count);
	if ((count & 0xffff) <= active) {
		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
		mm->context.flush_mm = 1;
	} else if (MACHINE_HAS_IDTE)
		__pmdp_idte(address, pmdp);
	else
		__pmdp_csp(pmdp);
	atomic_sub(0x10000, &mm->context.attach_count);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#define __HAVE_ARCH_PGTABLE_DEPOSIT
extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				       pgtable_t pgtable);

#define __HAVE_ARCH_PGTABLE_WITHDRAW
extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);

static inline int pmd_trans_splitting(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_SPLIT;
}

static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
			      pmd_t *pmdp, pmd_t entry)
{
	if (!(pmd_val(entry) & _SEGMENT_ENTRY_INVALID) && MACHINE_HAS_EDAT1)
		pmd_val(entry) |= _SEGMENT_ENTRY_CO;
	*pmdp = entry;
}

static inline pmd_t pmd_mkhuge(pmd_t pmd)
{
	pmd_val(pmd) |= _SEGMENT_ENTRY_LARGE;
	return pmd;
}

static inline pmd_t pmd_wrprotect(pmd_t pmd)
{
	/* Do not clobber PROT_NONE segments! */
	if (!pmd_prot_none(pmd))
		pmd_val(pmd) |= _SEGMENT_ENTRY_PROTECT;
	return pmd;
}

static inline pmd_t pmd_mkdirty(pmd_t pmd)
{
	/* No dirty bit in the segment table entry. */
	return pmd;
}

#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
					    unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd;

	pmd = *pmdp;
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
	*pmdp = pmd_mkold(pmd);
	return pmd_young(pmd);
}

#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
				       unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	pmdp_flush_direct(mm, address, pmdp);
	pmd_clear(pmdp);
	return pmd;
}

#define __HAVE_ARCH_PMDP_CLEAR_FLUSH
static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
				     unsigned long address, pmd_t *pmdp)
{
	return pmdp_get_and_clear(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_INVALIDATE
static inline void pmdp_invalidate(struct vm_area_struct *vma,
				   unsigned long address, pmd_t *pmdp)
{
	pmdp_flush_direct(vma->vm_mm, address, pmdp);
}

#define __HAVE_ARCH_PMDP_SET_WRPROTECT
static inline void pmdp_set_wrprotect(struct mm_struct *mm,
				      unsigned long address, pmd_t *pmdp)
{
	pmd_t pmd = *pmdp;

	if (pmd_write(pmd)) {
		pmdp_flush_direct(mm, address, pmdp);
		set_pmd_at(mm, address, pmdp, pmd_wrprotect(pmd));
	}
}

#define pfn_pmd(pfn, pgprot)	mk_pmd_phys(__pa((pfn) << PAGE_SHIFT), (pgprot))
#define mk_pmd(page, pgprot)	pfn_pmd(page_to_pfn(page), (pgprot))

static inline int pmd_trans_huge(pmd_t pmd)
{
	return pmd_val(pmd) & _SEGMENT_ENTRY_LARGE;
}

static inline int has_transparent_hugepage(void)
{
	return MACHINE_HAS_HPAGE ? 1 : 0;
}

static inline unsigned long pmd_pfn(pmd_t pmd)
{
	return pmd_val(pmd) >> PAGE_SHIFT;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * 31 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 0, 20 and bit 23 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 21, 22, 30 and 31 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 1-19 and bits 24-29 to store type and offset.
 * We use the 5 bits from 25-29 for the type and the 20 bits from 1-19
 * plus 24 for the offset.
 * 0|     offset        |0110|o|type |00|
 * 0 0000000001111111111 2222 2 22222 33
 * 0 1234567890123456789 0123 4 56789 01
 *
 * 64 bit swap entry format:
 * A page-table entry has some bits we have to treat in a special way.
 * Bits 52 and bit 55 have to be zero, otherwise a specification
 * exception will occur instead of a page translation exception. The
 * specification exception has the bad habit of not storing the
 * necessary information in the lowcore.
 * Bits 53, 54, 62 and 63 are used to indicate the page type.
 * A swap pte is indicated by bit pattern (pte & 0x603) == 0x402
 * This leaves the bits 0-51 and bits 56-61 to store type and offset.
 * We use the 5 bits from 57-61 for the type and the 53 bits from 0-51
 * plus 56 for the offset.
 * |                     offset                         |0110|o|type |00|
 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
 */
#ifndef CONFIG_64BIT
#define __SWP_OFFSET_MASK (~0UL >> 12)
#else
#define __SWP_OFFSET_MASK (~0UL >> 11)
#endif
static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
{
	pte_t pte;
	offset &= __SWP_OFFSET_MASK;
	pte_val(pte) = _PAGE_INVALID | _PAGE_TYPE | ((type & 0x1f) << 2) |
		((offset & 1UL) << 7) | ((offset & ~1UL) << 11);
	return pte;
}
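
/*
 * Worked example (editor's addition, 64 bit): for type = 3 and
 * offset = 0x1234 the encoding yields
 *
 *	pte = 0x402 | (3 << 2) | (0x1234 << 11) = 0x91a40e
 *
 * (the odd-offset bit "o" at position 7 stays zero because the offset is
 * even), and __swp_type()/__swp_offset() below recover 3 and 0x1234.
 */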

#define __swp_type(entry)	(((entry).val >> 2) & 0x1f)
#define __swp_offset(entry)	(((entry).val >> 11) | (((entry).val >> 7) & 1))
#define __swp_entry(type,offset) ((swp_entry_t) { pte_val(mk_swap_pte((type),(offset))) })

#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

#ifndef CONFIG_64BIT
# define PTE_FILE_MAX_BITS	26
#else /* CONFIG_64BIT */
# define PTE_FILE_MAX_BITS	59
#endif /* CONFIG_64BIT */

#define pte_to_pgoff(__pte) \
	((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))

#define pgoff_to_pte(__off) \
	((pte_t) { ((((__off) & 0x7f) << 1) + (((__off) >> 7) << 12)) \
		   | _PAGE_INVALID | _PAGE_PROTECT })
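
/*
 * Worked example (editor's addition): pgoff_to_pte(0x1234) stores the
 * low 7 offset bits at pte bits 1-7 and the remaining bits above bit 12:
 *
 *	((0x34 << 1) + (0x24 << 12)) | 0x600 = 0x24668
 *
 * and pte_to_pgoff() reverses this: (0x24 << 7) + 0x34 = 0x1234.
 */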

#endif /* !__ASSEMBLY__ */

#define kern_addr_valid(addr)   (1)

extern int vmem_add_mapping(unsigned long start, unsigned long size);
extern int vmem_remove_mapping(unsigned long start, unsigned long size);
extern int s390_enable_sie(void);

/*
 * No page table caches to initialise
 */
static inline void pgtable_cache_init(void) { }
static inline void check_pgt_cache(void) { }

#include <asm-generic/pgtable.h>

#endif /* _ASM_S390_PGTABLE_H */