/*
 * include/asm-s390/pgalloc.h
 *
 *  S390 version
 *    Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/gfp.h>
#include <linux/mm.h>

unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *, unsigned long);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *);

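/*
 * clear_table() seeds the first entry with 'val' and then uses the MVC
 * instruction to propagate it through the rest of the table in 256-byte
 * chunks; 'n' is turned into the number of remaining 256-byte blocks.
 */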
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
	typedef struct { char _[n]; } addrtype;

	*s = val;
	n = (n / 256) - 1;
	asm volatile(
#ifdef CONFIG_64BIT
		"	mvc	8(248,%0),0(%0)\n"
#else
		"	mvc	4(252,%0),0(%0)\n"
#endif
		"0:	mvc	256(256,%0),0(%0)\n"
		"	la	%0,256(%0)\n"
		"	brct	%1,0b\n"
		: "+a" (s), "+d" (n), "=m" (*(addrtype *) s)
		: "m" (*(addrtype *) s));
}

static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	clear_table(crst, entry, sizeof(unsigned long)*2048);
}

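/*
 * Without CONFIG_64BIT there are only two paging levels (segment table and
 * page table), so the pud and pmd levels are folded and must never be
 * allocated or populated directly.
 */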
#ifndef CONFIG_64BIT

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	return _SEGMENT_ENTRY_EMPTY;
}

#define pud_alloc_one(mm,address)		({ BUG(); ((pud_t *)2); })
#define pud_free(mm, x)				do { } while (0)

#define pmd_alloc_one(mm,address)		({ BUG(); ((pmd_t *)2); })
#define pmd_free(mm, x)				do { } while (0)

#define pgd_populate(mm, pgd, pud)		BUG()
#define pud_populate(mm, pud, pmd)		BUG()

#else /* CONFIG_64BIT */

static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm->context.asce_limit <= (1UL << 31))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm->context.asce_limit <= (1UL << 42))
		return _REGION3_ENTRY_EMPTY;
	return _REGION2_ENTRY_EMPTY;
}

int crst_table_upgrade(struct mm_struct *, unsigned long limit);
void crst_table_downgrade(struct mm_struct *, unsigned long limit);

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);
	if (table)
		crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	return (pmd_t *) table;
}
#define pmd_free(mm, pmd) crst_table_free(mm, (unsigned long *) pmd)

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	pgd_val(*pgd) = _REGION2_ENTRY | __pa(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

#endif /* CONFIG_64BIT */

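/*
 * pgd_alloc() also initializes the per-mm lock and lists used by the page
 * table allocator (pgtable_list) and by KVM guest mappings (gmap_list)
 * before allocating the top-level CRST table.
 */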
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	spin_lock_init(&mm->context.list_lock);
	INIT_LIST_HEAD(&mm->context.pgtable_list);
	INIT_LIST_HEAD(&mm->context.gmap_list);
	return (pgd_t *) crst_table_alloc(mm);
}
#define pgd_free(mm, pgd) crst_table_free(mm, (unsigned long *) pgd)

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

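/* Extract the page table origin from a segment table entry. */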
#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))
#define pte_alloc_one(mm, vmaddr) ((pte_t *) page_table_alloc(mm, vmaddr))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

extern void rcu_table_freelist_finish(void);

#endif /* _S390_PGALLOC_H */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  S390 version
 *    Copyright IBM Corp. 1999, 2000
 *    Author(s): Hartmut Penner (hp@de.ibm.com)
 *               Martin Schwidefsky (schwidefsky@de.ibm.com)
 *
 *  Derived from "include/asm-i386/pgalloc.h"
 *    Copyright (C) 1994  Linus Torvalds
 */

#ifndef _S390_PGALLOC_H
#define _S390_PGALLOC_H

#include <linux/threads.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/mm.h>

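/*
 * A region or segment (CRST) table has 2048 eight-byte entries, i.e.
 * 16 KB or four pages, hence an order-2 allocation.
 */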
#define CRST_ALLOC_ORDER 2

unsigned long *crst_table_alloc(struct mm_struct *);
void crst_table_free(struct mm_struct *, unsigned long *);

unsigned long *page_table_alloc(struct mm_struct *);
struct page *page_table_alloc_pgste(struct mm_struct *mm);
void page_table_free(struct mm_struct *, unsigned long *);
void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste;

static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{
	memset64((u64 *)crst, entry, _CRST_ENTRIES);
}

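/*
 * The type of an empty top-level entry depends on how many paging levels
 * the address space actually uses; each folded level selects the next
 * lower region or segment entry type.
 */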
static inline unsigned long pgd_entry_type(struct mm_struct *mm)
{
	if (mm_pmd_folded(mm))
		return _SEGMENT_ENTRY_EMPTY;
	if (mm_pud_folded(mm))
		return _REGION3_ENTRY_EMPTY;
	if (mm_p4d_folded(mm))
		return _REGION2_ENTRY_EMPTY;
	return _REGION1_ENTRY_EMPTY;
}

int crst_table_upgrade(struct mm_struct *mm, unsigned long limit);
void crst_table_downgrade(struct mm_struct *);

static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);

	if (table)
		crst_table_init(table, _REGION2_ENTRY_EMPTY);
	return (p4d_t *) table;
}
#define p4d_free(mm, p4d) crst_table_free(mm, (unsigned long *) p4d)

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long *table = crst_table_alloc(mm);
	if (table)
		crst_table_init(table, _REGION3_ENTRY_EMPTY);
	return (pud_t *) table;
}
#define pud_free(mm, pud) crst_table_free(mm, (unsigned long *) pud)

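/*
 * pmd tables additionally need pgtable_pmd_page_ctor(), which sets up the
 * split pmd lock state in the backing struct page; if that fails the
 * freshly allocated table has to be released again.
 */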
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	crst_table_init(table, _SEGMENT_ENTRY_EMPTY);
	if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
		crst_table_free(mm, table);
		return NULL;
	}
	return (pmd_t *) table;
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	crst_table_free(mm, (unsigned long *) pmd);
}

static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	pgd_val(*pgd) = _REGION1_ENTRY | __pa(p4d);
}

static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	p4d_val(*p4d) = _REGION2_ENTRY | __pa(pud);
}

static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
}

static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned long *table = crst_table_alloc(mm);

	if (!table)
		return NULL;
	if (mm->context.asce_limit == _REGION3_SIZE) {
		/* Forking a compat process with 2 page table levels */
		if (!pgtable_pmd_page_ctor(virt_to_page(table))) {
			crst_table_free(mm, table);
			return NULL;
		}
	}
	return (pgd_t *) table;
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	if (mm->context.asce_limit == _REGION3_SIZE)
		pgtable_pmd_page_dtor(virt_to_page(pgd));
	crst_table_free(mm, (unsigned long *) pgd);
}

static inline void pmd_populate(struct mm_struct *mm,
				pmd_t *pmd, pgtable_t pte)
{
	pmd_val(*pmd) = _SEGMENT_ENTRY + __pa(pte);
}

#define pmd_populate_kernel(mm, pmd, pte) pmd_populate(mm, pmd, pte)

#define pmd_pgtable(pmd) \
	(pgtable_t)(pmd_val(pmd) & -sizeof(pte_t)*PTRS_PER_PTE)

/*
 * page table entry allocation/free routines.
 */
#define pte_alloc_one_kernel(mm) ((pte_t *)page_table_alloc(mm))
#define pte_alloc_one(mm) ((pte_t *)page_table_alloc(mm))

#define pte_free_kernel(mm, pte) page_table_free(mm, (unsigned long *) pte)
#define pte_free(mm, pte) page_table_free(mm, (unsigned long *) pte)

extern void rcu_table_freelist_finish(void);

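/*
 * Page table helpers for the kernel mapping (vmem) and for stand-alone
 * "base" ASCEs that are not attached to any mm.
 */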
void vmem_map_init(void);
void *vmem_crst_alloc(unsigned long val);
pte_t *vmem_pte_alloc(void);

unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages);
void base_asce_free(unsigned long asce);

#endif /* _S390_PGALLOC_H */