// SPDX-License-Identifier: GPL-2.0
/*
 * Page table allocation functions
 *
 * Copyright IBM Corp. 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sysctl.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_PGSTE

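/*
 * The vm.allocate_pgste sysctl forces new address spaces to be set up
 * with PGSTE-capable page tables, so that a process can act as a KVM
 * host even if its binary was not marked for it (assumption: the usual
 * marker is the S390_PGSTE ELF segment type). Only the values 0 and 1
 * are accepted.
 */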
int page_table_allocate_pgste = 0;
EXPORT_SYMBOL(page_table_allocate_pgste);

static struct ctl_table page_table_sysctl[] = {
	{
		.procname	= "allocate_pgste",
		.data		= &page_table_allocate_pgste,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO | S_IWUSR,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
};

static int __init page_table_register_sysctl(void)
{
	return register_sysctl("vm", page_table_sysctl) ? 0 : -ENOMEM;
}
__initcall(page_table_register_sysctl);

#endif /* CONFIG_PGSTE */

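/*
 * Region and segment tables (CRSTs) have 2048 eight-byte entries and
 * thus span four pages (CRST_ALLOC_ORDER == 2). __arch_set_page_dat()
 * flags the backing pages as used for DAT before the table is handed
 * out.
 */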
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	unsigned long *table;

	if (!ptdesc)
		return NULL;
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER);
	return table;
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	/* change all active ASCEs to avoid the creation of new TLBs */
	if (current->active_mm == mm) {
		get_lowcore()->user_asce.val = mm->context.asce;
		local_ctl_load(7, &get_lowcore()->user_asce);
	}
	__tlb_flush_local();
}

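/*
 * Add page table levels on top of the current topmost table so that
 * the address space of @mm covers addresses up to @end. Called, for
 * example, from the arch mmap code when a mapping request does not
 * fit below the current asce_limit.
 */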
int crst_table_upgrade(struct mm_struct *mm, unsigned long end)
{
	unsigned long *pgd = NULL, *p4d = NULL, *__pgd;
	unsigned long asce_limit = mm->context.asce_limit;

	/* upgrade should only happen from 3 to 4, 3 to 5, or 4 to 5 levels */
	VM_BUG_ON(asce_limit < _REGION2_SIZE);

	if (end <= asce_limit)
		return 0;

	if (asce_limit == _REGION2_SIZE) {
		p4d = crst_table_alloc(mm);
		if (unlikely(!p4d))
			goto err_p4d;
		crst_table_init(p4d, _REGION2_ENTRY_EMPTY);
	}
	if (end > _REGION1_SIZE) {
		pgd = crst_table_alloc(mm);
		if (unlikely(!pgd))
			goto err_pgd;
		crst_table_init(pgd, _REGION1_ENTRY_EMPTY);
	}

	spin_lock_bh(&mm->page_table_lock);

	/*
	 * The caller holds mmap_lock, so asce_limit cannot change under
	 * us, and there is no reason to optimize for the opposite case.
	 * If that assumption ever breaks, the check below will let us
	 * know.
	 */
	VM_BUG_ON(asce_limit != mm->context.asce_limit);

	if (p4d) {
		__pgd = (unsigned long *) mm->pgd;
		p4d_populate(mm, (p4d_t *) p4d, (pud_t *) __pgd);
		mm->pgd = (pgd_t *) p4d;
		mm->context.asce_limit = _REGION1_SIZE;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION2;
		mm_inc_nr_puds(mm);
	}
	if (pgd) {
		__pgd = (unsigned long *) mm->pgd;
		pgd_populate(mm, (pgd_t *) pgd, (p4d_t *) __pgd);
		mm->pgd = (pgd_t *) pgd;
		mm->context.asce_limit = TASK_SIZE_MAX;
		mm->context.asce = __pa(mm->pgd) | _ASCE_TABLE_LENGTH |
			_ASCE_USER_BITS | _ASCE_TYPE_REGION1;
	}

	spin_unlock_bh(&mm->page_table_lock);

	on_each_cpu(__crst_table_upgrade, mm, 0);

	return 0;

err_pgd:
	crst_table_free(mm, p4d);
err_p4d:
	return -ENOMEM;
}

#ifdef CONFIG_PGSTE

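/*
 * Allocate a 4K page table including the PGSTE area, as used by the
 * gmap (guest mapping) code: the lower 2K hold 256 invalid PTEs, the
 * upper 2K the corresponding page status table entries (PGSTEs), which
 * start out zeroed.
 */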
struct ptdesc *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	u64 *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (ptdesc) {
		table = (u64 *)ptdesc_to_virt(ptdesc);
		__arch_set_page_dat(table, 1);
		memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
		memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	}
	return ptdesc;
}

void page_table_free_pgste(struct ptdesc *ptdesc)
{
	pagetable_free(ptdesc);
}

#endif /* CONFIG_PGSTE */

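/*
 * s390 page tables hold 256 eight-byte entries and are thus only 2K,
 * but a full 4K page is allocated for each of them: the lower half
 * holds the PTEs, the upper half is zeroed and carries the PGSTEs for
 * KVM-enabled processes (it is unused otherwise).
 */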
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	struct ptdesc *ptdesc;
	unsigned long *table;

	ptdesc = pagetable_alloc(GFP_KERNEL, 0);
	if (!ptdesc)
		return NULL;
	if (!pagetable_pte_ctor(ptdesc)) {
		pagetable_free(ptdesc);
		return NULL;
	}
	table = ptdesc_to_virt(ptdesc);
	__arch_set_page_dat(table, 1);
	/* pt_list is used by gmap only */
	INIT_LIST_HEAD(&ptdesc->pt_list);
	memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
	return table;
}

static void pagetable_pte_dtor_free(struct ptdesc *ptdesc)
{
	pagetable_pte_dtor(ptdesc);
	pagetable_free(ptdesc);
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);

	pagetable_pte_dtor_free(ptdesc);
}

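/*
 * Called by the generic mmu_gather code once it is safe to free a
 * table. CRST tables are order-2 compound pages and are simply handed
 * back to the allocator; PTE tables also need their constructor state
 * torn down first.
 */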
void __tlb_remove_table(void *table)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(table);
	struct page *page = ptdesc_page(ptdesc);

	if (compound_order(page) == CRST_ALLOC_ORDER) {
		/* pmd, pud, or p4d */
		pagetable_free(ptdesc);
		return;
	}
	pagetable_pte_dtor_free(ptdesc);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void pte_free_now(struct rcu_head *head)
{
	struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head);

	pagetable_pte_dtor_free(ptdesc);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
	struct ptdesc *ptdesc = virt_to_ptdesc(pgtable);

	call_rcu(&ptdesc->pt_rcu_head, pte_free_now);
	/*
	 * THPs are not allowed for KVM guests. Warn if pgste ever reaches
	 * here. Switch to the generic pte_free_defer() version once gmap
	 * is removed.
	 */
	WARN_ON_ONCE(mm_has_pgste(mm));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

/*
 * Base infrastructure required to generate basic asces, region, segment,
 * and page tables that do not make use of enhanced features like EDAT1.
 */

static struct kmem_cache *base_pgt_cache;

static unsigned long *base_pgt_alloc(void)
{
	unsigned long *table;

	table = kmem_cache_alloc(base_pgt_cache, GFP_KERNEL);
	if (table)
		memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
	return table;
}

static void base_pgt_free(unsigned long *table)
{
	kmem_cache_free(base_pgt_cache, table);
}

static unsigned long *base_crst_alloc(unsigned long val)
{
	unsigned long *table;
	struct ptdesc *ptdesc;

	ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!ptdesc)
		return NULL;
	table = ptdesc_address(ptdesc);
	crst_table_init(table, val);
	return table;
}

static void base_crst_free(unsigned long *table)
{
	if (!table)
		return;
	pagetable_free(virt_to_ptdesc(table));
}

#define BASE_ADDR_END_FUNC(NAME, SIZE)					\
static inline unsigned long base_##NAME##_addr_end(unsigned long addr,	\
						   unsigned long end)	\
{									\
	unsigned long next = (addr + (SIZE)) & ~((SIZE) - 1);		\
									\
	return (next - 1) < (end - 1) ? next : end;			\
}

BASE_ADDR_END_FUNC(page,    PAGE_SIZE)
BASE_ADDR_END_FUNC(segment, _SEGMENT_SIZE)
BASE_ADDR_END_FUNC(region3, _REGION3_SIZE)
BASE_ADDR_END_FUNC(region2, _REGION2_SIZE)
BASE_ADDR_END_FUNC(region1, _REGION1_SIZE)

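/*
 * LRA (load real address) translates a virtual address through the
 * currently attached DAT tables and returns the resulting real
 * address. The base tables are filled with LRA results so that they
 * mirror the existing kernel mapping without enhanced DAT features.
 */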
static inline unsigned long base_lra(unsigned long address)
{
	unsigned long real;

	asm volatile(
		"	lra	%0,0(%1)\n"
		: "=d" (real) : "a" (address) : "cc");
	return real;
}

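/*
 * Each base_*_walk() function below handles one translation level and
 * works in two modes: with alloc != 0 missing lower-level tables are
 * allocated and the leaf entries are filled via base_lra(); with
 * alloc == 0 the same walk frees the lower-level tables instead. At
 * the page table level there is nothing left to free, hence the early
 * return in base_page_walk().
 */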
static int base_page_walk(unsigned long *origin, unsigned long addr,
			  unsigned long end, int alloc)
{
	unsigned long *pte, next;

	if (!alloc)
		return 0;
	pte = origin;
	pte += (addr & _PAGE_INDEX) >> PAGE_SHIFT;
	do {
		next = base_page_addr_end(addr, end);
		*pte = base_lra(addr);
	} while (pte++, addr = next, addr < end);
	return 0;
}

static int base_segment_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *ste, next, *table;
	int rc;

	ste = origin;
	ste += (addr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	do {
		next = base_segment_addr_end(addr, end);
		if (*ste & _SEGMENT_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_pgt_alloc();
			if (!table)
				return -ENOMEM;
			*ste = __pa(table) | _SEGMENT_ENTRY;
		}
		table = __va(*ste & _SEGMENT_ENTRY_ORIGIN);
		rc = base_page_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_pgt_free(table);
		cond_resched();
	} while (ste++, addr = next, addr < end);
	return 0;
}

static int base_region3_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rtte, next, *table;
	int rc;

	rtte = origin;
	rtte += (addr & _REGION3_INDEX) >> _REGION3_SHIFT;
	do {
		next = base_region3_addr_end(addr, end);
		if (*rtte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rtte = __pa(table) | _REGION3_ENTRY;
		}
		table = __va(*rtte & _REGION_ENTRY_ORIGIN);
		rc = base_segment_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rtte++, addr = next, addr < end);
	return 0;
}

static int base_region2_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rste, next, *table;
	int rc;

	rste = origin;
	rste += (addr & _REGION2_INDEX) >> _REGION2_SHIFT;
	do {
		next = base_region2_addr_end(addr, end);
		if (*rste & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rste = __pa(table) | _REGION2_ENTRY;
		}
		table = __va(*rste & _REGION_ENTRY_ORIGIN);
		rc = base_region3_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rste++, addr = next, addr < end);
	return 0;
}

static int base_region1_walk(unsigned long *origin, unsigned long addr,
			     unsigned long end, int alloc)
{
	unsigned long *rfte, next, *table;
	int rc;

	rfte = origin;
	rfte += (addr & _REGION1_INDEX) >> _REGION1_SHIFT;
	do {
		next = base_region1_addr_end(addr, end);
		if (*rfte & _REGION_ENTRY_INVALID) {
			if (!alloc)
				continue;
			table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
			if (!table)
				return -ENOMEM;
			*rfte = __pa(table) | _REGION1_ENTRY;
		}
		table = __va(*rfte & _REGION_ENTRY_ORIGIN);
		rc = base_region2_walk(table, addr, next, alloc);
		if (rc)
			return rc;
		if (!alloc)
			base_crst_free(table);
	} while (rfte++, addr = next, addr < end);
	return 0;
}

/**
 * base_asce_free - free asce and tables returned from base_asce_alloc()
 * @asce: asce to be freed
 *
 * Frees all region, segment, and page tables that were allocated with a
 * corresponding base_asce_alloc() call.
 */
void base_asce_free(unsigned long asce)
{
	unsigned long *table = __va(asce & _ASCE_ORIGIN);

	if (!asce)
		return;
	switch (asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_SEGMENT:
		base_segment_walk(table, 0, _REGION3_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION3:
		base_region3_walk(table, 0, _REGION2_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION2:
		base_region2_walk(table, 0, _REGION1_SIZE, 0);
		break;
	case _ASCE_TYPE_REGION1:
		base_region1_walk(table, 0, TASK_SIZE_MAX, 0);
		break;
	}
	base_crst_free(table);
}

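/*
 * Base page tables are _PAGE_TABLE_SIZE (2K), i.e. half a page, so
 * they are carved out of a dedicated kmem cache rather than taken
 * from the page allocator. The cache is created lazily on first use,
 * with a mutex so that concurrent first callers do not race.
 */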
static int base_pgt_cache_init(void)
{
	static DEFINE_MUTEX(base_pgt_cache_mutex);
	unsigned long sz = _PAGE_TABLE_SIZE;

	if (base_pgt_cache)
		return 0;
	mutex_lock(&base_pgt_cache_mutex);
	if (!base_pgt_cache)
		base_pgt_cache = kmem_cache_create("base_pgt", sz, sz, 0, NULL);
	mutex_unlock(&base_pgt_cache_mutex);
	return base_pgt_cache ? 0 : -ENOMEM;
}

/**
 * base_asce_alloc - create kernel mapping without enhanced DAT features
 * @addr: virtual start address of kernel mapping
 * @num_pages: number of consecutive pages
 *
 * Generate an asce, including all required region, segment and page tables,
 * that can be used to access the virtual kernel mapping. The difference is
 * that the returned asce does not make use of any enhanced DAT features like
 * e.g. large pages. This is required for some I/O functions that pass an
 * asce, like e.g. some service call requests.
 *
 * Note: the returned asce may NEVER be attached to any cpu. It may only be
 *	 used for I/O requests. TLB entries that would result from attaching
 *	 the asce to a cpu are never cleared.
 */
unsigned long base_asce_alloc(unsigned long addr, unsigned long num_pages)
{
	unsigned long asce, *table, end;
	int rc;

	if (base_pgt_cache_init())
		return 0;
	end = addr + num_pages * PAGE_SIZE;
	if (end <= _REGION3_SIZE) {
		table = base_crst_alloc(_SEGMENT_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_segment_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_SEGMENT | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION2_SIZE) {
		table = base_crst_alloc(_REGION3_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region3_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION3 | _ASCE_TABLE_LENGTH;
	} else if (end <= _REGION1_SIZE) {
		table = base_crst_alloc(_REGION2_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region2_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION2 | _ASCE_TABLE_LENGTH;
	} else {
		table = base_crst_alloc(_REGION1_ENTRY_EMPTY);
		if (!table)
			return 0;
		rc = base_region1_walk(table, addr, end, 1);
		asce = __pa(table) | _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH;
	}
	if (rc) {
		base_asce_free(asce);
		asce = 0;
	}
	return asce;
}
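
/*
 * Minimal usage sketch for the base asce API (no in-tree caller is
 * shown here; buf and len are hypothetical):
 *
 *	unsigned long asce;
 *
 *	asce = base_asce_alloc((unsigned long)buf,
 *			       DIV_ROUND_UP(len, PAGE_SIZE));
 *	if (!asce)
 *		return -ENOMEM;
 *	... pass asce along with the I/O or service call request ...
 *	base_asce_free(asce);
 */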