// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2007, 2011
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/sysctl.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/page-states.h>

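/*
 * Invalidate a pte with the IPTE instruction. With TLB-guest support a
 * NODAT hint and, if known, the guest ASCE are passed along so the
 * hardware can restrict the flush; a gmap_asce of -1UL means no specific
 * guest ASCE is supplied.
 */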
static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
                                   pte_t *ptep, int nodat)
{
        unsigned long opt, asce;

        if (MACHINE_HAS_TLB_GUEST) {
                opt = 0;
                asce = READ_ONCE(mm->context.gmap_asce);
                if (asce == 0UL || nodat)
                        opt |= IPTE_NODAT;
                if (asce != -1UL) {
                        asce = asce ? : mm->context.asce;
                        opt |= IPTE_GUEST_ASCE;
                }
                __ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
        } else {
                __ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
        }
}

static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
                                    pte_t *ptep, int nodat)
{
        unsigned long opt, asce;

        if (MACHINE_HAS_TLB_GUEST) {
                opt = 0;
                asce = READ_ONCE(mm->context.gmap_asce);
                if (asce == 0UL || nodat)
                        opt |= IPTE_NODAT;
                if (asce != -1UL) {
                        asce = asce ? : mm->context.asce;
                        opt |= IPTE_GUEST_ASCE;
                }
                __ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
        } else {
                __ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
        }
}

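/*
 * Flush a pte that is still valid. The direct variant issues the IPTE
 * right away, local or global depending on the CPUs attached to the mm;
 * the lazy variant only marks the pte invalid and defers the TLB flush
 * (flush_mm) when the current CPU is the sole user of the mm.
 */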
static inline pte_t ptep_flush_direct(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep,
                                      int nodat)
{
        pte_t old;

        old = *ptep;
        if (unlikely(pte_val(old) & _PAGE_INVALID))
                return old;
        atomic_inc(&mm->context.flush_count);
        if (MACHINE_HAS_TLB_LC &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                ptep_ipte_local(mm, addr, ptep, nodat);
        else
                ptep_ipte_global(mm, addr, ptep, nodat);
        atomic_dec(&mm->context.flush_count);
        return old;
}

static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
                                    unsigned long addr, pte_t *ptep,
                                    int nodat)
{
        pte_t old;

        old = *ptep;
        if (unlikely(pte_val(old) & _PAGE_INVALID))
                return old;
        atomic_inc(&mm->context.flush_count);
        if (cpumask_equal(&mm->context.cpu_attach_mask,
                          cpumask_of(smp_processor_id()))) {
                pte_val(*ptep) |= _PAGE_INVALID;
                mm->context.flush_mm = 1;
        } else
                ptep_ipte_global(mm, addr, ptep, nodat);
        atomic_dec(&mm->context.flush_count);
        return old;
}

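/*
 * The PGSTE of a pte is located PTRS_PER_PTE entries behind the pte, in
 * the second half of the page table page. pgste_get_lock() acquires the
 * PCL bit with a compare-and-swap loop, pgste_set_unlock() stores the
 * updated value and drops the bit again.
 */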
static inline pgste_t pgste_get_lock(pte_t *ptep)
{
        unsigned long new = 0;
#ifdef CONFIG_PGSTE
        unsigned long old;

        asm(
                "       lg      %0,%2\n"
                "0:     lgr     %1,%0\n"
                "       nihh    %0,0xff7f\n"    /* clear PCL bit in old */
                "       oihh    %1,0x0080\n"    /* set PCL bit in new */
                "       csg     %0,%1,%2\n"
                "       jl      0b\n"
                : "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
                : "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
#endif
        return __pgste(new);
}

static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        asm(
                "       nihh    %1,0xff7f\n"    /* clear PCL bit */
                "       stg     %1,%0\n"
                : "=Q" (ptep[PTRS_PER_PTE])
                : "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
                : "cc", "memory");
#endif
}

static inline pgste_t pgste_get(pte_t *ptep)
{
        unsigned long pgste = 0;
#ifdef CONFIG_PGSTE
        pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
#endif
        return __pgste(pgste);
}

static inline void pgste_set(pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        *(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
#endif
}

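/*
 * pgste_update_all() folds the real storage key into the PGSTE: the
 * hardware changed/referenced bits become the guest GC/GR bits and the
 * access key plus fetch-protection bit are copied into the PGSTE key
 * field. pgste_set_key() goes the other way when a pte becomes valid.
 */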
static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
                                       struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        unsigned long address, bits, skey;

        if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
                return pgste;
        address = pte_val(pte) & PAGE_MASK;
        skey = (unsigned long) page_get_storage_key(address);
        bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
        /* Transfer page changed & referenced bit to guest bits in pgste */
        pgste_val(pgste) |= bits << 48;         /* GR bit & GC bit */
        /* Copy page access key and fetch protection bit to pgste */
        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
        pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
#endif
        return pgste;
}

static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
                                 struct mm_struct *mm)
{
#ifdef CONFIG_PGSTE
        unsigned long address;
        unsigned long nkey;

        if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
                return;
        VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
        address = pte_val(entry) & PAGE_MASK;
        /*
         * Set page access key and fetch protection bit from pgste.
         * The guest C/R information is still in the PGSTE, set real
         * key C/R to 0.
         */
        nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
        nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
        page_set_storage_key(address, nkey, 0);
#endif
}

static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
{
#ifdef CONFIG_PGSTE
        if ((pte_val(entry) & _PAGE_PRESENT) &&
            (pte_val(entry) & _PAGE_WRITE) &&
            !(pte_val(entry) & _PAGE_INVALID)) {
                if (!MACHINE_HAS_ESOP) {
                        /*
                         * Without enhanced suppression-on-protection force
                         * the dirty bit on for all writable ptes.
                         */
                        pte_val(entry) |= _PAGE_DIRTY;
                        pte_val(entry) &= ~_PAGE_PROTECT;
                }
                if (!(pte_val(entry) & _PAGE_PROTECT))
                        /* This pte allows write access, set user-dirty */
                        pgste_val(pgste) |= PGSTE_UC_BIT;
        }
#endif
        *ptep = entry;
        return pgste;
}

static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
                                       unsigned long addr,
                                       pte_t *ptep, pgste_t pgste)
{
#ifdef CONFIG_PGSTE
        unsigned long bits;

        bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
        if (bits) {
                pgste_val(pgste) ^= bits;
                ptep_notify(mm, addr, ptep, bits);
        }
#endif
        return pgste;
}

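/*
 * A pte exchange happens in two steps: ptep_xchg_start() locks the PGSTE
 * and delivers pending notification bits, the caller flushes the old pte,
 * and ptep_xchg_commit() transfers key and usage state before installing
 * the new pte and unlocking.
 */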
static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
                                      unsigned long addr, pte_t *ptep)
{
        pgste_t pgste = __pgste(0);

        if (mm_has_pgste(mm)) {
                pgste = pgste_get_lock(ptep);
                pgste = pgste_pte_notify(mm, addr, ptep, pgste);
        }
        return pgste;
}

static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
                                     unsigned long addr, pte_t *ptep,
                                     pgste_t pgste, pte_t old, pte_t new)
{
        if (mm_has_pgste(mm)) {
                if (pte_val(old) & _PAGE_INVALID)
                        pgste_set_key(ptep, pgste, new, mm);
                if (pte_val(new) & _PAGE_INVALID) {
                        pgste = pgste_update_all(old, pgste, mm);
                        if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
                            _PGSTE_GPS_USAGE_UNUSED)
                                pte_val(old) |= _PAGE_UNUSED;
                }
                pgste = pgste_set_pte(ptep, pgste, new);
                pgste_set_unlock(ptep, pgste);
        } else {
                *ptep = new;
        }
        return old;
}

pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
                       pte_t *ptep, pte_t new)
{
        pgste_t pgste;
        pte_t old;
        int nodat;

        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
        old = ptep_flush_direct(mm, addr, ptep, nodat);
        old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(ptep_xchg_direct);

pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t new)
{
        pgste_t pgste;
        pte_t old;
        int nodat;

        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
        old = ptep_flush_lazy(mm, addr, ptep, nodat);
        old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(ptep_xchg_lazy);

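/*
 * ptep_modify_prot_start() returns with the PGSTE still locked and
 * preemption still disabled; both are released by the matching
 * ptep_modify_prot_commit().
 */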
pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
                             pte_t *ptep)
{
        pgste_t pgste;
        pte_t old;
        int nodat;
        struct mm_struct *mm = vma->vm_mm;

        preempt_disable();
        pgste = ptep_xchg_start(mm, addr, ptep);
        nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
        old = ptep_flush_lazy(mm, addr, ptep, nodat);
        if (mm_has_pgste(mm)) {
                pgste = pgste_update_all(old, pgste, mm);
                pgste_set(ptep, pgste);
        }
        return old;
}

void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
                             pte_t *ptep, pte_t old_pte, pte_t pte)
{
        pgste_t pgste;
        struct mm_struct *mm = vma->vm_mm;

        if (!MACHINE_HAS_NX)
                pte_val(pte) &= ~_PAGE_NOEXEC;
        if (mm_has_pgste(mm)) {
                pgste = pgste_get(ptep);
                pgste_set_key(ptep, pgste, pte, mm);
                pgste = pgste_set_pte(ptep, pgste, pte);
                pgste_set_unlock(ptep, pgste);
        } else {
                *ptep = pte;
        }
        preempt_enable();
}

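/*
 * Invalidate a segment table entry with IDTE, falling back to CSP when
 * IDTE is not available, and mirror the invalidation into the guest
 * mappings for mms that allow 1M huge pages in the gmap.
 */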
static inline void pmdp_idte_local(struct mm_struct *mm,
                                   unsigned long addr, pmd_t *pmdp)
{
        if (MACHINE_HAS_TLB_GUEST)
                __pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
                            mm->context.asce, IDTE_LOCAL);
        else
                __pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
        if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
                gmap_pmdp_idte_local(mm, addr);
}

static inline void pmdp_idte_global(struct mm_struct *mm,
                                    unsigned long addr, pmd_t *pmdp)
{
        if (MACHINE_HAS_TLB_GUEST) {
                __pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
                            mm->context.asce, IDTE_GLOBAL);
                if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
                        gmap_pmdp_idte_global(mm, addr);
        } else if (MACHINE_HAS_IDTE) {
                __pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
                if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
                        gmap_pmdp_idte_global(mm, addr);
        } else {
                __pmdp_csp(pmdp);
                if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
                        gmap_pmdp_csp(mm, addr);
        }
}

static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
                                      unsigned long addr, pmd_t *pmdp)
{
        pmd_t old;

        old = *pmdp;
        if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
                return old;
        atomic_inc(&mm->context.flush_count);
        if (MACHINE_HAS_TLB_LC &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                pmdp_idte_local(mm, addr, pmdp);
        else
                pmdp_idte_global(mm, addr, pmdp);
        atomic_dec(&mm->context.flush_count);
        return old;
}

static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
                                    unsigned long addr, pmd_t *pmdp)
{
        pmd_t old;

        old = *pmdp;
        if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
                return old;
        atomic_inc(&mm->context.flush_count);
        if (cpumask_equal(&mm->context.cpu_attach_mask,
                          cpumask_of(smp_processor_id()))) {
                pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
                mm->context.flush_mm = 1;
                if (mm_has_pgste(mm))
                        gmap_pmdp_invalidate(mm, addr);
        } else {
                pmdp_idte_global(mm, addr, pmdp);
        }
        atomic_dec(&mm->context.flush_count);
        return old;
}

#ifdef CONFIG_PGSTE
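/*
 * Walk, and where necessary allocate, the page table levels down to the
 * pmd for the given user address. Returns NULL if an allocation fails.
 */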
static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        p4d = p4d_alloc(mm, pgd, addr);
        if (!p4d)
                return NULL;
        pud = pud_alloc(mm, p4d, addr);
        if (!pud)
                return NULL;
        pmd = pmd_alloc(mm, pud, addr);
        return pmd;
}
#endif

pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
                       pmd_t *pmdp, pmd_t new)
{
        pmd_t old;

        preempt_disable();
        old = pmdp_flush_direct(mm, addr, pmdp);
        *pmdp = new;
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(pmdp_xchg_direct);

pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
                     pmd_t *pmdp, pmd_t new)
{
        pmd_t old;

        preempt_disable();
        old = pmdp_flush_lazy(mm, addr, pmdp);
        *pmdp = new;
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(pmdp_xchg_lazy);

static inline void pudp_idte_local(struct mm_struct *mm,
                                   unsigned long addr, pud_t *pudp)
{
        if (MACHINE_HAS_TLB_GUEST)
                __pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
                            mm->context.asce, IDTE_LOCAL);
        else
                __pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
}

static inline void pudp_idte_global(struct mm_struct *mm,
                                    unsigned long addr, pud_t *pudp)
{
        if (MACHINE_HAS_TLB_GUEST)
                __pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
                            mm->context.asce, IDTE_GLOBAL);
        else if (MACHINE_HAS_IDTE)
                __pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
        else
                /*
                 * Invalid bit position is the same for pmd and pud, so
                 * we can re-use __pmdp_csp() here.
                 */
                __pmdp_csp((pmd_t *) pudp);
}

static inline pud_t pudp_flush_direct(struct mm_struct *mm,
                                      unsigned long addr, pud_t *pudp)
{
        pud_t old;

        old = *pudp;
        if (pud_val(old) & _REGION_ENTRY_INVALID)
                return old;
        atomic_inc(&mm->context.flush_count);
        if (MACHINE_HAS_TLB_LC &&
            cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
                pudp_idte_local(mm, addr, pudp);
        else
                pudp_idte_global(mm, addr, pudp);
        atomic_dec(&mm->context.flush_count);
        return old;
}

pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
                       pud_t *pudp, pud_t new)
{
        pud_t old;

        preempt_disable();
        old = pudp_flush_direct(mm, addr, pudp);
        *pudp = new;
        preempt_enable();
        return old;
}
EXPORT_SYMBOL(pudp_xchg_direct);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
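/*
 * Deposited page tables are kept on a per-pmd FIFO list threaded through
 * the pgtable itself; pgtable_trans_huge_withdraw() takes them off again
 * and reinitializes the two pte slots used as the list head.
 */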
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        struct list_head *lh = (struct list_head *) pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(lh);
        else
                list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
        pmd_huge_pte(mm, pmdp) = pgtable;
}

pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        struct list_head *lh;
        pgtable_t pgtable;
        pte_t *ptep;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        lh = (struct list_head *) pgtable;
        if (list_empty(lh))
                pmd_huge_pte(mm, pmdp) = NULL;
        else {
                pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
                list_del(lh);
        }
        ptep = (pte_t *) pgtable;
        pte_val(*ptep) = _PAGE_INVALID;
        ptep++;
        pte_val(*ptep) = _PAGE_INVALID;
        return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#ifdef CONFIG_PGSTE
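/*
 * Install a pte under the PGSTE lock and move the storage key that was
 * parked in the PGSTE into the real key of the now valid mapping.
 */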
void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, pte_t entry)
{
        pgste_t pgste;

        /* the mm_has_pgste() check is done in set_pte_at() */
        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
        pgste_set_key(ptep, pgste, entry, mm);
        pgste = pgste_set_pte(ptep, pgste, entry);
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        pgste_t pgste;

        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgste_val(pgste) |= PGSTE_IN_BIT;
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

/**
 * ptep_force_prot - change access rights of a locked pte
 * @mm: pointer to the process mm_struct
 * @addr: virtual address in the guest address space
 * @ptep: pointer to the page table entry
 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bit: pgste bit to set (e.g. for notification)
 *
 * Returns 0 if the access rights were changed and -EAGAIN if the current
 * and requested access rights are incompatible.
 */
int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
                    pte_t *ptep, int prot, unsigned long bit)
{
        pte_t entry;
        pgste_t pgste;
        int pte_i, pte_p, nodat;

        pgste = pgste_get_lock(ptep);
        entry = *ptep;
        /* Check pte entry after all locks have been acquired */
        pte_i = pte_val(entry) & _PAGE_INVALID;
        pte_p = pte_val(entry) & _PAGE_PROTECT;
        if ((pte_i && (prot != PROT_NONE)) ||
            (pte_p && (prot & PROT_WRITE))) {
                pgste_set_unlock(ptep, pgste);
                return -EAGAIN;
        }
        /* Change access rights and set pgste bit */
        nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
        if (prot == PROT_NONE && !pte_i) {
                ptep_flush_direct(mm, addr, ptep, nodat);
                pgste = pgste_update_all(entry, pgste, mm);
                pte_val(entry) |= _PAGE_INVALID;
        }
        if (prot == PROT_READ && !pte_p) {
                ptep_flush_direct(mm, addr, ptep, nodat);
                pte_val(entry) &= ~_PAGE_INVALID;
                pte_val(entry) |= _PAGE_PROTECT;
        }
        pgste_val(pgste) |= bit;
        pgste = pgste_set_pte(ptep, pgste, entry);
        pgste_set_unlock(ptep, pgste);
        return 0;
}

int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
                    pte_t *sptep, pte_t *tptep, pte_t pte)
{
        pgste_t spgste, tpgste;
        pte_t spte, tpte;
        int rc = -EAGAIN;

        if (!(pte_val(*tptep) & _PAGE_INVALID))
                return 0;       /* already shadowed */
        spgste = pgste_get_lock(sptep);
        spte = *sptep;
        if (!(pte_val(spte) & _PAGE_INVALID) &&
            !((pte_val(spte) & _PAGE_PROTECT) &&
              !(pte_val(pte) & _PAGE_PROTECT))) {
                pgste_val(spgste) |= PGSTE_VSIE_BIT;
                tpgste = pgste_get_lock(tptep);
                pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
                                (pte_val(pte) & _PAGE_PROTECT);
                /* don't touch the storage key - it belongs to parent pgste */
                tpgste = pgste_set_pte(tptep, tpgste, tpte);
                pgste_set_unlock(tptep, tpgste);
                rc = 1;
        }
        pgste_set_unlock(sptep, spgste);
        return rc;
}

void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
{
        pgste_t pgste;
        int nodat;

        pgste = pgste_get_lock(ptep);
        /* notifier is called by the caller */
        nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
        ptep_flush_direct(mm, saddr, ptep, nodat);
        /* don't touch the storage key - it belongs to parent pgste */
        pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
        pgste_set_unlock(ptep, pgste);
}

static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
{
        if (!non_swap_entry(entry))
                dec_mm_counter(mm, MM_SWAPENTS);
        else if (is_migration_entry(entry)) {
                struct page *page = migration_entry_to_page(entry);

                dec_mm_counter(mm, mm_counter(page));
        }
        free_swap_and_cache(entry);
}

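/*
 * Clear a swap pte whose page the guest has marked unused or logically
 * zero and release the swap slot; with reset the usage state recorded in
 * the PGSTE is cleared as well.
 */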
void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
                     pte_t *ptep, int reset)
{
        unsigned long pgstev;
        pgste_t pgste;
        pte_t pte;

        /* Zap unused and logically-zero pages */
        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgstev = pgste_val(pgste);
        pte = *ptep;
        if (!reset && pte_swap(pte) &&
            ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
             (pgstev & _PGSTE_GPS_ZERO))) {
                ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
                pte_clear(mm, addr, ptep);
        }
        if (reset)
                pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        unsigned long ptev;
        pgste_t pgste;

        /* Clear storage key ACC and F, but set R/C */
        preempt_disable();
        pgste = pgste_get_lock(ptep);
        pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
        pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
        ptev = pte_val(*ptep);
        if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
                page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
        pgste_set_unlock(ptep, pgste);
        preempt_enable();
}

/*
 * Test and reset if a guest page is dirty
 */
bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
                            pte_t *ptep)
{
        pgste_t pgste;
        pte_t pte;
        bool dirty;
        int nodat;

        pgste = pgste_get_lock(ptep);
        dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
        pgste_val(pgste) &= ~PGSTE_UC_BIT;
        pte = *ptep;
        if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
                pgste = pgste_pte_notify(mm, addr, ptep, pgste);
                nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
                ptep_ipte_global(mm, addr, ptep, nodat);
                if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
                        pte_val(pte) |= _PAGE_PROTECT;
                else
                        pte_val(pte) |= _PAGE_INVALID;
                *ptep = pte;
        }
        pgste_set_unlock(ptep, pgste);
        return dirty;
}
EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);

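/*
 * Set the storage key for a guest page. Large pmd mappings get the key
 * set directly on the page (with quiescing); normal mappings stage the
 * key through the PGSTE so the guest view stays consistent.
 */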
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned char key, bool nq)
{
        unsigned long keyul, paddr;
        spinlock_t *ptl;
        pgste_t old, new;
        pmd_t *pmdp;
        pte_t *ptep;

        pmdp = pmd_alloc_map(mm, addr);
        if (unlikely(!pmdp))
                return -EFAULT;

        ptl = pmd_lock(mm, pmdp);
        if (!pmd_present(*pmdp)) {
                spin_unlock(ptl);
                return -EFAULT;
        }

        if (pmd_large(*pmdp)) {
                paddr = pmd_val(*pmdp) & HPAGE_MASK;
                paddr |= addr & ~HPAGE_MASK;
                /*
                 * Huge pmds need quiescing operations, they are
                 * always mapped.
                 */
                page_set_storage_key(paddr, key, 1);
                spin_unlock(ptl);
                return 0;
        }
        spin_unlock(ptl);

        ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
        if (unlikely(!ptep))
                return -EFAULT;

        new = old = pgste_get_lock(ptep);
        pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
                            PGSTE_ACC_BITS | PGSTE_FP_BIT);
        keyul = (unsigned long) key;
        pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
        pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
                unsigned long bits, skey;

                paddr = pte_val(*ptep) & PAGE_MASK;
                skey = (unsigned long) page_get_storage_key(paddr);
                bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
                skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
                /* Set storage key ACC and FP */
                page_set_storage_key(paddr, skey, !nq);
                /* Merge host changed & referenced into pgste */
                pgste_val(new) |= bits << 52;
        }
        /* changing the guest storage key is considered a change of the page */
        if ((pgste_val(new) ^ pgste_val(old)) &
            (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
                pgste_val(new) |= PGSTE_UC_BIT;

        pgste_set_unlock(ptep, new);
        pte_unmap_unlock(ptep, ptl);
        return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);

/**
 * Conditionally set a guest storage key (handling csske).
 * oldkey will be updated when either mr or mc is set and a pointer is given.
 *
 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 * storage key was updated and -EFAULT on access errors.
 */
int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                               unsigned char key, unsigned char *oldkey,
                               bool nq, bool mr, bool mc)
{
        unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
        int rc;

        /* we can drop the pgste lock between getting and setting the key */
        if (mr | mc) {
                rc = get_guest_storage_key(current->mm, addr, &tmp);
                if (rc)
                        return rc;
                if (oldkey)
                        *oldkey = tmp;
                if (!mr)
                        mask |= _PAGE_REFERENCED;
                if (!mc)
                        mask |= _PAGE_CHANGED;
                if (!((tmp ^ key) & mask))
                        return 0;
        }
        rc = set_guest_storage_key(current->mm, addr, key, nq);
        return rc < 0 ? rc : 1;
}
EXPORT_SYMBOL(cond_set_guest_storage_key);

/**
 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 *
 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 */
int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
{
        spinlock_t *ptl;
        unsigned long paddr;
        pgste_t old, new;
        pmd_t *pmdp;
        pte_t *ptep;
        int cc = 0;

        pmdp = pmd_alloc_map(mm, addr);
        if (unlikely(!pmdp))
                return -EFAULT;

        ptl = pmd_lock(mm, pmdp);
        if (!pmd_present(*pmdp)) {
                spin_unlock(ptl);
                return -EFAULT;
        }

        if (pmd_large(*pmdp)) {
                paddr = pmd_val(*pmdp) & HPAGE_MASK;
                paddr |= addr & ~HPAGE_MASK;
                cc = page_reset_referenced(paddr);
                spin_unlock(ptl);
                return cc;
        }
        spin_unlock(ptl);

        ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
        if (unlikely(!ptep))
                return -EFAULT;

        new = old = pgste_get_lock(ptep);
        /* Reset guest reference bit only */
        pgste_val(new) &= ~PGSTE_GR_BIT;

        if (!(pte_val(*ptep) & _PAGE_INVALID)) {
                paddr = pte_val(*ptep) & PAGE_MASK;
                cc = page_reset_referenced(paddr);
                /* Merge real referenced bit into host-set */
                pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
        }
        /* Reflect guest's logical view, not physical */
        cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
        /* Changing the guest storage key is considered a change of the page */
        if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
                pgste_val(new) |= PGSTE_UC_BIT;

        pgste_set_unlock(ptep, new);
        pte_unmap_unlock(ptep, ptl);
        return cc;
}
EXPORT_SYMBOL(reset_guest_reference_bit);

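/*
 * Read the storage key as the guest sees it: the real key for mapped
 * pages, the PGSTE copy otherwise, with the guest referenced and changed
 * bits merged in. Not yet mapped memory reads as key zero.
 */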
int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
                          unsigned char *key)
{
        unsigned long paddr;
        spinlock_t *ptl;
        pgste_t pgste;
        pmd_t *pmdp;
        pte_t *ptep;

        pmdp = pmd_alloc_map(mm, addr);
        if (unlikely(!pmdp))
                return -EFAULT;

        ptl = pmd_lock(mm, pmdp);
        if (!pmd_present(*pmdp)) {
                /* Not yet mapped memory has a zero key */
                spin_unlock(ptl);
                *key = 0;
                return 0;
        }

        if (pmd_large(*pmdp)) {
                paddr = pmd_val(*pmdp) & HPAGE_MASK;
                paddr |= addr & ~HPAGE_MASK;
                *key = page_get_storage_key(paddr);
                spin_unlock(ptl);
                return 0;
        }
        spin_unlock(ptl);

        ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
        if (unlikely(!ptep))
                return -EFAULT;

        pgste = pgste_get_lock(ptep);
        *key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
        paddr = pte_val(*ptep) & PAGE_MASK;
        if (!(pte_val(*ptep) & _PAGE_INVALID))
                *key = page_get_storage_key(paddr);
        /* Reflect guest's logical view, not physical */
        *key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
        pgste_set_unlock(ptep, pgste);
        pte_unmap_unlock(ptep, ptl);
        return 0;
}
EXPORT_SYMBOL(get_guest_storage_key);

/**
 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 *
 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 *         or < 0 in case of error. -EINVAL is returned for invalid values
 *         of orc, -EFAULT for invalid addresses.
 */
int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
                       unsigned long *oldpte, unsigned long *oldpgste)
{
        unsigned long pgstev;
        spinlock_t *ptl;
        pgste_t pgste;
        pte_t *ptep;
        int res = 0;

        WARN_ON_ONCE(orc > ESSA_MAX);
        if (unlikely(orc > ESSA_MAX))
                return -EINVAL;
        ptep = get_locked_pte(mm, hva, &ptl);
        if (unlikely(!ptep))
                return -EFAULT;
        pgste = pgste_get_lock(ptep);
        pgstev = pgste_val(pgste);
        if (oldpte)
                *oldpte = pte_val(*ptep);
        if (oldpgste)
                *oldpgste = pgstev;

        switch (orc) {
        case ESSA_GET_STATE:
                break;
        case ESSA_SET_STABLE:
                pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
                pgstev |= _PGSTE_GPS_USAGE_STABLE;
                break;
        case ESSA_SET_UNUSED:
                pgstev &= ~_PGSTE_GPS_USAGE_MASK;
                pgstev |= _PGSTE_GPS_USAGE_UNUSED;
                if (pte_val(*ptep) & _PAGE_INVALID)
                        res = 1;
                break;
        case ESSA_SET_VOLATILE:
                pgstev &= ~_PGSTE_GPS_USAGE_MASK;
                pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
                if (pte_val(*ptep) & _PAGE_INVALID)
                        res = 1;
                break;
        case ESSA_SET_POT_VOLATILE:
                pgstev &= ~_PGSTE_GPS_USAGE_MASK;
                if (!(pte_val(*ptep) & _PAGE_INVALID)) {
                        pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
                        break;
                }
                if (pgstev & _PGSTE_GPS_ZERO) {
                        pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
                        break;
                }
                if (!(pgstev & PGSTE_GC_BIT)) {
                        pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
                        res = 1;
                        break;
                }
                break;
        case ESSA_SET_STABLE_RESIDENT:
                pgstev &= ~_PGSTE_GPS_USAGE_MASK;
                pgstev |= _PGSTE_GPS_USAGE_STABLE;
                /*
                 * Since the resident state can go away any time after this
                 * call, we will not make this page resident. We can revisit
                 * this decision if a guest will ever start using this.
                 */
                break;
        case ESSA_SET_STABLE_IF_RESIDENT:
                if (!(pte_val(*ptep) & _PAGE_INVALID)) {
                        pgstev &= ~_PGSTE_GPS_USAGE_MASK;
                        pgstev |= _PGSTE_GPS_USAGE_STABLE;
                }
                break;
        case ESSA_SET_STABLE_NODAT:
                pgstev &= ~_PGSTE_GPS_USAGE_MASK;
                pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
                break;
        default:
                /* we should never get here! */
                break;
        }
        /* If we are discarding a page, set it to logical zero */
        if (res)
                pgstev |= _PGSTE_GPS_ZERO;

        pgste_val(pgste) = pgstev;
        pgste_set_unlock(ptep, pgste);
        pte_unmap_unlock(ptep, ptl);
        return res;
}
EXPORT_SYMBOL(pgste_perform_essa);

/**
 * set_pgste_bits - set specific PGSTE bits.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @bits: a bitmask representing the bits that will be touched
 * @value: the values of the bits to be written. Only the bits in the mask
 *         will be written.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
                   unsigned long bits, unsigned long value)
{
        spinlock_t *ptl;
        pgste_t new;
        pte_t *ptep;

        ptep = get_locked_pte(mm, hva, &ptl);
        if (unlikely(!ptep))
                return -EFAULT;
        new = pgste_get_lock(ptep);

        pgste_val(new) &= ~bits;
        pgste_val(new) |= value & bits;

        pgste_set_unlock(ptep, new);
        pte_unmap_unlock(ptep, ptl);
        return 0;
}
EXPORT_SYMBOL(set_pgste_bits);

/**
 * get_pgste - get the current PGSTE for the given address.
 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 * @hva: the host virtual address of the page whose PGSTE is to be processed
 * @pgstep: will be written with the current PGSTE for the given address.
 *
 * Return: 0 on success, < 0 in case of error.
 */
int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
{
        spinlock_t *ptl;
        pte_t *ptep;

        ptep = get_locked_pte(mm, hva, &ptl);
        if (unlikely(!ptep))
                return -EFAULT;
        *pgstep = pgste_val(pgste_get(ptep));
        pte_unmap_unlock(ptep, ptl);
        return 0;
}
EXPORT_SYMBOL(get_pgste);
#endif
/*
 * Copyright IBM Corp. 2007,2009
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>

#ifndef CONFIG_64BIT
#define ALLOC_ORDER     1
#define FRAG_MASK       0x0f
#else
#define ALLOC_ORDER     2
#define FRAG_MASK       0x03
#endif

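/*
 * crst tables are 16K (order 2) on 64 bit and 8K (order 1) on 31 bit;
 * FRAG_MASK has one bit per 1K (31 bit) or 2K (64 bit) page table
 * fragment of a 4K page.
 */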
unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
EXPORT_SYMBOL(VMALLOC_START);

static int __init parse_vmalloc(char *arg)
{
        if (!arg)
                return -EINVAL;
        VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
        return 0;
}
early_param("vmalloc", parse_vmalloc);

unsigned long *crst_table_alloc(struct mm_struct *mm)
{
        struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

        if (!page)
                return NULL;
        return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
        free_pages((unsigned long) table, ALLOC_ORDER);
}

#ifdef CONFIG_64BIT
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
        unsigned long *table, *pgd;
        unsigned long entry;

        BUG_ON(limit > (1UL << 53));
repeat:
        table = crst_table_alloc(mm);
        if (!table)
                return -ENOMEM;
        spin_lock_bh(&mm->page_table_lock);
        if (mm->context.asce_limit < limit) {
                pgd = (unsigned long *) mm->pgd;
                if (mm->context.asce_limit <= (1UL << 31)) {
                        entry = _REGION3_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                } else {
                        entry = _REGION2_ENTRY_EMPTY;
                        mm->context.asce_limit = 1UL << 53;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION2;
                }
                crst_table_init(table, entry);
                pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
                mm->pgd = (pgd_t *) table;
                mm->task_size = mm->context.asce_limit;
                table = NULL;
        }
        spin_unlock_bh(&mm->page_table_lock);
        if (table)
                crst_table_free(mm, table);
        if (mm->context.asce_limit < limit)
                goto repeat;
        update_mm(mm, current);
        return 0;
}

void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
        pgd_t *pgd;

        if (mm->context.asce_limit <= limit)
                return;
        __tlb_flush_mm(mm);
        while (mm->context.asce_limit > limit) {
                pgd = mm->pgd;
                switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
                case _REGION_ENTRY_TYPE_R2:
                        mm->context.asce_limit = 1UL << 42;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_REGION3;
                        break;
                case _REGION_ENTRY_TYPE_R3:
                        mm->context.asce_limit = 1UL << 31;
                        mm->context.asce_bits = _ASCE_TABLE_LENGTH |
                                                _ASCE_USER_BITS |
                                                _ASCE_TYPE_SEGMENT;
                        break;
                default:
                        BUG();
                }
                mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
                mm->task_size = mm->context.asce_limit;
                crst_table_free(mm, (unsigned long *) pgd);
        }
        update_mm(mm, current);
}
#endif

#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm)
{
        struct gmap *gmap;
        struct page *page;
        unsigned long *table;

        gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
        if (!gmap)
                goto out;
        INIT_LIST_HEAD(&gmap->crst_list);
        gmap->mm = mm;
        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                goto out_free;
        list_add(&page->lru, &gmap->crst_list);
        table = (unsigned long *) page_to_phys(page);
        crst_table_init(table, _REGION1_ENTRY_EMPTY);
        gmap->table = table;
        gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
                     _ASCE_USER_BITS | __pa(table);
        list_add(&gmap->list, &mm->context.gmap_list);
        return gmap;

out_free:
        kfree(gmap);
out:
        return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);

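/*
 * Drop the rmap that links a gmap segment table entry to a host page
 * table and restore the invalid entry that stores the mapped address.
 * Returns 1 if the entry was valid and a TLB flush is needed.
 */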
static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
{
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct page *page;

        if (*table & _SEGMENT_ENTRY_INV)
                return 0;
        page = pfn_to_page(*table >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry(rmap, &mp->mapper, list) {
                if (rmap->entry != table)
                        continue;
                list_del(&rmap->list);
                kfree(rmap);
                break;
        }
        *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
        return 1;
}

static void gmap_flush_tlb(struct gmap *gmap)
{
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();
}

/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
        struct page *page, *next;
        unsigned long *table;
        int i;

        /* Flush tlb. */
        if (MACHINE_HAS_IDTE)
                __tlb_flush_idte((unsigned long) gmap->table |
                                 _ASCE_TYPE_REGION1);
        else
                __tlb_flush_global();

        /* Free all segment & region tables. */
        down_read(&gmap->mm->mmap_sem);
        list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
                table = (unsigned long *) page_to_phys(page);
                if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
                        /* Remove gmap rmap structures for segment table. */
                        for (i = 0; i < PTRS_PER_PMD; i++, table++)
                                gmap_unlink_segment(gmap, table);
                __free_pages(page, ALLOC_ORDER);
        }
        up_read(&gmap->mm->mmap_sem);
        list_del(&gmap->list);
        kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);

/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
        S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
        S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

static int gmap_alloc_table(struct gmap *gmap,
                            unsigned long *table, unsigned long init)
{
        struct page *page;
        unsigned long *new;

        page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
        if (!page)
                return -ENOMEM;
        new = (unsigned long *) page_to_phys(page);
        crst_table_init(new, init);
        down_read(&gmap->mm->mmap_sem);
        if (*table & _REGION_ENTRY_INV) {
                list_add(&page->lru, &gmap->crst_list);
                *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
                         (*table & _REGION_ENTRY_TYPE_MASK);
        } else
                __free_pages(page, ALLOC_ORDER);
        up_read(&gmap->mm->mmap_sem);
        return 0;
}

/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the guest addr space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if (*table & _REGION_ENTRY_INV)
                        goto out;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Clear segment table entry in guest address space. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV;
        }
out:
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);

/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the map succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
                     unsigned long to, unsigned long len)
{
        unsigned long *table;
        unsigned long off;
        int flush;

        if ((from | to | len) & (PMD_SIZE - 1))
                return -EINVAL;
        if (len == 0 || from + len > PGDIR_SIZE ||
            from + len < from || to + len < to)
                return -EINVAL;

        flush = 0;
        down_read(&gmap->mm->mmap_sem);
        for (off = 0; off < len; off += PMD_SIZE) {
                /* Walk the gmap address space page table */
                table = gmap->table + (((to + off) >> 53) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 42) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 31) & 0x7ff);
                if ((*table & _REGION_ENTRY_INV) &&
                    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
                        goto out_unmap;
                table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
                table = table + (((to + off) >> 20) & 0x7ff);

                /* Store 'from' address in an invalid segment table entry. */
                flush |= gmap_unlink_segment(gmap, table);
                *table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
        }
        up_read(&gmap->mm->mmap_sem);
        if (flush)
                gmap_flush_tlb(gmap);
        return 0;

out_unmap:
        up_read(&gmap->mm->mmap_sem);
        gmap_unmap_segment(gmap, to, len);
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);

unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
{
        unsigned long *table, vmaddr, segment;
        struct mm_struct *mm;
        struct gmap_pgtable *mp;
        struct gmap_rmap *rmap;
        struct vm_area_struct *vma;
        struct page *page;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        current->thread.gmap_addr = address;
        mm = gmap->mm;
        /* Walk the gmap address space page table */
        table = gmap->table + ((address >> 53) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 42) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 31) & 0x7ff);
        if (unlikely(*table & _REGION_ENTRY_INV))
                return -EFAULT;
        table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
        table = table + ((address >> 20) & 0x7ff);

        /* Convert the gmap address to an mm address. */
        segment = *table;
        if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
                page = pfn_to_page(segment >> PAGE_SHIFT);
                mp = (struct gmap_pgtable *) page->index;
                return mp->vmaddr | (address & ~PMD_MASK);
        } else if (segment & _SEGMENT_ENTRY_RO) {
                vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
                vma = find_vma(mm, vmaddr);
                if (!vma || vma->vm_start > vmaddr)
                        return -EFAULT;

                /* Walk the parent mm page table */
                pgd = pgd_offset(mm, vmaddr);
                pud = pud_alloc(mm, pgd, vmaddr);
                if (!pud)
                        return -ENOMEM;
                pmd = pmd_alloc(mm, pud, vmaddr);
                if (!pmd)
                        return -ENOMEM;
                if (!pmd_present(*pmd) &&
                    __pte_alloc(mm, vma, pmd, vmaddr))
                        return -ENOMEM;
                /* pmd now points to a valid segment table entry. */
                rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
                if (!rmap)
                        return -ENOMEM;
                /* Link gmap segment table entry location to page table. */
                page = pmd_page(*pmd);
                mp = (struct gmap_pgtable *) page->index;
                rmap->entry = table;
                list_add(&rmap->list, &mp->mapper);
                /* Set gmap segment table entry to page table. */
                *table = pmd_val(*pmd) & PAGE_MASK;
                return vmaddr | (address & ~PMD_MASK);
        }
        return -EFAULT;
}
EXPORT_SYMBOL_GPL(gmap_fault);

void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
{
        struct gmap_rmap *rmap, *next;
        struct gmap_pgtable *mp;
        struct page *page;
        int flush;

        flush = 0;
        spin_lock(&mm->page_table_lock);
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
                *rmap->entry =
                        _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
                list_del(&rmap->list);
                kfree(rmap);
                flush = 1;
        }
        spin_unlock(&mm->page_table_lock);
        if (flush)
                __tlb_flush_global();
}

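/*
 * A pgste mm uses full 4K page table pages: the lower half holds the
 * ptes, the upper half the pgstes. page->index carries the gmap_pgtable
 * descriptor with the mapped address and the rmap list of gmap entries.
 */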
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        struct gmap_pgtable *mp;

        page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
        if (!page)
                return NULL;
        mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
        if (!mp) {
                __free_page(page);
                return NULL;
        }
        pgtable_page_ctor(page);
        mp->vmaddr = vmaddr & PMD_MASK;
        INIT_LIST_HEAD(&mp->mapper);
        page->index = (unsigned long) mp;
        atomic_set(&page->_mapcount, 3);
        table = (unsigned long *) page_to_phys(page);
        clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
        clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
        return table;
}

static inline void page_table_free_pgste(unsigned long *table)
{
        struct page *page;
        struct gmap_pgtable *mp;

        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        mp = (struct gmap_pgtable *) page->index;
        BUG_ON(!list_empty(&mp->mapper));
        pgtable_page_dtor(page);
        atomic_set(&page->_mapcount, -1);
        kfree(mp);
        __free_page(page);
}

#else /* CONFIG_PGSTE */

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
                                                    unsigned long vmaddr)
{
        return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unmap_notifier(struct mm_struct *mm,
                                       unsigned long *table)
{
}

#endif /* CONFIG_PGSTE */

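/*
 * Atomically toggle the given bits in page->_mapcount, which tracks the
 * allocation state of the page table fragments, and return the new value.
 */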
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
        unsigned int old, new;

        do {
                old = atomic_read(v);
                new = old ^ bits;
        } while (atomic_cmpxchg(v, old, new) != old);
        return new;
}

/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
{
        struct page *page;
        unsigned long *table;
        unsigned int mask, bit;

        if (mm_has_pgste(mm))
                return page_table_alloc_pgste(mm, vmaddr);
        /* Allocate fragments of a 4K page as 1K/2K page table */
        spin_lock_bh(&mm->context.list_lock);
        mask = FRAG_MASK;
        if (!list_empty(&mm->context.pgtable_list)) {
                page = list_first_entry(&mm->context.pgtable_list,
                                        struct page, lru);
                table = (unsigned long *) page_to_phys(page);
                mask = atomic_read(&page->_mapcount);
                mask = mask | (mask >> 4);
        }
        if ((mask & FRAG_MASK) == FRAG_MASK) {
                spin_unlock_bh(&mm->context.list_lock);
                page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
                if (!page)
                        return NULL;
                pgtable_page_ctor(page);
                atomic_set(&page->_mapcount, 1);
                table = (unsigned long *) page_to_phys(page);
                clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
                spin_lock_bh(&mm->context.list_lock);
                list_add(&page->lru, &mm->context.pgtable_list);
        } else {
                for (bit = 1; mask & bit; bit <<= 1)
                        table += PTRS_PER_PTE;
                mask = atomic_xor_bits(&page->_mapcount, bit);
                if ((mask & FRAG_MASK) == FRAG_MASK)
                        list_del(&page->lru);
        }
        spin_unlock_bh(&mm->context.list_lock);
        return table;
}

void page_table_free(struct mm_struct *mm, unsigned long *table)
{
        struct page *page;
        unsigned int bit, mask;

        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                return page_table_free_pgste(table);
        }
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit);
        if (mask & FRAG_MASK)
                list_add(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        if (mask == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

#ifdef CONFIG_HAVE_RCU_TABLE_FREE

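/*
 * For RCU freeing, the fragment bit shifted into the high nibble (or
 * FRAG_MASK for a pgste page) is encoded into the low bits of the table
 * address passed to tlb_remove_table; __tlb_remove_table() decodes it
 * again after the grace period.
 */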
static void __page_table_free_rcu(void *table, unsigned bit)
{
        struct page *page;

        if (bit == FRAG_MASK)
                return page_table_free_pgste(table);
        /* Free 1K/2K page table fragment of a 4K page */
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
                pgtable_page_dtor(page);
                atomic_set(&page->_mapcount, -1);
                __free_page(page);
        }
}

void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
{
        struct mm_struct *mm;
        struct page *page;
        unsigned int bit, mask;

        mm = tlb->mm;
        if (mm_has_pgste(mm)) {
                gmap_unmap_notifier(mm, table);
                table = (unsigned long *) (__pa(table) | FRAG_MASK);
                tlb_remove_table(tlb, table);
                return;
        }
        bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
        page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
        spin_lock_bh(&mm->context.list_lock);
        if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
                list_del(&page->lru);
        mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
        if (mask & FRAG_MASK)
                list_add_tail(&page->lru, &mm->context.pgtable_list);
        spin_unlock_bh(&mm->context.list_lock);
        table = (unsigned long *) (__pa(table) | (bit << 4));
        tlb_remove_table(tlb, table);
}

void __tlb_remove_table(void *_table)
{
        void *table = (void *)((unsigned long) _table & PAGE_MASK);
        unsigned type = (unsigned long) _table & ~PAGE_MASK;

        if (type)
                __page_table_free_rcu(table, type);
        else
                free_pages((unsigned long) table, ALLOC_ORDER);
}

#endif

/*
 * Switch on pgstes for the current userspace process (needed for kvm).
 */
int s390_enable_sie(void)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm, *old_mm;

        /* Do we have a switched amode? If not, we cannot do sie */
        if (user_mode == HOME_SPACE_MODE)
                return -EINVAL;

        /* Do we have pgstes? If yes, we are done */
        if (mm_has_pgste(tsk->mm))
                return 0;

        /* Let's check if we are allowed to replace the mm */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                task_unlock(tsk);
                return -EINVAL;
        }
        task_unlock(tsk);

        /* We copy the mm and let dup_mm create the page tables with pgstes */
        tsk->mm->context.alloc_pgste = 1;
        mm = dup_mm(tsk);
        tsk->mm->context.alloc_pgste = 0;
        if (!mm)
                return -ENOMEM;

        /* Now let's check again if something happened */
        task_lock(tsk);
        if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
#ifdef CONFIG_AIO
            !hlist_empty(&tsk->mm->ioctx_list) ||
#endif
            tsk->mm != tsk->active_mm) {
                mmput(mm);
                task_unlock(tsk);
                return -EINVAL;
        }

        /* OK, we are alone. No ptrace, no threads, etc. */
        old_mm = tsk->mm;
        tsk->mm = tsk->active_mm = mm;
        preempt_disable();
        update_mm(mm, tsk);
        atomic_inc(&mm->context.attach_count);
        atomic_dec(&old_mm->context.attach_count);
        cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
        preempt_enable();
        task_unlock(tsk);
        mmput(old_mm);
        return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);

#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
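/*
 * Probe with LRA (load real address) whether a kernel mapping exists for
 * the page; condition code 0 means the translation is present.
 */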
bool kernel_page_present(struct page *page)
{
        unsigned long addr;
        int cc;

        addr = page_to_phys(page);
        asm volatile(
                "       lra     %1,0(%1)\n"
                "       ipm     %0\n"
                "       srl     %0,28"
                : "=d" (cc), "+a" (addr) : : "cc");
        return cc == 0;
}
#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */