v6.2: arch/s390/mm/pgtable.c
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *    Copyright IBM Corp. 2007, 2011
   4 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/kernel.h>
   9#include <linux/errno.h>
  10#include <linux/gfp.h>
  11#include <linux/mm.h>
  12#include <linux/swap.h>
  13#include <linux/smp.h>
  14#include <linux/spinlock.h>
  15#include <linux/rcupdate.h>
  16#include <linux/slab.h>
  17#include <linux/swapops.h>
  18#include <linux/sysctl.h>
  19#include <linux/ksm.h>
  20#include <linux/mman.h>
  21
  22#include <asm/tlb.h>
  23#include <asm/tlbflush.h>
  24#include <asm/mmu_context.h>
  25#include <asm/page-states.h>
  26
  27pgprot_t pgprot_writecombine(pgprot_t prot)
  28{
  29	/*
  30	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
  31	 * once at init and only read afterwards.
  32	 */
  33	return __pgprot(pgprot_val(prot) | mio_wb_bit_mask);
  34}
  35EXPORT_SYMBOL_GPL(pgprot_writecombine);
  36
  37pgprot_t pgprot_writethrough(pgprot_t prot)
  38{
  39	/*
  40	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
  41	 * once at init and only read afterwards.
  42	 */
  43	return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
  44}
  45EXPORT_SYMBOL_GPL(pgprot_writethrough);
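
/*
 * Example (an illustrative sketch, not code from this file): drivers do
 * not normally call these helpers directly; the generic mapping paths do.
 * ioremap_wc() ends up applying pgprot_writecombine(PAGE_KERNEL) to the
 * mapping, which sets the mio_wb_bit_mask bit handled above. bar_start
 * and bar_len are assumed caller-provided values:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_wc(bar_start, bar_len);
 *	if (!regs)
 *		return -ENOMEM;
 */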
  46
  47static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
  48				   pte_t *ptep, int nodat)
  49{
  50	unsigned long opt, asce;
  51
  52	if (MACHINE_HAS_TLB_GUEST) {
  53		opt = 0;
  54		asce = READ_ONCE(mm->context.gmap_asce);
  55		if (asce == 0UL || nodat)
  56			opt |= IPTE_NODAT;
  57		if (asce != -1UL) {
  58			asce = asce ? : mm->context.asce;
  59			opt |= IPTE_GUEST_ASCE;
  60		}
  61		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
  62	} else {
  63		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
  64	}
  65}
  66
  67static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
  68				    pte_t *ptep, int nodat)
  69{
  70	unsigned long opt, asce;
  71
  72	if (MACHINE_HAS_TLB_GUEST) {
  73		opt = 0;
  74		asce = READ_ONCE(mm->context.gmap_asce);
  75		if (asce == 0UL || nodat)
  76			opt |= IPTE_NODAT;
  77		if (asce != -1UL) {
  78			asce = asce ? : mm->context.asce;
  79			opt |= IPTE_GUEST_ASCE;
  80		}
  81		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
  82	} else {
  83		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
  84	}
  85}
  86
  87static inline pte_t ptep_flush_direct(struct mm_struct *mm,
  88				      unsigned long addr, pte_t *ptep,
  89				      int nodat)
  90{
  91	pte_t old;
  92
  93	old = *ptep;
  94	if (unlikely(pte_val(old) & _PAGE_INVALID))
  95		return old;
  96	atomic_inc(&mm->context.flush_count);
  97	if (MACHINE_HAS_TLB_LC &&
  98	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
  99		ptep_ipte_local(mm, addr, ptep, nodat);
 100	else
 101		ptep_ipte_global(mm, addr, ptep, nodat);
 102	atomic_dec(&mm->context.flush_count);
 103	return old;
 104}
 105
 106static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
 107				    unsigned long addr, pte_t *ptep,
 108				    int nodat)
 109{
 110	pte_t old;
 111
 112	old = *ptep;
 113	if (unlikely(pte_val(old) & _PAGE_INVALID))
 114		return old;
 115	atomic_inc(&mm->context.flush_count);
 116	if (cpumask_equal(&mm->context.cpu_attach_mask,
 117			  cpumask_of(smp_processor_id()))) {
 118		set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_INVALID)));
 119		mm->context.flush_mm = 1;
 120	} else
 121		ptep_ipte_global(mm, addr, ptep, nodat);
 122	atomic_dec(&mm->context.flush_count);
 123	return old;
 124}
 125
 126static inline pgste_t pgste_get_lock(pte_t *ptep)
 127{
 128	unsigned long new = 0;
 129#ifdef CONFIG_PGSTE
 130	unsigned long old;
 131
 132	asm(
 133		"	lg	%0,%2\n"
 134		"0:	lgr	%1,%0\n"
 135		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
 136		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
 137		"	csg	%0,%1,%2\n"
 138		"	jl	0b\n"
 139		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
 140		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
 141#endif
 142	return __pgste(new);
 143}
 144
 145static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
 146{
 147#ifdef CONFIG_PGSTE
 148	asm(
 149		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
 150		"	stg	%1,%0\n"
 151		: "=Q" (ptep[PTRS_PER_PTE])
 152		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
 153		: "cc", "memory");
 154#endif
 155}
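
/*
 * Usage pattern (mirrors the callers throughout this file): every PGSTE
 * update is bracketed by pgste_get_lock()/pgste_set_unlock(), which
 * implement a per-pte lock in the PCL bit of the PGSTE using the
 * compare-and-swap loop above. A minimal sketch:
 *
 *	pgste_t pgste;
 *
 *	preempt_disable();
 *	pgste = pgste_get_lock(ptep);
 *	pgste_val(pgste) |= PGSTE_IN_BIT;	// any PGSTE update
 *	pgste_set_unlock(ptep, pgste);
 *	preempt_enable();
 */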
 156
 157static inline pgste_t pgste_get(pte_t *ptep)
 158{
 159	unsigned long pgste = 0;
 160#ifdef CONFIG_PGSTE
 161	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
 162#endif
 163	return __pgste(pgste);
 164}
 165
 166static inline void pgste_set(pte_t *ptep, pgste_t pgste)
 167{
 168#ifdef CONFIG_PGSTE
 169	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
 170#endif
 171}
 172
 173static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
 174				       struct mm_struct *mm)
 175{
 176#ifdef CONFIG_PGSTE
 177	unsigned long address, bits, skey;
 178
 179	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
 180		return pgste;
 181	address = pte_val(pte) & PAGE_MASK;
 182	skey = (unsigned long) page_get_storage_key(address);
 183	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 184	/* Transfer page changed & referenced bit to guest bits in pgste */
 185	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
 186	/* Copy page access key and fetch protection bit to pgste */
 187	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 188	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 189#endif
 190	return pgste;
 191
 192}
 193
 194static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
 195				 struct mm_struct *mm)
 196{
 197#ifdef CONFIG_PGSTE
 198	unsigned long address;
 199	unsigned long nkey;
 200
 201	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
 202		return;
 203	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
 204	address = pte_val(entry) & PAGE_MASK;
 205	/*
 206	 * Set page access key and fetch protection bit from pgste.
 207	 * The guest C/R information is still in the PGSTE, set real
 208	 * key C/R to 0.
 209	 */
 210	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 211	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
 212	page_set_storage_key(address, nkey, 0);
 213#endif
 214}
 215
 216static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 217{
 218#ifdef CONFIG_PGSTE
 219	if ((pte_val(entry) & _PAGE_PRESENT) &&
 220	    (pte_val(entry) & _PAGE_WRITE) &&
 221	    !(pte_val(entry) & _PAGE_INVALID)) {
 222		if (!MACHINE_HAS_ESOP) {
 223			/*
 224			 * Without enhanced suppression-on-protection force
 225			 * the dirty bit on for all writable ptes.
 226			 */
 227			entry = set_pte_bit(entry, __pgprot(_PAGE_DIRTY));
 228			entry = clear_pte_bit(entry, __pgprot(_PAGE_PROTECT));
 229		}
 230		if (!(pte_val(entry) & _PAGE_PROTECT))
 231			/* This pte allows write access, set user-dirty */
 232			pgste_val(pgste) |= PGSTE_UC_BIT;
 233	}
 234#endif
 235	set_pte(ptep, entry);
 236	return pgste;
 237}
 238
 239static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
 240				       unsigned long addr,
 241				       pte_t *ptep, pgste_t pgste)
 242{
 243#ifdef CONFIG_PGSTE
 244	unsigned long bits;
 245
 246	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
 247	if (bits) {
 248		pgste_val(pgste) ^= bits;
 249		ptep_notify(mm, addr, ptep, bits);
 250	}
 251#endif
 252	return pgste;
 253}
 254
 255static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
 256				      unsigned long addr, pte_t *ptep)
 257{
 258	pgste_t pgste = __pgste(0);
 259
 260	if (mm_has_pgste(mm)) {
 261		pgste = pgste_get_lock(ptep);
 262		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
 263	}
 264	return pgste;
 265}
 266
 267static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
 268				    unsigned long addr, pte_t *ptep,
 269				    pgste_t pgste, pte_t old, pte_t new)
 270{
 271	if (mm_has_pgste(mm)) {
 272		if (pte_val(old) & _PAGE_INVALID)
 273			pgste_set_key(ptep, pgste, new, mm);
 274		if (pte_val(new) & _PAGE_INVALID) {
 275			pgste = pgste_update_all(old, pgste, mm);
 276			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
 277			    _PGSTE_GPS_USAGE_UNUSED)
 278				old = set_pte_bit(old, __pgprot(_PAGE_UNUSED));
 279		}
 280		pgste = pgste_set_pte(ptep, pgste, new);
 281		pgste_set_unlock(ptep, pgste);
 282	} else {
 283		set_pte(ptep, new);
 284	}
 285	return old;
 286}
 287
 288pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
 289		       pte_t *ptep, pte_t new)
 290{
 291	pgste_t pgste;
 292	pte_t old;
 293	int nodat;
 294
 295	preempt_disable();
 296	pgste = ptep_xchg_start(mm, addr, ptep);
 297	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 298	old = ptep_flush_direct(mm, addr, ptep, nodat);
 299	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 300	preempt_enable();
 301	return old;
 302}
 303EXPORT_SYMBOL(ptep_xchg_direct);
 304
 305pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 306		     pte_t *ptep, pte_t new)
 307{
 308	pgste_t pgste;
 309	pte_t old;
 310	int nodat;
 311
 312	preempt_disable();
 313	pgste = ptep_xchg_start(mm, addr, ptep);
 314	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 315	old = ptep_flush_lazy(mm, addr, ptep, nodat);
 316	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 317	preempt_enable();
 318	return old;
 319}
 320EXPORT_SYMBOL(ptep_xchg_lazy);
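
/*
 * Note on the two variants above: ptep_xchg_direct() always flushes the
 * TLB entry before installing the new pte, while ptep_xchg_lazy() may
 * just mark the pte invalid and defer the flush (mm->context.flush_mm)
 * when the mm is attached to the current CPU only. A sketch, with
 * must_flush_now being a caller-defined condition:
 *
 *	if (must_flush_now)
 *		old = ptep_xchg_direct(mm, addr, ptep, new);
 *	else
 *		old = ptep_xchg_lazy(mm, addr, ptep, new);
 */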
 321
 322pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
 323			     pte_t *ptep)
 324{
 325	pgste_t pgste;
 326	pte_t old;
 327	int nodat;
 328	struct mm_struct *mm = vma->vm_mm;
 329
 330	preempt_disable();
 331	pgste = ptep_xchg_start(mm, addr, ptep);
 332	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 333	old = ptep_flush_lazy(mm, addr, ptep, nodat);
 334	if (mm_has_pgste(mm)) {
 335		pgste = pgste_update_all(old, pgste, mm);
 336		pgste_set(ptep, pgste);
 337	}
 338	return old;
 339}
 340
 341void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 342			     pte_t *ptep, pte_t old_pte, pte_t pte)
 343{
 344	pgste_t pgste;
 345	struct mm_struct *mm = vma->vm_mm;
 346
 347	if (!MACHINE_HAS_NX)
 348		pte = clear_pte_bit(pte, __pgprot(_PAGE_NOEXEC));
 349	if (mm_has_pgste(mm)) {
 350		pgste = pgste_get(ptep);
 351		pgste_set_key(ptep, pgste, pte, mm);
 352		pgste = pgste_set_pte(ptep, pgste, pte);
 353		pgste_set_unlock(ptep, pgste);
 354	} else {
 355		set_pte(ptep, pte);
 356	}
 357	preempt_enable();
 358}
 359
 360static inline void pmdp_idte_local(struct mm_struct *mm,
 361				   unsigned long addr, pmd_t *pmdp)
 362{
 363	if (MACHINE_HAS_TLB_GUEST)
 364		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 365			    mm->context.asce, IDTE_LOCAL);
 366	else
 367		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
 368	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 369		gmap_pmdp_idte_local(mm, addr);
 370}
 371
 372static inline void pmdp_idte_global(struct mm_struct *mm,
 373				    unsigned long addr, pmd_t *pmdp)
 374{
 375	if (MACHINE_HAS_TLB_GUEST) {
 376		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 377			    mm->context.asce, IDTE_GLOBAL);
 378		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 379			gmap_pmdp_idte_global(mm, addr);
 380	} else if (MACHINE_HAS_IDTE) {
 381		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
 382		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 383			gmap_pmdp_idte_global(mm, addr);
 384	} else {
 385		__pmdp_csp(pmdp);
 386		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 387			gmap_pmdp_csp(mm, addr);
 388	}
 389}
 390
 391static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
 392				      unsigned long addr, pmd_t *pmdp)
 393{
 394	pmd_t old;
 395
 396	old = *pmdp;
 397	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
 398		return old;
 399	atomic_inc(&mm->context.flush_count);
 400	if (MACHINE_HAS_TLB_LC &&
 401	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 402		pmdp_idte_local(mm, addr, pmdp);
 403	else
 404		pmdp_idte_global(mm, addr, pmdp);
 405	atomic_dec(&mm->context.flush_count);
 406	return old;
 407}
 408
 409static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 410				    unsigned long addr, pmd_t *pmdp)
 411{
 412	pmd_t old;
 413
 414	old = *pmdp;
 415	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
 416		return old;
 417	atomic_inc(&mm->context.flush_count);
 418	if (cpumask_equal(&mm->context.cpu_attach_mask,
 419			  cpumask_of(smp_processor_id()))) {
 420		set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_INVALID)));
 421		mm->context.flush_mm = 1;
 422		if (mm_has_pgste(mm))
 423			gmap_pmdp_invalidate(mm, addr);
 424	} else {
 425		pmdp_idte_global(mm, addr, pmdp);
 426	}
 427	atomic_dec(&mm->context.flush_count);
 428	return old;
 429}
 430
 431#ifdef CONFIG_PGSTE
 432static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
 433{
 434	struct vm_area_struct *vma;
 435	pgd_t *pgd;
 436	p4d_t *p4d;
 437	pud_t *pud;
 438
 439	/* We need a valid VMA, otherwise this is clearly a fault. */
 440	vma = vma_lookup(mm, addr);
 441	if (!vma)
 442		return -EFAULT;
 443
 444	pgd = pgd_offset(mm, addr);
 445	if (!pgd_present(*pgd))
 446		return -ENOENT;
 447
 448	p4d = p4d_offset(pgd, addr);
 449	if (!p4d_present(*p4d))
 450		return -ENOENT;
 451
 452	pud = pud_offset(p4d, addr);
 453	if (!pud_present(*pud))
 454		return -ENOENT;
 455
 456	/* Large PUDs are not supported yet. */
 457	if (pud_large(*pud))
 458		return -EFAULT;
 459
 460	*pmdp = pmd_offset(pud, addr);
 461	return 0;
 462}
 463#endif
 464
 465pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 466		       pmd_t *pmdp, pmd_t new)
 467{
 468	pmd_t old;
 469
 470	preempt_disable();
 471	old = pmdp_flush_direct(mm, addr, pmdp);
 472	set_pmd(pmdp, new);
 473	preempt_enable();
 474	return old;
 475}
 476EXPORT_SYMBOL(pmdp_xchg_direct);
 477
 478pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 479		     pmd_t *pmdp, pmd_t new)
 480{
 481	pmd_t old;
 482
 483	preempt_disable();
 484	old = pmdp_flush_lazy(mm, addr, pmdp);
 485	set_pmd(pmdp, new);
 486	preempt_enable();
 487	return old;
 488}
 489EXPORT_SYMBOL(pmdp_xchg_lazy);
 490
 491static inline void pudp_idte_local(struct mm_struct *mm,
 492				   unsigned long addr, pud_t *pudp)
 493{
 494	if (MACHINE_HAS_TLB_GUEST)
 495		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
 496			    mm->context.asce, IDTE_LOCAL);
 497	else
 498		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
 499}
 500
 501static inline void pudp_idte_global(struct mm_struct *mm,
 502				    unsigned long addr, pud_t *pudp)
 503{
 504	if (MACHINE_HAS_TLB_GUEST)
 505		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
 506			    mm->context.asce, IDTE_GLOBAL);
 507	else if (MACHINE_HAS_IDTE)
 508		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
 509	else
 510		/*
 511		 * Invalid bit position is the same for pmd and pud, so we can
  512		 * re-use __pmdp_csp() here
 513		 */
 514		__pmdp_csp((pmd_t *) pudp);
 515}
 516
 517static inline pud_t pudp_flush_direct(struct mm_struct *mm,
 518				      unsigned long addr, pud_t *pudp)
 519{
 520	pud_t old;
 521
 522	old = *pudp;
 523	if (pud_val(old) & _REGION_ENTRY_INVALID)
 524		return old;
 525	atomic_inc(&mm->context.flush_count);
 526	if (MACHINE_HAS_TLB_LC &&
 527	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 528		pudp_idte_local(mm, addr, pudp);
 529	else
 530		pudp_idte_global(mm, addr, pudp);
 531	atomic_dec(&mm->context.flush_count);
 532	return old;
 533}
 534
 535pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 536		       pud_t *pudp, pud_t new)
 537{
 538	pud_t old;
 539
 540	preempt_disable();
 541	old = pudp_flush_direct(mm, addr, pudp);
 542	set_pud(pudp, new);
 543	preempt_enable();
 544	return old;
 545}
 546EXPORT_SYMBOL(pudp_xchg_direct);
 547
 548#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 549void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 550				pgtable_t pgtable)
 551{
 552	struct list_head *lh = (struct list_head *) pgtable;
 553
 554	assert_spin_locked(pmd_lockptr(mm, pmdp));
 555
 556	/* FIFO */
 557	if (!pmd_huge_pte(mm, pmdp))
 558		INIT_LIST_HEAD(lh);
 559	else
 560		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
 561	pmd_huge_pte(mm, pmdp) = pgtable;
 562}
 563
 564pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 565{
 566	struct list_head *lh;
 567	pgtable_t pgtable;
 568	pte_t *ptep;
 569
 570	assert_spin_locked(pmd_lockptr(mm, pmdp));
 571
 572	/* FIFO */
 573	pgtable = pmd_huge_pte(mm, pmdp);
 574	lh = (struct list_head *) pgtable;
 575	if (list_empty(lh))
 576		pmd_huge_pte(mm, pmdp) = NULL;
 577	else {
 578		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
 579		list_del(lh);
 580	}
 581	ptep = (pte_t *) pgtable;
 582	set_pte(ptep, __pte(_PAGE_INVALID));
 583	ptep++;
 584	set_pte(ptep, __pte(_PAGE_INVALID));
 585	return pgtable;
 586}
 587#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
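
/*
 * Pairing sketch (generic THP usage, not code from this file): the core
 * huge-memory code deposits a preallocated pte table when it installs a
 * huge pmd and withdraws it again when the mapping is split, both under
 * the pmd lock that the assert_spin_locked() calls above demand:
 *
 *	ptl = pmd_lock(mm, pmdp);
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);	// on install
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);	// on split
 *	spin_unlock(ptl);
 */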
 588
 589#ifdef CONFIG_PGSTE
 590void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
 591		     pte_t *ptep, pte_t entry)
 592{
 593	pgste_t pgste;
 594
 595	/* the mm_has_pgste() check is done in set_pte_at() */
 596	preempt_disable();
 597	pgste = pgste_get_lock(ptep);
 598	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
 599	pgste_set_key(ptep, pgste, entry, mm);
 600	pgste = pgste_set_pte(ptep, pgste, entry);
 601	pgste_set_unlock(ptep, pgste);
 602	preempt_enable();
 603}
 604
 605void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 606{
 607	pgste_t pgste;
 608
 609	preempt_disable();
 610	pgste = pgste_get_lock(ptep);
 611	pgste_val(pgste) |= PGSTE_IN_BIT;
 612	pgste_set_unlock(ptep, pgste);
 613	preempt_enable();
 614}
 615
 616/**
 617 * ptep_force_prot - change access rights of a locked pte
 618 * @mm: pointer to the process mm_struct
 619 * @addr: virtual address in the guest address space
 620 * @ptep: pointer to the page table entry
 621 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 622 * @bit: pgste bit to set (e.g. for notification)
 623 *
 624 * Returns 0 if the access rights were changed and -EAGAIN if the current
 625 * and requested access rights are incompatible.
 626 */
 627int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
 628		    pte_t *ptep, int prot, unsigned long bit)
 629{
 630	pte_t entry;
 631	pgste_t pgste;
 632	int pte_i, pte_p, nodat;
 633
 634	pgste = pgste_get_lock(ptep);
 635	entry = *ptep;
 636	/* Check pte entry after all locks have been acquired */
 637	pte_i = pte_val(entry) & _PAGE_INVALID;
 638	pte_p = pte_val(entry) & _PAGE_PROTECT;
 639	if ((pte_i && (prot != PROT_NONE)) ||
 640	    (pte_p && (prot & PROT_WRITE))) {
 641		pgste_set_unlock(ptep, pgste);
 642		return -EAGAIN;
 643	}
 644	/* Change access rights and set pgste bit */
 645	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 646	if (prot == PROT_NONE && !pte_i) {
 647		ptep_flush_direct(mm, addr, ptep, nodat);
 648		pgste = pgste_update_all(entry, pgste, mm);
 649		entry = set_pte_bit(entry, __pgprot(_PAGE_INVALID));
 650	}
 651	if (prot == PROT_READ && !pte_p) {
 652		ptep_flush_direct(mm, addr, ptep, nodat);
 653		entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID));
 654		entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT));
 655	}
 656	pgste_val(pgste) |= bit;
 657	pgste = pgste_set_pte(ptep, pgste, entry);
 658	pgste_set_unlock(ptep, pgste);
 659	return 0;
 660}
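
/*
 * Caller sketch (hedged; the in-tree user is the gmap code): to be
 * notified before the guest next writes to a page, downgrade the pte to
 * read-only and set the notification bit in one locked step:
 *
 *	rc = ptep_force_prot(mm, gaddr, ptep, PROT_READ, PGSTE_IN_BIT);
 *	if (rc == -EAGAIN)
 *		...	// current rights are incompatible, retry later
 */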
 661
 662int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
 663		    pte_t *sptep, pte_t *tptep, pte_t pte)
 664{
 665	pgste_t spgste, tpgste;
 666	pte_t spte, tpte;
 667	int rc = -EAGAIN;
 668
 669	if (!(pte_val(*tptep) & _PAGE_INVALID))
 670		return 0;	/* already shadowed */
 671	spgste = pgste_get_lock(sptep);
 672	spte = *sptep;
 673	if (!(pte_val(spte) & _PAGE_INVALID) &&
 674	    !((pte_val(spte) & _PAGE_PROTECT) &&
 675	      !(pte_val(pte) & _PAGE_PROTECT))) {
 676		pgste_val(spgste) |= PGSTE_VSIE_BIT;
 677		tpgste = pgste_get_lock(tptep);
 678		tpte = __pte((pte_val(spte) & PAGE_MASK) |
 679			     (pte_val(pte) & _PAGE_PROTECT));
 680		/* don't touch the storage key - it belongs to parent pgste */
 681		tpgste = pgste_set_pte(tptep, tpgste, tpte);
 682		pgste_set_unlock(tptep, tpgste);
 683		rc = 1;
 684	}
 685	pgste_set_unlock(sptep, spgste);
 686	return rc;
 687}
 688
 689void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
 690{
 691	pgste_t pgste;
 692	int nodat;
 693
 694	pgste = pgste_get_lock(ptep);
 695	/* notifier is called by the caller */
 696	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 697	ptep_flush_direct(mm, saddr, ptep, nodat);
 698	/* don't touch the storage key - it belongs to parent pgste */
 699	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
 700	pgste_set_unlock(ptep, pgste);
 701}
 702
 703static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
 704{
 705	if (!non_swap_entry(entry))
 706		dec_mm_counter(mm, MM_SWAPENTS);
 707	else if (is_migration_entry(entry)) {
 708		struct page *page = pfn_swap_entry_to_page(entry);
 709
 710		dec_mm_counter(mm, mm_counter(page));
 711	}
 712	free_swap_and_cache(entry);
 713}
 714
 715void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 716		     pte_t *ptep, int reset)
 717{
 718	unsigned long pgstev;
 719	pgste_t pgste;
 720	pte_t pte;
 721
 722	/* Zap unused and logically-zero pages */
 723	preempt_disable();
 724	pgste = pgste_get_lock(ptep);
 725	pgstev = pgste_val(pgste);
 726	pte = *ptep;
 727	if (!reset && pte_swap(pte) &&
 728	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
 729	     (pgstev & _PGSTE_GPS_ZERO))) {
 730		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
 731		pte_clear(mm, addr, ptep);
 732	}
 733	if (reset)
 734		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
 735	pgste_set_unlock(ptep, pgste);
 736	preempt_enable();
 737}
 738
 739void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 740{
 741	unsigned long ptev;
 742	pgste_t pgste;
 743
 744	/* Clear storage key ACC and F, but set R/C */
 745	preempt_disable();
 746	pgste = pgste_get_lock(ptep);
 747	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 748	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
 749	ptev = pte_val(*ptep);
 750	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
 751		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
 752	pgste_set_unlock(ptep, pgste);
 753	preempt_enable();
 754}
 755
 756/*
 757 * Test and reset if a guest page is dirty
 758 */
 759bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
 760		       pte_t *ptep)
 761{
 762	pgste_t pgste;
 763	pte_t pte;
 764	bool dirty;
 765	int nodat;
 766
 767	pgste = pgste_get_lock(ptep);
 768	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
 769	pgste_val(pgste) &= ~PGSTE_UC_BIT;
 770	pte = *ptep;
 771	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
 772		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
 773		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 774		ptep_ipte_global(mm, addr, ptep, nodat);
 775		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
 776			pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
 777		else
 778			pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID));
 779		set_pte(ptep, pte);
 780	}
 781	pgste_set_unlock(ptep, pgste);
 782	return dirty;
 783}
 784EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
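
/*
 * Caller sketch (a hedged dirty-tracking example; mark_gfn_dirty() is a
 * hypothetical helper): scan guest pages and log those whose PGSTE
 * user-change (UC) bit was set since the last scan:
 *
 *	if (ptep_test_and_clear_uc(mm, addr, ptep))
 *		mark_gfn_dirty(slot, gfn);	// hypothetical helper
 */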
 785
 786int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 787			  unsigned char key, bool nq)
 788{
 789	unsigned long keyul, paddr;
 790	spinlock_t *ptl;
 791	pgste_t old, new;
 792	pmd_t *pmdp;
 793	pte_t *ptep;
 794
 795	/*
 796	 * If we don't have a PTE table and if there is no huge page mapped,
 797	 * we can ignore attempts to set the key to 0, because it already is 0.
 798	 */
 799	switch (pmd_lookup(mm, addr, &pmdp)) {
 800	case -ENOENT:
 801		return key ? -EFAULT : 0;
 802	case 0:
 803		break;
 804	default:
 805		return -EFAULT;
 806	}
 807
 808	ptl = pmd_lock(mm, pmdp);
 809	if (!pmd_present(*pmdp)) {
 810		spin_unlock(ptl);
 811		return key ? -EFAULT : 0;
 812	}
 813
 814	if (pmd_large(*pmdp)) {
 815		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 816		paddr |= addr & ~HPAGE_MASK;
 817		/*
  818		 * Huge pmds need quiescing operations; they are
  819		 * always mapped.
 820		 */
 821		page_set_storage_key(paddr, key, 1);
 822		spin_unlock(ptl);
 823		return 0;
 824	}
 825	spin_unlock(ptl);
 826
  827	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
  828	new = old = pgste_get_lock(ptep);
 829	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
 830			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
 831	keyul = (unsigned long) key;
 832	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
 833	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 834	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 835		unsigned long bits, skey;
 836
 837		paddr = pte_val(*ptep) & PAGE_MASK;
 838		skey = (unsigned long) page_get_storage_key(paddr);
 839		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 840		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
 841		/* Set storage key ACC and FP */
 842		page_set_storage_key(paddr, skey, !nq);
 843		/* Merge host changed & referenced into pgste  */
 844		pgste_val(new) |= bits << 52;
 845	}
 846	/* changing the guest storage key is considered a change of the page */
 847	if ((pgste_val(new) ^ pgste_val(old)) &
 848	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
 849		pgste_val(new) |= PGSTE_UC_BIT;
 850
 851	pgste_set_unlock(ptep, new);
 852	pte_unmap_unlock(ptep, ptl);
 853	return 0;
 854}
 855EXPORT_SYMBOL(set_guest_storage_key);
 856
 857/*
 858 * Conditionally set a guest storage key (handling csske).
 859 * oldkey will be updated when either mr or mc is set and a pointer is given.
 860 *
  861 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 862 * storage key was updated and -EFAULT on access errors.
 863 */
 864int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 865			       unsigned char key, unsigned char *oldkey,
 866			       bool nq, bool mr, bool mc)
 867{
 868	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
 869	int rc;
 870
 871	/* we can drop the pgste lock between getting and setting the key */
 872	if (mr | mc) {
 873		rc = get_guest_storage_key(current->mm, addr, &tmp);
 874		if (rc)
 875			return rc;
 876		if (oldkey)
 877			*oldkey = tmp;
 878		if (!mr)
 879			mask |= _PAGE_REFERENCED;
 880		if (!mc)
 881			mask |= _PAGE_CHANGED;
 882		if (!((tmp ^ key) & mask))
 883			return 0;
 884	}
 885	rc = set_guest_storage_key(current->mm, addr, key, nq);
 886	return rc < 0 ? rc : 1;
 887}
 888EXPORT_SYMBOL(cond_set_guest_storage_key);
 889
 890/*
 891 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 892 *
 893 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 894 */
 895int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 896{
 897	spinlock_t *ptl;
 898	unsigned long paddr;
 899	pgste_t old, new;
 900	pmd_t *pmdp;
 901	pte_t *ptep;
 902	int cc = 0;
 903
 904	/*
 905	 * If we don't have a PTE table and if there is no huge page mapped,
 906	 * the storage key is 0 and there is nothing for us to do.
 907	 */
 908	switch (pmd_lookup(mm, addr, &pmdp)) {
 909	case -ENOENT:
 910		return 0;
 911	case 0:
 912		break;
 913	default:
 914		return -EFAULT;
 915	}
 916
 917	ptl = pmd_lock(mm, pmdp);
 918	if (!pmd_present(*pmdp)) {
 919		spin_unlock(ptl);
 920		return 0;
 921	}
 922
 923	if (pmd_large(*pmdp)) {
 924		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 925		paddr |= addr & ~HPAGE_MASK;
 926		cc = page_reset_referenced(paddr);
 927		spin_unlock(ptl);
 928		return cc;
 929	}
 930	spin_unlock(ptl);
 931
  932	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
  933	new = old = pgste_get_lock(ptep);
 934	/* Reset guest reference bit only */
 935	pgste_val(new) &= ~PGSTE_GR_BIT;
 936
 937	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 938		paddr = pte_val(*ptep) & PAGE_MASK;
 939		cc = page_reset_referenced(paddr);
 940		/* Merge real referenced bit into host-set */
 941		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
 942	}
 943	/* Reflect guest's logical view, not physical */
 944	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
 945	/* Changing the guest storage key is considered a change of the page */
 946	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
 947		pgste_val(new) |= PGSTE_UC_BIT;
 948
 949	pgste_set_unlock(ptep, new);
 950	pte_unmap_unlock(ptep, ptl);
 951	return cc;
 952}
 953EXPORT_SYMBOL(reset_guest_reference_bit);
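
/*
 * Caller sketch (modeled on RRBE instruction emulation; the helpers named
 * here are hypothetical): the non-negative return value is the condition
 * code to report to the guest, negative values become faults:
 *
 *	cc = reset_guest_reference_bit(mm, gaddr);
 *	if (cc < 0)
 *		return inject_addressing_exception(vcpu);	// hypothetical
 *	set_guest_condition_code(vcpu, cc);			// hypothetical
 */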
 954
 955int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 956			  unsigned char *key)
 957{
 958	unsigned long paddr;
 959	spinlock_t *ptl;
 960	pgste_t pgste;
 961	pmd_t *pmdp;
 962	pte_t *ptep;
 963
 964	/*
 965	 * If we don't have a PTE table and if there is no huge page mapped,
 966	 * the storage key is 0.
 967	 */
 968	*key = 0;
 969
 970	switch (pmd_lookup(mm, addr, &pmdp)) {
 971	case -ENOENT:
 972		return 0;
 973	case 0:
 974		break;
 975	default:
 976		return -EFAULT;
 977	}
 978
 979	ptl = pmd_lock(mm, pmdp);
  980	if (!pmd_present(*pmdp)) {
  981		spin_unlock(ptl);
  982		return 0;
  983	}
 984
 985	if (pmd_large(*pmdp)) {
 986		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 987		paddr |= addr & ~HPAGE_MASK;
 988		*key = page_get_storage_key(paddr);
 989		spin_unlock(ptl);
 990		return 0;
 991	}
 992	spin_unlock(ptl);
 993
  994	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
  995	pgste = pgste_get_lock(ptep);
 996	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 997	paddr = pte_val(*ptep) & PAGE_MASK;
 998	if (!(pte_val(*ptep) & _PAGE_INVALID))
 999		*key = page_get_storage_key(paddr);
1000	/* Reflect guest's logical view, not physical */
1001	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
1002	pgste_set_unlock(ptep, pgste);
1003	pte_unmap_unlock(ptep, ptl);
1004	return 0;
1005}
1006EXPORT_SYMBOL(get_guest_storage_key);
1007
1008/**
1009 * pgste_perform_essa - perform ESSA actions on the PGSTE.
1010 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1011 * @hva: the host virtual address of the page whose PGSTE is to be processed
1012 * @orc: the specific action to perform, see the ESSA_SET_* macros.
1013 * @oldpte: the PTE will be saved there if the pointer is not NULL.
1014 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
1015 *
1016 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
1017 *	   or < 0 in case of error. -EINVAL is returned for invalid values
1018 *	   of orc, -EFAULT for invalid addresses.
1019 */
1020int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
1021			unsigned long *oldpte, unsigned long *oldpgste)
1022{
1023	struct vm_area_struct *vma;
1024	unsigned long pgstev;
1025	spinlock_t *ptl;
1026	pgste_t pgste;
1027	pte_t *ptep;
1028	int res = 0;
1029
1030	WARN_ON_ONCE(orc > ESSA_MAX);
1031	if (unlikely(orc > ESSA_MAX))
1032		return -EINVAL;
1033
1034	vma = vma_lookup(mm, hva);
1035	if (!vma || is_vm_hugetlb_page(vma))
1036		return -EFAULT;
1037	ptep = get_locked_pte(mm, hva, &ptl);
1038	if (unlikely(!ptep))
1039		return -EFAULT;
1040	pgste = pgste_get_lock(ptep);
1041	pgstev = pgste_val(pgste);
1042	if (oldpte)
1043		*oldpte = pte_val(*ptep);
1044	if (oldpgste)
1045		*oldpgste = pgstev;
1046
1047	switch (orc) {
1048	case ESSA_GET_STATE:
1049		break;
1050	case ESSA_SET_STABLE:
1051		pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
1052		pgstev |= _PGSTE_GPS_USAGE_STABLE;
1053		break;
1054	case ESSA_SET_UNUSED:
1055		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1056		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
1057		if (pte_val(*ptep) & _PAGE_INVALID)
1058			res = 1;
1059		break;
1060	case ESSA_SET_VOLATILE:
1061		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1062		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1063		if (pte_val(*ptep) & _PAGE_INVALID)
1064			res = 1;
1065		break;
1066	case ESSA_SET_POT_VOLATILE:
1067		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1068		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
1069			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
1070			break;
1071		}
1072		if (pgstev & _PGSTE_GPS_ZERO) {
1073			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1074			break;
1075		}
1076		if (!(pgstev & PGSTE_GC_BIT)) {
1077			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1078			res = 1;
1079			break;
1080		}
1081		break;
1082	case ESSA_SET_STABLE_RESIDENT:
1083		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1084		pgstev |= _PGSTE_GPS_USAGE_STABLE;
1085		/*
1086		 * Since the resident state can go away any time after this
1087		 * call, we will not make this page resident. We can revisit
1088		 * this decision if a guest will ever start using this.
1089		 */
1090		break;
1091	case ESSA_SET_STABLE_IF_RESIDENT:
1092		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
1093			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1094			pgstev |= _PGSTE_GPS_USAGE_STABLE;
1095		}
1096		break;
1097	case ESSA_SET_STABLE_NODAT:
1098		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1099		pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
1100		break;
1101	default:
1102		/* we should never get here! */
1103		break;
1104	}
1105	/* If we are discarding a page, set it to logical zero */
1106	if (res)
1107		pgstev |= _PGSTE_GPS_ZERO;
1108
1109	pgste_val(pgste) = pgstev;
1110	pgste_set_unlock(ptep, pgste);
1111	pte_unmap_unlock(ptep, ptl);
1112	return res;
1113}
1114EXPORT_SYMBOL(pgste_perform_essa);
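
/*
 * Caller sketch (modeled on KVM's ESSA intercept handling; details vary
 * by kernel version): apply the requested state to each page and collect
 * the frames that became logically zero for the guest's CBRL:
 *
 *	res = pgste_perform_essa(mm, hva, orc, NULL, NULL);
 *	if (res < 0)
 *		return res;
 *	if (res)
 *		cbrl[i++] = gfn;	// report page as discarded
 */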
1115
1116/**
1117 * set_pgste_bits - set specific PGSTE bits.
1118 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1119 * @hva: the host virtual address of the page whose PGSTE is to be processed
1120 * @bits: a bitmask representing the bits that will be touched
1121 * @value: the values of the bits to be written. Only the bits in the mask
1122 *	   will be written.
1123 *
1124 * Return: 0 on success, < 0 in case of error.
1125 */
1126int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
1127			unsigned long bits, unsigned long value)
1128{
1129	struct vm_area_struct *vma;
1130	spinlock_t *ptl;
1131	pgste_t new;
1132	pte_t *ptep;
1133
1134	vma = vma_lookup(mm, hva);
1135	if (!vma || is_vm_hugetlb_page(vma))
1136		return -EFAULT;
1137	ptep = get_locked_pte(mm, hva, &ptl);
1138	if (unlikely(!ptep))
1139		return -EFAULT;
1140	new = pgste_get_lock(ptep);
1141
1142	pgste_val(new) &= ~bits;
1143	pgste_val(new) |= value & bits;
1144
1145	pgste_set_unlock(ptep, new);
1146	pte_unmap_unlock(ptep, ptl);
1147	return 0;
1148}
1149EXPORT_SYMBOL(set_pgste_bits);
1150
1151/**
1152 * get_pgste - get the current PGSTE for the given address.
1153 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1154 * @hva: the host virtual address of the page whose PGSTE is to be processed
1155 * @pgstep: will be written with the current PGSTE for the given address.
1156 *
1157 * Return: 0 on success, < 0 in case of error.
1158 */
1159int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
1160{
1161	struct vm_area_struct *vma;
1162	spinlock_t *ptl;
1163	pte_t *ptep;
1164
1165	vma = vma_lookup(mm, hva);
1166	if (!vma || is_vm_hugetlb_page(vma))
1167		return -EFAULT;
1168	ptep = get_locked_pte(mm, hva, &ptl);
1169	if (unlikely(!ptep))
1170		return -EFAULT;
1171	*pgstep = pgste_val(pgste_get(ptep));
1172	pte_unmap_unlock(ptep, ptl);
1173	return 0;
1174}
1175EXPORT_SYMBOL(get_pgste);
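
/*
 * Pairing sketch (hedged; these two helpers exist so that KVM can save
 * and restore PGSTE state, e.g. across migration): read the PGSTE on the
 * source and replay the usage bits on the destination:
 *
 *	unsigned long pgstev;
 *
 *	if (!get_pgste(mm, hva, &pgstev))
 *		set_pgste_bits(mm, hva, _PGSTE_GPS_USAGE_MASK,
 *			       pgstev & _PGSTE_GPS_USAGE_MASK);
 */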
1176#endif
v5.9: arch/s390/mm/pgtable.c (older version of the same file)
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *    Copyright IBM Corp. 2007, 2011
   4 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/kernel.h>
   9#include <linux/errno.h>
  10#include <linux/gfp.h>
  11#include <linux/mm.h>
  12#include <linux/swap.h>
  13#include <linux/smp.h>
  14#include <linux/spinlock.h>
  15#include <linux/rcupdate.h>
  16#include <linux/slab.h>
  17#include <linux/swapops.h>
  18#include <linux/sysctl.h>
  19#include <linux/ksm.h>
  20#include <linux/mman.h>
  21
  22#include <asm/tlb.h>
  23#include <asm/tlbflush.h>
  24#include <asm/mmu_context.h>
  25#include <asm/page-states.h>
   26
   27static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
  28				   pte_t *ptep, int nodat)
  29{
  30	unsigned long opt, asce;
  31
  32	if (MACHINE_HAS_TLB_GUEST) {
  33		opt = 0;
  34		asce = READ_ONCE(mm->context.gmap_asce);
  35		if (asce == 0UL || nodat)
  36			opt |= IPTE_NODAT;
  37		if (asce != -1UL) {
  38			asce = asce ? : mm->context.asce;
  39			opt |= IPTE_GUEST_ASCE;
  40		}
  41		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
  42	} else {
  43		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
  44	}
  45}
  46
  47static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
  48				    pte_t *ptep, int nodat)
  49{
  50	unsigned long opt, asce;
  51
  52	if (MACHINE_HAS_TLB_GUEST) {
  53		opt = 0;
  54		asce = READ_ONCE(mm->context.gmap_asce);
  55		if (asce == 0UL || nodat)
  56			opt |= IPTE_NODAT;
  57		if (asce != -1UL) {
  58			asce = asce ? : mm->context.asce;
  59			opt |= IPTE_GUEST_ASCE;
  60		}
  61		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
  62	} else {
  63		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
  64	}
  65}
  66
  67static inline pte_t ptep_flush_direct(struct mm_struct *mm,
  68				      unsigned long addr, pte_t *ptep,
  69				      int nodat)
  70{
  71	pte_t old;
  72
  73	old = *ptep;
  74	if (unlikely(pte_val(old) & _PAGE_INVALID))
  75		return old;
  76	atomic_inc(&mm->context.flush_count);
  77	if (MACHINE_HAS_TLB_LC &&
  78	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
  79		ptep_ipte_local(mm, addr, ptep, nodat);
  80	else
  81		ptep_ipte_global(mm, addr, ptep, nodat);
  82	atomic_dec(&mm->context.flush_count);
  83	return old;
  84}
  85
  86static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
  87				    unsigned long addr, pte_t *ptep,
  88				    int nodat)
  89{
  90	pte_t old;
  91
  92	old = *ptep;
  93	if (unlikely(pte_val(old) & _PAGE_INVALID))
  94		return old;
  95	atomic_inc(&mm->context.flush_count);
  96	if (cpumask_equal(&mm->context.cpu_attach_mask,
  97			  cpumask_of(smp_processor_id()))) {
  98		pte_val(*ptep) |= _PAGE_INVALID;
  99		mm->context.flush_mm = 1;
 100	} else
 101		ptep_ipte_global(mm, addr, ptep, nodat);
 102	atomic_dec(&mm->context.flush_count);
 103	return old;
 104}
 105
 106static inline pgste_t pgste_get_lock(pte_t *ptep)
 107{
 108	unsigned long new = 0;
 109#ifdef CONFIG_PGSTE
 110	unsigned long old;
 111
 112	asm(
 113		"	lg	%0,%2\n"
 114		"0:	lgr	%1,%0\n"
 115		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
 116		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
 117		"	csg	%0,%1,%2\n"
 118		"	jl	0b\n"
 119		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
 120		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
 121#endif
 122	return __pgste(new);
 123}
 124
 125static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
 126{
 127#ifdef CONFIG_PGSTE
 128	asm(
 129		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
 130		"	stg	%1,%0\n"
 131		: "=Q" (ptep[PTRS_PER_PTE])
 132		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
 133		: "cc", "memory");
 134#endif
 135}
 136
 137static inline pgste_t pgste_get(pte_t *ptep)
 138{
 139	unsigned long pgste = 0;
 140#ifdef CONFIG_PGSTE
 141	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
 142#endif
 143	return __pgste(pgste);
 144}
 145
 146static inline void pgste_set(pte_t *ptep, pgste_t pgste)
 147{
 148#ifdef CONFIG_PGSTE
 149	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
 150#endif
 151}
 152
 153static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
 154				       struct mm_struct *mm)
 155{
 156#ifdef CONFIG_PGSTE
 157	unsigned long address, bits, skey;
 158
 159	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
 160		return pgste;
 161	address = pte_val(pte) & PAGE_MASK;
 162	skey = (unsigned long) page_get_storage_key(address);
 163	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 164	/* Transfer page changed & referenced bit to guest bits in pgste */
 165	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
 166	/* Copy page access key and fetch protection bit to pgste */
 167	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 168	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 169#endif
 170	return pgste;
 171
 172}
 173
 174static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
 175				 struct mm_struct *mm)
 176{
 177#ifdef CONFIG_PGSTE
 178	unsigned long address;
 179	unsigned long nkey;
 180
 181	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
 182		return;
 183	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
 184	address = pte_val(entry) & PAGE_MASK;
 185	/*
 186	 * Set page access key and fetch protection bit from pgste.
 187	 * The guest C/R information is still in the PGSTE, set real
 188	 * key C/R to 0.
 189	 */
 190	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 191	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
 192	page_set_storage_key(address, nkey, 0);
 193#endif
 194}
 195
 196static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 197{
 198#ifdef CONFIG_PGSTE
 199	if ((pte_val(entry) & _PAGE_PRESENT) &&
 200	    (pte_val(entry) & _PAGE_WRITE) &&
 201	    !(pte_val(entry) & _PAGE_INVALID)) {
 202		if (!MACHINE_HAS_ESOP) {
 203			/*
 204			 * Without enhanced suppression-on-protection force
 205			 * the dirty bit on for all writable ptes.
 206			 */
 207			pte_val(entry) |= _PAGE_DIRTY;
 208			pte_val(entry) &= ~_PAGE_PROTECT;
 209		}
 210		if (!(pte_val(entry) & _PAGE_PROTECT))
 211			/* This pte allows write access, set user-dirty */
 212			pgste_val(pgste) |= PGSTE_UC_BIT;
 213	}
 214#endif
 215	*ptep = entry;
 216	return pgste;
 217}
 218
 219static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
 220				       unsigned long addr,
 221				       pte_t *ptep, pgste_t pgste)
 222{
 223#ifdef CONFIG_PGSTE
 224	unsigned long bits;
 225
 226	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
 227	if (bits) {
 228		pgste_val(pgste) ^= bits;
 229		ptep_notify(mm, addr, ptep, bits);
 230	}
 231#endif
 232	return pgste;
 233}
 234
 235static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
 236				      unsigned long addr, pte_t *ptep)
 237{
 238	pgste_t pgste = __pgste(0);
 239
 240	if (mm_has_pgste(mm)) {
 241		pgste = pgste_get_lock(ptep);
 242		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
 243	}
 244	return pgste;
 245}
 246
 247static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
 248				    unsigned long addr, pte_t *ptep,
 249				    pgste_t pgste, pte_t old, pte_t new)
 250{
 251	if (mm_has_pgste(mm)) {
 252		if (pte_val(old) & _PAGE_INVALID)
 253			pgste_set_key(ptep, pgste, new, mm);
 254		if (pte_val(new) & _PAGE_INVALID) {
 255			pgste = pgste_update_all(old, pgste, mm);
 256			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
 257			    _PGSTE_GPS_USAGE_UNUSED)
 258				pte_val(old) |= _PAGE_UNUSED;
 259		}
 260		pgste = pgste_set_pte(ptep, pgste, new);
 261		pgste_set_unlock(ptep, pgste);
 262	} else {
 263		*ptep = new;
 264	}
 265	return old;
 266}
 267
 268pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
 269		       pte_t *ptep, pte_t new)
 270{
 271	pgste_t pgste;
 272	pte_t old;
 273	int nodat;
 274
 275	preempt_disable();
 276	pgste = ptep_xchg_start(mm, addr, ptep);
 277	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 278	old = ptep_flush_direct(mm, addr, ptep, nodat);
 279	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 280	preempt_enable();
 281	return old;
 282}
 283EXPORT_SYMBOL(ptep_xchg_direct);
 284
 285pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 286		     pte_t *ptep, pte_t new)
 287{
 288	pgste_t pgste;
 289	pte_t old;
 290	int nodat;
 291
 292	preempt_disable();
 293	pgste = ptep_xchg_start(mm, addr, ptep);
 294	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 295	old = ptep_flush_lazy(mm, addr, ptep, nodat);
 296	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 297	preempt_enable();
 298	return old;
 299}
 300EXPORT_SYMBOL(ptep_xchg_lazy);
 301
 302pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
 303			     pte_t *ptep)
 304{
 305	pgste_t pgste;
 306	pte_t old;
 307	int nodat;
 308	struct mm_struct *mm = vma->vm_mm;
 309
 310	preempt_disable();
 311	pgste = ptep_xchg_start(mm, addr, ptep);
 312	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 313	old = ptep_flush_lazy(mm, addr, ptep, nodat);
 314	if (mm_has_pgste(mm)) {
 315		pgste = pgste_update_all(old, pgste, mm);
 316		pgste_set(ptep, pgste);
 317	}
 318	return old;
 319}
 320
 321void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 322			     pte_t *ptep, pte_t old_pte, pte_t pte)
 323{
 324	pgste_t pgste;
 325	struct mm_struct *mm = vma->vm_mm;
 326
 327	if (!MACHINE_HAS_NX)
 328		pte_val(pte) &= ~_PAGE_NOEXEC;
 329	if (mm_has_pgste(mm)) {
 330		pgste = pgste_get(ptep);
 331		pgste_set_key(ptep, pgste, pte, mm);
 332		pgste = pgste_set_pte(ptep, pgste, pte);
 333		pgste_set_unlock(ptep, pgste);
 334	} else {
 335		*ptep = pte;
 336	}
 337	preempt_enable();
 338}
 339
 340static inline void pmdp_idte_local(struct mm_struct *mm,
 341				   unsigned long addr, pmd_t *pmdp)
 342{
 343	if (MACHINE_HAS_TLB_GUEST)
 344		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 345			    mm->context.asce, IDTE_LOCAL);
 346	else
 347		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
 348	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 349		gmap_pmdp_idte_local(mm, addr);
 350}
 351
 352static inline void pmdp_idte_global(struct mm_struct *mm,
 353				    unsigned long addr, pmd_t *pmdp)
 354{
 355	if (MACHINE_HAS_TLB_GUEST) {
 356		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 357			    mm->context.asce, IDTE_GLOBAL);
 358		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 359			gmap_pmdp_idte_global(mm, addr);
 360	} else if (MACHINE_HAS_IDTE) {
 361		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
 362		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 363			gmap_pmdp_idte_global(mm, addr);
 364	} else {
 365		__pmdp_csp(pmdp);
 366		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 367			gmap_pmdp_csp(mm, addr);
 368	}
 369}
 370
 371static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
 372				      unsigned long addr, pmd_t *pmdp)
 373{
 374	pmd_t old;
 375
 376	old = *pmdp;
 377	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
 378		return old;
 379	atomic_inc(&mm->context.flush_count);
 380	if (MACHINE_HAS_TLB_LC &&
 381	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 382		pmdp_idte_local(mm, addr, pmdp);
 383	else
 384		pmdp_idte_global(mm, addr, pmdp);
 385	atomic_dec(&mm->context.flush_count);
 386	return old;
 387}
 388
 389static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 390				    unsigned long addr, pmd_t *pmdp)
 391{
 392	pmd_t old;
 393
 394	old = *pmdp;
 395	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
 396		return old;
 397	atomic_inc(&mm->context.flush_count);
 398	if (cpumask_equal(&mm->context.cpu_attach_mask,
 399			  cpumask_of(smp_processor_id()))) {
 400		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
 401		mm->context.flush_mm = 1;
 402		if (mm_has_pgste(mm))
 403			gmap_pmdp_invalidate(mm, addr);
 404	} else {
 405		pmdp_idte_global(mm, addr, pmdp);
 406	}
 407	atomic_dec(&mm->context.flush_count);
 408	return old;
 409}
 410
 411#ifdef CONFIG_PGSTE
 412static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
  413{
  414	pgd_t *pgd;
  415	p4d_t *p4d;
  416	pud_t *pud;
  417	pmd_t *pmd;
  418
 419	pgd = pgd_offset(mm, addr);
 420	p4d = p4d_alloc(mm, pgd, addr);
 421	if (!p4d)
 422		return NULL;
 423	pud = pud_alloc(mm, p4d, addr);
 424	if (!pud)
 425		return NULL;
 426	pmd = pmd_alloc(mm, pud, addr);
  427	return pmd;
  428}
 429#endif
 430
 431pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 432		       pmd_t *pmdp, pmd_t new)
 433{
 434	pmd_t old;
 435
 436	preempt_disable();
 437	old = pmdp_flush_direct(mm, addr, pmdp);
 438	*pmdp = new;
 439	preempt_enable();
 440	return old;
 441}
 442EXPORT_SYMBOL(pmdp_xchg_direct);
 443
 444pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 445		     pmd_t *pmdp, pmd_t new)
 446{
 447	pmd_t old;
 448
 449	preempt_disable();
 450	old = pmdp_flush_lazy(mm, addr, pmdp);
 451	*pmdp = new;
 452	preempt_enable();
 453	return old;
 454}
 455EXPORT_SYMBOL(pmdp_xchg_lazy);
 456
 457static inline void pudp_idte_local(struct mm_struct *mm,
 458				   unsigned long addr, pud_t *pudp)
 459{
 460	if (MACHINE_HAS_TLB_GUEST)
 461		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
 462			    mm->context.asce, IDTE_LOCAL);
 463	else
 464		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
 465}
 466
 467static inline void pudp_idte_global(struct mm_struct *mm,
 468				    unsigned long addr, pud_t *pudp)
 469{
 470	if (MACHINE_HAS_TLB_GUEST)
 471		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
 472			    mm->context.asce, IDTE_GLOBAL);
 473	else if (MACHINE_HAS_IDTE)
 474		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
 475	else
 476		/*
 477		 * Invalid bit position is the same for pmd and pud, so we can
  478		 * re-use __pmdp_csp() here
 479		 */
 480		__pmdp_csp((pmd_t *) pudp);
 481}
 482
 483static inline pud_t pudp_flush_direct(struct mm_struct *mm,
 484				      unsigned long addr, pud_t *pudp)
 485{
 486	pud_t old;
 487
 488	old = *pudp;
 489	if (pud_val(old) & _REGION_ENTRY_INVALID)
 490		return old;
 491	atomic_inc(&mm->context.flush_count);
 492	if (MACHINE_HAS_TLB_LC &&
 493	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 494		pudp_idte_local(mm, addr, pudp);
 495	else
 496		pudp_idte_global(mm, addr, pudp);
 497	atomic_dec(&mm->context.flush_count);
 498	return old;
 499}
 500
 501pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 502		       pud_t *pudp, pud_t new)
 503{
 504	pud_t old;
 505
 506	preempt_disable();
 507	old = pudp_flush_direct(mm, addr, pudp);
 508	*pudp = new;
 509	preempt_enable();
 510	return old;
 511}
 512EXPORT_SYMBOL(pudp_xchg_direct);
 513
 514#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 515void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 516				pgtable_t pgtable)
 517{
 518	struct list_head *lh = (struct list_head *) pgtable;
 519
 520	assert_spin_locked(pmd_lockptr(mm, pmdp));
 521
 522	/* FIFO */
 523	if (!pmd_huge_pte(mm, pmdp))
 524		INIT_LIST_HEAD(lh);
 525	else
 526		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
 527	pmd_huge_pte(mm, pmdp) = pgtable;
 528}
 529
 530pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 531{
 532	struct list_head *lh;
 533	pgtable_t pgtable;
 534	pte_t *ptep;
 535
 536	assert_spin_locked(pmd_lockptr(mm, pmdp));
 537
 538	/* FIFO */
 539	pgtable = pmd_huge_pte(mm, pmdp);
 540	lh = (struct list_head *) pgtable;
 541	if (list_empty(lh))
 542		pmd_huge_pte(mm, pmdp) = NULL;
 543	else {
 544		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
 545		list_del(lh);
 546	}
 547	ptep = (pte_t *) pgtable;
 548	pte_val(*ptep) = _PAGE_INVALID;
 549	ptep++;
 550	pte_val(*ptep) = _PAGE_INVALID;
 551	return pgtable;
 552}
 553#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 554
 555#ifdef CONFIG_PGSTE
 556void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
 557		     pte_t *ptep, pte_t entry)
 558{
 559	pgste_t pgste;
 560
 561	/* the mm_has_pgste() check is done in set_pte_at() */
 562	preempt_disable();
 563	pgste = pgste_get_lock(ptep);
 564	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
 565	pgste_set_key(ptep, pgste, entry, mm);
 566	pgste = pgste_set_pte(ptep, pgste, entry);
 567	pgste_set_unlock(ptep, pgste);
 568	preempt_enable();
 569}
 570
 571void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 572{
 573	pgste_t pgste;
 574
 575	preempt_disable();
 576	pgste = pgste_get_lock(ptep);
 577	pgste_val(pgste) |= PGSTE_IN_BIT;
 578	pgste_set_unlock(ptep, pgste);
 579	preempt_enable();
 580}
 581
 582/**
 583 * ptep_force_prot - change access rights of a locked pte
 584 * @mm: pointer to the process mm_struct
 585 * @addr: virtual address in the guest address space
 586 * @ptep: pointer to the page table entry
 587 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 588 * @bit: pgste bit to set (e.g. for notification)
 589 *
 590 * Returns 0 if the access rights were changed and -EAGAIN if the current
 591 * and requested access rights are incompatible.
 592 */
 593int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
 594		    pte_t *ptep, int prot, unsigned long bit)
 595{
 596	pte_t entry;
 597	pgste_t pgste;
 598	int pte_i, pte_p, nodat;
 599
 600	pgste = pgste_get_lock(ptep);
 601	entry = *ptep;
 602	/* Check pte entry after all locks have been acquired */
 603	pte_i = pte_val(entry) & _PAGE_INVALID;
 604	pte_p = pte_val(entry) & _PAGE_PROTECT;
 605	if ((pte_i && (prot != PROT_NONE)) ||
 606	    (pte_p && (prot & PROT_WRITE))) {
 607		pgste_set_unlock(ptep, pgste);
 608		return -EAGAIN;
 609	}
 610	/* Change access rights and set pgste bit */
 611	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 612	if (prot == PROT_NONE && !pte_i) {
 613		ptep_flush_direct(mm, addr, ptep, nodat);
 614		pgste = pgste_update_all(entry, pgste, mm);
 615		pte_val(entry) |= _PAGE_INVALID;
 616	}
 617	if (prot == PROT_READ && !pte_p) {
 618		ptep_flush_direct(mm, addr, ptep, nodat);
 619		pte_val(entry) &= ~_PAGE_INVALID;
 620		pte_val(entry) |= _PAGE_PROTECT;
 621	}
 622	pgste_val(pgste) |= bit;
 623	pgste = pgste_set_pte(ptep, pgste, entry);
 624	pgste_set_unlock(ptep, pgste);
 625	return 0;
 626}
 627
 628int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
 629		    pte_t *sptep, pte_t *tptep, pte_t pte)
 630{
 631	pgste_t spgste, tpgste;
 632	pte_t spte, tpte;
 633	int rc = -EAGAIN;
 634
 635	if (!(pte_val(*tptep) & _PAGE_INVALID))
 636		return 0;	/* already shadowed */
 637	spgste = pgste_get_lock(sptep);
 638	spte = *sptep;
 639	if (!(pte_val(spte) & _PAGE_INVALID) &&
 640	    !((pte_val(spte) & _PAGE_PROTECT) &&
 641	      !(pte_val(pte) & _PAGE_PROTECT))) {
 642		pgste_val(spgste) |= PGSTE_VSIE_BIT;
 643		tpgste = pgste_get_lock(tptep);
 644		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
 645				(pte_val(pte) & _PAGE_PROTECT);
 646		/* don't touch the storage key - it belongs to parent pgste */
 647		tpgste = pgste_set_pte(tptep, tpgste, tpte);
 648		pgste_set_unlock(tptep, tpgste);
 649		rc = 1;
 650	}
 651	pgste_set_unlock(sptep, spgste);
 652	return rc;
 653}
 654
 655void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
 656{
 657	pgste_t pgste;
 658	int nodat;
 659
 660	pgste = pgste_get_lock(ptep);
 661	/* notifier is called by the caller */
 662	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 663	ptep_flush_direct(mm, saddr, ptep, nodat);
 664	/* don't touch the storage key - it belongs to parent pgste */
 665	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
 666	pgste_set_unlock(ptep, pgste);
 667}
 668
 669static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
 670{
 671	if (!non_swap_entry(entry))
 672		dec_mm_counter(mm, MM_SWAPENTS);
 673	else if (is_migration_entry(entry)) {
 674		struct page *page = migration_entry_to_page(entry);
 675
 676		dec_mm_counter(mm, mm_counter(page));
 677	}
 678	free_swap_and_cache(entry);
 679}
 680
 681void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 682		     pte_t *ptep, int reset)
 683{
 684	unsigned long pgstev;
 685	pgste_t pgste;
 686	pte_t pte;
 687
 688	/* Zap unused and logically-zero pages */
 689	preempt_disable();
 690	pgste = pgste_get_lock(ptep);
 691	pgstev = pgste_val(pgste);
 692	pte = *ptep;
 693	if (!reset && pte_swap(pte) &&
 694	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
 695	     (pgstev & _PGSTE_GPS_ZERO))) {
 696		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
 697		pte_clear(mm, addr, ptep);
 698	}
 699	if (reset)
 700		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
 701	pgste_set_unlock(ptep, pgste);
 702	preempt_enable();
 703}
 704
 705void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 706{
 707	unsigned long ptev;
 708	pgste_t pgste;
 709
 710	/* Clear storage key ACC and F, but set R/C */
 711	preempt_disable();
 712	pgste = pgste_get_lock(ptep);
 713	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 714	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
 715	ptev = pte_val(*ptep);
 716	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
 717		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
 718	pgste_set_unlock(ptep, pgste);
 719	preempt_enable();
 720}
 721
 722/*
 723 * Test and clear whether a guest page is dirty (PGSTE_UC_BIT)
 724 */
 725bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
 726		       pte_t *ptep)
 727{
 728	pgste_t pgste;
 729	pte_t pte;
 730	bool dirty;
 731	int nodat;
 732
 733	pgste = pgste_get_lock(ptep);
 734	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
 735	pgste_val(pgste) &= ~PGSTE_UC_BIT;
 736	pte = *ptep;
 737	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
 738		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
 739		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 740		ptep_ipte_global(mm, addr, ptep, nodat);
 741		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
 742			pte_val(pte) |= _PAGE_PROTECT;
 743		else
 744			pte_val(pte) |= _PAGE_INVALID;
 745		*ptep = pte;
 746	}
 747	pgste_set_unlock(ptep, pgste);
 748	return dirty;
 749}
 750EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
 751
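/*
 * Illustrative sketch, not part of the original file: harvesting the
 * per-page dirty state into a bitmap, as a dirty-log pass might do. The
 * function name and bitmap layout are hypothetical; mmap_read_lock()
 * keeps the page tables stable while they are walked.
 */
static void __maybe_unused example_sync_dirty_log(struct mm_struct *mm,
						  unsigned long hva,
						  unsigned long npages,
						  unsigned long *bitmap)
{
	unsigned long i;
	spinlock_t *ptl;
	pte_t *ptep;

	mmap_read_lock(mm);
	for (i = 0; i < npages; i++, hva += PAGE_SIZE) {
		ptep = get_locked_pte(mm, hva, &ptl);
		if (!ptep)
			continue;	/* unmapped pages cannot be dirty */
		if (ptep_test_and_clear_uc(mm, hva, ptep))
			set_bit(i, bitmap);
		pte_unmap_unlock(ptep, ptl);
	}
	mmap_read_unlock(mm);
}
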
 752int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 753			  unsigned char key, bool nq)
 754{
 755	unsigned long keyul, paddr;
 756	spinlock_t *ptl;
 757	pgste_t old, new;
 758	pmd_t *pmdp;
 759	pte_t *ptep;
 760
 761	pmdp = pmd_alloc_map(mm, addr);
 762	if (unlikely(!pmdp))
 763		return -EFAULT;
 764
 765	ptl = pmd_lock(mm, pmdp);
 766	if (!pmd_present(*pmdp)) {
 767		spin_unlock(ptl);
 768		return -EFAULT;
 769	}
 770
 771	if (pmd_large(*pmdp)) {
 772		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 773		paddr |= addr & ~HPAGE_MASK;
 774		/*
 775		 * Huge pmds need quiescing operations; they are
 776		 * always mapped.
 777		 */
 778		page_set_storage_key(paddr, key, 1);
 779		spin_unlock(ptl);
 780		return 0;
 781	}
 782	spin_unlock(ptl);
 783
 784	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 785	if (unlikely(!ptep))
 786		return -EFAULT;
 787
 788	new = old = pgste_get_lock(ptep);
 789	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
 790			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
 791	keyul = (unsigned long) key;
 792	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
 793	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 794	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 795		unsigned long bits, skey;
 796
 797		paddr = pte_val(*ptep) & PAGE_MASK;
 798		skey = (unsigned long) page_get_storage_key(paddr);
 799		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 800		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
 801		/* Set storage key ACC and FP */
 802		page_set_storage_key(paddr, skey, !nq);
 803		/* Merge host changed & referenced into pgste */
 804		pgste_val(new) |= bits << 52;
 805	}
 806	/* changing the guest storage key is considered a change of the page */
 807	if ((pgste_val(new) ^ pgste_val(old)) &
 808	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
 809		pgste_val(new) |= PGSTE_UC_BIT;
 810
 811	pgste_set_unlock(ptep, new);
 812	pte_unmap_unlock(ptep, ptl);
 813	return 0;
 814}
 815EXPORT_SYMBOL(set_guest_storage_key);
 816
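/*
 * Illustrative sketch, not part of the original file: the key byte passed
 * to set_guest_storage_key() uses the storage-key layout, i.e. ACC in
 * _PAGE_ACC_BITS (0xf0), fetch protection in _PAGE_FP_BIT (0x08),
 * reference in _PAGE_REFERENCED (0x04) and change in _PAGE_CHANGED (0x02).
 * A hypothetical SSKE intercept handler passes the guest's key through
 * unchanged:
 */
static int __maybe_unused example_handle_sske(struct mm_struct *mm,
					      unsigned long hva,
					      unsigned char key, bool nq)
{
	int rc;

	mmap_read_lock(mm);
	rc = set_guest_storage_key(mm, hva, key, nq);
	mmap_read_unlock(mm);
	return rc;	/* -EFAULT: report an addressing fault to the guest */
}
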
 817/**
 818 * cond_set_guest_storage_key - conditionally set a guest storage key (csske).
 819 * oldkey will be updated when either mr or mc is set and a pointer is given.
 820 *
 821 * Returns 0 if an update of the guest's storage key wasn't necessary, 1 if
 822 * the guest storage key was updated and -EFAULT on access errors.
 823 */
 824int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 825			       unsigned char key, unsigned char *oldkey,
 826			       bool nq, bool mr, bool mc)
 827{
 828	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
 829	int rc;
 830
 831	/* we can drop the pgste lock between getting and setting the key */
 832	if (mr | mc) {
 833		rc = get_guest_storage_key(current->mm, addr, &tmp);
 834		if (rc)
 835			return rc;
 836		if (oldkey)
 837			*oldkey = tmp;
 838		if (!mr)
 839			mask |= _PAGE_REFERENCED;
 840		if (!mc)
 841			mask |= _PAGE_CHANGED;
 842		if (!((tmp ^ key) & mask))
 843			return 0;
 844	}
 845	rc = set_guest_storage_key(current->mm, addr, key, nq);
 846	return rc < 0 ? rc : 1;
 847}
 848EXPORT_SYMBOL(cond_set_guest_storage_key);
 849
 850/**
 851 * reset_guest_reference_bit - reset a guest reference bit (rrbe),
 852 * returning the guest's reference and changed bits.
 853 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 854 */
 855int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 856{
 857	spinlock_t *ptl;
 858	unsigned long paddr;
 859	pgste_t old, new;
 860	pmd_t *pmdp;
 861	pte_t *ptep;
 862	int cc = 0;
 863
 864	pmdp = pmd_alloc_map(mm, addr);
 865	if (unlikely(!pmdp))
 866		return -EFAULT;
 867
 868	ptl = pmd_lock(mm, pmdp);
 869	if (!pmd_present(*pmdp)) {
 870		spin_unlock(ptl);
 871		return -EFAULT;
 872	}
 873
 874	if (pmd_large(*pmdp)) {
 875		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 876		paddr |= addr & ~HPAGE_MASK;
 877		cc = page_reset_referenced(paddr);
 878		spin_unlock(ptl);
 879		return cc;
 880	}
 881	spin_unlock(ptl);
 882
 883	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 884	if (unlikely(!ptep))
 885		return -EFAULT;
 886
 887	new = old = pgste_get_lock(ptep);
 888	/* Reset guest reference bit only */
 889	pgste_val(new) &= ~PGSTE_GR_BIT;
 890
 891	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 892		paddr = pte_val(*ptep) & PAGE_MASK;
 893		cc = page_reset_referenced(paddr);
 894		/* Merge real referenced bit into host-set */
 895		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
 896	}
 897	/* Reflect guest's logical view, not physical */
 898	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
 899	/* Changing the guest storage key is considered a change of the page */
 900	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
 901		pgste_val(new) |= PGSTE_UC_BIT;
 902
 903	pgste_set_unlock(ptep, new);
 904	pte_unmap_unlock(ptep, ptl);
 905	return cc;
 906}
 907EXPORT_SYMBOL(reset_guest_reference_bit);
 908
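/*
 * Illustrative sketch, not part of the original file: a hypothetical RRBE
 * intercept handler. The returned cc encodes the guest's logical view,
 * bit 1 being the reference state and bit 0 the change state, so cc 0-3
 * can be stored into the guest PSW unmodified.
 */
static int __maybe_unused example_handle_rrbe(struct mm_struct *mm,
					      unsigned long hva)
{
	int cc;

	mmap_read_lock(mm);
	cc = reset_guest_reference_bit(mm, hva);
	mmap_read_unlock(mm);
	return cc;	/* < 0 on access error, otherwise the guest cc */
}
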
 909int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 910			  unsigned char *key)
 911{
 912	unsigned long paddr;
 913	spinlock_t *ptl;
 914	pgste_t pgste;
 915	pmd_t *pmdp;
 916	pte_t *ptep;
 917
 918	pmdp = pmd_alloc_map(mm, addr);
 919	if (unlikely(!pmdp))
 920		return -EFAULT;
 921
 922	ptl = pmd_lock(mm, pmdp);
 923	if (!pmd_present(*pmdp)) {
 924		/* Memory that is not yet mapped has a zero storage key */
 925		spin_unlock(ptl);
 926		*key = 0;
 927		return 0;
 928	}
 929
 930	if (pmd_large(*pmdp)) {
 931		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 932		paddr |= addr & ~HPAGE_MASK;
 933		*key = page_get_storage_key(paddr);
 934		spin_unlock(ptl);
 935		return 0;
 936	}
 937	spin_unlock(ptl);
 938
 939	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 940	if (unlikely(!ptep))
 941		return -EFAULT;
 942
 943	pgste = pgste_get_lock(ptep);
 944	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 945	paddr = pte_val(*ptep) & PAGE_MASK;
 946	if (!(pte_val(*ptep) & _PAGE_INVALID))
 947		*key = page_get_storage_key(paddr);
 948	/* Reflect guest's logical view, not physical */
 949	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
 950	pgste_set_unlock(ptep, pgste);
 951	pte_unmap_unlock(ptep, ptl);
 952	return 0;
 953}
 954EXPORT_SYMBOL(get_guest_storage_key);
 955
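/*
 * Illustrative sketch, not part of the original file: a hypothetical ISKE
 * intercept handler. For unmapped pages the key is synthesized from the
 * PGSTE backup bits, and the guest's logical R/C state is merged in
 * either way, so host-side key usage stays invisible to the guest.
 */
static int __maybe_unused example_handle_iske(struct mm_struct *mm,
					      unsigned long hva,
					      unsigned char *key)
{
	int rc;

	mmap_read_lock(mm);
	rc = get_guest_storage_key(mm, hva, key);
	mmap_read_unlock(mm);
	return rc;
}
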
 956/**
 957 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 958 * @mm: the memory context. It must have PGSTEs; no check is performed here!
 959 * @hva: the host virtual address of the page whose PGSTE is to be processed
 960 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 961 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 962 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 963 *
 964 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 965 *	   or < 0 in case of error. -EINVAL is returned for invalid values
 966 *	   of orc, -EFAULT for invalid addresses.
 967 */
 968int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
 969			unsigned long *oldpte, unsigned long *oldpgste)
 970{
 971	unsigned long pgstev;
 972	spinlock_t *ptl;
 973	pgste_t pgste;
 974	pte_t *ptep;
 975	int res = 0;
 976
 977	WARN_ON_ONCE(orc > ESSA_MAX);
 978	if (unlikely(orc > ESSA_MAX))
 979		return -EINVAL;
 980	ptep = get_locked_pte(mm, hva, &ptl);
 981	if (unlikely(!ptep))
 982		return -EFAULT;
 983	pgste = pgste_get_lock(ptep);
 984	pgstev = pgste_val(pgste);
 985	if (oldpte)
 986		*oldpte = pte_val(*ptep);
 987	if (oldpgste)
 988		*oldpgste = pgstev;
 989
 990	switch (orc) {
 991	case ESSA_GET_STATE:
 992		break;
 993	case ESSA_SET_STABLE:
 994		pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
 995		pgstev |= _PGSTE_GPS_USAGE_STABLE;
 996		break;
 997	case ESSA_SET_UNUSED:
 998		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
 999		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
1000		if (pte_val(*ptep) & _PAGE_INVALID)
1001			res = 1;
1002		break;
1003	case ESSA_SET_VOLATILE:
1004		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1005		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1006		if (pte_val(*ptep) & _PAGE_INVALID)
1007			res = 1;
1008		break;
1009	case ESSA_SET_POT_VOLATILE:
1010		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1011		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
1012			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
1013			break;
1014		}
1015		if (pgstev & _PGSTE_GPS_ZERO) {
1016			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1017			break;
1018		}
1019		if (!(pgstev & PGSTE_GC_BIT)) {
1020			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1021			res = 1;
1022			break;
1023		}
1024		break;
1025	case ESSA_SET_STABLE_RESIDENT:
1026		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1027		pgstev |= _PGSTE_GPS_USAGE_STABLE;
1028		/*
1029		 * Since the resident state can go away any time after this
1030		 * call, we will not make this page resident. We can revisit
1031		 * this decision if a guest will ever start using this.
1032		 */
1033		break;
1034	case ESSA_SET_STABLE_IF_RESIDENT:
1035		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
1036			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1037			pgstev |= _PGSTE_GPS_USAGE_STABLE;
1038		}
1039		break;
1040	case ESSA_SET_STABLE_NODAT:
1041		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1042		pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
1043		break;
1044	default:
1045		/* we should never get here! */
1046		break;
1047	}
1048	/* If we are discarding a page, set it to logical zero */
1049	if (res)
1050		pgstev |= _PGSTE_GPS_ZERO;
1051
1052	pgste_val(pgste) = pgstev;
1053	pgste_set_unlock(ptep, pgste);
1054	pte_unmap_unlock(ptep, ptl);
1055	return res;
1056}
1057EXPORT_SYMBOL(pgste_perform_essa);
1058
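/*
 * Illustrative sketch, not part of the original file: processing one page
 * of a guest's ESSA request. A return value of 1 means the page state was
 * discarded and its frame must be reported back to the guest, here by
 * appending it to a hypothetical CBRL buffer.
 */
static int __maybe_unused example_handle_essa(struct mm_struct *mm,
					      unsigned long hva, int orc,
					      unsigned long *cbrl,
					      unsigned int *entries)
{
	int res;

	mmap_read_lock(mm);
	res = pgste_perform_essa(mm, hva, orc, NULL, NULL);
	mmap_read_unlock(mm);
	if (res < 0)
		return res;	/* -EINVAL or -EFAULT */
	if (res == 1)
		cbrl[(*entries)++] = hva;
	return 0;
}
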
1059/**
1060 * set_pgste_bits - set specific PGSTE bits.
1061 * @mm: the memory context. It must have PGSTEs; no check is performed here!
1062 * @hva: the host virtual address of the page whose PGSTE is to be processed
1063 * @bits: a bitmask representing the bits that will be touched
1064 * @value: the values of the bits to be written. Only the bits in the mask
1065 *	   will be written.
1066 *
1067 * Return: 0 on success, < 0 in case of error.
1068 */
1069int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
1070			unsigned long bits, unsigned long value)
1071{
1072	spinlock_t *ptl;
1073	pgste_t new;
1074	pte_t *ptep;
1075
1076	ptep = get_locked_pte(mm, hva, &ptl);
1077	if (unlikely(!ptep))
1078		return -EFAULT;
1079	new = pgste_get_lock(ptep);
1080
1081	pgste_val(new) &= ~bits;
1082	pgste_val(new) |= value & bits;
1083
1084	pgste_set_unlock(ptep, new);
1085	pte_unmap_unlock(ptep, ptl);
1086	return 0;
1087}
1088EXPORT_SYMBOL(set_pgste_bits);
1089
1090/**
1091 * get_pgste - get the current PGSTE for the given address.
1092 * @mm: the memory context. It must have PGSTEs; no check is performed here!
1093 * @hva: the host virtual address of the page whose PGSTE is to be processed
1094 * @pgstep: will be written with the current PGSTE for the given address.
1095 *
1096 * Return: 0 on success, < 0 in case of error.
1097 */
1098int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
1099{
1100	spinlock_t *ptl;
1101	pte_t *ptep;
1102
1103	ptep = get_locked_pte(mm, hva, &ptl);
1104	if (unlikely(!ptep))
1105		return -EFAULT;
1106	*pgstep = pgste_val(pgste_get(ptep));
1107	pte_unmap_unlock(ptep, ptl);
1108	return 0;
1109}
1110EXPORT_SYMBOL(get_pgste);
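
/*
 * Illustrative sketch, not part of the original file: combining the two
 * accessors above to copy the CMMA usage state from one page's PGSTE to
 * another's. set_pgste_bits() only modifies the bits in the mask, so the
 * remaining PGSTE content is preserved. The function name is hypothetical.
 */
static int __maybe_unused example_copy_usage_state(struct mm_struct *mm,
						   unsigned long from_hva,
						   unsigned long to_hva)
{
	unsigned long pgstev;
	int rc;

	rc = get_pgste(mm, from_hva, &pgstev);
	if (rc)
		return rc;
	return set_pgste_bits(mm, to_hva, _PGSTE_GPS_USAGE_MASK,
			      pgstev & _PGSTE_GPS_USAGE_MASK);
}
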
1111#endif