v6.2
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *    Copyright IBM Corp. 2007, 2011
   4 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/kernel.h>
   9#include <linux/errno.h>
  10#include <linux/gfp.h>
  11#include <linux/mm.h>
  12#include <linux/swap.h>
  13#include <linux/smp.h>
  14#include <linux/spinlock.h>
  15#include <linux/rcupdate.h>
  16#include <linux/slab.h>
  17#include <linux/swapops.h>
  18#include <linux/sysctl.h>
  19#include <linux/ksm.h>
  20#include <linux/mman.h>
  21
  22#include <asm/tlb.h>
  23#include <asm/tlbflush.h>
  24#include <asm/mmu_context.h>
  25#include <asm/page-states.h>
  26
  27pgprot_t pgprot_writecombine(pgprot_t prot)
  28{
  29	/*
  30	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
  31	 * once at init and only read afterwards.
  32	 */
  33	return __pgprot(pgprot_val(prot) | mio_wb_bit_mask);
  34}
  35EXPORT_SYMBOL_GPL(pgprot_writecombine);
  36
  37pgprot_t pgprot_writethrough(pgprot_t prot)
  38{
  39	/*
  40	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
  41	 * once at init and only read afterwards.
  42	 */
  43	return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
  44}
  45EXPORT_SYMBOL_GPL(pgprot_writethrough);
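A caller outside this file would typically apply these helpers to a VMA's protection before establishing a mapping. A minimal, hypothetical sketch of an mmap handler requesting write-combining for an MMIO range (the pfn value is an assumption, not taken from this file):

/* Illustrative sketch, not part of pgtable.c: map MMIO with write-combining. */
static int demo_mmap_wc(struct file *file, struct vm_area_struct *vma)
{
	unsigned long pfn = 0x80000;	/* hypothetical device page frame */
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return remap_pfn_range(vma, vma->vm_start, pfn, size,
			       vma->vm_page_prot);
}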
  46
  47static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
  48				   pte_t *ptep, int nodat)
  49{
  50	unsigned long opt, asce;
  51
  52	if (MACHINE_HAS_TLB_GUEST) {
  53		opt = 0;
  54		asce = READ_ONCE(mm->context.gmap_asce);
  55		if (asce == 0UL || nodat)
  56			opt |= IPTE_NODAT;
  57		if (asce != -1UL) {
  58			asce = asce ? : mm->context.asce;
  59			opt |= IPTE_GUEST_ASCE;
  60		}
  61		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
  62	} else {
  63		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
  64	}
  65}
  66
  67static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
  68				    pte_t *ptep, int nodat)
  69{
  70	unsigned long opt, asce;
  71
  72	if (MACHINE_HAS_TLB_GUEST) {
  73		opt = 0;
  74		asce = READ_ONCE(mm->context.gmap_asce);
  75		if (asce == 0UL || nodat)
  76			opt |= IPTE_NODAT;
  77		if (asce != -1UL) {
  78			asce = asce ? : mm->context.asce;
  79			opt |= IPTE_GUEST_ASCE;
  80		}
  81		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
  82	} else {
  83		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
  84	}
  85}
  86
  87static inline pte_t ptep_flush_direct(struct mm_struct *mm,
  88				      unsigned long addr, pte_t *ptep,
  89				      int nodat)
  90{
  91	pte_t old;
  92
  93	old = *ptep;
  94	if (unlikely(pte_val(old) & _PAGE_INVALID))
  95		return old;
  96	atomic_inc(&mm->context.flush_count);
  97	if (MACHINE_HAS_TLB_LC &&
  98	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
  99		ptep_ipte_local(mm, addr, ptep, nodat);
 100	else
 101		ptep_ipte_global(mm, addr, ptep, nodat);
 102	atomic_dec(&mm->context.flush_count);
 103	return old;
 104}
 105
 106static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
 107				    unsigned long addr, pte_t *ptep,
 108				    int nodat)
 109{
 110	pte_t old;
 111
 112	old = *ptep;
 113	if (unlikely(pte_val(old) & _PAGE_INVALID))
 114		return old;
 115	atomic_inc(&mm->context.flush_count);
 116	if (cpumask_equal(&mm->context.cpu_attach_mask,
 117			  cpumask_of(smp_processor_id()))) {
 118		set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_INVALID)));
 119		mm->context.flush_mm = 1;
 120	} else
 121		ptep_ipte_global(mm, addr, ptep, nodat);
 122	atomic_dec(&mm->context.flush_count);
 123	return old;
 124}
 125
 126static inline pgste_t pgste_get_lock(pte_t *ptep)
 127{
 128	unsigned long new = 0;
 129#ifdef CONFIG_PGSTE
 130	unsigned long old;
 131
 132	asm(
 133		"	lg	%0,%2\n"
 134		"0:	lgr	%1,%0\n"
 135		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
 136		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
 137		"	csg	%0,%1,%2\n"
 138		"	jl	0b\n"
 139		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
 140		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
 141#endif
 142	return __pgste(new);
 143}
 144
 145static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
 146{
 147#ifdef CONFIG_PGSTE
 148	asm(
 149		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
 150		"	stg	%1,%0\n"
 151		: "=Q" (ptep[PTRS_PER_PTE])
 152		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
 153		: "cc", "memory");
 154#endif
 155}
 156
 157static inline pgste_t pgste_get(pte_t *ptep)
 158{
 159	unsigned long pgste = 0;
 160#ifdef CONFIG_PGSTE
 161	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
 162#endif
 163	return __pgste(pgste);
 164}
 165
 166static inline void pgste_set(pte_t *ptep, pgste_t pgste)
 167{
 168#ifdef CONFIG_PGSTE
 169	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
 170#endif
 171}
 172
 173static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
 174				       struct mm_struct *mm)
 175{
 176#ifdef CONFIG_PGSTE
 177	unsigned long address, bits, skey;
 178
 179	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
 180		return pgste;
 181	address = pte_val(pte) & PAGE_MASK;
 182	skey = (unsigned long) page_get_storage_key(address);
 183	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 184	/* Transfer page changed & referenced bit to guest bits in pgste */
 185	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
 186	/* Copy page access key and fetch protection bit to pgste */
 187	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 188	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 189#endif
 190	return pgste;
 191
 192}
 193
 194static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
 195				 struct mm_struct *mm)
 196{
 197#ifdef CONFIG_PGSTE
 198	unsigned long address;
 199	unsigned long nkey;
 200
 201	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
 202		return;
 203	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
 204	address = pte_val(entry) & PAGE_MASK;
 205	/*
 206	 * Set page access key and fetch protection bit from pgste.
 207	 * The guest C/R information is still in the PGSTE, set real
 208	 * key C/R to 0.
 209	 */
 210	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 211	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
 212	page_set_storage_key(address, nkey, 0);
 213#endif
 214}
 215
 216static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 217{
 218#ifdef CONFIG_PGSTE
 219	if ((pte_val(entry) & _PAGE_PRESENT) &&
 220	    (pte_val(entry) & _PAGE_WRITE) &&
 221	    !(pte_val(entry) & _PAGE_INVALID)) {
 222		if (!MACHINE_HAS_ESOP) {
 223			/*
 224			 * Without enhanced suppression-on-protection force
 225			 * the dirty bit on for all writable ptes.
 226			 */
 227			entry = set_pte_bit(entry, __pgprot(_PAGE_DIRTY));
 228			entry = clear_pte_bit(entry, __pgprot(_PAGE_PROTECT));
 229		}
 230		if (!(pte_val(entry) & _PAGE_PROTECT))
 231			/* This pte allows write access, set user-dirty */
 232			pgste_val(pgste) |= PGSTE_UC_BIT;
 233	}
 234#endif
 235	set_pte(ptep, entry);
 236	return pgste;
 237}
 238
 239static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
 240				       unsigned long addr,
 241				       pte_t *ptep, pgste_t pgste)
 242{
 243#ifdef CONFIG_PGSTE
 244	unsigned long bits;
 245
 246	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
 247	if (bits) {
 248		pgste_val(pgste) ^= bits;
 249		ptep_notify(mm, addr, ptep, bits);
 250	}
 251#endif
 252	return pgste;
 253}
 254
 255static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
 256				      unsigned long addr, pte_t *ptep)
 257{
 258	pgste_t pgste = __pgste(0);
 259
 260	if (mm_has_pgste(mm)) {
 261		pgste = pgste_get_lock(ptep);
 262		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
 263	}
 264	return pgste;
 265}
 266
 267static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
 268				    unsigned long addr, pte_t *ptep,
 269				    pgste_t pgste, pte_t old, pte_t new)
 270{
 271	if (mm_has_pgste(mm)) {
 272		if (pte_val(old) & _PAGE_INVALID)
 273			pgste_set_key(ptep, pgste, new, mm);
 274		if (pte_val(new) & _PAGE_INVALID) {
 275			pgste = pgste_update_all(old, pgste, mm);
 276			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
 277			    _PGSTE_GPS_USAGE_UNUSED)
 278				old = set_pte_bit(old, __pgprot(_PAGE_UNUSED));
 279		}
 280		pgste = pgste_set_pte(ptep, pgste, new);
 281		pgste_set_unlock(ptep, pgste);
 282	} else {
 283		set_pte(ptep, new);
 284	}
 285	return old;
 286}
 287
 288pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
 289		       pte_t *ptep, pte_t new)
 290{
 291	pgste_t pgste;
 292	pte_t old;
 293	int nodat;
 294
 295	preempt_disable();
 296	pgste = ptep_xchg_start(mm, addr, ptep);
 297	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 298	old = ptep_flush_direct(mm, addr, ptep, nodat);
 299	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 300	preempt_enable();
 301	return old;
 302}
 303EXPORT_SYMBOL(ptep_xchg_direct);
 304
 305pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 306		     pte_t *ptep, pte_t new)
 307{
 308	pgste_t pgste;
 309	pte_t old;
 310	int nodat;
 311
 312	preempt_disable();
 313	pgste = ptep_xchg_start(mm, addr, ptep);
 314	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 315	old = ptep_flush_lazy(mm, addr, ptep, nodat);
 316	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 317	preempt_enable();
 318	return old;
 319}
 320EXPORT_SYMBOL(ptep_xchg_lazy);
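Both exchange helpers return the previous PTE; the _direct variant flushes the TLB entry immediately, while the _lazy variant only marks the mm for a deferred flush when no other CPU is attached. A hedged sketch of invalidating a single PTE with an immediate flush (the caller is assumed to hold the page table lock and to have looked up ptep already):

/* Illustrative sketch only: ptep must point to a valid, locked PTE. */
static void demo_invalidate_pte(struct mm_struct *mm, unsigned long addr,
				pte_t *ptep)
{
	pte_t old;

	old = ptep_xchg_direct(mm, addr, ptep, __pte(_PAGE_INVALID));
	/* 'old' still carries the dirty/referenced state of the mapping. */
	(void) old;
}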
 321
 322pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
 323			     pte_t *ptep)
 324{
 325	pgste_t pgste;
 326	pte_t old;
 327	int nodat;
 328	struct mm_struct *mm = vma->vm_mm;
 329
 330	preempt_disable();
 331	pgste = ptep_xchg_start(mm, addr, ptep);
 332	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 333	old = ptep_flush_lazy(mm, addr, ptep, nodat);
 334	if (mm_has_pgste(mm)) {
 335		pgste = pgste_update_all(old, pgste, mm);
 336		pgste_set(ptep, pgste);
 337	}
 338	return old;
 339}
 340
 341void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 342			     pte_t *ptep, pte_t old_pte, pte_t pte)
 343{
 344	pgste_t pgste;
 345	struct mm_struct *mm = vma->vm_mm;
 346
 347	if (!MACHINE_HAS_NX)
 348		pte = clear_pte_bit(pte, __pgprot(_PAGE_NOEXEC));
 349	if (mm_has_pgste(mm)) {
 350		pgste = pgste_get(ptep);
 351		pgste_set_key(ptep, pgste, pte, mm);
 352		pgste = pgste_set_pte(ptep, pgste, pte);
 353		pgste_set_unlock(ptep, pgste);
 354	} else {
 355		set_pte(ptep, pte);
 356	}
 357	preempt_enable();
 358}
 359
 360static inline void pmdp_idte_local(struct mm_struct *mm,
 361				   unsigned long addr, pmd_t *pmdp)
 362{
 363	if (MACHINE_HAS_TLB_GUEST)
 364		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 365			    mm->context.asce, IDTE_LOCAL);
 366	else
 367		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
 368	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 369		gmap_pmdp_idte_local(mm, addr);
 370}
 371
 372static inline void pmdp_idte_global(struct mm_struct *mm,
 373				    unsigned long addr, pmd_t *pmdp)
 374{
 375	if (MACHINE_HAS_TLB_GUEST) {
 376		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 377			    mm->context.asce, IDTE_GLOBAL);
 378		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 379			gmap_pmdp_idte_global(mm, addr);
 380	} else if (MACHINE_HAS_IDTE) {
 381		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
 382		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 383			gmap_pmdp_idte_global(mm, addr);
 384	} else {
 385		__pmdp_csp(pmdp);
 386		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 387			gmap_pmdp_csp(mm, addr);
 388	}
 389}
 390
 391static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
 392				      unsigned long addr, pmd_t *pmdp)
 393{
 394	pmd_t old;
 395
 396	old = *pmdp;
 397	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
 398		return old;
 399	atomic_inc(&mm->context.flush_count);
 400	if (MACHINE_HAS_TLB_LC &&
 401	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 402		pmdp_idte_local(mm, addr, pmdp);
 403	else
 404		pmdp_idte_global(mm, addr, pmdp);
 405	atomic_dec(&mm->context.flush_count);
 406	return old;
 407}
 408
 409static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 410				    unsigned long addr, pmd_t *pmdp)
 411{
 412	pmd_t old;
 413
 414	old = *pmdp;
 415	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
 416		return old;
 417	atomic_inc(&mm->context.flush_count);
 418	if (cpumask_equal(&mm->context.cpu_attach_mask,
 419			  cpumask_of(smp_processor_id()))) {
 420		set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_INVALID)));
 421		mm->context.flush_mm = 1;
 422		if (mm_has_pgste(mm))
 423			gmap_pmdp_invalidate(mm, addr);
 424	} else {
 425		pmdp_idte_global(mm, addr, pmdp);
 426	}
 427	atomic_dec(&mm->context.flush_count);
 428	return old;
 429}
 430
 431#ifdef CONFIG_PGSTE
 432static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
 433{
 434	struct vm_area_struct *vma;
 435	pgd_t *pgd;
 436	p4d_t *p4d;
 437	pud_t *pud;
 438
 439	/* We need a valid VMA, otherwise this is clearly a fault. */
 440	vma = vma_lookup(mm, addr);
 441	if (!vma)
 442		return -EFAULT;
 443
 444	pgd = pgd_offset(mm, addr);
 445	if (!pgd_present(*pgd))
 446		return -ENOENT;
 447
 448	p4d = p4d_offset(pgd, addr);
 449	if (!p4d_present(*p4d))
 450		return -ENOENT;
 451
 452	pud = pud_offset(p4d, addr);
 453	if (!pud_present(*pud))
 454		return -ENOENT;
 455
 456	/* Large PUDs are not supported yet. */
 457	if (pud_large(*pud))
 458		return -EFAULT;
 459
 460	*pmdp = pmd_offset(pud, addr);
 461	return 0;
 462}
 463#endif
 464
 465pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 466		       pmd_t *pmdp, pmd_t new)
 467{
 468	pmd_t old;
 469
 470	preempt_disable();
 471	old = pmdp_flush_direct(mm, addr, pmdp);
 472	set_pmd(pmdp, new);
 473	preempt_enable();
 474	return old;
 475}
 476EXPORT_SYMBOL(pmdp_xchg_direct);
 477
 478pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 479		     pmd_t *pmdp, pmd_t new)
 480{
 481	pmd_t old;
 482
 483	preempt_disable();
 484	old = pmdp_flush_lazy(mm, addr, pmdp);
 485	set_pmd(pmdp, new);
 486	preempt_enable();
 487	return old;
 488}
 489EXPORT_SYMBOL(pmdp_xchg_lazy);
 490
 491static inline void pudp_idte_local(struct mm_struct *mm,
 492				   unsigned long addr, pud_t *pudp)
 493{
 494	if (MACHINE_HAS_TLB_GUEST)
 495		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
 496			    mm->context.asce, IDTE_LOCAL);
 497	else
 498		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
 499}
 500
 501static inline void pudp_idte_global(struct mm_struct *mm,
 502				    unsigned long addr, pud_t *pudp)
 503{
 504	if (MACHINE_HAS_TLB_GUEST)
 505		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
 506			    mm->context.asce, IDTE_GLOBAL);
 507	else if (MACHINE_HAS_IDTE)
 508		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
 509	else
 510		/*
 511		 * Invalid bit position is the same for pmd and pud, so we can
  512		 * re-use __pmdp_csp() here
 513		 */
 514		__pmdp_csp((pmd_t *) pudp);
 515}
 516
 517static inline pud_t pudp_flush_direct(struct mm_struct *mm,
 518				      unsigned long addr, pud_t *pudp)
 519{
 520	pud_t old;
 521
 522	old = *pudp;
 523	if (pud_val(old) & _REGION_ENTRY_INVALID)
 524		return old;
 525	atomic_inc(&mm->context.flush_count);
 526	if (MACHINE_HAS_TLB_LC &&
 527	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 528		pudp_idte_local(mm, addr, pudp);
 529	else
 530		pudp_idte_global(mm, addr, pudp);
 531	atomic_dec(&mm->context.flush_count);
 532	return old;
 533}
 534
 535pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 536		       pud_t *pudp, pud_t new)
 537{
 538	pud_t old;
 539
 540	preempt_disable();
 541	old = pudp_flush_direct(mm, addr, pudp);
 542	set_pud(pudp, new);
 543	preempt_enable();
 544	return old;
 545}
 546EXPORT_SYMBOL(pudp_xchg_direct);
 547
 548#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 549void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 550				pgtable_t pgtable)
 551{
 552	struct list_head *lh = (struct list_head *) pgtable;
 553
 554	assert_spin_locked(pmd_lockptr(mm, pmdp));
 555
 556	/* FIFO */
 557	if (!pmd_huge_pte(mm, pmdp))
 558		INIT_LIST_HEAD(lh);
 559	else
 560		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
 561	pmd_huge_pte(mm, pmdp) = pgtable;
 562}
 563
 564pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 565{
 566	struct list_head *lh;
 567	pgtable_t pgtable;
 568	pte_t *ptep;
 569
 570	assert_spin_locked(pmd_lockptr(mm, pmdp));
 571
 572	/* FIFO */
 573	pgtable = pmd_huge_pte(mm, pmdp);
 574	lh = (struct list_head *) pgtable;
 575	if (list_empty(lh))
 576		pmd_huge_pte(mm, pmdp) = NULL;
 577	else {
 578		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
 579		list_del(lh);
 580	}
 581	ptep = (pte_t *) pgtable;
 582	set_pte(ptep, __pte(_PAGE_INVALID));
 583	ptep++;
 584	set_pte(ptep, __pte(_PAGE_INVALID));
 585	return pgtable;
 586}
 587#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 588
 589#ifdef CONFIG_PGSTE
 590void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
 591		     pte_t *ptep, pte_t entry)
 592{
 593	pgste_t pgste;
 594
 595	/* the mm_has_pgste() check is done in set_pte_at() */
 596	preempt_disable();
 597	pgste = pgste_get_lock(ptep);
 598	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
 599	pgste_set_key(ptep, pgste, entry, mm);
 600	pgste = pgste_set_pte(ptep, pgste, entry);
 601	pgste_set_unlock(ptep, pgste);
 602	preempt_enable();
 603}
 604
 605void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 606{
 607	pgste_t pgste;
 608
 609	preempt_disable();
 610	pgste = pgste_get_lock(ptep);
 611	pgste_val(pgste) |= PGSTE_IN_BIT;
 612	pgste_set_unlock(ptep, pgste);
 613	preempt_enable();
 614}
 615
 616/**
 617 * ptep_force_prot - change access rights of a locked pte
 618 * @mm: pointer to the process mm_struct
 619 * @addr: virtual address in the guest address space
 620 * @ptep: pointer to the page table entry
 621 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 622 * @bit: pgste bit to set (e.g. for notification)
 623 *
 624 * Returns 0 if the access rights were changed and -EAGAIN if the current
 625 * and requested access rights are incompatible.
 626 */
 627int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
 628		    pte_t *ptep, int prot, unsigned long bit)
 629{
 630	pte_t entry;
 631	pgste_t pgste;
 632	int pte_i, pte_p, nodat;
 633
 634	pgste = pgste_get_lock(ptep);
 635	entry = *ptep;
 636	/* Check pte entry after all locks have been acquired */
 637	pte_i = pte_val(entry) & _PAGE_INVALID;
 638	pte_p = pte_val(entry) & _PAGE_PROTECT;
 639	if ((pte_i && (prot != PROT_NONE)) ||
 640	    (pte_p && (prot & PROT_WRITE))) {
 641		pgste_set_unlock(ptep, pgste);
 642		return -EAGAIN;
 643	}
 644	/* Change access rights and set pgste bit */
 645	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 646	if (prot == PROT_NONE && !pte_i) {
 647		ptep_flush_direct(mm, addr, ptep, nodat);
 648		pgste = pgste_update_all(entry, pgste, mm);
 649		entry = set_pte_bit(entry, __pgprot(_PAGE_INVALID));
 650	}
 651	if (prot == PROT_READ && !pte_p) {
 652		ptep_flush_direct(mm, addr, ptep, nodat);
 653		entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID));
 654		entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT));
 655	}
 656	pgste_val(pgste) |= bit;
 657	pgste = pgste_set_pte(ptep, pgste, entry);
 658	pgste_set_unlock(ptep, pgste);
 659	return 0;
 660}
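A hedged sketch of how a gmap-style caller might use ptep_force_prot() to make a guest page read-only and request an invalidation notification; the PTE lookup and locking are assumed to have been done by the caller:

/* Illustrative sketch only: ptep is assumed to be mapped and locked. */
static int demo_protect_and_notify(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep)
{
	int rc;

	rc = ptep_force_prot(mm, addr, ptep, PROT_READ, PGSTE_IN_BIT);
	if (rc == -EAGAIN)
		return rc;	/* current rights incompatible, caller retries */
	return 0;
}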
 661
 662int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
 663		    pte_t *sptep, pte_t *tptep, pte_t pte)
 664{
 665	pgste_t spgste, tpgste;
 666	pte_t spte, tpte;
 667	int rc = -EAGAIN;
 668
 669	if (!(pte_val(*tptep) & _PAGE_INVALID))
 670		return 0;	/* already shadowed */
 671	spgste = pgste_get_lock(sptep);
 672	spte = *sptep;
 673	if (!(pte_val(spte) & _PAGE_INVALID) &&
 674	    !((pte_val(spte) & _PAGE_PROTECT) &&
 675	      !(pte_val(pte) & _PAGE_PROTECT))) {
 676		pgste_val(spgste) |= PGSTE_VSIE_BIT;
 677		tpgste = pgste_get_lock(tptep);
 678		tpte = __pte((pte_val(spte) & PAGE_MASK) |
 679			     (pte_val(pte) & _PAGE_PROTECT));
 680		/* don't touch the storage key - it belongs to parent pgste */
 681		tpgste = pgste_set_pte(tptep, tpgste, tpte);
 682		pgste_set_unlock(tptep, tpgste);
 683		rc = 1;
 684	}
 685	pgste_set_unlock(sptep, spgste);
 686	return rc;
 687}
 688
 689void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
 690{
 691	pgste_t pgste;
 692	int nodat;
 693
 694	pgste = pgste_get_lock(ptep);
 695	/* notifier is called by the caller */
 696	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 697	ptep_flush_direct(mm, saddr, ptep, nodat);
 698	/* don't touch the storage key - it belongs to parent pgste */
 699	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
 700	pgste_set_unlock(ptep, pgste);
 701}
 702
 703static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
 704{
 705	if (!non_swap_entry(entry))
 706		dec_mm_counter(mm, MM_SWAPENTS);
 707	else if (is_migration_entry(entry)) {
 708		struct page *page = pfn_swap_entry_to_page(entry);
 709
 710		dec_mm_counter(mm, mm_counter(page));
 711	}
 712	free_swap_and_cache(entry);
 713}
 714
 715void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 716		     pte_t *ptep, int reset)
 717{
 718	unsigned long pgstev;
 719	pgste_t pgste;
 720	pte_t pte;
 721
 722	/* Zap unused and logically-zero pages */
 723	preempt_disable();
 724	pgste = pgste_get_lock(ptep);
 725	pgstev = pgste_val(pgste);
 726	pte = *ptep;
 727	if (!reset && pte_swap(pte) &&
 728	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
 729	     (pgstev & _PGSTE_GPS_ZERO))) {
 730		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
 731		pte_clear(mm, addr, ptep);
 732	}
 733	if (reset)
 734		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
 735	pgste_set_unlock(ptep, pgste);
 736	preempt_enable();
 737}
 738
 739void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 740{
 741	unsigned long ptev;
 742	pgste_t pgste;
 743
 744	/* Clear storage key ACC and F, but set R/C */
 745	preempt_disable();
 746	pgste = pgste_get_lock(ptep);
 747	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 748	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
 749	ptev = pte_val(*ptep);
 750	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
 751		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
 752	pgste_set_unlock(ptep, pgste);
 753	preempt_enable();
 754}
 755
 756/*
 757 * Test and reset if a guest page is dirty
 758 */
 759bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
 760		       pte_t *ptep)
 761{
 762	pgste_t pgste;
 763	pte_t pte;
 764	bool dirty;
 765	int nodat;
 766
 767	pgste = pgste_get_lock(ptep);
 768	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
 769	pgste_val(pgste) &= ~PGSTE_UC_BIT;
 770	pte = *ptep;
 771	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
 772		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
 773		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 774		ptep_ipte_global(mm, addr, ptep, nodat);
 775		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
 776			pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
 777		else
 778			pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID));
 779		set_pte(ptep, pte);
 780	}
 781	pgste_set_unlock(ptep, pgste);
 782	return dirty;
 783}
 784EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
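KVM's dirty logging would harvest the UC (user-dirty) bit one guest page at a time. A hedged sketch for a single host virtual address (hva and the surrounding loop are assumptions):

/* Illustrative sketch only. */
static bool demo_page_was_dirtied(struct mm_struct *mm, unsigned long hva)
{
	spinlock_t *ptl;
	pte_t *ptep;
	bool dirty;

	ptep = get_locked_pte(mm, hva, &ptl);
	if (!ptep)
		return false;
	dirty = ptep_test_and_clear_uc(mm, hva, ptep);
	pte_unmap_unlock(ptep, ptl);
	return dirty;
}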
 785
 786int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 787			  unsigned char key, bool nq)
 788{
 789	unsigned long keyul, paddr;
 790	spinlock_t *ptl;
 791	pgste_t old, new;
 792	pmd_t *pmdp;
 793	pte_t *ptep;
 794
 795	/*
 796	 * If we don't have a PTE table and if there is no huge page mapped,
 797	 * we can ignore attempts to set the key to 0, because it already is 0.
 798	 */
 799	switch (pmd_lookup(mm, addr, &pmdp)) {
 800	case -ENOENT:
 801		return key ? -EFAULT : 0;
 802	case 0:
 803		break;
 804	default:
 805		return -EFAULT;
 806	}
 807
 808	ptl = pmd_lock(mm, pmdp);
 809	if (!pmd_present(*pmdp)) {
 810		spin_unlock(ptl);
 811		return key ? -EFAULT : 0;
 812	}
 813
 814	if (pmd_large(*pmdp)) {
 815		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 816		paddr |= addr & ~HPAGE_MASK;
 817		/*
 818		 * Huge pmds need quiescing operations, they are
 819		 * always mapped.
 820		 */
 821		page_set_storage_key(paddr, key, 1);
 822		spin_unlock(ptl);
 823		return 0;
 824	}
 825	spin_unlock(ptl);
 826
 827	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 828	new = old = pgste_get_lock(ptep);
 829	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
 830			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
 831	keyul = (unsigned long) key;
 832	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
 833	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 834	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 835		unsigned long bits, skey;
 836
 837		paddr = pte_val(*ptep) & PAGE_MASK;
 838		skey = (unsigned long) page_get_storage_key(paddr);
 839		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 840		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
 841		/* Set storage key ACC and FP */
 842		page_set_storage_key(paddr, skey, !nq);
 843		/* Merge host changed & referenced into pgste  */
 844		pgste_val(new) |= bits << 52;
 845	}
 846	/* changing the guest storage key is considered a change of the page */
 847	if ((pgste_val(new) ^ pgste_val(old)) &
 848	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
 849		pgste_val(new) |= PGSTE_UC_BIT;
 850
 851	pgste_set_unlock(ptep, new);
 852	pte_unmap_unlock(ptep, ptl);
 853	return 0;
 854}
 855EXPORT_SYMBOL(set_guest_storage_key);
 856
 857/*
 858 * Conditionally set a guest storage key (handling csske).
 859 * oldkey will be updated when either mr or mc is set and a pointer is given.
 860 *
 861 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 862 * storage key was updated and -EFAULT on access errors.
 863 */
 864int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 865			       unsigned char key, unsigned char *oldkey,
 866			       bool nq, bool mr, bool mc)
 867{
 868	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
 869	int rc;
 870
 871	/* we can drop the pgste lock between getting and setting the key */
 872	if (mr | mc) {
 873		rc = get_guest_storage_key(current->mm, addr, &tmp);
 874		if (rc)
 875			return rc;
 876		if (oldkey)
 877			*oldkey = tmp;
 878		if (!mr)
 879			mask |= _PAGE_REFERENCED;
 880		if (!mc)
 881			mask |= _PAGE_CHANGED;
 882		if (!((tmp ^ key) & mask))
 883			return 0;
 884	}
 885	rc = set_guest_storage_key(current->mm, addr, key, nq);
 886	return rc < 0 ? rc : 1;
 887}
 888EXPORT_SYMBOL(cond_set_guest_storage_key);
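An SSKE-style intercept handler would feed the guest's key byte and the MR/MC/NQ condition bits straight into this helper. A hedged sketch (how hva and the flags are decoded from the intercept is an assumption):

/* Illustrative sketch only: hva, key and the flags are hypothetical inputs. */
static int demo_handle_sske(unsigned long hva, unsigned char key,
			    bool nq, bool mr, bool mc)
{
	unsigned char oldkey;
	int rc;

	rc = cond_set_guest_storage_key(current->mm, hva, key, &oldkey,
					nq, mr, mc);
	if (rc < 0)
		return rc;	/* -EFAULT: nothing mapped at hva */
	/* rc == 1: key was written; rc == 0: old key already matched. */
	return 0;
}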
 889
 890/*
 891 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 892 *
 893 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 894 */
 895int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 896{
 897	spinlock_t *ptl;
 898	unsigned long paddr;
 899	pgste_t old, new;
 900	pmd_t *pmdp;
 901	pte_t *ptep;
 902	int cc = 0;
 903
 904	/*
 905	 * If we don't have a PTE table and if there is no huge page mapped,
 906	 * the storage key is 0 and there is nothing for us to do.
 907	 */
 908	switch (pmd_lookup(mm, addr, &pmdp)) {
 909	case -ENOENT:
 910		return 0;
 911	case 0:
 912		break;
 913	default:
 914		return -EFAULT;
 915	}
 916
 917	ptl = pmd_lock(mm, pmdp);
 918	if (!pmd_present(*pmdp)) {
 919		spin_unlock(ptl);
 920		return 0;
 921	}
 922
 923	if (pmd_large(*pmdp)) {
 924		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 925		paddr |= addr & ~HPAGE_MASK;
 926		cc = page_reset_referenced(paddr);
 927		spin_unlock(ptl);
 928		return cc;
 929	}
 930	spin_unlock(ptl);
 931
 932	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 933	new = old = pgste_get_lock(ptep);
 934	/* Reset guest reference bit only */
 935	pgste_val(new) &= ~PGSTE_GR_BIT;
 936
 937	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 938		paddr = pte_val(*ptep) & PAGE_MASK;
 939		cc = page_reset_referenced(paddr);
 940		/* Merge real referenced bit into host-set */
 941		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
 942	}
 943	/* Reflect guest's logical view, not physical */
 944	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
 945	/* Changing the guest storage key is considered a change of the page */
 946	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
 947		pgste_val(new) |= PGSTE_UC_BIT;
 948
 949	pgste_set_unlock(ptep, new);
 950	pte_unmap_unlock(ptep, ptl);
 951	return cc;
 952}
 953EXPORT_SYMBOL(reset_guest_reference_bit);
 954
 955int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 956			  unsigned char *key)
 957{
 958	unsigned long paddr;
 959	spinlock_t *ptl;
 960	pgste_t pgste;
 961	pmd_t *pmdp;
 962	pte_t *ptep;
 963
 964	/*
 965	 * If we don't have a PTE table and if there is no huge page mapped,
 966	 * the storage key is 0.
 967	 */
 968	*key = 0;
 969
 970	switch (pmd_lookup(mm, addr, &pmdp)) {
 971	case -ENOENT:
 972		return 0;
 973	case 0:
 974		break;
 975	default:
 976		return -EFAULT;
 977	}
 978
 979	ptl = pmd_lock(mm, pmdp);
 980	if (!pmd_present(*pmdp)) {
 981		spin_unlock(ptl);
 982		return 0;
 983	}
 984
 985	if (pmd_large(*pmdp)) {
 986		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 987		paddr |= addr & ~HPAGE_MASK;
 988		*key = page_get_storage_key(paddr);
 989		spin_unlock(ptl);
 990		return 0;
 991	}
 992	spin_unlock(ptl);
 993
 994	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 995	pgste = pgste_get_lock(ptep);
 996	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 997	paddr = pte_val(*ptep) & PAGE_MASK;
 998	if (!(pte_val(*ptep) & _PAGE_INVALID))
 999		*key = page_get_storage_key(paddr);
1000	/* Reflect guest's logical view, not physical */
1001	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
1002	pgste_set_unlock(ptep, pgste);
1003	pte_unmap_unlock(ptep, ptl);
1004	return 0;
1005}
1006EXPORT_SYMBOL(get_guest_storage_key);
1007
1008/**
1009 * pgste_perform_essa - perform ESSA actions on the PGSTE.
1010 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1011 * @hva: the host virtual address of the page whose PGSTE is to be processed
1012 * @orc: the specific action to perform, see the ESSA_SET_* macros.
1013 * @oldpte: the PTE will be saved there if the pointer is not NULL.
1014 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
1015 *
1016 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
1017 *	   or < 0 in case of error. -EINVAL is returned for invalid values
1018 *	   of orc, -EFAULT for invalid addresses.
1019 */
1020int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
1021			unsigned long *oldpte, unsigned long *oldpgste)
1022{
1023	struct vm_area_struct *vma;
1024	unsigned long pgstev;
1025	spinlock_t *ptl;
1026	pgste_t pgste;
1027	pte_t *ptep;
1028	int res = 0;
1029
1030	WARN_ON_ONCE(orc > ESSA_MAX);
1031	if (unlikely(orc > ESSA_MAX))
1032		return -EINVAL;
1033
1034	vma = vma_lookup(mm, hva);
1035	if (!vma || is_vm_hugetlb_page(vma))
1036		return -EFAULT;
1037	ptep = get_locked_pte(mm, hva, &ptl);
1038	if (unlikely(!ptep))
1039		return -EFAULT;
1040	pgste = pgste_get_lock(ptep);
1041	pgstev = pgste_val(pgste);
1042	if (oldpte)
1043		*oldpte = pte_val(*ptep);
1044	if (oldpgste)
1045		*oldpgste = pgstev;
1046
1047	switch (orc) {
1048	case ESSA_GET_STATE:
1049		break;
1050	case ESSA_SET_STABLE:
1051		pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
1052		pgstev |= _PGSTE_GPS_USAGE_STABLE;
1053		break;
1054	case ESSA_SET_UNUSED:
1055		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1056		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
1057		if (pte_val(*ptep) & _PAGE_INVALID)
1058			res = 1;
1059		break;
1060	case ESSA_SET_VOLATILE:
1061		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1062		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1063		if (pte_val(*ptep) & _PAGE_INVALID)
1064			res = 1;
1065		break;
1066	case ESSA_SET_POT_VOLATILE:
1067		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1068		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
1069			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
1070			break;
1071		}
1072		if (pgstev & _PGSTE_GPS_ZERO) {
1073			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1074			break;
1075		}
1076		if (!(pgstev & PGSTE_GC_BIT)) {
1077			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1078			res = 1;
1079			break;
1080		}
1081		break;
1082	case ESSA_SET_STABLE_RESIDENT:
1083		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1084		pgstev |= _PGSTE_GPS_USAGE_STABLE;
1085		/*
1086		 * Since the resident state can go away any time after this
1087		 * call, we will not make this page resident. We can revisit
1088		 * this decision if a guest will ever start using this.
1089		 */
1090		break;
1091	case ESSA_SET_STABLE_IF_RESIDENT:
1092		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
1093			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1094			pgstev |= _PGSTE_GPS_USAGE_STABLE;
1095		}
1096		break;
1097	case ESSA_SET_STABLE_NODAT:
1098		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1099		pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
1100		break;
1101	default:
1102		/* we should never get here! */
1103		break;
1104	}
1105	/* If we are discarding a page, set it to logical zero */
1106	if (res)
1107		pgstev |= _PGSTE_GPS_ZERO;
1108
1109	pgste_val(pgste) = pgstev;
1110	pgste_set_unlock(ptep, pgste);
1111	pte_unmap_unlock(ptep, ptl);
1112	return res;
1113}
1114EXPORT_SYMBOL(pgste_perform_essa);
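A KVM ESSA intercept handler would call this once per guest page and collect the addresses that the return value flags for the CBRL. A hedged sketch (the cbrl array and its bookkeeping are assumptions):

/* Illustrative sketch only: caller is assumed to hold the mmap lock for read. */
static int demo_essa_one_page(struct mm_struct *mm, unsigned long hva, int orc,
			      unsigned long *cbrl, int *nr_cbrl)
{
	int res;

	res = pgste_perform_essa(mm, hva, orc, NULL, NULL);
	if (res < 0)
		return res;		/* -EINVAL or -EFAULT */
	if (res == 1)			/* page should go into the CBRL */
		cbrl[(*nr_cbrl)++] = hva;
	return 0;
}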
1115
1116/**
1117 * set_pgste_bits - set specific PGSTE bits.
1118 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1119 * @hva: the host virtual address of the page whose PGSTE is to be processed
1120 * @bits: a bitmask representing the bits that will be touched
1121 * @value: the values of the bits to be written. Only the bits in the mask
1122 *	   will be written.
1123 *
1124 * Return: 0 on success, < 0 in case of error.
1125 */
1126int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
1127			unsigned long bits, unsigned long value)
1128{
1129	struct vm_area_struct *vma;
1130	spinlock_t *ptl;
1131	pgste_t new;
1132	pte_t *ptep;
1133
1134	vma = vma_lookup(mm, hva);
1135	if (!vma || is_vm_hugetlb_page(vma))
1136		return -EFAULT;
1137	ptep = get_locked_pte(mm, hva, &ptl);
1138	if (unlikely(!ptep))
1139		return -EFAULT;
1140	new = pgste_get_lock(ptep);
1141
1142	pgste_val(new) &= ~bits;
1143	pgste_val(new) |= value & bits;
1144
1145	pgste_set_unlock(ptep, new);
1146	pte_unmap_unlock(ptep, ptl);
1147	return 0;
1148}
1149EXPORT_SYMBOL(set_pgste_bits);
1150
1151/**
1152 * get_pgste - get the current PGSTE for the given address.
1153 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1154 * @hva: the host virtual address of the page whose PGSTE is to be processed
1155 * @pgstep: will be written with the current PGSTE for the given address.
1156 *
1157 * Return: 0 on success, < 0 in case of error.
1158 */
1159int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
1160{
1161	struct vm_area_struct *vma;
1162	spinlock_t *ptl;
1163	pte_t *ptep;
1164
1165	vma = vma_lookup(mm, hva);
1166	if (!vma || is_vm_hugetlb_page(vma))
1167		return -EFAULT;
1168	ptep = get_locked_pte(mm, hva, &ptl);
1169	if (unlikely(!ptep))
1170		return -EFAULT;
1171	*pgstep = pgste_val(pgste_get(ptep));
1172	pte_unmap_unlock(ptep, ptl);
1173	return 0;
1174}
1175EXPORT_SYMBOL(get_pgste);
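Together, get_pgste() and set_pgste_bits() give the KVM ioctl path a read-modify-write view of one page's PGSTE. A hedged sketch that forces the usage state to STABLE for a single address:

/* Illustrative sketch only: mm is assumed to have PGSTEs. */
static int demo_mark_page_stable(struct mm_struct *mm, unsigned long hva)
{
	unsigned long pgstev;
	int rc;

	rc = get_pgste(mm, hva, &pgstev);
	if (rc)
		return rc;
	if ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_STABLE)
		return 0;
	return set_pgste_bits(mm, hva, _PGSTE_GPS_USAGE_MASK,
			      _PGSTE_GPS_USAGE_STABLE);
}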
1176#endif
v3.5.6
  1/*
  2 *    Copyright IBM Corp. 2007,2011
  3 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  4 */
  5
  6#include <linux/sched.h>
  7#include <linux/kernel.h>
  8#include <linux/errno.h>
  9#include <linux/gfp.h>
 10#include <linux/mm.h>
 11#include <linux/swap.h>
 12#include <linux/smp.h>
 13#include <linux/highmem.h>
 14#include <linux/pagemap.h>
 15#include <linux/spinlock.h>
 16#include <linux/module.h>
 17#include <linux/quicklist.h>
 18#include <linux/rcupdate.h>
 19#include <linux/slab.h>
 20
 21#include <asm/pgtable.h>
 22#include <asm/pgalloc.h>
 23#include <asm/tlb.h>
 24#include <asm/tlbflush.h>
 25#include <asm/mmu_context.h>
 26
 27#ifndef CONFIG_64BIT
 28#define ALLOC_ORDER	1
 29#define FRAG_MASK	0x0f
 30#else
 31#define ALLOC_ORDER	2
 32#define FRAG_MASK	0x03
 33#endif
 34
 35
 36unsigned long *crst_table_alloc(struct mm_struct *mm)
 37{
 38	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
 39
 40	if (!page)
 41		return NULL;
 42	return (unsigned long *) page_to_phys(page);
 43}
 44
 45void crst_table_free(struct mm_struct *mm, unsigned long *table)
 46{
 47	free_pages((unsigned long) table, ALLOC_ORDER);
 48}
 49
 50#ifdef CONFIG_64BIT
 51int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 52{
 53	unsigned long *table, *pgd;
 54	unsigned long entry;
 55
 56	BUG_ON(limit > (1UL << 53));
 57repeat:
 58	table = crst_table_alloc(mm);
 59	if (!table)
 60		return -ENOMEM;
 61	spin_lock_bh(&mm->page_table_lock);
 62	if (mm->context.asce_limit < limit) {
 63		pgd = (unsigned long *) mm->pgd;
 64		if (mm->context.asce_limit <= (1UL << 31)) {
 65			entry = _REGION3_ENTRY_EMPTY;
 66			mm->context.asce_limit = 1UL << 42;
 67			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
 68						_ASCE_USER_BITS |
 69						_ASCE_TYPE_REGION3;
 70		} else {
 71			entry = _REGION2_ENTRY_EMPTY;
 72			mm->context.asce_limit = 1UL << 53;
 73			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
 74						_ASCE_USER_BITS |
 75						_ASCE_TYPE_REGION2;
 76		}
 77		crst_table_init(table, entry);
 78		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
 79		mm->pgd = (pgd_t *) table;
 80		mm->task_size = mm->context.asce_limit;
 81		table = NULL;
 82	}
 83	spin_unlock_bh(&mm->page_table_lock);
 84	if (table)
 85		crst_table_free(mm, table);
 86	if (mm->context.asce_limit < limit)
 87		goto repeat;
 88	return 0;
 89}
 90
 91void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 92{
 93	pgd_t *pgd;
 94
 95	while (mm->context.asce_limit > limit) {
 96		pgd = mm->pgd;
 97		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
 98		case _REGION_ENTRY_TYPE_R2:
 99			mm->context.asce_limit = 1UL << 42;
100			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
101						_ASCE_USER_BITS |
102						_ASCE_TYPE_REGION3;
103			break;
104		case _REGION_ENTRY_TYPE_R3:
105			mm->context.asce_limit = 1UL << 31;
106			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
107						_ASCE_USER_BITS |
108						_ASCE_TYPE_SEGMENT;
109			break;
110		default:
111			BUG();
112		}
113		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
114		mm->task_size = mm->context.asce_limit;
115		crst_table_free(mm, (unsigned long *) pgd);
116	}
117}
118#endif
119
120#ifdef CONFIG_PGSTE
121
122/**
123 * gmap_alloc - allocate a guest address space
124 * @mm: pointer to the parent mm_struct
125 *
126 * Returns a guest address space structure.
127 */
128struct gmap *gmap_alloc(struct mm_struct *mm)
129{
130	struct gmap *gmap;
131	struct page *page;
132	unsigned long *table;
133
134	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
135	if (!gmap)
136		goto out;
137	INIT_LIST_HEAD(&gmap->crst_list);
138	gmap->mm = mm;
139	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
140	if (!page)
141		goto out_free;
142	list_add(&page->lru, &gmap->crst_list);
143	table = (unsigned long *) page_to_phys(page);
144	crst_table_init(table, _REGION1_ENTRY_EMPTY);
145	gmap->table = table;
146	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
147		     _ASCE_USER_BITS | __pa(table);
148	list_add(&gmap->list, &mm->context.gmap_list);
149	return gmap;
150
151out_free:
152	kfree(gmap);
153out:
154	return NULL;
155}
156EXPORT_SYMBOL_GPL(gmap_alloc);
157
158static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
159{
160	struct gmap_pgtable *mp;
161	struct gmap_rmap *rmap;
162	struct page *page;
163
164	if (*table & _SEGMENT_ENTRY_INV)
165		return 0;
166	page = pfn_to_page(*table >> PAGE_SHIFT);
167	mp = (struct gmap_pgtable *) page->index;
168	list_for_each_entry(rmap, &mp->mapper, list) {
169		if (rmap->entry != table)
170			continue;
171		list_del(&rmap->list);
172		kfree(rmap);
173		break;
174	}
175	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
176	return 1;
177}
178
179static void gmap_flush_tlb(struct gmap *gmap)
180{
181	if (MACHINE_HAS_IDTE)
182		__tlb_flush_idte((unsigned long) gmap->table |
183				 _ASCE_TYPE_REGION1);
184	else
185		__tlb_flush_global();
186}
187
188/**
189 * gmap_free - free a guest address space
190 * @gmap: pointer to the guest address space structure
191 */
192void gmap_free(struct gmap *gmap)
193{
194	struct page *page, *next;
195	unsigned long *table;
196	int i;
197
198
199	/* Flush tlb. */
200	if (MACHINE_HAS_IDTE)
201		__tlb_flush_idte((unsigned long) gmap->table |
202				 _ASCE_TYPE_REGION1);
203	else
204		__tlb_flush_global();
205
206	/* Free all segment & region tables. */
207	down_read(&gmap->mm->mmap_sem);
208	spin_lock(&gmap->mm->page_table_lock);
209	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
210		table = (unsigned long *) page_to_phys(page);
211		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
212			/* Remove gmap rmap structures for segment table. */
213			for (i = 0; i < PTRS_PER_PMD; i++, table++)
214				gmap_unlink_segment(gmap, table);
215		__free_pages(page, ALLOC_ORDER);
216	}
217	spin_unlock(&gmap->mm->page_table_lock);
218	up_read(&gmap->mm->mmap_sem);
219	list_del(&gmap->list);
220	kfree(gmap);
221}
222EXPORT_SYMBOL_GPL(gmap_free);
223
224/**
225 * gmap_enable - switch primary space to the guest address space
226 * @gmap: pointer to the guest address space structure
227 */
228void gmap_enable(struct gmap *gmap)
229{
230	S390_lowcore.gmap = (unsigned long) gmap;
231}
232EXPORT_SYMBOL_GPL(gmap_enable);
233
234/**
235 * gmap_disable - switch back to the standard primary address space
236 * @gmap: pointer to the guest address space structure
237 */
238void gmap_disable(struct gmap *gmap)
239{
240	S390_lowcore.gmap = 0UL;
241}
242EXPORT_SYMBOL_GPL(gmap_disable);
243
244/*
245 * gmap_alloc_table is assumed to be called with mmap_sem held
246 */
247static int gmap_alloc_table(struct gmap *gmap,
248			       unsigned long *table, unsigned long init)
249{
250	struct page *page;
251	unsigned long *new;
252
 253	/* since we don't free the gmap table until gmap_free we can unlock */
254	spin_unlock(&gmap->mm->page_table_lock);
255	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
256	spin_lock(&gmap->mm->page_table_lock);
257	if (!page)
258		return -ENOMEM;
259	new = (unsigned long *) page_to_phys(page);
260	crst_table_init(new, init);
261	if (*table & _REGION_ENTRY_INV) {
262		list_add(&page->lru, &gmap->crst_list);
263		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
264			(*table & _REGION_ENTRY_TYPE_MASK);
265	} else
266		__free_pages(page, ALLOC_ORDER);
267	return 0;
268}
269
270/**
271 * gmap_unmap_segment - unmap segment from the guest address space
272 * @gmap: pointer to the guest address space structure
273 * @addr: address in the guest address space
274 * @len: length of the memory area to unmap
275 *
 276 * Returns 0 if the unmap succeeded, -EINVAL if not.
277 */
278int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
279{
280	unsigned long *table;
281	unsigned long off;
282	int flush;
283
284	if ((to | len) & (PMD_SIZE - 1))
285		return -EINVAL;
286	if (len == 0 || to + len < to)
287		return -EINVAL;
288
289	flush = 0;
290	down_read(&gmap->mm->mmap_sem);
291	spin_lock(&gmap->mm->page_table_lock);
292	for (off = 0; off < len; off += PMD_SIZE) {
293		/* Walk the guest addr space page table */
294		table = gmap->table + (((to + off) >> 53) & 0x7ff);
295		if (*table & _REGION_ENTRY_INV)
296			goto out;
297		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
298		table = table + (((to + off) >> 42) & 0x7ff);
299		if (*table & _REGION_ENTRY_INV)
300			goto out;
301		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
302		table = table + (((to + off) >> 31) & 0x7ff);
303		if (*table & _REGION_ENTRY_INV)
304			goto out;
305		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
306		table = table + (((to + off) >> 20) & 0x7ff);
307
308		/* Clear segment table entry in guest address space. */
309		flush |= gmap_unlink_segment(gmap, table);
310		*table = _SEGMENT_ENTRY_INV;
311	}
312out:
313	spin_unlock(&gmap->mm->page_table_lock);
314	up_read(&gmap->mm->mmap_sem);
315	if (flush)
316		gmap_flush_tlb(gmap);
317	return 0;
318}
319EXPORT_SYMBOL_GPL(gmap_unmap_segment);
320
321/**
 322 * gmap_map_segment - map a segment to the guest address space
323 * @gmap: pointer to the guest address space structure
324 * @from: source address in the parent address space
325 * @to: target address in the guest address space
326 *
 327 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
328 */
329int gmap_map_segment(struct gmap *gmap, unsigned long from,
330		     unsigned long to, unsigned long len)
331{
332	unsigned long *table;
333	unsigned long off;
334	int flush;
335
336	if ((from | to | len) & (PMD_SIZE - 1))
337		return -EINVAL;
338	if (len == 0 || from + len > PGDIR_SIZE ||
339	    from + len < from || to + len < to)
340		return -EINVAL;
341
342	flush = 0;
343	down_read(&gmap->mm->mmap_sem);
344	spin_lock(&gmap->mm->page_table_lock);
345	for (off = 0; off < len; off += PMD_SIZE) {
346		/* Walk the gmap address space page table */
347		table = gmap->table + (((to + off) >> 53) & 0x7ff);
348		if ((*table & _REGION_ENTRY_INV) &&
349		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
350			goto out_unmap;
351		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
352		table = table + (((to + off) >> 42) & 0x7ff);
353		if ((*table & _REGION_ENTRY_INV) &&
354		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
355			goto out_unmap;
356		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
357		table = table + (((to + off) >> 31) & 0x7ff);
358		if ((*table & _REGION_ENTRY_INV) &&
359		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
360			goto out_unmap;
361		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
362		table = table + (((to + off) >> 20) & 0x7ff);
363
364		/* Store 'from' address in an invalid segment table entry. */
365		flush |= gmap_unlink_segment(gmap, table);
366		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
367	}
368	spin_unlock(&gmap->mm->page_table_lock);
369	up_read(&gmap->mm->mmap_sem);
370	if (flush)
371		gmap_flush_tlb(gmap);
372	return 0;
373
374out_unmap:
375	spin_unlock(&gmap->mm->page_table_lock);
376	up_read(&gmap->mm->mmap_sem);
377	gmap_unmap_segment(gmap, to, len);
378	return -ENOMEM;
379}
380EXPORT_SYMBOL_GPL(gmap_map_segment);
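In this kernel version KVM creates one gmap per virtual machine and populates it segment by segment. A hedged sketch of the setup path (addresses and sizes are hypothetical and must be 1 MB segment aligned):

/* Illustrative sketch only. */
static struct gmap *demo_create_guest_space(struct mm_struct *mm,
					    unsigned long userspace_addr,
					    unsigned long guest_size)
{
	struct gmap *gmap;

	gmap = gmap_alloc(mm);
	if (!gmap)
		return NULL;
	/* Back guest real address 0 with the userspace memory region. */
	if (gmap_map_segment(gmap, userspace_addr, 0, guest_size)) {
		gmap_free(gmap);
		return NULL;
	}
	return gmap;
}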
381
382/*
383 * this function is assumed to be called with mmap_sem held
384 */
385unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
386{
387	unsigned long *table, vmaddr, segment;
388	struct mm_struct *mm;
389	struct gmap_pgtable *mp;
390	struct gmap_rmap *rmap;
391	struct vm_area_struct *vma;
392	struct page *page;
393	pgd_t *pgd;
394	pud_t *pud;
395	pmd_t *pmd;
396
397	current->thread.gmap_addr = address;
398	mm = gmap->mm;
399	/* Walk the gmap address space page table */
400	table = gmap->table + ((address >> 53) & 0x7ff);
401	if (unlikely(*table & _REGION_ENTRY_INV))
402		return -EFAULT;
403	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
404	table = table + ((address >> 42) & 0x7ff);
405	if (unlikely(*table & _REGION_ENTRY_INV))
406		return -EFAULT;
407	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
408	table = table + ((address >> 31) & 0x7ff);
409	if (unlikely(*table & _REGION_ENTRY_INV))
410		return -EFAULT;
411	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
412	table = table + ((address >> 20) & 0x7ff);
413
414	/* Convert the gmap address to an mm address. */
415	segment = *table;
416	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
417		page = pfn_to_page(segment >> PAGE_SHIFT);
418		mp = (struct gmap_pgtable *) page->index;
419		return mp->vmaddr | (address & ~PMD_MASK);
420	} else if (segment & _SEGMENT_ENTRY_RO) {
421		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
422		vma = find_vma(mm, vmaddr);
423		if (!vma || vma->vm_start > vmaddr)
424			return -EFAULT;
425
426		/* Walk the parent mm page table */
427		pgd = pgd_offset(mm, vmaddr);
428		pud = pud_alloc(mm, pgd, vmaddr);
429		if (!pud)
430			return -ENOMEM;
431		pmd = pmd_alloc(mm, pud, vmaddr);
432		if (!pmd)
433			return -ENOMEM;
434		if (!pmd_present(*pmd) &&
435		    __pte_alloc(mm, vma, pmd, vmaddr))
436			return -ENOMEM;
437		/* pmd now points to a valid segment table entry. */
438		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
439		if (!rmap)
440			return -ENOMEM;
441		/* Link gmap segment table entry location to page table. */
442		page = pmd_page(*pmd);
443		mp = (struct gmap_pgtable *) page->index;
444		rmap->entry = table;
445		spin_lock(&mm->page_table_lock);
446		list_add(&rmap->list, &mp->mapper);
447		spin_unlock(&mm->page_table_lock);
448		/* Set gmap segment table entry to page table. */
449		*table = pmd_val(*pmd) & PAGE_MASK;
450		return vmaddr | (address & ~PMD_MASK);
451	}
452	return -EFAULT;
453}
454
455unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
456{
457	unsigned long rc;
458
459	down_read(&gmap->mm->mmap_sem);
460	rc = __gmap_fault(address, gmap);
461	up_read(&gmap->mm->mmap_sem);
462
463	return rc;
464}
465EXPORT_SYMBOL_GPL(gmap_fault);
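On a guest translation exception, the host would resolve the faulting guest address through the gmap and then fix up the parent mapping if needed. A hedged sketch of the caller side:

/* Illustrative sketch only. */
static int demo_resolve_guest_fault(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = gmap_fault(gaddr, gmap);
	if (IS_ERR_VALUE(vmaddr))
		return (int) vmaddr;	/* -EFAULT or -ENOMEM */
	/* vmaddr is the host virtual address backing the guest page. */
	return 0;
}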
466
467void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
468{
469
470	unsigned long *table, address, size;
471	struct vm_area_struct *vma;
472	struct gmap_pgtable *mp;
473	struct page *page;
474
475	down_read(&gmap->mm->mmap_sem);
476	address = from;
477	while (address < to) {
478		/* Walk the gmap address space page table */
479		table = gmap->table + ((address >> 53) & 0x7ff);
480		if (unlikely(*table & _REGION_ENTRY_INV)) {
481			address = (address + PMD_SIZE) & PMD_MASK;
482			continue;
483		}
484		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
485		table = table + ((address >> 42) & 0x7ff);
486		if (unlikely(*table & _REGION_ENTRY_INV)) {
487			address = (address + PMD_SIZE) & PMD_MASK;
488			continue;
489		}
490		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
491		table = table + ((address >> 31) & 0x7ff);
492		if (unlikely(*table & _REGION_ENTRY_INV)) {
493			address = (address + PMD_SIZE) & PMD_MASK;
494			continue;
495		}
496		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
497		table = table + ((address >> 20) & 0x7ff);
498		if (unlikely(*table & _SEGMENT_ENTRY_INV)) {
499			address = (address + PMD_SIZE) & PMD_MASK;
500			continue;
501		}
502		page = pfn_to_page(*table >> PAGE_SHIFT);
503		mp = (struct gmap_pgtable *) page->index;
504		vma = find_vma(gmap->mm, mp->vmaddr);
505		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
506		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
507			       size, NULL);
508		address = (address + PMD_SIZE) & PMD_MASK;
509	}
510	up_read(&gmap->mm->mmap_sem);
511}
512EXPORT_SYMBOL_GPL(gmap_discard);
513
514void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
515{
516	struct gmap_rmap *rmap, *next;
517	struct gmap_pgtable *mp;
518	struct page *page;
519	int flush;
520
521	flush = 0;
522	spin_lock(&mm->page_table_lock);
523	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
524	mp = (struct gmap_pgtable *) page->index;
525	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
526		*rmap->entry =
527			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
528		list_del(&rmap->list);
529		kfree(rmap);
530		flush = 1;
531	}
532	spin_unlock(&mm->page_table_lock);
533	if (flush)
534		__tlb_flush_global();
535}
536
537static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
538						    unsigned long vmaddr)
539{
540	struct page *page;
541	unsigned long *table;
542	struct gmap_pgtable *mp;
543
544	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
545	if (!page)
546		return NULL;
547	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
548	if (!mp) {
549		__free_page(page);
550		return NULL;
551	}
552	pgtable_page_ctor(page);
553	mp->vmaddr = vmaddr & PMD_MASK;
554	INIT_LIST_HEAD(&mp->mapper);
555	page->index = (unsigned long) mp;
556	atomic_set(&page->_mapcount, 3);
557	table = (unsigned long *) page_to_phys(page);
558	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
559	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
560	return table;
561}
562
563static inline void page_table_free_pgste(unsigned long *table)
564{
565	struct page *page;
566	struct gmap_pgtable *mp;
567
568	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
569	mp = (struct gmap_pgtable *) page->index;
570	BUG_ON(!list_empty(&mp->mapper));
571	pgtable_page_dtor(page);
572	atomic_set(&page->_mapcount, -1);
573	kfree(mp);
574	__free_page(page);
575}
576
577#else /* CONFIG_PGSTE */
578
579static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
580						    unsigned long vmaddr)
581{
582	return NULL;
583}
584
585static inline void page_table_free_pgste(unsigned long *table)
586{
587}
588
589static inline void gmap_unmap_notifier(struct mm_struct *mm,
590					  unsigned long *table)
591{
592}
593
594#endif /* CONFIG_PGSTE */
595
596static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
597{
598	unsigned int old, new;
599
600	do {
601		old = atomic_read(v);
602		new = old ^ bits;
603	} while (atomic_cmpxchg(v, old, new) != old);
604	return new;
605}
606
607/*
608 * page table entry allocation/free routines.
609 */
610unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
611{
612	struct page *page;
613	unsigned long *table;
614	unsigned int mask, bit;
615
616	if (mm_has_pgste(mm))
617		return page_table_alloc_pgste(mm, vmaddr);
618	/* Allocate fragments of a 4K page as 1K/2K page table */
619	spin_lock_bh(&mm->context.list_lock);
620	mask = FRAG_MASK;
621	if (!list_empty(&mm->context.pgtable_list)) {
622		page = list_first_entry(&mm->context.pgtable_list,
623					struct page, lru);
624		table = (unsigned long *) page_to_phys(page);
625		mask = atomic_read(&page->_mapcount);
626		mask = mask | (mask >> 4);
627	}
628	if ((mask & FRAG_MASK) == FRAG_MASK) {
629		spin_unlock_bh(&mm->context.list_lock);
630		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
631		if (!page)
632			return NULL;
633		pgtable_page_ctor(page);
634		atomic_set(&page->_mapcount, 1);
635		table = (unsigned long *) page_to_phys(page);
636		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
637		spin_lock_bh(&mm->context.list_lock);
638		list_add(&page->lru, &mm->context.pgtable_list);
639	} else {
640		for (bit = 1; mask & bit; bit <<= 1)
641			table += PTRS_PER_PTE;
642		mask = atomic_xor_bits(&page->_mapcount, bit);
643		if ((mask & FRAG_MASK) == FRAG_MASK)
644			list_del(&page->lru);
645	}
646	spin_unlock_bh(&mm->context.list_lock);
647	return table;
648}
649
650void page_table_free(struct mm_struct *mm, unsigned long *table)
651{
652	struct page *page;
653	unsigned int bit, mask;
654
655	if (mm_has_pgste(mm)) {
656		gmap_unmap_notifier(mm, table);
657		return page_table_free_pgste(table);
658	}
659	/* Free 1K/2K page table fragment of a 4K page */
660	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
661	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
662	spin_lock_bh(&mm->context.list_lock);
663	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
664		list_del(&page->lru);
665	mask = atomic_xor_bits(&page->_mapcount, bit);
666	if (mask & FRAG_MASK)
667		list_add(&page->lru, &mm->context.pgtable_list);
668	spin_unlock_bh(&mm->context.list_lock);
669	if (mask == 0) {
670		pgtable_page_dtor(page);
671		atomic_set(&page->_mapcount, -1);
672		__free_page(page);
673	}
674}
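page_table_alloc() hands out 1K/2K fragments of a 4K page (or a full page with shadow PGSTEs for KVM mms), and page_table_free() returns the fragment to the per-mm list. A hedged sketch of the pairing as a pte_alloc/pte_free backend would use it:

/* Illustrative sketch only. */
static int demo_pte_table_roundtrip(struct mm_struct *mm, unsigned long vmaddr)
{
	unsigned long *table;

	table = page_table_alloc(mm, vmaddr);
	if (!table)
		return -ENOMEM;
	/* ... the table would normally be linked into a pmd entry here ... */
	page_table_free(mm, table);
	return 0;
}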
675
676static void __page_table_free_rcu(void *table, unsigned bit)
677{
678	struct page *page;
679
680	if (bit == FRAG_MASK)
681		return page_table_free_pgste(table);
682	/* Free 1K/2K page table fragment of a 4K page */
683	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
684	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
685		pgtable_page_dtor(page);
686		atomic_set(&page->_mapcount, -1);
687		__free_page(page);
688	}
689}
690
691void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
692{
693	struct mm_struct *mm;
694	struct page *page;
695	unsigned int bit, mask;
696
697	mm = tlb->mm;
698	if (mm_has_pgste(mm)) {
699		gmap_unmap_notifier(mm, table);
700		table = (unsigned long *) (__pa(table) | FRAG_MASK);
701		tlb_remove_table(tlb, table);
702		return;
703	}
704	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
705	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
706	spin_lock_bh(&mm->context.list_lock);
707	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
708		list_del(&page->lru);
709	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
710	if (mask & FRAG_MASK)
711		list_add_tail(&page->lru, &mm->context.pgtable_list);
712	spin_unlock_bh(&mm->context.list_lock);
713	table = (unsigned long *) (__pa(table) | (bit << 4));
714	tlb_remove_table(tlb, table);
715}
716
717void __tlb_remove_table(void *_table)
718{
719	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
720	void *table = (void *)((unsigned long) _table & ~mask);
721	unsigned type = (unsigned long) _table & mask;
722
723	if (type)
724		__page_table_free_rcu(table, type);
725	else
726		free_pages((unsigned long) table, ALLOC_ORDER);
727}
728
729static void tlb_remove_table_smp_sync(void *arg)
730{
731	/* Simply deliver the interrupt */
732}
733
734static void tlb_remove_table_one(void *table)
735{
736	/*
737	 * This isn't an RCU grace period and hence the page-tables cannot be
738	 * assumed to be actually RCU-freed.
739	 *
740	 * It is however sufficient for software page-table walkers that rely
741	 * on IRQ disabling. See the comment near struct mmu_table_batch.
742	 */
743	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
744	__tlb_remove_table(table);
745}
746
747static void tlb_remove_table_rcu(struct rcu_head *head)
748{
749	struct mmu_table_batch *batch;
750	int i;
751
752	batch = container_of(head, struct mmu_table_batch, rcu);
753
754	for (i = 0; i < batch->nr; i++)
755		__tlb_remove_table(batch->tables[i]);
756
757	free_page((unsigned long)batch);
758}
759
760void tlb_table_flush(struct mmu_gather *tlb)
761{
762	struct mmu_table_batch **batch = &tlb->batch;
763
764	if (*batch) {
765		__tlb_flush_mm(tlb->mm);
766		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
767		*batch = NULL;
768	}
769}
770
771void tlb_remove_table(struct mmu_gather *tlb, void *table)
772{
773	struct mmu_table_batch **batch = &tlb->batch;
774
775	if (*batch == NULL) {
776		*batch = (struct mmu_table_batch *)
777			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
778		if (*batch == NULL) {
779			__tlb_flush_mm(tlb->mm);
780			tlb_remove_table_one(table);
781			return;
782		}
783		(*batch)->nr = 0;
784	}
785	(*batch)->tables[(*batch)->nr++] = table;
786	if ((*batch)->nr == MAX_TABLE_BATCH)
787		tlb_table_flush(tlb);
788}
789
790/*
791 * switch on pgstes for its userspace process (for kvm)
792 */
793int s390_enable_sie(void)
794{
795	struct task_struct *tsk = current;
796	struct mm_struct *mm, *old_mm;
797
 798	/* Has the addressing mode been switched? If not, we cannot do SIE */
799	if (user_mode == HOME_SPACE_MODE)
800		return -EINVAL;
801
802	/* Do we have pgstes? if yes, we are done */
803	if (mm_has_pgste(tsk->mm))
804		return 0;
805
806	/* lets check if we are allowed to replace the mm */
807	task_lock(tsk);
808	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
809#ifdef CONFIG_AIO
810	    !hlist_empty(&tsk->mm->ioctx_list) ||
811#endif
812	    tsk->mm != tsk->active_mm) {
813		task_unlock(tsk);
814		return -EINVAL;
815	}
816	task_unlock(tsk);
817
 818	/* we copy the mm and let dup_mm create the page tables with pgstes */
819	tsk->mm->context.alloc_pgste = 1;
820	/* make sure that both mms have a correct rss state */
821	sync_mm_rss(tsk->mm);
822	mm = dup_mm(tsk);
823	tsk->mm->context.alloc_pgste = 0;
824	if (!mm)
825		return -ENOMEM;
826
827	/* Now lets check again if something happened */
828	task_lock(tsk);
829	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
830#ifdef CONFIG_AIO
831	    !hlist_empty(&tsk->mm->ioctx_list) ||
832#endif
833	    tsk->mm != tsk->active_mm) {
834		mmput(mm);
835		task_unlock(tsk);
836		return -EINVAL;
837	}
838
839	/* ok, we are alone. No ptrace, no threads, etc. */
840	old_mm = tsk->mm;
841	tsk->mm = tsk->active_mm = mm;
842	preempt_disable();
843	update_mm(mm, tsk);
844	atomic_inc(&mm->context.attach_count);
845	atomic_dec(&old_mm->context.attach_count);
846	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
847	preempt_enable();
848	task_unlock(tsk);
849	mmput(old_mm);
850	return 0;
851}
852EXPORT_SYMBOL_GPL(s390_enable_sie);
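KVM would call this once, early during VM creation and before any additional threads exist, so that the whole address space is rebuilt with PGSTEs. A hedged sketch:

/* Illustrative sketch only. */
static int demo_prepare_mm_for_kvm(void)
{
	int rc;

	rc = s390_enable_sie();	/* swaps current->mm for a PGSTE-enabled copy */
	if (rc)
		return rc;	/* -EINVAL: extra threads, AIO, or wrong amode */
	return 0;
}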
853
854#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
855bool kernel_page_present(struct page *page)
856{
857	unsigned long addr;
858	int cc;
859
860	addr = page_to_phys(page);
861	asm volatile(
862		"	lra	%1,0(%1)\n"
863		"	ipm	%0\n"
864		"	srl	%0,28"
865		: "=d" (cc), "+a" (addr) : : "cc");
866	return cc == 0;
867}
868#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */