v5.9
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *    Copyright IBM Corp. 2007, 2011
   4 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/kernel.h>
   9#include <linux/errno.h>
  10#include <linux/gfp.h>
  11#include <linux/mm.h>
  12#include <linux/swap.h>
  13#include <linux/smp.h>
  14#include <linux/spinlock.h>
  15#include <linux/rcupdate.h>
  16#include <linux/slab.h>
  17#include <linux/swapops.h>
  18#include <linux/sysctl.h>
  19#include <linux/ksm.h>
  20#include <linux/mman.h>
  21
  22#include <asm/tlb.h>
  23#include <asm/tlbflush.h>
  24#include <asm/mmu_context.h>
  25#include <asm/page-states.h>
  26
  27static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
  28				   pte_t *ptep, int nodat)
  29{
  30	unsigned long opt, asce;
  31
  32	if (MACHINE_HAS_TLB_GUEST) {
  33		opt = 0;
  34		asce = READ_ONCE(mm->context.gmap_asce);
  35		if (asce == 0UL || nodat)
  36			opt |= IPTE_NODAT;
  37		if (asce != -1UL) {
  38			asce = asce ? : mm->context.asce;
  39			opt |= IPTE_GUEST_ASCE;
  40		}
  41		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
  42	} else {
  43		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
  44	}
  45}
  46
  47static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
  48				    pte_t *ptep, int nodat)
  49{
  50	unsigned long opt, asce;
  51
  52	if (MACHINE_HAS_TLB_GUEST) {
  53		opt = 0;
  54		asce = READ_ONCE(mm->context.gmap_asce);
  55		if (asce == 0UL || nodat)
  56			opt |= IPTE_NODAT;
  57		if (asce != -1UL) {
  58			asce = asce ? : mm->context.asce;
  59			opt |= IPTE_GUEST_ASCE;
  60		}
  61		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
  62	} else {
  63		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
  64	}
  65}
  66
  67static inline pte_t ptep_flush_direct(struct mm_struct *mm,
  68				      unsigned long addr, pte_t *ptep,
  69				      int nodat)
  70{
  71	pte_t old;
  72
  73	old = *ptep;
  74	if (unlikely(pte_val(old) & _PAGE_INVALID))
  75		return old;
  76	atomic_inc(&mm->context.flush_count);
  77	if (MACHINE_HAS_TLB_LC &&
  78	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
  79		ptep_ipte_local(mm, addr, ptep, nodat);
  80	else
  81		ptep_ipte_global(mm, addr, ptep, nodat);
  82	atomic_dec(&mm->context.flush_count);
  83	return old;
  84}
  85
  86static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
  87				    unsigned long addr, pte_t *ptep,
  88				    int nodat)
  89{
  90	pte_t old;
  91
  92	old = *ptep;
  93	if (unlikely(pte_val(old) & _PAGE_INVALID))
  94		return old;
  95	atomic_inc(&mm->context.flush_count);
  96	if (cpumask_equal(&mm->context.cpu_attach_mask,
  97			  cpumask_of(smp_processor_id()))) {
  98		pte_val(*ptep) |= _PAGE_INVALID;
  99		mm->context.flush_mm = 1;
 100	} else
 101		ptep_ipte_global(mm, addr, ptep, nodat);
 102	atomic_dec(&mm->context.flush_count);
 103	return old;
 104}
 105
 106static inline pgste_t pgste_get_lock(pte_t *ptep)
 107{
 108	unsigned long new = 0;
 109#ifdef CONFIG_PGSTE
 110	unsigned long old;
 111
 112	asm(
 113		"	lg	%0,%2\n"
 114		"0:	lgr	%1,%0\n"
 115		"	nihh	%0,0xff7f\n"	/* clear PCL bit in old */
 116		"	oihh	%1,0x0080\n"	/* set PCL bit in new */
 117		"	csg	%0,%1,%2\n"
 118		"	jl	0b\n"
 119		: "=&d" (old), "=&d" (new), "=Q" (ptep[PTRS_PER_PTE])
 120		: "Q" (ptep[PTRS_PER_PTE]) : "cc", "memory");
 121#endif
 122	return __pgste(new);
 123}
 124
 125static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
 126{
 127#ifdef CONFIG_PGSTE
 128	asm(
 129		"	nihh	%1,0xff7f\n"	/* clear PCL bit */
 130		"	stg	%1,%0\n"
 131		: "=Q" (ptep[PTRS_PER_PTE])
 132		: "d" (pgste_val(pgste)), "Q" (ptep[PTRS_PER_PTE])
 133		: "cc", "memory");
 134#endif
 135}
 136
 137static inline pgste_t pgste_get(pte_t *ptep)
 138{
 139	unsigned long pgste = 0;
 140#ifdef CONFIG_PGSTE
 141	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
 142#endif
 143	return __pgste(pgste);
 144}
 145
 146static inline void pgste_set(pte_t *ptep, pgste_t pgste)
 147{
 148#ifdef CONFIG_PGSTE
 149	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
 150#endif
 151}
 152
 153static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
 154				       struct mm_struct *mm)
 155{
 156#ifdef CONFIG_PGSTE
 157	unsigned long address, bits, skey;
 158
 159	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
 160		return pgste;
 161	address = pte_val(pte) & PAGE_MASK;
 162	skey = (unsigned long) page_get_storage_key(address);
 163	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 164	/* Transfer page changed & referenced bit to guest bits in pgste */
 165	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
 166	/* Copy page access key and fetch protection bit to pgste */
 167	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 168	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 169#endif
 170	return pgste;
 171
 172}
 173
 174static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
 175				 struct mm_struct *mm)
 176{
 177#ifdef CONFIG_PGSTE
 178	unsigned long address;
 179	unsigned long nkey;
 180
 181	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
 182		return;
 183	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
 184	address = pte_val(entry) & PAGE_MASK;
 185	/*
 186	 * Set page access key and fetch protection bit from pgste.
 187	 * The guest C/R information is still in the PGSTE, set real
 188	 * key C/R to 0.
 189	 */
 190	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 191	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
 192	page_set_storage_key(address, nkey, 0);
 193#endif
 194}
 195
 196static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 197{
 198#ifdef CONFIG_PGSTE
 199	if ((pte_val(entry) & _PAGE_PRESENT) &&
 200	    (pte_val(entry) & _PAGE_WRITE) &&
 201	    !(pte_val(entry) & _PAGE_INVALID)) {
 202		if (!MACHINE_HAS_ESOP) {
 203			/*
 204			 * Without enhanced suppression-on-protection force
 205			 * the dirty bit on for all writable ptes.
 206			 */
 207			pte_val(entry) |= _PAGE_DIRTY;
 208			pte_val(entry) &= ~_PAGE_PROTECT;
 209		}
 210		if (!(pte_val(entry) & _PAGE_PROTECT))
 211			/* This pte allows write access, set user-dirty */
 212			pgste_val(pgste) |= PGSTE_UC_BIT;
 213	}
 214#endif
 215	*ptep = entry;
 216	return pgste;
 217}
 218
 219static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
 220				       unsigned long addr,
 221				       pte_t *ptep, pgste_t pgste)
 222{
 223#ifdef CONFIG_PGSTE
 224	unsigned long bits;
 225
 226	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
 227	if (bits) {
 228		pgste_val(pgste) ^= bits;
 229		ptep_notify(mm, addr, ptep, bits);
 230	}
 231#endif
 232	return pgste;
 233}
 234
 235static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
 236				      unsigned long addr, pte_t *ptep)
 237{
 238	pgste_t pgste = __pgste(0);
 239
 240	if (mm_has_pgste(mm)) {
 241		pgste = pgste_get_lock(ptep);
 242		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
 243	}
 244	return pgste;
 245}
 246
 247static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
 248				    unsigned long addr, pte_t *ptep,
 249				    pgste_t pgste, pte_t old, pte_t new)
 250{
 251	if (mm_has_pgste(mm)) {
 252		if (pte_val(old) & _PAGE_INVALID)
 253			pgste_set_key(ptep, pgste, new, mm);
 254		if (pte_val(new) & _PAGE_INVALID) {
 255			pgste = pgste_update_all(old, pgste, mm);
 256			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
 257			    _PGSTE_GPS_USAGE_UNUSED)
 258				pte_val(old) |= _PAGE_UNUSED;
 259		}
 260		pgste = pgste_set_pte(ptep, pgste, new);
 261		pgste_set_unlock(ptep, pgste);
 262	} else {
 263		*ptep = new;
 264	}
 265	return old;
 266}
 267
 268pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
 269		       pte_t *ptep, pte_t new)
 270{
 271	pgste_t pgste;
 272	pte_t old;
 273	int nodat;
 274
 275	preempt_disable();
 276	pgste = ptep_xchg_start(mm, addr, ptep);
 277	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 278	old = ptep_flush_direct(mm, addr, ptep, nodat);
 279	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 280	preempt_enable();
 281	return old;
 282}
 283EXPORT_SYMBOL(ptep_xchg_direct);
 284
 285pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 286		     pte_t *ptep, pte_t new)
 287{
 288	pgste_t pgste;
 289	pte_t old;
 290	int nodat;
 291
 292	preempt_disable();
 293	pgste = ptep_xchg_start(mm, addr, ptep);
 294	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 295	old = ptep_flush_lazy(mm, addr, ptep, nodat);
 296	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 297	preempt_enable();
 298	return old;
 299}
 300EXPORT_SYMBOL(ptep_xchg_lazy);
 301
 302pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
 303			     pte_t *ptep)
 304{
 305	pgste_t pgste;
 306	pte_t old;
 307	int nodat;
 308	struct mm_struct *mm = vma->vm_mm;
 309
 310	preempt_disable();
 311	pgste = ptep_xchg_start(mm, addr, ptep);
 312	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 313	old = ptep_flush_lazy(mm, addr, ptep, nodat);
 314	if (mm_has_pgste(mm)) {
 315		pgste = pgste_update_all(old, pgste, mm);
 316		pgste_set(ptep, pgste);
 317	}
 318	return old;
 319}
 320
 321void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 322			     pte_t *ptep, pte_t old_pte, pte_t pte)
 323{
 324	pgste_t pgste;
 325	struct mm_struct *mm = vma->vm_mm;
 326
 327	if (!MACHINE_HAS_NX)
 328		pte_val(pte) &= ~_PAGE_NOEXEC;
 329	if (mm_has_pgste(mm)) {
 330		pgste = pgste_get(ptep);
 331		pgste_set_key(ptep, pgste, pte, mm);
 332		pgste = pgste_set_pte(ptep, pgste, pte);
 333		pgste_set_unlock(ptep, pgste);
 334	} else {
 335		*ptep = pte;
 336	}
 337	preempt_enable();
 338}
 339
 340static inline void pmdp_idte_local(struct mm_struct *mm,
 341				   unsigned long addr, pmd_t *pmdp)
 342{
 343	if (MACHINE_HAS_TLB_GUEST)
 344		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 345			    mm->context.asce, IDTE_LOCAL);
 346	else
 347		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
 348	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 349		gmap_pmdp_idte_local(mm, addr);
 350}
 351
 352static inline void pmdp_idte_global(struct mm_struct *mm,
 353				    unsigned long addr, pmd_t *pmdp)
 354{
 355	if (MACHINE_HAS_TLB_GUEST) {
 356		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 357			    mm->context.asce, IDTE_GLOBAL);
 358		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 359			gmap_pmdp_idte_global(mm, addr);
 360	} else if (MACHINE_HAS_IDTE) {
 361		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
 362		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 363			gmap_pmdp_idte_global(mm, addr);
 364	} else {
 365		__pmdp_csp(pmdp);
 366		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 367			gmap_pmdp_csp(mm, addr);
 368	}
 369}
 370
 371static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
 372				      unsigned long addr, pmd_t *pmdp)
 373{
 374	pmd_t old;
 375
 376	old = *pmdp;
 377	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
 378		return old;
 379	atomic_inc(&mm->context.flush_count);
 380	if (MACHINE_HAS_TLB_LC &&
 381	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 382		pmdp_idte_local(mm, addr, pmdp);
 383	else
 384		pmdp_idte_global(mm, addr, pmdp);
 385	atomic_dec(&mm->context.flush_count);
 386	return old;
 387}
 388
 389static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 390				    unsigned long addr, pmd_t *pmdp)
 391{
 392	pmd_t old;
 393
 394	old = *pmdp;
 395	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
 396		return old;
 397	atomic_inc(&mm->context.flush_count);
 398	if (cpumask_equal(&mm->context.cpu_attach_mask,
 399			  cpumask_of(smp_processor_id()))) {
 400		pmd_val(*pmdp) |= _SEGMENT_ENTRY_INVALID;
 401		mm->context.flush_mm = 1;
 402		if (mm_has_pgste(mm))
 403			gmap_pmdp_invalidate(mm, addr);
 404	} else {
 405		pmdp_idte_global(mm, addr, pmdp);
 406	}
 407	atomic_dec(&mm->context.flush_count);
 408	return old;
 409}
 410
 411#ifdef CONFIG_PGSTE
 412static pmd_t *pmd_alloc_map(struct mm_struct *mm, unsigned long addr)
 413{
 414	pgd_t *pgd;
 415	p4d_t *p4d;
 416	pud_t *pud;
 417	pmd_t *pmd;
 418
 419	pgd = pgd_offset(mm, addr);
 420	p4d = p4d_alloc(mm, pgd, addr);
 421	if (!p4d)
 422		return NULL;
 423	pud = pud_alloc(mm, p4d, addr);
 424	if (!pud)
 425		return NULL;
 426	pmd = pmd_alloc(mm, pud, addr);
 427	return pmd;
 428}
 429#endif
 430
 431pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 432		       pmd_t *pmdp, pmd_t new)
 433{
 434	pmd_t old;
 435
 436	preempt_disable();
 437	old = pmdp_flush_direct(mm, addr, pmdp);
 438	*pmdp = new;
 439	preempt_enable();
 440	return old;
 441}
 442EXPORT_SYMBOL(pmdp_xchg_direct);
 443
 444pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 445		     pmd_t *pmdp, pmd_t new)
 446{
 447	pmd_t old;
 448
 449	preempt_disable();
 450	old = pmdp_flush_lazy(mm, addr, pmdp);
 451	*pmdp = new;
 452	preempt_enable();
 453	return old;
 454}
 455EXPORT_SYMBOL(pmdp_xchg_lazy);
 456
 457static inline void pudp_idte_local(struct mm_struct *mm,
 458				   unsigned long addr, pud_t *pudp)
 459{
 460	if (MACHINE_HAS_TLB_GUEST)
 461		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
 462			    mm->context.asce, IDTE_LOCAL);
 463	else
 464		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
 465}
 466
 467static inline void pudp_idte_global(struct mm_struct *mm,
 468				    unsigned long addr, pud_t *pudp)
 469{
 470	if (MACHINE_HAS_TLB_GUEST)
 471		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
 472			    mm->context.asce, IDTE_GLOBAL);
 473	else if (MACHINE_HAS_IDTE)
 474		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
 475	else
 476		/*
 477		 * Invalid bit position is the same for pmd and pud, so we can
  478		 * re-use __pmdp_csp() here
 479		 */
 480		__pmdp_csp((pmd_t *) pudp);
 481}
 482
 483static inline pud_t pudp_flush_direct(struct mm_struct *mm,
 484				      unsigned long addr, pud_t *pudp)
 485{
 486	pud_t old;
 487
 488	old = *pudp;
 489	if (pud_val(old) & _REGION_ENTRY_INVALID)
 490		return old;
 491	atomic_inc(&mm->context.flush_count);
 492	if (MACHINE_HAS_TLB_LC &&
 493	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 494		pudp_idte_local(mm, addr, pudp);
 495	else
 496		pudp_idte_global(mm, addr, pudp);
 497	atomic_dec(&mm->context.flush_count);
 498	return old;
 499}
 500
 501pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 502		       pud_t *pudp, pud_t new)
 503{
 504	pud_t old;
 505
 506	preempt_disable();
 507	old = pudp_flush_direct(mm, addr, pudp);
 508	*pudp = new;
 509	preempt_enable();
 510	return old;
 511}
 512EXPORT_SYMBOL(pudp_xchg_direct);
 513
 514#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 515void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 516				pgtable_t pgtable)
 517{
 518	struct list_head *lh = (struct list_head *) pgtable;
 519
 520	assert_spin_locked(pmd_lockptr(mm, pmdp));
 521
 522	/* FIFO */
 523	if (!pmd_huge_pte(mm, pmdp))
 524		INIT_LIST_HEAD(lh);
 525	else
 526		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
 527	pmd_huge_pte(mm, pmdp) = pgtable;
 528}
 529
 530pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 531{
 532	struct list_head *lh;
 533	pgtable_t pgtable;
 534	pte_t *ptep;
 535
 536	assert_spin_locked(pmd_lockptr(mm, pmdp));
 537
 538	/* FIFO */
 539	pgtable = pmd_huge_pte(mm, pmdp);
 540	lh = (struct list_head *) pgtable;
 541	if (list_empty(lh))
 542		pmd_huge_pte(mm, pmdp) = NULL;
 543	else {
 544		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
 545		list_del(lh);
 546	}
 547	ptep = (pte_t *) pgtable;
 548	pte_val(*ptep) = _PAGE_INVALID;
 549	ptep++;
 550	pte_val(*ptep) = _PAGE_INVALID;
 551	return pgtable;
 552}
 553#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 554
 555#ifdef CONFIG_PGSTE
 556void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
 557		     pte_t *ptep, pte_t entry)
 558{
 559	pgste_t pgste;
 560
 561	/* the mm_has_pgste() check is done in set_pte_at() */
 562	preempt_disable();
 563	pgste = pgste_get_lock(ptep);
 564	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
 565	pgste_set_key(ptep, pgste, entry, mm);
 566	pgste = pgste_set_pte(ptep, pgste, entry);
 567	pgste_set_unlock(ptep, pgste);
 568	preempt_enable();
 569}
 570
 571void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 572{
 573	pgste_t pgste;
 574
 575	preempt_disable();
 576	pgste = pgste_get_lock(ptep);
 577	pgste_val(pgste) |= PGSTE_IN_BIT;
 578	pgste_set_unlock(ptep, pgste);
 579	preempt_enable();
 580}
 581
 582/**
 583 * ptep_force_prot - change access rights of a locked pte
 584 * @mm: pointer to the process mm_struct
 585 * @addr: virtual address in the guest address space
 586 * @ptep: pointer to the page table entry
 587 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 588 * @bit: pgste bit to set (e.g. for notification)
 589 *
 590 * Returns 0 if the access rights were changed and -EAGAIN if the current
 591 * and requested access rights are incompatible.
 592 */
 593int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
 594		    pte_t *ptep, int prot, unsigned long bit)
 595{
 596	pte_t entry;
 597	pgste_t pgste;
 598	int pte_i, pte_p, nodat;
 599
 600	pgste = pgste_get_lock(ptep);
 601	entry = *ptep;
 602	/* Check pte entry after all locks have been acquired */
 603	pte_i = pte_val(entry) & _PAGE_INVALID;
 604	pte_p = pte_val(entry) & _PAGE_PROTECT;
 605	if ((pte_i && (prot != PROT_NONE)) ||
 606	    (pte_p && (prot & PROT_WRITE))) {
 607		pgste_set_unlock(ptep, pgste);
 608		return -EAGAIN;
 609	}
 610	/* Change access rights and set pgste bit */
 611	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 612	if (prot == PROT_NONE && !pte_i) {
 613		ptep_flush_direct(mm, addr, ptep, nodat);
 614		pgste = pgste_update_all(entry, pgste, mm);
 615		pte_val(entry) |= _PAGE_INVALID;
 616	}
 617	if (prot == PROT_READ && !pte_p) {
 618		ptep_flush_direct(mm, addr, ptep, nodat);
 619		pte_val(entry) &= ~_PAGE_INVALID;
 620		pte_val(entry) |= _PAGE_PROTECT;
 621	}
 622	pgste_val(pgste) |= bit;
 623	pgste = pgste_set_pte(ptep, pgste, entry);
 624	pgste_set_unlock(ptep, pgste);
 625	return 0;
 626}
 627
 628int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
 629		    pte_t *sptep, pte_t *tptep, pte_t pte)
 630{
 631	pgste_t spgste, tpgste;
 632	pte_t spte, tpte;
 633	int rc = -EAGAIN;
 634
 635	if (!(pte_val(*tptep) & _PAGE_INVALID))
 636		return 0;	/* already shadowed */
 637	spgste = pgste_get_lock(sptep);
 638	spte = *sptep;
 639	if (!(pte_val(spte) & _PAGE_INVALID) &&
 640	    !((pte_val(spte) & _PAGE_PROTECT) &&
 641	      !(pte_val(pte) & _PAGE_PROTECT))) {
 642		pgste_val(spgste) |= PGSTE_VSIE_BIT;
 643		tpgste = pgste_get_lock(tptep);
 644		pte_val(tpte) = (pte_val(spte) & PAGE_MASK) |
 645				(pte_val(pte) & _PAGE_PROTECT);
 646		/* don't touch the storage key - it belongs to parent pgste */
 647		tpgste = pgste_set_pte(tptep, tpgste, tpte);
 648		pgste_set_unlock(tptep, tpgste);
 649		rc = 1;
 650	}
 651	pgste_set_unlock(sptep, spgste);
 652	return rc;
 653}
 654
 655void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
 656{
 657	pgste_t pgste;
 658	int nodat;
 659
 660	pgste = pgste_get_lock(ptep);
 661	/* notifier is called by the caller */
 662	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 663	ptep_flush_direct(mm, saddr, ptep, nodat);
 664	/* don't touch the storage key - it belongs to parent pgste */
 665	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
 666	pgste_set_unlock(ptep, pgste);
 667}
 668
 669static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
 670{
 671	if (!non_swap_entry(entry))
 672		dec_mm_counter(mm, MM_SWAPENTS);
 673	else if (is_migration_entry(entry)) {
 674		struct page *page = migration_entry_to_page(entry);
 675
 676		dec_mm_counter(mm, mm_counter(page));
 677	}
 678	free_swap_and_cache(entry);
 679}
 680
 681void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 682		     pte_t *ptep, int reset)
 683{
 684	unsigned long pgstev;
 685	pgste_t pgste;
 686	pte_t pte;
 687
 688	/* Zap unused and logically-zero pages */
 689	preempt_disable();
 690	pgste = pgste_get_lock(ptep);
 691	pgstev = pgste_val(pgste);
 692	pte = *ptep;
 693	if (!reset && pte_swap(pte) &&
 694	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
 695	     (pgstev & _PGSTE_GPS_ZERO))) {
 696		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
 697		pte_clear(mm, addr, ptep);
 698	}
 699	if (reset)
 700		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
 701	pgste_set_unlock(ptep, pgste);
 702	preempt_enable();
 703}
 704
 705void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 706{
 707	unsigned long ptev;
 708	pgste_t pgste;
 709
 710	/* Clear storage key ACC and F, but set R/C */
 711	preempt_disable();
 712	pgste = pgste_get_lock(ptep);
 713	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 714	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
 715	ptev = pte_val(*ptep);
 716	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
 717		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
 718	pgste_set_unlock(ptep, pgste);
 719	preempt_enable();
 720}
 721
 722/*
 723 * Test and reset if a guest page is dirty
 724 */
 725bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
 726		       pte_t *ptep)
 727{
 728	pgste_t pgste;
 729	pte_t pte;
 730	bool dirty;
 731	int nodat;
 732
 733	pgste = pgste_get_lock(ptep);
 734	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
 735	pgste_val(pgste) &= ~PGSTE_UC_BIT;
 736	pte = *ptep;
 737	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
 738		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
 739		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 740		ptep_ipte_global(mm, addr, ptep, nodat);
 741		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
 742			pte_val(pte) |= _PAGE_PROTECT;
 743		else
 744			pte_val(pte) |= _PAGE_INVALID;
 745		*ptep = pte;
 746	}
 747	pgste_set_unlock(ptep, pgste);
 748	return dirty;
 749}
 750EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
 751
 752int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 753			  unsigned char key, bool nq)
 754{
 755	unsigned long keyul, paddr;
 756	spinlock_t *ptl;
 757	pgste_t old, new;
 758	pmd_t *pmdp;
 759	pte_t *ptep;
 760
 761	pmdp = pmd_alloc_map(mm, addr);
 762	if (unlikely(!pmdp))
 763		return -EFAULT;
 764
 765	ptl = pmd_lock(mm, pmdp);
 766	if (!pmd_present(*pmdp)) {
 767		spin_unlock(ptl);
 768		return -EFAULT;
 769	}
 770
 771	if (pmd_large(*pmdp)) {
 772		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 773		paddr |= addr & ~HPAGE_MASK;
 774		/*
 775		 * Huge pmds need quiescing operations, they are
 776		 * always mapped.
 777		 */
 778		page_set_storage_key(paddr, key, 1);
 779		spin_unlock(ptl);
 780		return 0;
 781	}
 782	spin_unlock(ptl);
 783
 784	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 785	if (unlikely(!ptep))
 786		return -EFAULT;
 787
 788	new = old = pgste_get_lock(ptep);
 789	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
 790			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
 791	keyul = (unsigned long) key;
 792	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
 793	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 794	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 795		unsigned long bits, skey;
 796
 797		paddr = pte_val(*ptep) & PAGE_MASK;
 798		skey = (unsigned long) page_get_storage_key(paddr);
 799		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 800		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
 801		/* Set storage key ACC and FP */
 802		page_set_storage_key(paddr, skey, !nq);
 803		/* Merge host changed & referenced into pgste  */
 804		pgste_val(new) |= bits << 52;
 805	}
 806	/* changing the guest storage key is considered a change of the page */
 807	if ((pgste_val(new) ^ pgste_val(old)) &
 808	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
 809		pgste_val(new) |= PGSTE_UC_BIT;
 810
 811	pgste_set_unlock(ptep, new);
 812	pte_unmap_unlock(ptep, ptl);
 813	return 0;
 814}
 815EXPORT_SYMBOL(set_guest_storage_key);
 816
 817/**
 818 * Conditionally set a guest storage key (handling csske).
 819 * oldkey will be updated when either mr or mc is set and a pointer is given.
 820 *
  821 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 822 * storage key was updated and -EFAULT on access errors.
 823 */
 824int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 825			       unsigned char key, unsigned char *oldkey,
 826			       bool nq, bool mr, bool mc)
 827{
 828	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
 829	int rc;
 830
 831	/* we can drop the pgste lock between getting and setting the key */
 832	if (mr | mc) {
 833		rc = get_guest_storage_key(current->mm, addr, &tmp);
 834		if (rc)
 835			return rc;
 836		if (oldkey)
 837			*oldkey = tmp;
 838		if (!mr)
 839			mask |= _PAGE_REFERENCED;
 840		if (!mc)
 841			mask |= _PAGE_CHANGED;
 842		if (!((tmp ^ key) & mask))
 843			return 0;
 844	}
 845	rc = set_guest_storage_key(current->mm, addr, key, nq);
 846	return rc < 0 ? rc : 1;
 847}
 848EXPORT_SYMBOL(cond_set_guest_storage_key);
 849
 850/**
 851 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 852 *
 853 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 854 */
 855int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 856{
 857	spinlock_t *ptl;
 858	unsigned long paddr;
 859	pgste_t old, new;
 860	pmd_t *pmdp;
 861	pte_t *ptep;
 862	int cc = 0;
 863
 864	pmdp = pmd_alloc_map(mm, addr);
 865	if (unlikely(!pmdp))
 866		return -EFAULT;
 867
 868	ptl = pmd_lock(mm, pmdp);
 869	if (!pmd_present(*pmdp)) {
 870		spin_unlock(ptl);
 871		return -EFAULT;
 872	}
 873
 874	if (pmd_large(*pmdp)) {
 875		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 876		paddr |= addr & ~HPAGE_MASK;
 877		cc = page_reset_referenced(paddr);
 878		spin_unlock(ptl);
 879		return cc;
 880	}
 881	spin_unlock(ptl);
 882
 883	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 884	if (unlikely(!ptep))
 885		return -EFAULT;
 886
 887	new = old = pgste_get_lock(ptep);
 888	/* Reset guest reference bit only */
 889	pgste_val(new) &= ~PGSTE_GR_BIT;
 890
 891	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 892		paddr = pte_val(*ptep) & PAGE_MASK;
 893		cc = page_reset_referenced(paddr);
 894		/* Merge real referenced bit into host-set */
 895		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
 896	}
 897	/* Reflect guest's logical view, not physical */
 898	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
 899	/* Changing the guest storage key is considered a change of the page */
 900	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
 901		pgste_val(new) |= PGSTE_UC_BIT;
 902
 903	pgste_set_unlock(ptep, new);
 904	pte_unmap_unlock(ptep, ptl);
 905	return cc;
 906}
 907EXPORT_SYMBOL(reset_guest_reference_bit);
 908
 909int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 910			  unsigned char *key)
 911{
 912	unsigned long paddr;
 913	spinlock_t *ptl;
 914	pgste_t pgste;
 915	pmd_t *pmdp;
 916	pte_t *ptep;
 917
 918	pmdp = pmd_alloc_map(mm, addr);
 919	if (unlikely(!pmdp))
 920		return -EFAULT;
 921
 922	ptl = pmd_lock(mm, pmdp);
 923	if (!pmd_present(*pmdp)) {
 924		/* Not yet mapped memory has a zero key */
 925		spin_unlock(ptl);
 926		*key = 0;
 927		return 0;
 928	}
 929
 930	if (pmd_large(*pmdp)) {
 931		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 932		paddr |= addr & ~HPAGE_MASK;
 933		*key = page_get_storage_key(paddr);
 934		spin_unlock(ptl);
 935		return 0;
 936	}
 937	spin_unlock(ptl);
 938
 939	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 940	if (unlikely(!ptep))
 941		return -EFAULT;
 942
 943	pgste = pgste_get_lock(ptep);
 944	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 945	paddr = pte_val(*ptep) & PAGE_MASK;
 946	if (!(pte_val(*ptep) & _PAGE_INVALID))
 947		*key = page_get_storage_key(paddr);
 948	/* Reflect guest's logical view, not physical */
 949	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
 950	pgste_set_unlock(ptep, pgste);
 951	pte_unmap_unlock(ptep, ptl);
 952	return 0;
 953}
 954EXPORT_SYMBOL(get_guest_storage_key);
 955
 956/**
 957 * pgste_perform_essa - perform ESSA actions on the PGSTE.
 958 * @mm: the memory context. It must have PGSTEs, no check is performed here!
 959 * @hva: the host virtual address of the page whose PGSTE is to be processed
 960 * @orc: the specific action to perform, see the ESSA_SET_* macros.
 961 * @oldpte: the PTE will be saved there if the pointer is not NULL.
 962 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
 963 *
 964 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
 965 *	   or < 0 in case of error. -EINVAL is returned for invalid values
 966 *	   of orc, -EFAULT for invalid addresses.
 967 */
 968int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
 969			unsigned long *oldpte, unsigned long *oldpgste)
 970{
 971	unsigned long pgstev;
 972	spinlock_t *ptl;
 973	pgste_t pgste;
 974	pte_t *ptep;
 975	int res = 0;
 976
 977	WARN_ON_ONCE(orc > ESSA_MAX);
 978	if (unlikely(orc > ESSA_MAX))
 979		return -EINVAL;
 980	ptep = get_locked_pte(mm, hva, &ptl);
 981	if (unlikely(!ptep))
 982		return -EFAULT;
 983	pgste = pgste_get_lock(ptep);
 984	pgstev = pgste_val(pgste);
 985	if (oldpte)
 986		*oldpte = pte_val(*ptep);
 987	if (oldpgste)
 988		*oldpgste = pgstev;
 989
 990	switch (orc) {
 991	case ESSA_GET_STATE:
 992		break;
 993	case ESSA_SET_STABLE:
 994		pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
 995		pgstev |= _PGSTE_GPS_USAGE_STABLE;
 996		break;
 997	case ESSA_SET_UNUSED:
 998		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
 999		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
1000		if (pte_val(*ptep) & _PAGE_INVALID)
1001			res = 1;
1002		break;
1003	case ESSA_SET_VOLATILE:
1004		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1005		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1006		if (pte_val(*ptep) & _PAGE_INVALID)
1007			res = 1;
1008		break;
1009	case ESSA_SET_POT_VOLATILE:
1010		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1011		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
1012			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
1013			break;
1014		}
1015		if (pgstev & _PGSTE_GPS_ZERO) {
1016			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1017			break;
1018		}
1019		if (!(pgstev & PGSTE_GC_BIT)) {
1020			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1021			res = 1;
1022			break;
1023		}
1024		break;
1025	case ESSA_SET_STABLE_RESIDENT:
1026		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1027		pgstev |= _PGSTE_GPS_USAGE_STABLE;
1028		/*
1029		 * Since the resident state can go away any time after this
1030		 * call, we will not make this page resident. We can revisit
1031		 * this decision if a guest will ever start using this.
1032		 */
1033		break;
1034	case ESSA_SET_STABLE_IF_RESIDENT:
1035		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
1036			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1037			pgstev |= _PGSTE_GPS_USAGE_STABLE;
1038		}
1039		break;
1040	case ESSA_SET_STABLE_NODAT:
1041		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1042		pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
1043		break;
1044	default:
1045		/* we should never get here! */
1046		break;
1047	}
1048	/* If we are discarding a page, set it to logical zero */
1049	if (res)
1050		pgstev |= _PGSTE_GPS_ZERO;
1051
1052	pgste_val(pgste) = pgstev;
1053	pgste_set_unlock(ptep, pgste);
1054	pte_unmap_unlock(ptep, ptl);
1055	return res;
1056}
1057EXPORT_SYMBOL(pgste_perform_essa);
1058
1059/**
1060 * set_pgste_bits - set specific PGSTE bits.
1061 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1062 * @hva: the host virtual address of the page whose PGSTE is to be processed
1063 * @bits: a bitmask representing the bits that will be touched
1064 * @value: the values of the bits to be written. Only the bits in the mask
1065 *	   will be written.
1066 *
1067 * Return: 0 on success, < 0 in case of error.
1068 */
1069int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
1070			unsigned long bits, unsigned long value)
1071{
1072	spinlock_t *ptl;
1073	pgste_t new;
1074	pte_t *ptep;
1075
1076	ptep = get_locked_pte(mm, hva, &ptl);
1077	if (unlikely(!ptep))
1078		return -EFAULT;
1079	new = pgste_get_lock(ptep);
1080
1081	pgste_val(new) &= ~bits;
1082	pgste_val(new) |= value & bits;
1083
1084	pgste_set_unlock(ptep, new);
1085	pte_unmap_unlock(ptep, ptl);
1086	return 0;
1087}
1088EXPORT_SYMBOL(set_pgste_bits);
1089
1090/**
1091 * get_pgste - get the current PGSTE for the given address.
1092 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1093 * @hva: the host virtual address of the page whose PGSTE is to be processed
1094 * @pgstep: will be written with the current PGSTE for the given address.
1095 *
1096 * Return: 0 on success, < 0 in case of error.
1097 */
1098int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
1099{
1100	spinlock_t *ptl;
1101	pte_t *ptep;
1102
1103	ptep = get_locked_pte(mm, hva, &ptl);
1104	if (unlikely(!ptep))
1105		return -EFAULT;
1106	*pgstep = pgste_val(pgste_get(ptep));
1107	pte_unmap_unlock(ptep, ptl);
1108	return 0;
1109}
1110EXPORT_SYMBOL(get_pgste);
1111#endif
v3.15
   1/*
   2 *    Copyright IBM Corp. 2007, 2011
   3 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
   4 */
   5
   6#include <linux/sched.h>
   7#include <linux/kernel.h>
   8#include <linux/errno.h>
   9#include <linux/gfp.h>
  10#include <linux/mm.h>
  11#include <linux/swap.h>
  12#include <linux/smp.h>
  13#include <linux/highmem.h>
  14#include <linux/pagemap.h>
  15#include <linux/spinlock.h>
  16#include <linux/module.h>
  17#include <linux/quicklist.h>
  18#include <linux/rcupdate.h>
  19#include <linux/slab.h>
  20#include <linux/swapops.h>
  21
  22#include <asm/pgtable.h>
  23#include <asm/pgalloc.h>
  24#include <asm/tlb.h>
  25#include <asm/tlbflush.h>
  26#include <asm/mmu_context.h>
  27
  28#ifndef CONFIG_64BIT
  29#define ALLOC_ORDER	1
  30#define FRAG_MASK	0x0f
  31#else
  32#define ALLOC_ORDER	2
  33#define FRAG_MASK	0x03
  34#endif
  35
  36
  37unsigned long *crst_table_alloc(struct mm_struct *mm)
  38{
  39	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
  40
  41	if (!page)
  42		return NULL;
  43	return (unsigned long *) page_to_phys(page);
  44}
  45
  46void crst_table_free(struct mm_struct *mm, unsigned long *table)
  47{
  48	free_pages((unsigned long) table, ALLOC_ORDER);
  49}
  50
  51#ifdef CONFIG_64BIT
  52static void __crst_table_upgrade(void *arg)
  53{
  54	struct mm_struct *mm = arg;
  55
  56	if (current->active_mm == mm)
  57		update_user_asce(mm, 1);
  58	__tlb_flush_local();
  59}
  60
  61int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
  62{
  63	unsigned long *table, *pgd;
  64	unsigned long entry;
  65	int flush;
  66
  67	BUG_ON(limit > (1UL << 53));
  68	flush = 0;
  69repeat:
  70	table = crst_table_alloc(mm);
  71	if (!table)
  72		return -ENOMEM;
  73	spin_lock_bh(&mm->page_table_lock);
  74	if (mm->context.asce_limit < limit) {
  75		pgd = (unsigned long *) mm->pgd;
  76		if (mm->context.asce_limit <= (1UL << 31)) {
  77			entry = _REGION3_ENTRY_EMPTY;
  78			mm->context.asce_limit = 1UL << 42;
  79			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
  80						_ASCE_USER_BITS |
  81						_ASCE_TYPE_REGION3;
  82		} else {
  83			entry = _REGION2_ENTRY_EMPTY;
  84			mm->context.asce_limit = 1UL << 53;
  85			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
  86						_ASCE_USER_BITS |
  87						_ASCE_TYPE_REGION2;
  88		}
  89		crst_table_init(table, entry);
  90		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
  91		mm->pgd = (pgd_t *) table;
  92		mm->task_size = mm->context.asce_limit;
  93		table = NULL;
  94		flush = 1;
  95	}
  96	spin_unlock_bh(&mm->page_table_lock);
  97	if (table)
  98		crst_table_free(mm, table);
  99	if (mm->context.asce_limit < limit)
 100		goto repeat;
 101	if (flush)
 102		on_each_cpu(__crst_table_upgrade, mm, 0);
 103	return 0;
 104}
 105
 106void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
 107{
 108	pgd_t *pgd;
 109
 110	if (current->active_mm == mm) {
 111		clear_user_asce(mm, 1);
 112		__tlb_flush_mm(mm);
 113	}
 114	while (mm->context.asce_limit > limit) {
 115		pgd = mm->pgd;
 116		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
 117		case _REGION_ENTRY_TYPE_R2:
 118			mm->context.asce_limit = 1UL << 42;
 119			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
 120						_ASCE_USER_BITS |
 121						_ASCE_TYPE_REGION3;
 122			break;
 123		case _REGION_ENTRY_TYPE_R3:
 124			mm->context.asce_limit = 1UL << 31;
 125			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
 126						_ASCE_USER_BITS |
 127						_ASCE_TYPE_SEGMENT;
 128			break;
 129		default:
 130			BUG();
 131		}
 132		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
 133		mm->task_size = mm->context.asce_limit;
 134		crst_table_free(mm, (unsigned long *) pgd);
 135	}
 136	if (current->active_mm == mm)
 137		update_user_asce(mm, 1);
 138}
 139#endif
 140
 141#ifdef CONFIG_PGSTE
 142
 143/**
 144 * gmap_alloc - allocate a guest address space
 145 * @mm: pointer to the parent mm_struct
 146 *
 147 * Returns a guest address space structure.
 148 */
 149struct gmap *gmap_alloc(struct mm_struct *mm)
 150{
 151	struct gmap *gmap;
 152	struct page *page;
 153	unsigned long *table;
 154
 155	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
 156	if (!gmap)
 157		goto out;
 158	INIT_LIST_HEAD(&gmap->crst_list);
 159	gmap->mm = mm;
 160	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
 161	if (!page)
 162		goto out_free;
 163	list_add(&page->lru, &gmap->crst_list);
 164	table = (unsigned long *) page_to_phys(page);
 165	crst_table_init(table, _REGION1_ENTRY_EMPTY);
 166	gmap->table = table;
 167	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
 168		     _ASCE_USER_BITS | __pa(table);
 169	list_add(&gmap->list, &mm->context.gmap_list);
 170	return gmap;
 171
 172out_free:
 173	kfree(gmap);
 174out:
 175	return NULL;
 176}
 177EXPORT_SYMBOL_GPL(gmap_alloc);
 178
 179static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
 180{
 181	struct gmap_pgtable *mp;
 182	struct gmap_rmap *rmap;
 183	struct page *page;
 184
 185	if (*table & _SEGMENT_ENTRY_INVALID)
 186		return 0;
 187	page = pfn_to_page(*table >> PAGE_SHIFT);
 188	mp = (struct gmap_pgtable *) page->index;
 189	list_for_each_entry(rmap, &mp->mapper, list) {
 190		if (rmap->entry != table)
 191			continue;
 192		list_del(&rmap->list);
 193		kfree(rmap);
 194		break;
 195	}
 196	*table = mp->vmaddr | _SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_PROTECT;
 197	return 1;
 198}
 199
 200static void gmap_flush_tlb(struct gmap *gmap)
 201{
 202	if (MACHINE_HAS_IDTE)
 203		__tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
 204				 _ASCE_TYPE_REGION1);
 205	else
 206		__tlb_flush_global();
 207}
 208
 209/**
 210 * gmap_free - free a guest address space
 211 * @gmap: pointer to the guest address space structure
 212 */
 213void gmap_free(struct gmap *gmap)
 214{
 215	struct page *page, *next;
 216	unsigned long *table;
 217	int i;
 218
 219
 220	/* Flush tlb. */
 221	if (MACHINE_HAS_IDTE)
 222		__tlb_flush_asce(gmap->mm, (unsigned long) gmap->table |
 223				 _ASCE_TYPE_REGION1);
 224	else
 225		__tlb_flush_global();
 226
 227	/* Free all segment & region tables. */
 228	down_read(&gmap->mm->mmap_sem);
 229	spin_lock(&gmap->mm->page_table_lock);
 230	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
 231		table = (unsigned long *) page_to_phys(page);
 232		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
 233			/* Remove gmap rmap structures for segment table. */
 234			for (i = 0; i < PTRS_PER_PMD; i++, table++)
 235				gmap_unlink_segment(gmap, table);
 236		__free_pages(page, ALLOC_ORDER);
 237	}
 238	spin_unlock(&gmap->mm->page_table_lock);
 239	up_read(&gmap->mm->mmap_sem);
 240	list_del(&gmap->list);
 241	kfree(gmap);
 242}
 243EXPORT_SYMBOL_GPL(gmap_free);
 244
 245/**
 246 * gmap_enable - switch primary space to the guest address space
 247 * @gmap: pointer to the guest address space structure
 248 */
 249void gmap_enable(struct gmap *gmap)
 250{
 251	S390_lowcore.gmap = (unsigned long) gmap;
 252}
 253EXPORT_SYMBOL_GPL(gmap_enable);
 254
 255/**
 256 * gmap_disable - switch back to the standard primary address space
 257 * @gmap: pointer to the guest address space structure
 258 */
 259void gmap_disable(struct gmap *gmap)
 260{
 261	S390_lowcore.gmap = 0UL;
 262}
 263EXPORT_SYMBOL_GPL(gmap_disable);
 264
 265/*
 266 * gmap_alloc_table is assumed to be called with mmap_sem held
 267 */
 268static int gmap_alloc_table(struct gmap *gmap,
 269			    unsigned long *table, unsigned long init)
 270	__releases(&gmap->mm->page_table_lock)
 271	__acquires(&gmap->mm->page_table_lock)
 272{
 273	struct page *page;
 274	unsigned long *new;
 275
  276	/* since we don't free the gmap table until gmap_free we can unlock */
 277	spin_unlock(&gmap->mm->page_table_lock);
 278	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
 279	spin_lock(&gmap->mm->page_table_lock);
 280	if (!page)
 281		return -ENOMEM;
 282	new = (unsigned long *) page_to_phys(page);
 283	crst_table_init(new, init);
 284	if (*table & _REGION_ENTRY_INVALID) {
 285		list_add(&page->lru, &gmap->crst_list);
 286		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
 287			(*table & _REGION_ENTRY_TYPE_MASK);
 288	} else
 289		__free_pages(page, ALLOC_ORDER);
 290	return 0;
 291}
 292
 293/**
 294 * gmap_unmap_segment - unmap segment from the guest address space
 295 * @gmap: pointer to the guest address space structure
 296 * @addr: address in the guest address space
 297 * @len: length of the memory area to unmap
 298 *
 299 * Returns 0 if the unmap succeeded, -EINVAL if not.
 300 */
 301int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
 302{
 303	unsigned long *table;
 304	unsigned long off;
 305	int flush;
 306
 307	if ((to | len) & (PMD_SIZE - 1))
 308		return -EINVAL;
 309	if (len == 0 || to + len < to)
 310		return -EINVAL;
 311
 312	flush = 0;
 313	down_read(&gmap->mm->mmap_sem);
 314	spin_lock(&gmap->mm->page_table_lock);
 315	for (off = 0; off < len; off += PMD_SIZE) {
 316		/* Walk the guest addr space page table */
 317		table = gmap->table + (((to + off) >> 53) & 0x7ff);
 318		if (*table & _REGION_ENTRY_INVALID)
 319			goto out;
 320		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 321		table = table + (((to + off) >> 42) & 0x7ff);
 322		if (*table & _REGION_ENTRY_INVALID)
 323			goto out;
 324		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 325		table = table + (((to + off) >> 31) & 0x7ff);
 326		if (*table & _REGION_ENTRY_INVALID)
 327			goto out;
 328		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 329		table = table + (((to + off) >> 20) & 0x7ff);
 330
 331		/* Clear segment table entry in guest address space. */
 332		flush |= gmap_unlink_segment(gmap, table);
 333		*table = _SEGMENT_ENTRY_INVALID;
 334	}
 335out:
 336	spin_unlock(&gmap->mm->page_table_lock);
 337	up_read(&gmap->mm->mmap_sem);
 338	if (flush)
 339		gmap_flush_tlb(gmap);
 340	return 0;
 341}
 342EXPORT_SYMBOL_GPL(gmap_unmap_segment);
 343
 344/**
 345 * gmap_mmap_segment - map a segment to the guest address space
 346 * @gmap: pointer to the guest address space structure
 347 * @from: source address in the parent address space
 348 * @to: target address in the guest address space
 349 *
 350 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 351 */
 352int gmap_map_segment(struct gmap *gmap, unsigned long from,
 353		     unsigned long to, unsigned long len)
 354{
 355	unsigned long *table;
 356	unsigned long off;
 357	int flush;
 358
 359	if ((from | to | len) & (PMD_SIZE - 1))
 360		return -EINVAL;
 361	if (len == 0 || from + len > TASK_MAX_SIZE ||
 362	    from + len < from || to + len < to)
 363		return -EINVAL;
 364
 365	flush = 0;
 366	down_read(&gmap->mm->mmap_sem);
 367	spin_lock(&gmap->mm->page_table_lock);
 368	for (off = 0; off < len; off += PMD_SIZE) {
 369		/* Walk the gmap address space page table */
 370		table = gmap->table + (((to + off) >> 53) & 0x7ff);
 371		if ((*table & _REGION_ENTRY_INVALID) &&
 372		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
 373			goto out_unmap;
 374		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 375		table = table + (((to + off) >> 42) & 0x7ff);
 376		if ((*table & _REGION_ENTRY_INVALID) &&
 377		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
 378			goto out_unmap;
 379		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 380		table = table + (((to + off) >> 31) & 0x7ff);
 381		if ((*table & _REGION_ENTRY_INVALID) &&
 382		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
 383			goto out_unmap;
 384		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
 385		table = table + (((to + off) >> 20) & 0x7ff);
 386
 387		/* Store 'from' address in an invalid segment table entry. */
 388		flush |= gmap_unlink_segment(gmap, table);
 389		*table =  (from + off) | (_SEGMENT_ENTRY_INVALID |
 390					  _SEGMENT_ENTRY_PROTECT);
 391	}
 392	spin_unlock(&gmap->mm->page_table_lock);
 393	up_read(&gmap->mm->mmap_sem);
 394	if (flush)
 395		gmap_flush_tlb(gmap);
 396	return 0;
 397
 398out_unmap:
 399	spin_unlock(&gmap->mm->page_table_lock);
 400	up_read(&gmap->mm->mmap_sem);
 401	gmap_unmap_segment(gmap, to, len);
 402	return -ENOMEM;
 403}
 404EXPORT_SYMBOL_GPL(gmap_map_segment);
 405
 406static unsigned long *gmap_table_walk(unsigned long address, struct gmap *gmap)
 407{
 408	unsigned long *table;
 409
 410	table = gmap->table + ((address >> 53) & 0x7ff);
 411	if (unlikely(*table & _REGION_ENTRY_INVALID))
 412		return ERR_PTR(-EFAULT);
 413	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 414	table = table + ((address >> 42) & 0x7ff);
 415	if (unlikely(*table & _REGION_ENTRY_INVALID))
 416		return ERR_PTR(-EFAULT);
 417	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 418	table = table + ((address >> 31) & 0x7ff);
 419	if (unlikely(*table & _REGION_ENTRY_INVALID))
 420		return ERR_PTR(-EFAULT);
 421	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 422	table = table + ((address >> 20) & 0x7ff);
 423	return table;
 424}
 425
 426/**
 427 * __gmap_translate - translate a guest address to a user space address
 428 * @address: guest address
 429 * @gmap: pointer to guest mapping meta data structure
 430 *
 431 * Returns user space address which corresponds to the guest address or
 432 * -EFAULT if no such mapping exists.
 433 * This function does not establish potentially missing page table entries.
 434 * The mmap_sem of the mm that belongs to the address space must be held
 435 * when this function gets called.
 436 */
 437unsigned long __gmap_translate(unsigned long address, struct gmap *gmap)
 438{
 439	unsigned long *segment_ptr, vmaddr, segment;
 440	struct gmap_pgtable *mp;
 441	struct page *page;
 442
 443	current->thread.gmap_addr = address;
 444	segment_ptr = gmap_table_walk(address, gmap);
 445	if (IS_ERR(segment_ptr))
 446		return PTR_ERR(segment_ptr);
 447	/* Convert the gmap address to an mm address. */
 448	segment = *segment_ptr;
 449	if (!(segment & _SEGMENT_ENTRY_INVALID)) {
 450		page = pfn_to_page(segment >> PAGE_SHIFT);
 451		mp = (struct gmap_pgtable *) page->index;
 452		return mp->vmaddr | (address & ~PMD_MASK);
 453	} else if (segment & _SEGMENT_ENTRY_PROTECT) {
 454		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
 455		return vmaddr | (address & ~PMD_MASK);
 456	}
 457	return -EFAULT;
 458}
 459EXPORT_SYMBOL_GPL(__gmap_translate);
 460
 461/**
 462 * gmap_translate - translate a guest address to a user space address
 463 * @address: guest address
 464 * @gmap: pointer to guest mapping meta data structure
 465 *
 466 * Returns user space address which corresponds to the guest address or
 467 * -EFAULT if no such mapping exists.
 468 * This function does not establish potentially missing page table entries.
 469 */
 470unsigned long gmap_translate(unsigned long address, struct gmap *gmap)
 471{
 472	unsigned long rc;
 473
 474	down_read(&gmap->mm->mmap_sem);
 475	rc = __gmap_translate(address, gmap);
 476	up_read(&gmap->mm->mmap_sem);
 477	return rc;
 478}
 479EXPORT_SYMBOL_GPL(gmap_translate);
 480
 481static int gmap_connect_pgtable(unsigned long address, unsigned long segment,
 482				unsigned long *segment_ptr, struct gmap *gmap)
 483{
 484	unsigned long vmaddr;
 485	struct vm_area_struct *vma;
 486	struct gmap_pgtable *mp;
 487	struct gmap_rmap *rmap;
 488	struct mm_struct *mm;
 489	struct page *page;
 490	pgd_t *pgd;
 491	pud_t *pud;
 492	pmd_t *pmd;
 493
 494	mm = gmap->mm;
 495	vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
 496	vma = find_vma(mm, vmaddr);
 497	if (!vma || vma->vm_start > vmaddr)
 498		return -EFAULT;
 499	/* Walk the parent mm page table */
 500	pgd = pgd_offset(mm, vmaddr);
 501	pud = pud_alloc(mm, pgd, vmaddr);
 502	if (!pud)
 503		return -ENOMEM;
 504	pmd = pmd_alloc(mm, pud, vmaddr);
 505	if (!pmd)
 506		return -ENOMEM;
 507	if (!pmd_present(*pmd) &&
 508	    __pte_alloc(mm, vma, pmd, vmaddr))
 509		return -ENOMEM;
 510	/* large pmds cannot yet be handled */
 511	if (pmd_large(*pmd))
 512		return -EFAULT;
 513	/* pmd now points to a valid segment table entry. */
 514	rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
 515	if (!rmap)
 516		return -ENOMEM;
 517	/* Link gmap segment table entry location to page table. */
 518	page = pmd_page(*pmd);
 519	mp = (struct gmap_pgtable *) page->index;
 520	rmap->gmap = gmap;
 521	rmap->entry = segment_ptr;
 522	rmap->vmaddr = address & PMD_MASK;
 523	spin_lock(&mm->page_table_lock);
 524	if (*segment_ptr == segment) {
 525		list_add(&rmap->list, &mp->mapper);
 526		/* Set gmap segment table entry to page table. */
 527		*segment_ptr = pmd_val(*pmd) & PAGE_MASK;
 528		rmap = NULL;
 529	}
 530	spin_unlock(&mm->page_table_lock);
 531	kfree(rmap);
 532	return 0;
 533}
 534
 535static void gmap_disconnect_pgtable(struct mm_struct *mm, unsigned long *table)
 536{
 537	struct gmap_rmap *rmap, *next;
 538	struct gmap_pgtable *mp;
 539	struct page *page;
 540	int flush;
 541
 542	flush = 0;
 543	spin_lock(&mm->page_table_lock);
 544	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 545	mp = (struct gmap_pgtable *) page->index;
 546	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
 547		*rmap->entry = mp->vmaddr | (_SEGMENT_ENTRY_INVALID |
 548					     _SEGMENT_ENTRY_PROTECT);
 549		list_del(&rmap->list);
 550		kfree(rmap);
 551		flush = 1;
 552	}
 553	spin_unlock(&mm->page_table_lock);
 554	if (flush)
 555		__tlb_flush_global();
 556}
 557
 558/*
 559 * this function is assumed to be called with mmap_sem held
 560 */
 561unsigned long __gmap_fault(unsigned long address, struct gmap *gmap)
 562{
 563	unsigned long *segment_ptr, segment;
 564	struct gmap_pgtable *mp;
 565	struct page *page;
 566	int rc;
 567
 568	current->thread.gmap_addr = address;
 569	segment_ptr = gmap_table_walk(address, gmap);
 570	if (IS_ERR(segment_ptr))
 571		return -EFAULT;
 572	/* Convert the gmap address to an mm address. */
 573	while (1) {
 574		segment = *segment_ptr;
 575		if (!(segment & _SEGMENT_ENTRY_INVALID)) {
 576			/* Page table is present */
 577			page = pfn_to_page(segment >> PAGE_SHIFT);
 578			mp = (struct gmap_pgtable *) page->index;
 579			return mp->vmaddr | (address & ~PMD_MASK);
 580		}
 581		if (!(segment & _SEGMENT_ENTRY_PROTECT))
 582			/* Nothing mapped in the gmap address space. */
 583			break;
 584		rc = gmap_connect_pgtable(address, segment, segment_ptr, gmap);
 585		if (rc)
 586			return rc;
 587	}
 588	return -EFAULT;
 589}
 590
 591unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
 592{
 593	unsigned long rc;
 594
 595	down_read(&gmap->mm->mmap_sem);
 596	rc = __gmap_fault(address, gmap);
 597	up_read(&gmap->mm->mmap_sem);
 598
 599	return rc;
 600}
 601EXPORT_SYMBOL_GPL(gmap_fault);
 602
 603static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
 604{
 605	if (!non_swap_entry(entry))
 606		dec_mm_counter(mm, MM_SWAPENTS);
 607	else if (is_migration_entry(entry)) {
 608		struct page *page = migration_entry_to_page(entry);
 609
 610		if (PageAnon(page))
 611			dec_mm_counter(mm, MM_ANONPAGES);
 612		else
 613			dec_mm_counter(mm, MM_FILEPAGES);
 614	}
 615	free_swap_and_cache(entry);
 616}
 617
 618/**
 619 * The mm->mmap_sem lock must be held
 620 */
 621static void gmap_zap_unused(struct mm_struct *mm, unsigned long address)
 622{
 623	unsigned long ptev, pgstev;
 624	spinlock_t *ptl;
 625	pgste_t pgste;
 626	pte_t *ptep, pte;
 627
 628	ptep = get_locked_pte(mm, address, &ptl);
 629	if (unlikely(!ptep))
 630		return;
 631	pte = *ptep;
 632	if (!pte_swap(pte))
 633		goto out_pte;
 634	/* Zap unused and logically-zero pages */
 635	pgste = pgste_get_lock(ptep);
 636	pgstev = pgste_val(pgste);
 637	ptev = pte_val(pte);
 638	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
 639	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
 640		gmap_zap_swap_entry(pte_to_swp_entry(pte), mm);
 641		pte_clear(mm, address, ptep);
 642	}
 643	pgste_set_unlock(ptep, pgste);
 644out_pte:
 645	pte_unmap_unlock(*ptep, ptl);
 646}
 647
 648/*
 649 * this function is assumed to be called with mmap_sem held
 650 */
 651void __gmap_zap(unsigned long address, struct gmap *gmap)
 652{
 653	unsigned long *table, *segment_ptr;
 654	unsigned long segment, pgstev, ptev;
 655	struct gmap_pgtable *mp;
 656	struct page *page;
 657
 658	segment_ptr = gmap_table_walk(address, gmap);
 659	if (IS_ERR(segment_ptr))
 660		return;
 661	segment = *segment_ptr;
 662	if (segment & _SEGMENT_ENTRY_INVALID)
 663		return;
 664	page = pfn_to_page(segment >> PAGE_SHIFT);
 665	mp = (struct gmap_pgtable *) page->index;
 666	address = mp->vmaddr | (address & ~PMD_MASK);
 667	/* Page table is present */
 668	table = (unsigned long *)(segment & _SEGMENT_ENTRY_ORIGIN);
 669	table = table + ((address >> 12) & 0xff);
 670	pgstev = table[PTRS_PER_PTE];
 671	ptev = table[0];
 672	/* quick check, checked again with locks held */
 673	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
 674	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID)))
 675		gmap_zap_unused(gmap->mm, address);
 676}
 677EXPORT_SYMBOL_GPL(__gmap_zap);
 678
 679void gmap_discard(unsigned long from, unsigned long to, struct gmap *gmap)
 680{
 681
 682	unsigned long *table, address, size;
 683	struct vm_area_struct *vma;
 684	struct gmap_pgtable *mp;
 685	struct page *page;
 686
 687	down_read(&gmap->mm->mmap_sem);
 688	address = from;
 689	while (address < to) {
 690		/* Walk the gmap address space page table */
 691		table = gmap->table + ((address >> 53) & 0x7ff);
 692		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 693			address = (address + PMD_SIZE) & PMD_MASK;
 694			continue;
 695		}
 696		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 697		table = table + ((address >> 42) & 0x7ff);
 698		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 699			address = (address + PMD_SIZE) & PMD_MASK;
 700			continue;
 701		}
 702		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 703		table = table + ((address >> 31) & 0x7ff);
 704		if (unlikely(*table & _REGION_ENTRY_INVALID)) {
 705			address = (address + PMD_SIZE) & PMD_MASK;
 706			continue;
 707		}
 708		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
 709		table = table + ((address >> 20) & 0x7ff);
 710		if (unlikely(*table & _SEGMENT_ENTRY_INVALID)) {
 711			address = (address + PMD_SIZE) & PMD_MASK;
 712			continue;
 713		}
 714		page = pfn_to_page(*table >> PAGE_SHIFT);
 715		mp = (struct gmap_pgtable *) page->index;
 716		vma = find_vma(gmap->mm, mp->vmaddr);
 717		size = min(to - address, PMD_SIZE - (address & ~PMD_MASK));
 718		zap_page_range(vma, mp->vmaddr | (address & ~PMD_MASK),
 719			       size, NULL);
 720		address = (address + PMD_SIZE) & PMD_MASK;
 721	}
 722	up_read(&gmap->mm->mmap_sem);
 723}
 724EXPORT_SYMBOL_GPL(gmap_discard);
 725
 726static LIST_HEAD(gmap_notifier_list);
 727static DEFINE_SPINLOCK(gmap_notifier_lock);
 728
 729/**
 730 * gmap_register_ipte_notifier - register a pte invalidation callback
 731 * @nb: pointer to the gmap notifier block
 732 */
 733void gmap_register_ipte_notifier(struct gmap_notifier *nb)
 734{
 735	spin_lock(&gmap_notifier_lock);
 736	list_add(&nb->list, &gmap_notifier_list);
 737	spin_unlock(&gmap_notifier_lock);
 738}
 739EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
 740
 741/**
 742 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 743 * @nb: pointer to the gmap notifier block
 744 */
 745void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
 746{
 747	spin_lock(&gmap_notifier_lock);
 748	list_del_init(&nb->list);
 749	spin_unlock(&gmap_notifier_lock);
 750}
 751EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
 752
 753/**
 754 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 755 * @gmap: pointer to guest mapping meta data structure
 756 * @start: virtual address in the guest address space
 757 * @len: size of area
 758 *
 759 * Returns 0 if for each page in the given range a gmap mapping exists and
 760 * the invalidation notification could be set. If the gmap mapping is missing
 761 * for one or more pages -EFAULT is returned. If no memory could be allocated
 762 * -ENOMEM is returned. This function establishes missing page table entries.
 763 */
 764int gmap_ipte_notify(struct gmap *gmap, unsigned long start, unsigned long len)
 765{
 766	unsigned long addr;
 767	spinlock_t *ptl;
 768	pte_t *ptep, entry;
 769	pgste_t pgste;
 770	int rc = 0;
 771
 772	if ((start & ~PAGE_MASK) || (len & ~PAGE_MASK))
 773		return -EINVAL;
 774	down_read(&gmap->mm->mmap_sem);
 775	while (len) {
 776		/* Convert gmap address and connect the page tables */
 777		addr = __gmap_fault(start, gmap);
 778		if (IS_ERR_VALUE(addr)) {
 779			rc = addr;
 780			break;
 781		}
 782		/* Get the page mapped */
 783		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
 784			rc = -EFAULT;
 785			break;
 786		}
 787		/* Walk the process page table, lock and get pte pointer */
 788		ptep = get_locked_pte(gmap->mm, addr, &ptl);
 789		if (unlikely(!ptep))
 790			continue;
 791		/* Set notification bit in the pgste of the pte */
 792		entry = *ptep;
 793		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
 794			pgste = pgste_get_lock(ptep);
 795			pgste_val(pgste) |= PGSTE_IN_BIT;
 796			pgste_set_unlock(ptep, pgste);
 797			start += PAGE_SIZE;
 798			len -= PAGE_SIZE;
 799		}
 800		spin_unlock(ptl);
 801	}
 802	up_read(&gmap->mm->mmap_sem);
 803	return rc;
 804}
 805EXPORT_SYMBOL_GPL(gmap_ipte_notify);
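/*
 * Illustrative usage (not part of this file): a KVM-like user that wants to
 * be notified before the host invalidates the mapping of a guest prefix
 * page could request notification with something like
 *
 *	rc = gmap_ipte_notify(vcpu_gmap, prefix_gaddr, 2 * PAGE_SIZE);
 *
 * where vcpu_gmap and prefix_gaddr are hypothetical caller-side names.
 */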
 806
 807/**
 808 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 809 * @mm: pointer to the process mm_struct
 810 * @pte: pointer to the page table entry
 811 *
 812 * This function is assumed to be called with the page table lock held
 813 * for the pte to notify.
 814 */
 815void gmap_do_ipte_notify(struct mm_struct *mm, pte_t *pte)
 816{
 817	unsigned long segment_offset;
 818	struct gmap_notifier *nb;
 819	struct gmap_pgtable *mp;
 820	struct gmap_rmap *rmap;
 821	struct page *page;
 822
 823	segment_offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
 824	segment_offset = segment_offset * (4096 / sizeof(pte_t));
 825	page = pfn_to_page(__pa(pte) >> PAGE_SHIFT);
 826	mp = (struct gmap_pgtable *) page->index;
 827	spin_lock(&gmap_notifier_lock);
 828	list_for_each_entry(rmap, &mp->mapper, list) {
 829		list_for_each_entry(nb, &gmap_notifier_list, list)
 830			nb->notifier_call(rmap->gmap,
 831					  rmap->vmaddr + segment_offset);
 832	}
 833	spin_unlock(&gmap_notifier_lock);
 834}
 835
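/*
 * Page tables allocated with pgstes keep page->_mapcount at 0, while 2K
 * fragment tables use _mapcount as an allocation bitmap, so a mapcount of
 * zero identifies a pgste page table.
 */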
 836static inline int page_table_with_pgste(struct page *page)
 837{
 838	return atomic_read(&page->_mapcount) == 0;
 839}
 840
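/*
 * Allocate a full 4K page table: the lower 2K hold the ptes, the upper 2K
 * the pgstes. A struct gmap_pgtable is attached via page->index so that the
 * gmap code can find the process address and the reverse mappings.
 */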
 841static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
 842						    unsigned long vmaddr)
 843{
 844	struct page *page;
 845	unsigned long *table;
 846	struct gmap_pgtable *mp;
 847
 848	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
 849	if (!page)
 850		return NULL;
 851	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
 852	if (!mp) {
 853		__free_page(page);
 854		return NULL;
 855	}
 856	if (!pgtable_page_ctor(page)) {
 857		kfree(mp);
 858		__free_page(page);
 859		return NULL;
 860	}
 861	mp->vmaddr = vmaddr & PMD_MASK;
 862	INIT_LIST_HEAD(&mp->mapper);
 863	page->index = (unsigned long) mp;
 864	atomic_set(&page->_mapcount, 0);
 865	table = (unsigned long *) page_to_phys(page);
 866	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
 867	clear_table(table + PTRS_PER_PTE, PGSTE_HR_BIT | PGSTE_HC_BIT,
 868		    PAGE_SIZE/2);
 869	return table;
 870}
 871
 872static inline void page_table_free_pgste(unsigned long *table)
 873{
 874	struct page *page;
 875	struct gmap_pgtable *mp;
 876
 877	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
 878	mp = (struct gmap_pgtable *) page->index;
 879	BUG_ON(!list_empty(&mp->mapper));
 880	pgtable_page_dtor(page);
 881	atomic_set(&page->_mapcount, -1);
 882	kfree(mp);
 883	__free_page(page);
 884}
 885
 886static inline unsigned long page_table_reset_pte(struct mm_struct *mm,
 887			pmd_t *pmd, unsigned long addr, unsigned long end)
 888{
 889	pte_t *start_pte, *pte;
 890	spinlock_t *ptl;
 891	pgste_t pgste;
 892
 893	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 894	pte = start_pte;
 895	do {
 896		pgste = pgste_get_lock(pte);
 897		pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
 898		pgste_set_unlock(pte, pgste);
 899	} while (pte++, addr += PAGE_SIZE, addr != end);
 900	pte_unmap_unlock(start_pte, ptl);
 901
 902	return addr;
 903}
 904
 905static inline unsigned long page_table_reset_pmd(struct mm_struct *mm,
 906			pud_t *pud, unsigned long addr, unsigned long end)
 907{
 908	unsigned long next;
 909	pmd_t *pmd;
 910
 911	pmd = pmd_offset(pud, addr);
 912	do {
 913		next = pmd_addr_end(addr, end);
 914		if (pmd_none_or_clear_bad(pmd))
 915			continue;
 916		next = page_table_reset_pte(mm, pmd, addr, next);
 917	} while (pmd++, addr = next, addr != end);
 918
 919	return addr;
 920}
 921
 922static inline unsigned long page_table_reset_pud(struct mm_struct *mm,
 923			pgd_t *pgd, unsigned long addr, unsigned long end)
 924{
 925	unsigned long next;
 926	pud_t *pud;
 927
 928	pud = pud_offset(pgd, addr);
 929	do {
 930		next = pud_addr_end(addr, end);
 931		if (pud_none_or_clear_bad(pud))
 932			continue;
 933		next = page_table_reset_pmd(mm, pud, addr, next);
 934	} while (pud++, addr = next, addr != end);
 935
 936	return addr;
 937}
 938
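/*
 * Clear the guest usage state (_PGSTE_GPS_USAGE_MASK) in the pgstes of all
 * pages in the given range of the process address space.
 */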
 939void page_table_reset_pgste(struct mm_struct *mm,
 940			unsigned long start, unsigned long end)
 941{
 942	unsigned long addr, next;
 943	pgd_t *pgd;
 944
 945	addr = start;
 946	down_read(&mm->mmap_sem);
 947	pgd = pgd_offset(mm, addr);
 948	do {
 949		next = pgd_addr_end(addr, end);
 950		if (pgd_none_or_clear_bad(pgd))
 951			continue;
 952		next = page_table_reset_pud(mm, pgd, addr, next);
 953	} while (pgd++, addr = next, addr != end);
 954	up_read(&mm->mmap_sem);
 955}
 956EXPORT_SYMBOL(page_table_reset_pgste);
 957
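/*
 * Set the guest storage key for the page at @addr: the access control bits
 * and the fetch protection bit are propagated to the real storage key of a
 * mapped page, while referenced and changed state is merged into the pgste.
 * The nq flag selects the non-quiescing variant of the key operation.
 */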
 958int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 959			  unsigned long key, bool nq)
 960{
 961	spinlock_t *ptl;
 962	pgste_t old, new;
 963	pte_t *ptep;
 964
 965	down_read(&mm->mmap_sem);
 966	ptep = get_locked_pte(mm, addr, &ptl);
 967	if (unlikely(!ptep)) {
 968		up_read(&mm->mmap_sem);
 969		return -EFAULT;
 970	}
 971
 972	new = old = pgste_get_lock(ptep);
 973	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
 974			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
 975	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
 976	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 977	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 978		unsigned long address, bits, skey;
 979
 980		address = pte_val(*ptep) & PAGE_MASK;
 981		skey = (unsigned long) page_get_storage_key(address);
 982		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 983		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
 984		/* Set storage key ACC and FP */
 985		page_set_storage_key(address, skey, !nq);
 986		/* Merge host changed & referenced into pgste  */
 987		pgste_val(new) |= bits << 52;
 988	}
 989	/* changing the guest storage key is considered a change of the page */
 990	if ((pgste_val(new) ^ pgste_val(old)) &
 991	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
 992		pgste_val(new) |= PGSTE_HC_BIT;
 993
 994	pgste_set_unlock(ptep, new);
 995	pte_unmap_unlock(*ptep, ptl);
 996	up_read(&mm->mmap_sem);
 997	return 0;
 998}
 999EXPORT_SYMBOL(set_guest_storage_key);
1000
1001#else /* CONFIG_PGSTE */
1002
1003static inline int page_table_with_pgste(struct page *page)
1004{
1005	return 0;
1006}
1007
1008static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
1009						    unsigned long vmaddr)
1010{
1011	return NULL;
1012}
1013
1014static inline void page_table_free_pgste(unsigned long *table)
1015{
1016}
1017
1018static inline void gmap_disconnect_pgtable(struct mm_struct *mm,
1019					   unsigned long *table)
1020{
1021}
1022
1023#endif /* CONFIG_PGSTE */
1024
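/*
 * Atomically xor bits into *v using a cmpxchg loop and return the new value.
 */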
1025static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
1026{
1027	unsigned int old, new;
1028
1029	do {
1030		old = atomic_read(v);
1031		new = old ^ bits;
1032	} while (atomic_cmpxchg(v, old, new) != old);
1033	return new;
1034}
1035
1036/*
1037 * page table entry allocation/free routines.
1038 */
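/*
 * A page table needs only 2K (256 entries of 8 bytes), so two page tables
 * are carved out of each 4K page. page->_mapcount serves as a bitmap: the
 * low bits (FRAG_MASK) track allocated fragments, the same bits shifted
 * left by four mark fragments that are queued for RCU freeing.
 */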
1039unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
1040{
1041	unsigned long *uninitialized_var(table);
1042	struct page *uninitialized_var(page);
1043	unsigned int mask, bit;
1044
1045	if (mm_has_pgste(mm))
1046		return page_table_alloc_pgste(mm, vmaddr);
1047	/* Allocate fragments of a 4K page as 1K/2K page table */
1048	spin_lock_bh(&mm->context.list_lock);
1049	mask = FRAG_MASK;
1050	if (!list_empty(&mm->context.pgtable_list)) {
1051		page = list_first_entry(&mm->context.pgtable_list,
1052					struct page, lru);
1053		table = (unsigned long *) page_to_phys(page);
1054		mask = atomic_read(&page->_mapcount);
1055		mask = mask | (mask >> 4);
1056	}
1057	if ((mask & FRAG_MASK) == FRAG_MASK) {
1058		spin_unlock_bh(&mm->context.list_lock);
1059		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
1060		if (!page)
1061			return NULL;
1062		if (!pgtable_page_ctor(page)) {
1063			__free_page(page);
1064			return NULL;
1065		}
1066		atomic_set(&page->_mapcount, 1);
1067		table = (unsigned long *) page_to_phys(page);
1068		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
1069		spin_lock_bh(&mm->context.list_lock);
1070		list_add(&page->lru, &mm->context.pgtable_list);
1071	} else {
1072		for (bit = 1; mask & bit; bit <<= 1)
1073			table += PTRS_PER_PTE;
1074		mask = atomic_xor_bits(&page->_mapcount, bit);
1075		if ((mask & FRAG_MASK) == FRAG_MASK)
1076			list_del(&page->lru);
1077	}
1078	spin_unlock_bh(&mm->context.list_lock);
1079	return table;
1080}
1081
1082void page_table_free(struct mm_struct *mm, unsigned long *table)
1083{
1084	struct page *page;
1085	unsigned int bit, mask;
1086
1087	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1088	if (page_table_with_pgste(page)) {
1089		gmap_disconnect_pgtable(mm, table);
1090		return page_table_free_pgste(table);
1091	}
1092	/* Free 1K/2K page table fragment of a 4K page */
1093	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
1094	spin_lock_bh(&mm->context.list_lock);
1095	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
1096		list_del(&page->lru);
1097	mask = atomic_xor_bits(&page->_mapcount, bit);
1098	if (mask & FRAG_MASK)
1099		list_add(&page->lru, &mm->context.pgtable_list);
1100	spin_unlock_bh(&mm->context.list_lock);
1101	if (mask == 0) {
1102		pgtable_page_dtor(page);
1103		atomic_set(&page->_mapcount, -1);
1104		__free_page(page);
1105	}
1106}
1107
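/*
 * RCU side of page_table_free_rcu(): bit encodes either FRAG_MASK for a
 * full pgste page table or the 2K fragment that may now be released.
 */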
1108static void __page_table_free_rcu(void *table, unsigned bit)
1109{
1110	struct page *page;
1111
1112	if (bit == FRAG_MASK)
1113		return page_table_free_pgste(table);
1114	/* Free 1K/2K page table fragment of a 4K page */
1115	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1116	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
1117		pgtable_page_dtor(page);
1118		atomic_set(&page->_mapcount, -1);
1119		__free_page(page);
1120	}
1121}
1122
1123void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
1124{
1125	struct mm_struct *mm;
1126	struct page *page;
1127	unsigned int bit, mask;
1128
1129	mm = tlb->mm;
1130	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1131	if (page_table_with_pgste(page)) {
1132		gmap_disconnect_pgtable(mm, table);
1133		table = (unsigned long *) (__pa(table) | FRAG_MASK);
1134		tlb_remove_table(tlb, table);
1135		return;
1136	}
1137	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
1138	spin_lock_bh(&mm->context.list_lock);
1139	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
1140		list_del(&page->lru);
1141	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
1142	if (mask & FRAG_MASK)
1143		list_add_tail(&page->lru, &mm->context.pgtable_list);
1144	spin_unlock_bh(&mm->context.list_lock);
1145	table = (unsigned long *) (__pa(table) | (bit << 4));
1146	tlb_remove_table(tlb, table);
1147}
1148
1149static void __tlb_remove_table(void *_table)
1150{
1151	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
1152	void *table = (void *)((unsigned long) _table & ~mask);
1153	unsigned type = (unsigned long) _table & mask;
1154
1155	if (type)
1156		__page_table_free_rcu(table, type);
1157	else
1158		free_pages((unsigned long) table, ALLOC_ORDER);
1159}
1160
1161static void tlb_remove_table_smp_sync(void *arg)
1162{
1163	/* Simply deliver the interrupt */
1164}
1165
1166static void tlb_remove_table_one(void *table)
1167{
1168	/*
1169	 * This isn't an RCU grace period and hence the page-tables cannot be
1170	 * assumed to be actually RCU-freed.
1171	 *
1172	 * It is however sufficient for software page-table walkers that rely
1173	 * on IRQ disabling. See the comment near struct mmu_table_batch.
1174	 */
1175	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
1176	__tlb_remove_table(table);
1177}
1178
1179static void tlb_remove_table_rcu(struct rcu_head *head)
1180{
1181	struct mmu_table_batch *batch;
1182	int i;
1183
1184	batch = container_of(head, struct mmu_table_batch, rcu);
1185
1186	for (i = 0; i < batch->nr; i++)
1187		__tlb_remove_table(batch->tables[i]);
1188
1189	free_page((unsigned long)batch);
1190}
1191
1192void tlb_table_flush(struct mmu_gather *tlb)
1193{
1194	struct mmu_table_batch **batch = &tlb->batch;
1195
1196	if (*batch) {
1197		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
1198		*batch = NULL;
1199	}
1200}
1201
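/*
 * Queue a page table for batched RCU freeing. If no batch page can be
 * allocated, flush the mm and synchronize with concurrent table walkers
 * via an IPI before freeing the table directly.
 */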
1202void tlb_remove_table(struct mmu_gather *tlb, void *table)
1203{
1204	struct mmu_table_batch **batch = &tlb->batch;
1205
1206	tlb->mm->context.flush_mm = 1;
1207	if (*batch == NULL) {
1208		*batch = (struct mmu_table_batch *)
1209			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
1210		if (*batch == NULL) {
1211			__tlb_flush_mm_lazy(tlb->mm);
1212			tlb_remove_table_one(table);
1213			return;
1214		}
1215		(*batch)->nr = 0;
1216	}
1217	(*batch)->tables[(*batch)->nr++] = table;
1218	if ((*batch)->nr == MAX_TABLE_BATCH)
1219		tlb_flush_mmu(tlb);
1220}
1221
1222#ifdef CONFIG_TRANSPARENT_HUGEPAGE
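/*
 * Transparent huge pages cannot be used together with pgste page tables,
 * so before an mm is switched over for SIE all huge mappings are split and
 * further collapsing is disabled via VM_NOHUGEPAGE.
 */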
1223static inline void thp_split_vma(struct vm_area_struct *vma)
1224{
1225	unsigned long addr;
1226
1227	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
1228		follow_page(vma, addr, FOLL_SPLIT);
1229}
1230
1231static inline void thp_split_mm(struct mm_struct *mm)
1232{
1233	struct vm_area_struct *vma;
1234
1235	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
1236		thp_split_vma(vma);
1237		vma->vm_flags &= ~VM_HUGEPAGE;
1238		vma->vm_flags |= VM_NOHUGEPAGE;
1239	}
1240	mm->def_flags |= VM_NOHUGEPAGE;
1241}
1242#else
1243static inline void thp_split_mm(struct mm_struct *mm)
1244{
1245}
1246#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1247
1248static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
1249				struct mm_struct *mm, pud_t *pud,
1250				unsigned long addr, unsigned long end)
1251{
1252	unsigned long next, *table, *new;
1253	struct page *page;
1254	pmd_t *pmd;
1255
1256	pmd = pmd_offset(pud, addr);
1257	do {
1258		next = pmd_addr_end(addr, end);
1259again:
1260		if (pmd_none_or_clear_bad(pmd))
1261			continue;
1262		table = (unsigned long *) pmd_deref(*pmd);
1263		page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
1264		if (page_table_with_pgste(page))
1265			continue;
1266		/* Allocate new page table with pgstes */
1267		new = page_table_alloc_pgste(mm, addr);
1268		if (!new)
1269			return -ENOMEM;
1270
1271		spin_lock(&mm->page_table_lock);
1272		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
1273			/* Nuke pmd entry pointing to the "short" page table */
1274			pmdp_flush_lazy(mm, addr, pmd);
1275			pmd_clear(pmd);
1276			/* Copy ptes from old table to new table */
1277			memcpy(new, table, PAGE_SIZE/2);
1278			clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
1279			/* Establish new table */
1280			pmd_populate(mm, pmd, (pte_t *) new);
1281			/* Free old table with rcu, there might be a walker! */
1282			page_table_free_rcu(tlb, table);
1283			new = NULL;
1284		}
1285		spin_unlock(&mm->page_table_lock);
1286		if (new) {
1287			page_table_free_pgste(new);
1288			goto again;
1289		}
1290	} while (pmd++, addr = next, addr != end);
1291
1292	return addr;
1293}
1294
1295static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
1296				   struct mm_struct *mm, pgd_t *pgd,
1297				   unsigned long addr, unsigned long end)
1298{
1299	unsigned long next;
1300	pud_t *pud;
1301
1302	pud = pud_offset(pgd, addr);
1303	do {
1304		next = pud_addr_end(addr, end);
1305		if (pud_none_or_clear_bad(pud))
1306			continue;
1307		next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
1308		if (unlikely(IS_ERR_VALUE(next)))
1309			return next;
1310	} while (pud++, addr = next, addr != end);
1311
1312	return addr;
1313}
1314
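/*
 * Walk the whole address range and replace every 2K page table with a full
 * 4K table that has room for pgstes, copying the existing ptes over.
 */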
1315static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
1316					unsigned long addr, unsigned long end)
1317{
1318	unsigned long next;
1319	pgd_t *pgd;
1320
1321	pgd = pgd_offset(mm, addr);
1322	do {
1323		next = pgd_addr_end(addr, end);
1324		if (pgd_none_or_clear_bad(pgd))
1325			continue;
1326		next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
1327		if (unlikely(IS_ERR_VALUE(next)))
1328			return next;
1329	} while (pgd++, addr = next, addr != end);
1330
1331	return 0;
1332}
1333
1334/*
1335 * switch on pgstes for the current userspace process (for kvm)
1336 */
1337int s390_enable_sie(void)
1338{
1339	struct task_struct *tsk = current;
1340	struct mm_struct *mm = tsk->mm;
1341	struct mmu_gather tlb;
1342
1343	/* Do we have pgstes? if yes, we are done */
1344	if (mm_has_pgste(tsk->mm))
1345		return 0;
1346
1347	down_write(&mm->mmap_sem);
1348	/* split thp mappings and disable thp for future mappings */
1349	thp_split_mm(mm);
1350	/* Reallocate the page tables with pgstes */
1351	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
1352	if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
1353		mm->context.has_pgste = 1;
1354	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
1355	up_write(&mm->mmap_sem);
1356	return mm->context.has_pgste ? 0 : -ENOMEM;
1357}
1358EXPORT_SYMBOL_GPL(s390_enable_sie);
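/*
 * Illustrative usage (not part of this file): the host virtualization code
 * would typically do
 *
 *	if (s390_enable_sie())
 *		return -ENOMEM;
 *
 * once per mm before creating a gmap and entering SIE.
 */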
1359
1360#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1361int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
1362			   pmd_t *pmdp)
1363{
1364	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1365	/* No need to flush TLB
1366	 * On s390 reference bits are in storage key and never in TLB */
1367	return pmdp_test_and_clear_young(vma, address, pmdp);
1368}
1369
1370int pmdp_set_access_flags(struct vm_area_struct *vma,
1371			  unsigned long address, pmd_t *pmdp,
1372			  pmd_t entry, int dirty)
1373{
1374	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1375
1376	if (pmd_same(*pmdp, entry))
1377		return 0;
1378	pmdp_invalidate(vma, address, pmdp);
1379	set_pmd_at(vma->vm_mm, address, pmdp, entry);
1380	return 1;
1381}
1382
1383static void pmdp_splitting_flush_sync(void *arg)
1384{
1385	/* Simply deliver the interrupt */
1386}
1387
1388void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
1389			  pmd_t *pmdp)
1390{
1391	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
1392	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
1393			      (unsigned long *) pmdp)) {
1394		/* need to serialize against gup-fast (IRQ disabled) */
1395		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
1396	}
1397}
1398
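/*
 * Deposit/withdraw of pre-allocated page tables for huge pmds: deposited
 * tables are kept on a list headed at pmd_huge_pte() so that they can be
 * handed back when the huge mapping is split or zapped.
 */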
1399void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
1400				pgtable_t pgtable)
1401{
1402	struct list_head *lh = (struct list_head *) pgtable;
1403
1404	assert_spin_locked(pmd_lockptr(mm, pmdp));
1405
1406	/* FIFO */
1407	if (!pmd_huge_pte(mm, pmdp))
1408		INIT_LIST_HEAD(lh);
1409	else
1410		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
1411	pmd_huge_pte(mm, pmdp) = pgtable;
1412}
1413
1414pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
1415{
1416	struct list_head *lh;
1417	pgtable_t pgtable;
1418	pte_t *ptep;
1419
1420	assert_spin_locked(pmd_lockptr(mm, pmdp));
1421
1422	/* FIFO */
1423	pgtable = pmd_huge_pte(mm, pmdp);
1424	lh = (struct list_head *) pgtable;
1425	if (list_empty(lh))
1426		pmd_huge_pte(mm, pmdp) = NULL;
1427	else {
1428		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
1429		list_del(lh);
1430	}
1431	ptep = (pte_t *) pgtable;
1432	pte_val(*ptep) = _PAGE_INVALID;
1433	ptep++;
1434	pte_val(*ptep) = _PAGE_INVALID;
1435	return pgtable;
1436}
1437#endif /* CONFIG_TRANSPARENT_HUGEPAGE */