v6.13.7
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 *    Copyright IBM Corp. 2007, 2011
   4 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
   5 */
   6
   7#include <linux/sched.h>
   8#include <linux/kernel.h>
   9#include <linux/errno.h>
  10#include <linux/gfp.h>
  11#include <linux/mm.h>
  12#include <linux/swap.h>
  13#include <linux/smp.h>
  14#include <linux/spinlock.h>
  15#include <linux/rcupdate.h>
  16#include <linux/slab.h>
  17#include <linux/swapops.h>
  18#include <linux/sysctl.h>
  19#include <linux/ksm.h>
  20#include <linux/mman.h>
  21
  22#include <asm/tlb.h>
  23#include <asm/tlbflush.h>
  24#include <asm/mmu_context.h>
  25#include <asm/page-states.h>
  26
  27pgprot_t pgprot_writecombine(pgprot_t prot)
  28{
  29	/*
  30	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
  31	 * once at init and only read afterwards.
  32	 */
  33	return __pgprot(pgprot_val(prot) | mio_wb_bit_mask);
  34}
  35EXPORT_SYMBOL_GPL(pgprot_writecombine);
  36
  37pgprot_t pgprot_writethrough(pgprot_t prot)
  38{
  39	/*
  40	 * mio_wb_bit_mask may be set on a different CPU, but it is only set
  41	 * once at init and only read afterwards.
  42	 */
  43	return __pgprot(pgprot_val(prot) & ~mio_wb_bit_mask);
  44}
  45EXPORT_SYMBOL_GPL(pgprot_writethrough);
  46
  47static inline void ptep_ipte_local(struct mm_struct *mm, unsigned long addr,
  48				   pte_t *ptep, int nodat)
  49{
  50	unsigned long opt, asce;
  51
  52	if (MACHINE_HAS_TLB_GUEST) {
  53		opt = 0;
  54		asce = READ_ONCE(mm->context.gmap_asce);
  55		if (asce == 0UL || nodat)
  56			opt |= IPTE_NODAT;
  57		if (asce != -1UL) {
  58			asce = asce ? : mm->context.asce;
  59			opt |= IPTE_GUEST_ASCE;
  60		}
  61		__ptep_ipte(addr, ptep, opt, asce, IPTE_LOCAL);
  62	} else {
  63		__ptep_ipte(addr, ptep, 0, 0, IPTE_LOCAL);
  64	}
  65}
  66
  67static inline void ptep_ipte_global(struct mm_struct *mm, unsigned long addr,
  68				    pte_t *ptep, int nodat)
  69{
  70	unsigned long opt, asce;
  71
  72	if (MACHINE_HAS_TLB_GUEST) {
  73		opt = 0;
  74		asce = READ_ONCE(mm->context.gmap_asce);
  75		if (asce == 0UL || nodat)
  76			opt |= IPTE_NODAT;
  77		if (asce != -1UL) {
  78			asce = asce ? : mm->context.asce;
  79			opt |= IPTE_GUEST_ASCE;
  80		}
  81		__ptep_ipte(addr, ptep, opt, asce, IPTE_GLOBAL);
  82	} else {
  83		__ptep_ipte(addr, ptep, 0, 0, IPTE_GLOBAL);
  84	}
  85}
  86
  87static inline pte_t ptep_flush_direct(struct mm_struct *mm,
  88				      unsigned long addr, pte_t *ptep,
  89				      int nodat)
  90{
  91	pte_t old;
  92
  93	old = *ptep;
  94	if (unlikely(pte_val(old) & _PAGE_INVALID))
  95		return old;
  96	atomic_inc(&mm->context.flush_count);
  97	if (MACHINE_HAS_TLB_LC &&
  98	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
  99		ptep_ipte_local(mm, addr, ptep, nodat);
 100	else
 101		ptep_ipte_global(mm, addr, ptep, nodat);
 102	atomic_dec(&mm->context.flush_count);
 103	return old;
 104}
 105
 106static inline pte_t ptep_flush_lazy(struct mm_struct *mm,
 107				    unsigned long addr, pte_t *ptep,
 108				    int nodat)
 109{
 110	pte_t old;
 111
 112	old = *ptep;
 113	if (unlikely(pte_val(old) & _PAGE_INVALID))
 114		return old;
 115	atomic_inc(&mm->context.flush_count);
 116	if (cpumask_equal(&mm->context.cpu_attach_mask,
 117			  cpumask_of(smp_processor_id()))) {
 118		set_pte(ptep, set_pte_bit(*ptep, __pgprot(_PAGE_INVALID)));
 119		mm->context.flush_mm = 1;
 120	} else
 121		ptep_ipte_global(mm, addr, ptep, nodat);
 122	atomic_dec(&mm->context.flush_count);
 123	return old;
 124}
 125
 126static inline pgste_t pgste_get_lock(pte_t *ptep)
 127{
 128	unsigned long value = 0;
 129#ifdef CONFIG_PGSTE
 130	unsigned long *ptr = (unsigned long *)(ptep + PTRS_PER_PTE);
 131
 132	do {
 133		value = __atomic64_or_barrier(PGSTE_PCL_BIT, ptr);
 134	} while (value & PGSTE_PCL_BIT);
 135	value |= PGSTE_PCL_BIT;
 136#endif
 137	return __pgste(value);
 138}
 139
 140static inline void pgste_set_unlock(pte_t *ptep, pgste_t pgste)
 141{
 142#ifdef CONFIG_PGSTE
 143	barrier();
 144	WRITE_ONCE(*(unsigned long *)(ptep + PTRS_PER_PTE), pgste_val(pgste) & ~PGSTE_PCL_BIT);
 145#endif
 146}
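/*
 * Background note (not part of the upstream file): with CONFIG_PGSTE a page
 * table occupies a full 4K page, and the PGSTE belonging to a pte is stored
 * PTRS_PER_PTE entries behind it.  pgste_get_lock() spins with an atomic OR
 * on PGSTE_PCL_BIT, so that bit acts as a small per-entry lock, and
 * pgste_set_unlock() releases it by writing back the PGSTE with the bit
 * cleared.
 */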
 147
 148static inline pgste_t pgste_get(pte_t *ptep)
 149{
 150	unsigned long pgste = 0;
 151#ifdef CONFIG_PGSTE
 152	pgste = *(unsigned long *)(ptep + PTRS_PER_PTE);
 153#endif
 154	return __pgste(pgste);
 155}
 156
 157static inline void pgste_set(pte_t *ptep, pgste_t pgste)
 158{
 159#ifdef CONFIG_PGSTE
 160	*(pgste_t *)(ptep + PTRS_PER_PTE) = pgste;
 161#endif
 162}
 163
 164static inline pgste_t pgste_update_all(pte_t pte, pgste_t pgste,
 165				       struct mm_struct *mm)
 166{
 167#ifdef CONFIG_PGSTE
 168	unsigned long address, bits, skey;
 169
 170	if (!mm_uses_skeys(mm) || pte_val(pte) & _PAGE_INVALID)
 171		return pgste;
 172	address = pte_val(pte) & PAGE_MASK;
 173	skey = (unsigned long) page_get_storage_key(address);
 174	bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 175	/* Transfer page changed & referenced bit to guest bits in pgste */
 176	pgste_val(pgste) |= bits << 48;		/* GR bit & GC bit */
 177	/* Copy page access key and fetch protection bit to pgste */
 178	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 179	pgste_val(pgste) |= (skey & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 180#endif
 181	return pgste;
 182
 183}
 184
 185static inline void pgste_set_key(pte_t *ptep, pgste_t pgste, pte_t entry,
 186				 struct mm_struct *mm)
 187{
 188#ifdef CONFIG_PGSTE
 189	unsigned long address;
 190	unsigned long nkey;
 191
 192	if (!mm_uses_skeys(mm) || pte_val(entry) & _PAGE_INVALID)
 193		return;
 194	VM_BUG_ON(!(pte_val(*ptep) & _PAGE_INVALID));
 195	address = pte_val(entry) & PAGE_MASK;
 196	/*
 197	 * Set page access key and fetch protection bit from pgste.
 198	 * The guest C/R information is still in the PGSTE, set real
 199	 * key C/R to 0.
 200	 */
 201	nkey = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
 202	nkey |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
 203	page_set_storage_key(address, nkey, 0);
 204#endif
 205}
 206
 207static inline pgste_t pgste_set_pte(pte_t *ptep, pgste_t pgste, pte_t entry)
 208{
 209#ifdef CONFIG_PGSTE
 210	if ((pte_val(entry) & _PAGE_PRESENT) &&
 211	    (pte_val(entry) & _PAGE_WRITE) &&
 212	    !(pte_val(entry) & _PAGE_INVALID)) {
 213		if (!MACHINE_HAS_ESOP) {
 214			/*
 215			 * Without enhanced suppression-on-protection force
 216			 * the dirty bit on for all writable ptes.
 217			 */
 218			entry = set_pte_bit(entry, __pgprot(_PAGE_DIRTY));
 219			entry = clear_pte_bit(entry, __pgprot(_PAGE_PROTECT));
 220		}
 221		if (!(pte_val(entry) & _PAGE_PROTECT))
 222			/* This pte allows write access, set user-dirty */
 223			pgste_val(pgste) |= PGSTE_UC_BIT;
 224	}
 225#endif
 226	set_pte(ptep, entry);
 227	return pgste;
 228}
 229
 230static inline pgste_t pgste_pte_notify(struct mm_struct *mm,
 231				       unsigned long addr,
 232				       pte_t *ptep, pgste_t pgste)
 233{
 234#ifdef CONFIG_PGSTE
 235	unsigned long bits;
 236
 237	bits = pgste_val(pgste) & (PGSTE_IN_BIT | PGSTE_VSIE_BIT);
 238	if (bits) {
 239		pgste_val(pgste) ^= bits;
 240		ptep_notify(mm, addr, ptep, bits);
 241	}
 242#endif
 243	return pgste;
 244}
 245
 246static inline pgste_t ptep_xchg_start(struct mm_struct *mm,
 247				      unsigned long addr, pte_t *ptep)
 248{
 249	pgste_t pgste = __pgste(0);
 250
 251	if (mm_has_pgste(mm)) {
 252		pgste = pgste_get_lock(ptep);
 253		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
 254	}
 255	return pgste;
 256}
 257
 258static inline pte_t ptep_xchg_commit(struct mm_struct *mm,
 259				    unsigned long addr, pte_t *ptep,
 260				    pgste_t pgste, pte_t old, pte_t new)
 261{
 262	if (mm_has_pgste(mm)) {
 263		if (pte_val(old) & _PAGE_INVALID)
 264			pgste_set_key(ptep, pgste, new, mm);
 265		if (pte_val(new) & _PAGE_INVALID) {
 266			pgste = pgste_update_all(old, pgste, mm);
 267			if ((pgste_val(pgste) & _PGSTE_GPS_USAGE_MASK) ==
 268			    _PGSTE_GPS_USAGE_UNUSED)
 269				old = set_pte_bit(old, __pgprot(_PAGE_UNUSED));
 270		}
 271		pgste = pgste_set_pte(ptep, pgste, new);
 272		pgste_set_unlock(ptep, pgste);
 273	} else {
 274		set_pte(ptep, new);
 275	}
 276	return old;
 277}
 278
 279pte_t ptep_xchg_direct(struct mm_struct *mm, unsigned long addr,
 280		       pte_t *ptep, pte_t new)
 281{
 282	pgste_t pgste;
 283	pte_t old;
 284	int nodat;
 285
 286	preempt_disable();
 287	pgste = ptep_xchg_start(mm, addr, ptep);
 288	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 289	old = ptep_flush_direct(mm, addr, ptep, nodat);
 290	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 291	preempt_enable();
 292	return old;
 293}
 294EXPORT_SYMBOL(ptep_xchg_direct);
 295
 296/*
 297 * Caller must check that new PTE only differs in _PAGE_PROTECT HW bit, so that
 298 * RDP can be used instead of IPTE. See also comments at pte_allow_rdp().
 299 */
 300void ptep_reset_dat_prot(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 301			 pte_t new)
 302{
 303	preempt_disable();
 304	atomic_inc(&mm->context.flush_count);
 305	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 306		__ptep_rdp(addr, ptep, 0, 0, 1);
 307	else
 308		__ptep_rdp(addr, ptep, 0, 0, 0);
 309	/*
 310	 * PTE is not invalidated by RDP, only _PAGE_PROTECT is cleared. That
 311	 * means it is still valid and active, and must not be changed according
 312	 * to the architecture. But writing a new value that only differs in SW
 313	 * bits is allowed.
 314	 */
 315	set_pte(ptep, new);
 316	atomic_dec(&mm->context.flush_count);
 317	preempt_enable();
 318}
 319EXPORT_SYMBOL(ptep_reset_dat_prot);
 320
 321pte_t ptep_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 322		     pte_t *ptep, pte_t new)
 323{
 324	pgste_t pgste;
 325	pte_t old;
 326	int nodat;
 327
 328	preempt_disable();
 329	pgste = ptep_xchg_start(mm, addr, ptep);
 330	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 331	old = ptep_flush_lazy(mm, addr, ptep, nodat);
 332	old = ptep_xchg_commit(mm, addr, ptep, pgste, old, new);
 333	preempt_enable();
 334	return old;
 335}
 336EXPORT_SYMBOL(ptep_xchg_lazy);
 337
 338pte_t ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr,
 339			     pte_t *ptep)
 340{
 341	pgste_t pgste;
 342	pte_t old;
 343	int nodat;
 344	struct mm_struct *mm = vma->vm_mm;
 345
 346	preempt_disable();
 347	pgste = ptep_xchg_start(mm, addr, ptep);
 348	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 349	old = ptep_flush_lazy(mm, addr, ptep, nodat);
 350	if (mm_has_pgste(mm)) {
 351		pgste = pgste_update_all(old, pgste, mm);
 352		pgste_set(ptep, pgste);
 353	}
 354	return old;
 355}
 356
 357void ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr,
 358			     pte_t *ptep, pte_t old_pte, pte_t pte)
 359{
 360	pgste_t pgste;
 361	struct mm_struct *mm = vma->vm_mm;
 362
 363	if (!MACHINE_HAS_NX)
 364		pte = clear_pte_bit(pte, __pgprot(_PAGE_NOEXEC));
 365	if (mm_has_pgste(mm)) {
 366		pgste = pgste_get(ptep);
 367		pgste_set_key(ptep, pgste, pte, mm);
 368		pgste = pgste_set_pte(ptep, pgste, pte);
 369		pgste_set_unlock(ptep, pgste);
 370	} else {
 371		set_pte(ptep, pte);
 372	}
 373	preempt_enable();
 374}
 375
 376static inline void pmdp_idte_local(struct mm_struct *mm,
 377				   unsigned long addr, pmd_t *pmdp)
 378{
 379	if (MACHINE_HAS_TLB_GUEST)
 380		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 381			    mm->context.asce, IDTE_LOCAL);
 382	else
 383		__pmdp_idte(addr, pmdp, 0, 0, IDTE_LOCAL);
 384	if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 385		gmap_pmdp_idte_local(mm, addr);
 386}
 387
 388static inline void pmdp_idte_global(struct mm_struct *mm,
 389				    unsigned long addr, pmd_t *pmdp)
 390{
 391	if (MACHINE_HAS_TLB_GUEST) {
 392		__pmdp_idte(addr, pmdp, IDTE_NODAT | IDTE_GUEST_ASCE,
 393			    mm->context.asce, IDTE_GLOBAL);
 394		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 395			gmap_pmdp_idte_global(mm, addr);
 396	} else if (MACHINE_HAS_IDTE) {
 397		__pmdp_idte(addr, pmdp, 0, 0, IDTE_GLOBAL);
 398		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 399			gmap_pmdp_idte_global(mm, addr);
 400	} else {
 401		__pmdp_csp(pmdp);
 402		if (mm_has_pgste(mm) && mm->context.allow_gmap_hpage_1m)
 403			gmap_pmdp_csp(mm, addr);
 404	}
 405}
 406
 407static inline pmd_t pmdp_flush_direct(struct mm_struct *mm,
 408				      unsigned long addr, pmd_t *pmdp)
 409{
 410	pmd_t old;
 411
 412	old = *pmdp;
 413	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
 414		return old;
 415	atomic_inc(&mm->context.flush_count);
 416	if (MACHINE_HAS_TLB_LC &&
 417	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 418		pmdp_idte_local(mm, addr, pmdp);
 419	else
 420		pmdp_idte_global(mm, addr, pmdp);
 421	atomic_dec(&mm->context.flush_count);
 422	return old;
 423}
 424
 425static inline pmd_t pmdp_flush_lazy(struct mm_struct *mm,
 426				    unsigned long addr, pmd_t *pmdp)
 427{
 428	pmd_t old;
 429
 430	old = *pmdp;
 431	if (pmd_val(old) & _SEGMENT_ENTRY_INVALID)
 432		return old;
 433	atomic_inc(&mm->context.flush_count);
 434	if (cpumask_equal(&mm->context.cpu_attach_mask,
 435			  cpumask_of(smp_processor_id()))) {
 436		set_pmd(pmdp, set_pmd_bit(*pmdp, __pgprot(_SEGMENT_ENTRY_INVALID)));
 437		mm->context.flush_mm = 1;
 438		if (mm_has_pgste(mm))
 439			gmap_pmdp_invalidate(mm, addr);
 440	} else {
 441		pmdp_idte_global(mm, addr, pmdp);
 442	}
 443	atomic_dec(&mm->context.flush_count);
 444	return old;
 445}
 446
 447#ifdef CONFIG_PGSTE
 448static int pmd_lookup(struct mm_struct *mm, unsigned long addr, pmd_t **pmdp)
 449{
 450	struct vm_area_struct *vma;
 451	pgd_t *pgd;
 452	p4d_t *p4d;
 453	pud_t *pud;
 454
 455	/* We need a valid VMA, otherwise this is clearly a fault. */
 456	vma = vma_lookup(mm, addr);
 457	if (!vma)
 458		return -EFAULT;
 459
 460	pgd = pgd_offset(mm, addr);
 461	if (!pgd_present(*pgd))
 462		return -ENOENT;
 463
 464	p4d = p4d_offset(pgd, addr);
 465	if (!p4d_present(*p4d))
 466		return -ENOENT;
 467
 468	pud = pud_offset(p4d, addr);
 469	if (!pud_present(*pud))
 470		return -ENOENT;
 471
 472	/* Large PUDs are not supported yet. */
 473	if (pud_leaf(*pud))
 474		return -EFAULT;
 475
 476	*pmdp = pmd_offset(pud, addr);
 477	return 0;
 478}
 479#endif
 480
 481pmd_t pmdp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 482		       pmd_t *pmdp, pmd_t new)
 483{
 484	pmd_t old;
 485
 486	preempt_disable();
 487	old = pmdp_flush_direct(mm, addr, pmdp);
 488	set_pmd(pmdp, new);
 489	preempt_enable();
 490	return old;
 491}
 492EXPORT_SYMBOL(pmdp_xchg_direct);
 493
 494pmd_t pmdp_xchg_lazy(struct mm_struct *mm, unsigned long addr,
 495		     pmd_t *pmdp, pmd_t new)
 496{
 497	pmd_t old;
 498
 499	preempt_disable();
 500	old = pmdp_flush_lazy(mm, addr, pmdp);
 501	set_pmd(pmdp, new);
 502	preempt_enable();
 503	return old;
 504}
 505EXPORT_SYMBOL(pmdp_xchg_lazy);
 506
 507static inline void pudp_idte_local(struct mm_struct *mm,
 508				   unsigned long addr, pud_t *pudp)
 509{
 510	if (MACHINE_HAS_TLB_GUEST)
 511		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
 512			    mm->context.asce, IDTE_LOCAL);
 513	else
 514		__pudp_idte(addr, pudp, 0, 0, IDTE_LOCAL);
 515}
 516
 517static inline void pudp_idte_global(struct mm_struct *mm,
 518				    unsigned long addr, pud_t *pudp)
 519{
 520	if (MACHINE_HAS_TLB_GUEST)
 521		__pudp_idte(addr, pudp, IDTE_NODAT | IDTE_GUEST_ASCE,
 522			    mm->context.asce, IDTE_GLOBAL);
 523	else if (MACHINE_HAS_IDTE)
 524		__pudp_idte(addr, pudp, 0, 0, IDTE_GLOBAL);
 525	else
 526		/*
 527		 * Invalid bit position is the same for pmd and pud, so we can
  528		 * reuse __pmdp_csp() here
 529		 */
 530		__pmdp_csp((pmd_t *) pudp);
 531}
 532
 533static inline pud_t pudp_flush_direct(struct mm_struct *mm,
 534				      unsigned long addr, pud_t *pudp)
 535{
 536	pud_t old;
 537
 538	old = *pudp;
 539	if (pud_val(old) & _REGION_ENTRY_INVALID)
 540		return old;
 541	atomic_inc(&mm->context.flush_count);
 542	if (MACHINE_HAS_TLB_LC &&
 543	    cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
 544		pudp_idte_local(mm, addr, pudp);
 545	else
 546		pudp_idte_global(mm, addr, pudp);
 547	atomic_dec(&mm->context.flush_count);
 548	return old;
 549}
 550
 551pud_t pudp_xchg_direct(struct mm_struct *mm, unsigned long addr,
 552		       pud_t *pudp, pud_t new)
 553{
 554	pud_t old;
 555
 556	preempt_disable();
 557	old = pudp_flush_direct(mm, addr, pudp);
 558	set_pud(pudp, new);
 559	preempt_enable();
 560	return old;
 561}
 562EXPORT_SYMBOL(pudp_xchg_direct);
 563
 564#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 565void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
 566				pgtable_t pgtable)
 567{
 568	struct list_head *lh = (struct list_head *) pgtable;
 569
 570	assert_spin_locked(pmd_lockptr(mm, pmdp));
 571
 572	/* FIFO */
 573	if (!pmd_huge_pte(mm, pmdp))
 574		INIT_LIST_HEAD(lh);
 575	else
 576		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
 577	pmd_huge_pte(mm, pmdp) = pgtable;
 578}
 579
 580pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
 581{
 582	struct list_head *lh;
 583	pgtable_t pgtable;
 584	pte_t *ptep;
 585
 586	assert_spin_locked(pmd_lockptr(mm, pmdp));
 587
 588	/* FIFO */
 589	pgtable = pmd_huge_pte(mm, pmdp);
 590	lh = (struct list_head *) pgtable;
 591	if (list_empty(lh))
 592		pmd_huge_pte(mm, pmdp) = NULL;
 593	else {
 594		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
 595		list_del(lh);
 596	}
 597	ptep = (pte_t *) pgtable;
 598	set_pte(ptep, __pte(_PAGE_INVALID));
 599	ptep++;
 600	set_pte(ptep, __pte(_PAGE_INVALID));
 601	return pgtable;
 602}
 603#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 604
 605#ifdef CONFIG_PGSTE
 606void ptep_set_pte_at(struct mm_struct *mm, unsigned long addr,
 607		     pte_t *ptep, pte_t entry)
 608{
 609	pgste_t pgste;
 610
 611	/* the mm_has_pgste() check is done in set_pte_at() */
 612	preempt_disable();
 613	pgste = pgste_get_lock(ptep);
 614	pgste_val(pgste) &= ~_PGSTE_GPS_ZERO;
 615	pgste_set_key(ptep, pgste, entry, mm);
 616	pgste = pgste_set_pte(ptep, pgste, entry);
 617	pgste_set_unlock(ptep, pgste);
 618	preempt_enable();
 619}
 620
 621void ptep_set_notify(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 622{
 623	pgste_t pgste;
 624
 625	preempt_disable();
 626	pgste = pgste_get_lock(ptep);
 627	pgste_val(pgste) |= PGSTE_IN_BIT;
 628	pgste_set_unlock(ptep, pgste);
 629	preempt_enable();
 630}
 631
 632/**
 633 * ptep_force_prot - change access rights of a locked pte
 634 * @mm: pointer to the process mm_struct
 635 * @addr: virtual address in the guest address space
 636 * @ptep: pointer to the page table entry
 637 * @prot: indicates guest access rights: PROT_NONE, PROT_READ or PROT_WRITE
 638 * @bit: pgste bit to set (e.g. for notification)
 639 *
 640 * Returns 0 if the access rights were changed and -EAGAIN if the current
 641 * and requested access rights are incompatible.
 642 */
 643int ptep_force_prot(struct mm_struct *mm, unsigned long addr,
 644		    pte_t *ptep, int prot, unsigned long bit)
 645{
 646	pte_t entry;
 647	pgste_t pgste;
 648	int pte_i, pte_p, nodat;
 649
 650	pgste = pgste_get_lock(ptep);
 651	entry = *ptep;
 652	/* Check pte entry after all locks have been acquired */
 653	pte_i = pte_val(entry) & _PAGE_INVALID;
 654	pte_p = pte_val(entry) & _PAGE_PROTECT;
 655	if ((pte_i && (prot != PROT_NONE)) ||
 656	    (pte_p && (prot & PROT_WRITE))) {
 657		pgste_set_unlock(ptep, pgste);
 658		return -EAGAIN;
 659	}
 660	/* Change access rights and set pgste bit */
 661	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 662	if (prot == PROT_NONE && !pte_i) {
 663		ptep_flush_direct(mm, addr, ptep, nodat);
 664		pgste = pgste_update_all(entry, pgste, mm);
 665		entry = set_pte_bit(entry, __pgprot(_PAGE_INVALID));
 666	}
 667	if (prot == PROT_READ && !pte_p) {
 668		ptep_flush_direct(mm, addr, ptep, nodat);
 669		entry = clear_pte_bit(entry, __pgprot(_PAGE_INVALID));
 670		entry = set_pte_bit(entry, __pgprot(_PAGE_PROTECT));
 671	}
 672	pgste_val(pgste) |= bit;
 673	pgste = pgste_set_pte(ptep, pgste, entry);
 674	pgste_set_unlock(ptep, pgste);
 675	return 0;
 676}
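/*
 * Illustrative usage sketch (assumption, not taken from this file): a
 * gmap-style caller is expected to have the pte mapped and locked, e.g.
 *
 *	ptep = pte_alloc_map_lock(mm, pmdp, addr, &ptl);
 *	if (ptep) {
 *		rc = ptep_force_prot(mm, addr, ptep, PROT_READ, PGSTE_IN_BIT);
 *		pte_unmap_unlock(ptep, ptl);
 *	}
 *
 * A return value of -EAGAIN means the current access rights conflict with
 * the request; the caller has to resolve the fault and retry.
 */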
 677
 678int ptep_shadow_pte(struct mm_struct *mm, unsigned long saddr,
 679		    pte_t *sptep, pte_t *tptep, pte_t pte)
 680{
 681	pgste_t spgste, tpgste;
 682	pte_t spte, tpte;
 683	int rc = -EAGAIN;
 684
 685	if (!(pte_val(*tptep) & _PAGE_INVALID))
 686		return 0;	/* already shadowed */
 687	spgste = pgste_get_lock(sptep);
 688	spte = *sptep;
 689	if (!(pte_val(spte) & _PAGE_INVALID) &&
 690	    !((pte_val(spte) & _PAGE_PROTECT) &&
 691	      !(pte_val(pte) & _PAGE_PROTECT))) {
 692		pgste_val(spgste) |= PGSTE_VSIE_BIT;
 693		tpgste = pgste_get_lock(tptep);
 694		tpte = __pte((pte_val(spte) & PAGE_MASK) |
 695			     (pte_val(pte) & _PAGE_PROTECT));
 696		/* don't touch the storage key - it belongs to parent pgste */
 697		tpgste = pgste_set_pte(tptep, tpgste, tpte);
 698		pgste_set_unlock(tptep, tpgste);
 699		rc = 1;
 700	}
 701	pgste_set_unlock(sptep, spgste);
 702	return rc;
 703}
 704
 705void ptep_unshadow_pte(struct mm_struct *mm, unsigned long saddr, pte_t *ptep)
 706{
 707	pgste_t pgste;
 708	int nodat;
 709
 710	pgste = pgste_get_lock(ptep);
 711	/* notifier is called by the caller */
 712	nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 713	ptep_flush_direct(mm, saddr, ptep, nodat);
 714	/* don't touch the storage key - it belongs to parent pgste */
 715	pgste = pgste_set_pte(ptep, pgste, __pte(_PAGE_INVALID));
 716	pgste_set_unlock(ptep, pgste);
 717}
 718
 719static void ptep_zap_swap_entry(struct mm_struct *mm, swp_entry_t entry)
 720{
 721	if (!non_swap_entry(entry))
 722		dec_mm_counter(mm, MM_SWAPENTS);
 723	else if (is_migration_entry(entry)) {
 724		struct folio *folio = pfn_swap_entry_folio(entry);
 725
 726		dec_mm_counter(mm, mm_counter(folio));
 727	}
 728	free_swap_and_cache(entry);
 729}
 730
 731void ptep_zap_unused(struct mm_struct *mm, unsigned long addr,
 732		     pte_t *ptep, int reset)
 733{
 734	unsigned long pgstev;
 735	pgste_t pgste;
 736	pte_t pte;
 737
 738	/* Zap unused and logically-zero pages */
 739	preempt_disable();
 740	pgste = pgste_get_lock(ptep);
 741	pgstev = pgste_val(pgste);
 742	pte = *ptep;
 743	if (!reset && pte_swap(pte) &&
 744	    ((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED ||
 745	     (pgstev & _PGSTE_GPS_ZERO))) {
 746		ptep_zap_swap_entry(mm, pte_to_swp_entry(pte));
 747		pte_clear(mm, addr, ptep);
 748	}
 749	if (reset)
 750		pgste_val(pgste) &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
 751	pgste_set_unlock(ptep, pgste);
 752	preempt_enable();
 753}
 754
 755void ptep_zap_key(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
 756{
 757	unsigned long ptev;
 758	pgste_t pgste;
 759
 760	/* Clear storage key ACC and F, but set R/C */
 761	preempt_disable();
 762	pgste = pgste_get_lock(ptep);
 763	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT);
 764	pgste_val(pgste) |= PGSTE_GR_BIT | PGSTE_GC_BIT;
 765	ptev = pte_val(*ptep);
 766	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
 767		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 0);
 768	pgste_set_unlock(ptep, pgste);
 769	preempt_enable();
 770}
 771
 772/*
 773 * Test and reset if a guest page is dirty
 774 */
 775bool ptep_test_and_clear_uc(struct mm_struct *mm, unsigned long addr,
 776		       pte_t *ptep)
 777{
 778	pgste_t pgste;
 779	pte_t pte;
 780	bool dirty;
 781	int nodat;
 782
 783	pgste = pgste_get_lock(ptep);
 784	dirty = !!(pgste_val(pgste) & PGSTE_UC_BIT);
 785	pgste_val(pgste) &= ~PGSTE_UC_BIT;
 786	pte = *ptep;
 787	if (dirty && (pte_val(pte) & _PAGE_PRESENT)) {
 788		pgste = pgste_pte_notify(mm, addr, ptep, pgste);
 789		nodat = !!(pgste_val(pgste) & _PGSTE_GPS_NODAT);
 790		ptep_ipte_global(mm, addr, ptep, nodat);
 791		if (MACHINE_HAS_ESOP || !(pte_val(pte) & _PAGE_WRITE))
 792			pte = set_pte_bit(pte, __pgprot(_PAGE_PROTECT));
 793		else
 794			pte = set_pte_bit(pte, __pgprot(_PAGE_INVALID));
 795		set_pte(ptep, pte);
 796	}
 797	pgste_set_unlock(ptep, pgste);
 798	return dirty;
 799}
 800EXPORT_SYMBOL_GPL(ptep_test_and_clear_uc);
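/*
 * Background note (not in the upstream file): the PGSTE_UC bit is set by
 * pgste_set_pte() whenever a writable pte is installed, so this helper
 * presumably serves the KVM dirty-page tracking path: it reports whether the
 * guest may have written to the page since the last query and re-protects
 * the pte so the next write is caught again.
 */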
 801
 802int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 803			  unsigned char key, bool nq)
 804{
 805	unsigned long keyul, paddr;
 806	spinlock_t *ptl;
 807	pgste_t old, new;
 808	pmd_t *pmdp;
 809	pte_t *ptep;
 810
 811	/*
 812	 * If we don't have a PTE table and if there is no huge page mapped,
 813	 * we can ignore attempts to set the key to 0, because it already is 0.
 814	 */
 815	switch (pmd_lookup(mm, addr, &pmdp)) {
 816	case -ENOENT:
 817		return key ? -EFAULT : 0;
 818	case 0:
 819		break;
 820	default:
 821		return -EFAULT;
 822	}
 823again:
 824	ptl = pmd_lock(mm, pmdp);
 825	if (!pmd_present(*pmdp)) {
 826		spin_unlock(ptl);
 827		return key ? -EFAULT : 0;
 828	}
 829
 830	if (pmd_leaf(*pmdp)) {
 831		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 832		paddr |= addr & ~HPAGE_MASK;
 833		/*
 834		 * Huge pmds need quiescing operations, they are
 835		 * always mapped.
 836		 */
 837		page_set_storage_key(paddr, key, 1);
 838		spin_unlock(ptl);
 839		return 0;
 840	}
 841	spin_unlock(ptl);
 842
 843	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 844	if (!ptep)
 845		goto again;
 846	new = old = pgste_get_lock(ptep);
 847	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
 848			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
 849	keyul = (unsigned long) key;
 850	pgste_val(new) |= (keyul & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
 851	pgste_val(new) |= (keyul & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
 852	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 853		unsigned long bits, skey;
 854
 855		paddr = pte_val(*ptep) & PAGE_MASK;
 856		skey = (unsigned long) page_get_storage_key(paddr);
 857		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
 858		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
 859		/* Set storage key ACC and FP */
 860		page_set_storage_key(paddr, skey, !nq);
 861		/* Merge host changed & referenced into pgste  */
 862		pgste_val(new) |= bits << 52;
 863	}
 864	/* changing the guest storage key is considered a change of the page */
 865	if ((pgste_val(new) ^ pgste_val(old)) &
 866	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
 867		pgste_val(new) |= PGSTE_UC_BIT;
 868
 869	pgste_set_unlock(ptep, new);
 870	pte_unmap_unlock(ptep, ptl);
 871	return 0;
 872}
 873EXPORT_SYMBOL(set_guest_storage_key);
 874
 875/*
 876 * Conditionally set a guest storage key (handling csske).
 877 * oldkey will be updated when either mr or mc is set and a pointer is given.
 878 *
  879 * Returns 0 if a guest's storage key update wasn't necessary, 1 if the guest
 880 * storage key was updated and -EFAULT on access errors.
 881 */
 882int cond_set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 883			       unsigned char key, unsigned char *oldkey,
 884			       bool nq, bool mr, bool mc)
 885{
 886	unsigned char tmp, mask = _PAGE_ACC_BITS | _PAGE_FP_BIT;
 887	int rc;
 888
 889	/* we can drop the pgste lock between getting and setting the key */
 890	if (mr | mc) {
 891		rc = get_guest_storage_key(current->mm, addr, &tmp);
 892		if (rc)
 893			return rc;
 894		if (oldkey)
 895			*oldkey = tmp;
 896		if (!mr)
 897			mask |= _PAGE_REFERENCED;
 898		if (!mc)
 899			mask |= _PAGE_CHANGED;
 900		if (!((tmp ^ key) & mask))
 901			return 0;
 902	}
 903	rc = set_guest_storage_key(current->mm, addr, key, nq);
 904	return rc < 0 ? rc : 1;
 905}
 906EXPORT_SYMBOL(cond_set_guest_storage_key);
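/*
 * Worked example (illustrative): with both mr and mc set the guest permits
 * the R and C bits to be left as they are, so only the ACC and FP parts of
 * the key are compared; if they already match, nothing is written and 0 is
 * returned.  With mr or mc clear the corresponding R/C bit is added to the
 * compare mask and a difference there forces a real key update as well.
 */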
 907
 908/*
 909 * Reset a guest reference bit (rrbe), returning the reference and changed bit.
 910 *
 911 * Returns < 0 in case of error, otherwise the cc to be reported to the guest.
 912 */
 913int reset_guest_reference_bit(struct mm_struct *mm, unsigned long addr)
 914{
 915	spinlock_t *ptl;
 916	unsigned long paddr;
 917	pgste_t old, new;
 918	pmd_t *pmdp;
 919	pte_t *ptep;
 920	int cc = 0;
 921
 922	/*
 923	 * If we don't have a PTE table and if there is no huge page mapped,
 924	 * the storage key is 0 and there is nothing for us to do.
 925	 */
 926	switch (pmd_lookup(mm, addr, &pmdp)) {
 927	case -ENOENT:
 928		return 0;
 929	case 0:
 930		break;
 931	default:
 932		return -EFAULT;
 933	}
 934again:
 935	ptl = pmd_lock(mm, pmdp);
 936	if (!pmd_present(*pmdp)) {
 937		spin_unlock(ptl);
 938		return 0;
 939	}
 940
 941	if (pmd_leaf(*pmdp)) {
 942		paddr = pmd_val(*pmdp) & HPAGE_MASK;
 943		paddr |= addr & ~HPAGE_MASK;
 944		cc = page_reset_referenced(paddr);
 945		spin_unlock(ptl);
 946		return cc;
 947	}
 948	spin_unlock(ptl);
 949
 950	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
 951	if (!ptep)
 952		goto again;
 953	new = old = pgste_get_lock(ptep);
 954	/* Reset guest reference bit only */
 955	pgste_val(new) &= ~PGSTE_GR_BIT;
 956
 957	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
 958		paddr = pte_val(*ptep) & PAGE_MASK;
 959		cc = page_reset_referenced(paddr);
 960		/* Merge real referenced bit into host-set */
 961		pgste_val(new) |= ((unsigned long) cc << 53) & PGSTE_HR_BIT;
 962	}
 963	/* Reflect guest's logical view, not physical */
 964	cc |= (pgste_val(old) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 49;
 965	/* Changing the guest storage key is considered a change of the page */
 966	if ((pgste_val(new) ^ pgste_val(old)) & PGSTE_GR_BIT)
 967		pgste_val(new) |= PGSTE_UC_BIT;
 968
 969	pgste_set_unlock(ptep, new);
 970	pte_unmap_unlock(ptep, ptl);
 971	return cc;
 972}
 973EXPORT_SYMBOL(reset_guest_reference_bit);
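/*
 * Background note (not in the upstream file): the returned cc mimics the
 * RRBE instruction, i.e. it reflects the referenced/changed state as the
 * guest last saw it (GR/GC bits of the PGSTE merged in), not only the host
 * view of the real storage key.
 */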
 974
 975int get_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 976			  unsigned char *key)
 977{
 978	unsigned long paddr;
 979	spinlock_t *ptl;
 980	pgste_t pgste;
 981	pmd_t *pmdp;
 982	pte_t *ptep;
 983
 984	/*
 985	 * If we don't have a PTE table and if there is no huge page mapped,
 986	 * the storage key is 0.
 987	 */
 988	*key = 0;
 989
 990	switch (pmd_lookup(mm, addr, &pmdp)) {
 991	case -ENOENT:
 992		return 0;
 993	case 0:
 994		break;
 995	default:
 996		return -EFAULT;
 997	}
 998again:
 999	ptl = pmd_lock(mm, pmdp);
1000	if (!pmd_present(*pmdp)) {
1001		spin_unlock(ptl);
1002		return 0;
1003	}
1004
1005	if (pmd_leaf(*pmdp)) {
1006		paddr = pmd_val(*pmdp) & HPAGE_MASK;
1007		paddr |= addr & ~HPAGE_MASK;
1008		*key = page_get_storage_key(paddr);
1009		spin_unlock(ptl);
1010		return 0;
1011	}
1012	spin_unlock(ptl);
1013
1014	ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
1015	if (!ptep)
1016		goto again;
1017	pgste = pgste_get_lock(ptep);
1018	*key = (pgste_val(pgste) & (PGSTE_ACC_BITS | PGSTE_FP_BIT)) >> 56;
1019	paddr = pte_val(*ptep) & PAGE_MASK;
1020	if (!(pte_val(*ptep) & _PAGE_INVALID))
1021		*key = page_get_storage_key(paddr);
1022	/* Reflect guest's logical view, not physical */
1023	*key |= (pgste_val(pgste) & (PGSTE_GR_BIT | PGSTE_GC_BIT)) >> 48;
1024	pgste_set_unlock(ptep, pgste);
1025	pte_unmap_unlock(ptep, ptl);
1026	return 0;
1027}
1028EXPORT_SYMBOL(get_guest_storage_key);
1029
1030/**
1031 * pgste_perform_essa - perform ESSA actions on the PGSTE.
1032 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1033 * @hva: the host virtual address of the page whose PGSTE is to be processed
1034 * @orc: the specific action to perform, see the ESSA_SET_* macros.
1035 * @oldpte: the PTE will be saved there if the pointer is not NULL.
1036 * @oldpgste: the old PGSTE will be saved there if the pointer is not NULL.
1037 *
1038 * Return: 1 if the page is to be added to the CBRL, otherwise 0,
1039 *	   or < 0 in case of error. -EINVAL is returned for invalid values
1040 *	   of orc, -EFAULT for invalid addresses.
1041 */
1042int pgste_perform_essa(struct mm_struct *mm, unsigned long hva, int orc,
1043			unsigned long *oldpte, unsigned long *oldpgste)
1044{
1045	struct vm_area_struct *vma;
1046	unsigned long pgstev;
1047	spinlock_t *ptl;
1048	pgste_t pgste;
1049	pte_t *ptep;
1050	int res = 0;
1051
1052	WARN_ON_ONCE(orc > ESSA_MAX);
1053	if (unlikely(orc > ESSA_MAX))
1054		return -EINVAL;
1055
1056	vma = vma_lookup(mm, hva);
1057	if (!vma || is_vm_hugetlb_page(vma))
1058		return -EFAULT;
1059	ptep = get_locked_pte(mm, hva, &ptl);
1060	if (unlikely(!ptep))
1061		return -EFAULT;
1062	pgste = pgste_get_lock(ptep);
1063	pgstev = pgste_val(pgste);
1064	if (oldpte)
1065		*oldpte = pte_val(*ptep);
1066	if (oldpgste)
1067		*oldpgste = pgstev;
1068
1069	switch (orc) {
1070	case ESSA_GET_STATE:
1071		break;
1072	case ESSA_SET_STABLE:
1073		pgstev &= ~(_PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT);
1074		pgstev |= _PGSTE_GPS_USAGE_STABLE;
1075		break;
1076	case ESSA_SET_UNUSED:
1077		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1078		pgstev |= _PGSTE_GPS_USAGE_UNUSED;
1079		if (pte_val(*ptep) & _PAGE_INVALID)
1080			res = 1;
1081		break;
1082	case ESSA_SET_VOLATILE:
1083		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1084		pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1085		if (pte_val(*ptep) & _PAGE_INVALID)
1086			res = 1;
1087		break;
1088	case ESSA_SET_POT_VOLATILE:
1089		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1090		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
1091			pgstev |= _PGSTE_GPS_USAGE_POT_VOLATILE;
1092			break;
1093		}
1094		if (pgstev & _PGSTE_GPS_ZERO) {
1095			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1096			break;
1097		}
1098		if (!(pgstev & PGSTE_GC_BIT)) {
1099			pgstev |= _PGSTE_GPS_USAGE_VOLATILE;
1100			res = 1;
1101			break;
1102		}
1103		break;
1104	case ESSA_SET_STABLE_RESIDENT:
1105		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1106		pgstev |= _PGSTE_GPS_USAGE_STABLE;
1107		/*
1108		 * Since the resident state can go away any time after this
1109		 * call, we will not make this page resident. We can revisit
1110		 * this decision if a guest will ever start using this.
1111		 */
1112		break;
1113	case ESSA_SET_STABLE_IF_RESIDENT:
1114		if (!(pte_val(*ptep) & _PAGE_INVALID)) {
1115			pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1116			pgstev |= _PGSTE_GPS_USAGE_STABLE;
1117		}
1118		break;
1119	case ESSA_SET_STABLE_NODAT:
1120		pgstev &= ~_PGSTE_GPS_USAGE_MASK;
1121		pgstev |= _PGSTE_GPS_USAGE_STABLE | _PGSTE_GPS_NODAT;
1122		break;
1123	default:
1124		/* we should never get here! */
1125		break;
1126	}
1127	/* If we are discarding a page, set it to logical zero */
1128	if (res)
1129		pgstev |= _PGSTE_GPS_ZERO;
1130
1131	pgste_val(pgste) = pgstev;
1132	pgste_set_unlock(ptep, pgste);
1133	pte_unmap_unlock(ptep, ptl);
1134	return res;
1135}
1136EXPORT_SYMBOL(pgste_perform_essa);
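/*
 * Usage sketch (assumption about the caller, not from this file): the KVM
 * ESSA intercept handler walks the guest-supplied list of addresses and
 * calls pgste_perform_essa() for each one with mmap_read_lock() held; a
 * return value of 1 tells it to add the page to the CBRL so the host may
 * later discard its contents.
 */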
1137
1138/**
1139 * set_pgste_bits - set specific PGSTE bits.
1140 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1141 * @hva: the host virtual address of the page whose PGSTE is to be processed
1142 * @bits: a bitmask representing the bits that will be touched
1143 * @value: the values of the bits to be written. Only the bits in the mask
1144 *	   will be written.
1145 *
1146 * Return: 0 on success, < 0 in case of error.
1147 */
1148int set_pgste_bits(struct mm_struct *mm, unsigned long hva,
1149			unsigned long bits, unsigned long value)
1150{
1151	struct vm_area_struct *vma;
1152	spinlock_t *ptl;
1153	pgste_t new;
1154	pte_t *ptep;
1155
1156	vma = vma_lookup(mm, hva);
1157	if (!vma || is_vm_hugetlb_page(vma))
1158		return -EFAULT;
1159	ptep = get_locked_pte(mm, hva, &ptl);
1160	if (unlikely(!ptep))
1161		return -EFAULT;
1162	new = pgste_get_lock(ptep);
1163
1164	pgste_val(new) &= ~bits;
1165	pgste_val(new) |= value & bits;
1166
1167	pgste_set_unlock(ptep, new);
1168	pte_unmap_unlock(ptep, ptl);
1169	return 0;
1170}
1171EXPORT_SYMBOL(set_pgste_bits);
1172
1173/**
1174 * get_pgste - get the current PGSTE for the given address.
1175 * @mm: the memory context. It must have PGSTEs, no check is performed here!
1176 * @hva: the host virtual address of the page whose PGSTE is to be processed
1177 * @pgstep: will be written with the current PGSTE for the given address.
1178 *
1179 * Return: 0 on success, < 0 in case of error.
1180 */
1181int get_pgste(struct mm_struct *mm, unsigned long hva, unsigned long *pgstep)
1182{
1183	struct vm_area_struct *vma;
1184	spinlock_t *ptl;
1185	pte_t *ptep;
1186
1187	vma = vma_lookup(mm, hva);
1188	if (!vma || is_vm_hugetlb_page(vma))
1189		return -EFAULT;
1190	ptep = get_locked_pte(mm, hva, &ptl);
1191	if (unlikely(!ptep))
1192		return -EFAULT;
1193	*pgstep = pgste_val(pgste_get(ptep));
1194	pte_unmap_unlock(ptep, ptl);
1195	return 0;
1196}
1197EXPORT_SYMBOL(get_pgste);
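/*
 * Background note (assumption, not from this file): set_pgste_bits() and
 * get_pgste() appear to be the raw accessors behind KVM's CMMA/migration
 * attribute handling.  They take a host virtual address because the PGSTE
 * lives next to the host page table entry, not in guest memory.
 */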
1198#endif
v3.1
 
  1/*
  2 *    Copyright IBM Corp. 2007,2009
  3 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
  4 */
  5
  6#include <linux/sched.h>
  7#include <linux/kernel.h>
  8#include <linux/errno.h>
  9#include <linux/gfp.h>
 10#include <linux/mm.h>
 11#include <linux/swap.h>
 12#include <linux/smp.h>
 13#include <linux/highmem.h>
 14#include <linux/pagemap.h>
 15#include <linux/spinlock.h>
 16#include <linux/module.h>
 17#include <linux/quicklist.h>
 18#include <linux/rcupdate.h>
 19#include <linux/slab.h>
 20
 21#include <asm/system.h>
 22#include <asm/pgtable.h>
 23#include <asm/pgalloc.h>
 24#include <asm/tlb.h>
 25#include <asm/tlbflush.h>
 26#include <asm/mmu_context.h>
 27
 28#ifndef CONFIG_64BIT
 29#define ALLOC_ORDER	1
 30#define FRAG_MASK	0x0f
 31#else
 32#define ALLOC_ORDER	2
 33#define FRAG_MASK	0x03
 34#endif
 35
 36unsigned long VMALLOC_START = VMALLOC_END - VMALLOC_SIZE;
 37EXPORT_SYMBOL(VMALLOC_START);
 38
 39static int __init parse_vmalloc(char *arg)
 40{
 41	if (!arg)
 42		return -EINVAL;
 43	VMALLOC_START = (VMALLOC_END - memparse(arg, &arg)) & PAGE_MASK;
 44	return 0;
 45}
 46early_param("vmalloc", parse_vmalloc);
 47
 48unsigned long *crst_table_alloc(struct mm_struct *mm)
 49{
 50	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
 51
 52	if (!page)
 53		return NULL;
 54	return (unsigned long *) page_to_phys(page);
 55}
 56
 57void crst_table_free(struct mm_struct *mm, unsigned long *table)
 58{
 59	free_pages((unsigned long) table, ALLOC_ORDER);
 60}
 61
 62#ifdef CONFIG_64BIT
 63int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
 64{
 65	unsigned long *table, *pgd;
 66	unsigned long entry;
 67
 68	BUG_ON(limit > (1UL << 53));
 69repeat:
 70	table = crst_table_alloc(mm);
 71	if (!table)
 72		return -ENOMEM;
 73	spin_lock_bh(&mm->page_table_lock);
 74	if (mm->context.asce_limit < limit) {
 75		pgd = (unsigned long *) mm->pgd;
 76		if (mm->context.asce_limit <= (1UL << 31)) {
 77			entry = _REGION3_ENTRY_EMPTY;
 78			mm->context.asce_limit = 1UL << 42;
 79			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
 80						_ASCE_USER_BITS |
 81						_ASCE_TYPE_REGION3;
 82		} else {
 83			entry = _REGION2_ENTRY_EMPTY;
 84			mm->context.asce_limit = 1UL << 53;
 85			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
 86						_ASCE_USER_BITS |
 87						_ASCE_TYPE_REGION2;
 88		}
 89		crst_table_init(table, entry);
 90		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
 91		mm->pgd = (pgd_t *) table;
 92		mm->task_size = mm->context.asce_limit;
 93		table = NULL;
 94	}
 95	spin_unlock_bh(&mm->page_table_lock);
 96	if (table)
 97		crst_table_free(mm, table);
 98	if (mm->context.asce_limit < limit)
 99		goto repeat;
100	update_mm(mm, current);
101	return 0;
102}
103
104void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
105{
106	pgd_t *pgd;
107
108	if (mm->context.asce_limit <= limit)
109		return;
110	__tlb_flush_mm(mm);
111	while (mm->context.asce_limit > limit) {
112		pgd = mm->pgd;
113		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
114		case _REGION_ENTRY_TYPE_R2:
115			mm->context.asce_limit = 1UL << 42;
116			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
117						_ASCE_USER_BITS |
118						_ASCE_TYPE_REGION3;
119			break;
120		case _REGION_ENTRY_TYPE_R3:
121			mm->context.asce_limit = 1UL << 31;
122			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
123						_ASCE_USER_BITS |
124						_ASCE_TYPE_SEGMENT;
125			break;
126		default:
127			BUG();
128		}
129		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
130		mm->task_size = mm->context.asce_limit;
131		crst_table_free(mm, (unsigned long *) pgd);
132	}
133	update_mm(mm, current);
134}
135#endif
136
137#ifdef CONFIG_PGSTE
138
139/**
140 * gmap_alloc - allocate a guest address space
141 * @mm: pointer to the parent mm_struct
142 *
143 * Returns a guest address space structure.
144 */
145struct gmap *gmap_alloc(struct mm_struct *mm)
146{
147	struct gmap *gmap;
148	struct page *page;
149	unsigned long *table;
150
151	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
152	if (!gmap)
153		goto out;
154	INIT_LIST_HEAD(&gmap->crst_list);
155	gmap->mm = mm;
156	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
157	if (!page)
158		goto out_free;
159	list_add(&page->lru, &gmap->crst_list);
160	table = (unsigned long *) page_to_phys(page);
161	crst_table_init(table, _REGION1_ENTRY_EMPTY);
162	gmap->table = table;
163	gmap->asce = _ASCE_TYPE_REGION1 | _ASCE_TABLE_LENGTH |
164		     _ASCE_USER_BITS | __pa(table);
165	list_add(&gmap->list, &mm->context.gmap_list);
166	return gmap;
167
168out_free:
169	kfree(gmap);
170out:
171	return NULL;
172}
173EXPORT_SYMBOL_GPL(gmap_alloc);
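/*
 * Usage sketch (illustrative, assumes the v3.1-era KVM caller): one gmap is
 * created per VM and memory slots are then mapped into it, e.g.
 *
 *	gmap = gmap_alloc(current->mm);
 *	if (gmap)
 *		rc = gmap_map_segment(gmap, userspace_addr, guest_phys, size);
 *
 * The region-1 table allocated here covers the whole guest address range;
 * lower-level tables are only allocated on demand by gmap_alloc_table().
 */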
174
175static int gmap_unlink_segment(struct gmap *gmap, unsigned long *table)
176{
177	struct gmap_pgtable *mp;
178	struct gmap_rmap *rmap;
179	struct page *page;
180
181	if (*table & _SEGMENT_ENTRY_INV)
182		return 0;
183	page = pfn_to_page(*table >> PAGE_SHIFT);
184	mp = (struct gmap_pgtable *) page->index;
185	list_for_each_entry(rmap, &mp->mapper, list) {
186		if (rmap->entry != table)
187			continue;
188		list_del(&rmap->list);
189		kfree(rmap);
190		break;
191	}
192	*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
193	return 1;
194}
195
196static void gmap_flush_tlb(struct gmap *gmap)
197{
198	if (MACHINE_HAS_IDTE)
199		__tlb_flush_idte((unsigned long) gmap->table |
200				 _ASCE_TYPE_REGION1);
201	else
202		__tlb_flush_global();
203}
204
205/**
206 * gmap_free - free a guest address space
207 * @gmap: pointer to the guest address space structure
208 */
209void gmap_free(struct gmap *gmap)
210{
211	struct page *page, *next;
212	unsigned long *table;
213	int i;
214
215
216	/* Flush tlb. */
217	if (MACHINE_HAS_IDTE)
218		__tlb_flush_idte((unsigned long) gmap->table |
219				 _ASCE_TYPE_REGION1);
220	else
221		__tlb_flush_global();
222
223	/* Free all segment & region tables. */
224	down_read(&gmap->mm->mmap_sem);
225	list_for_each_entry_safe(page, next, &gmap->crst_list, lru) {
226		table = (unsigned long *) page_to_phys(page);
227		if ((*table & _REGION_ENTRY_TYPE_MASK) == 0)
228			/* Remove gmap rmap structures for segment table. */
229			for (i = 0; i < PTRS_PER_PMD; i++, table++)
230				gmap_unlink_segment(gmap, table);
231		__free_pages(page, ALLOC_ORDER);
232	}
233	up_read(&gmap->mm->mmap_sem);
234	list_del(&gmap->list);
235	kfree(gmap);
236}
237EXPORT_SYMBOL_GPL(gmap_free);
238
239/**
240 * gmap_enable - switch primary space to the guest address space
241 * @gmap: pointer to the guest address space structure
242 */
243void gmap_enable(struct gmap *gmap)
244{
245	S390_lowcore.gmap = (unsigned long) gmap;
246}
247EXPORT_SYMBOL_GPL(gmap_enable);
248
249/**
250 * gmap_disable - switch back to the standard primary address space
251 * @gmap: pointer to the guest address space structure
252 */
253void gmap_disable(struct gmap *gmap)
254{
255	S390_lowcore.gmap = 0UL;
256}
257EXPORT_SYMBOL_GPL(gmap_disable);
258
259static int gmap_alloc_table(struct gmap *gmap,
260			       unsigned long *table, unsigned long init)
261{
262	struct page *page;
263	unsigned long *new;
264
265	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
266	if (!page)
267		return -ENOMEM;
268	new = (unsigned long *) page_to_phys(page);
269	crst_table_init(new, init);
270	down_read(&gmap->mm->mmap_sem);
271	if (*table & _REGION_ENTRY_INV) {
272		list_add(&page->lru, &gmap->crst_list);
273		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
274			(*table & _REGION_ENTRY_TYPE_MASK);
275	} else
276		__free_pages(page, ALLOC_ORDER);
277	up_read(&gmap->mm->mmap_sem);
278	return 0;
279}
280
281/**
282 * gmap_unmap_segment - unmap segment from the guest address space
283 * @gmap: pointer to the guest address space structure
284 * @addr: address in the guest address space
285 * @len: length of the memory area to unmap
286 *
 287 * Returns 0 if the unmap succeeded, -EINVAL if not.
288 */
289int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
290{
291	unsigned long *table;
292	unsigned long off;
293	int flush;
294
295	if ((to | len) & (PMD_SIZE - 1))
296		return -EINVAL;
297	if (len == 0 || to + len < to)
298		return -EINVAL;
299
300	flush = 0;
301	down_read(&gmap->mm->mmap_sem);
302	for (off = 0; off < len; off += PMD_SIZE) {
303		/* Walk the guest addr space page table */
304		table = gmap->table + (((to + off) >> 53) & 0x7ff);
305		if (*table & _REGION_ENTRY_INV)
306			goto out;
307		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
308		table = table + (((to + off) >> 42) & 0x7ff);
309		if (*table & _REGION_ENTRY_INV)
310			goto out;
311		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
312		table = table + (((to + off) >> 31) & 0x7ff);
313		if (*table & _REGION_ENTRY_INV)
314			goto out;
315		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
316		table = table + (((to + off) >> 20) & 0x7ff);
317
318		/* Clear segment table entry in guest address space. */
319		flush |= gmap_unlink_segment(gmap, table);
320		*table = _SEGMENT_ENTRY_INV;
321	}
322out:
323	up_read(&gmap->mm->mmap_sem);
324	if (flush)
325		gmap_flush_tlb(gmap);
326	return 0;
327}
328EXPORT_SYMBOL_GPL(gmap_unmap_segment);
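/*
 * Background note (not in the upstream file): the open-coded walk above
 * follows the s390 DAT layout.  Every table level has 2048 entries, so
 * shifting the guest address by 53, 42, 31 and 20 bits and masking with
 * 0x7ff yields the region-first, region-second, region-third and segment
 * table index respectively.
 */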
329
330/**
 331 * gmap_map_segment - map a segment to the guest address space
332 * @gmap: pointer to the guest address space structure
333 * @from: source address in the parent address space
334 * @to: target address in the guest address space
335 *
 336 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
337 */
338int gmap_map_segment(struct gmap *gmap, unsigned long from,
339		     unsigned long to, unsigned long len)
340{
341	unsigned long *table;
342	unsigned long off;
343	int flush;
344
345	if ((from | to | len) & (PMD_SIZE - 1))
346		return -EINVAL;
347	if (len == 0 || from + len > PGDIR_SIZE ||
348	    from + len < from || to + len < to)
349		return -EINVAL;
350
351	flush = 0;
352	down_read(&gmap->mm->mmap_sem);
353	for (off = 0; off < len; off += PMD_SIZE) {
354		/* Walk the gmap address space page table */
355		table = gmap->table + (((to + off) >> 53) & 0x7ff);
356		if ((*table & _REGION_ENTRY_INV) &&
357		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY))
358			goto out_unmap;
359		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
360		table = table + (((to + off) >> 42) & 0x7ff);
361		if ((*table & _REGION_ENTRY_INV) &&
362		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY))
363			goto out_unmap;
364		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
365		table = table + (((to + off) >> 31) & 0x7ff);
366		if ((*table & _REGION_ENTRY_INV) &&
367		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY))
368			goto out_unmap;
369		table = (unsigned long *) (*table & _REGION_ENTRY_ORIGIN);
370		table = table + (((to + off) >> 20) & 0x7ff);
371
372		/* Store 'from' address in an invalid segment table entry. */
373		flush |= gmap_unlink_segment(gmap, table);
374		*table = _SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | (from + off);
375	}
376	up_read(&gmap->mm->mmap_sem);
377	if (flush)
378		gmap_flush_tlb(gmap);
379	return 0;
380
381out_unmap:
382	up_read(&gmap->mm->mmap_sem);
383	gmap_unmap_segment(gmap, to, len);
384	return -ENOMEM;
385}
386EXPORT_SYMBOL_GPL(gmap_map_segment);
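/*
 * Background note (not in the upstream file): mapping a segment copies no
 * page tables; the parent ("from") address is merely stored in an invalid,
 * read-only marked segment table entry.  The link to the parent's real page
 * table is established lazily by gmap_fault() on the first guest access.
 */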
387
388unsigned long gmap_fault(unsigned long address, struct gmap *gmap)
389{
390	unsigned long *table, vmaddr, segment;
391	struct mm_struct *mm;
392	struct gmap_pgtable *mp;
393	struct gmap_rmap *rmap;
394	struct vm_area_struct *vma;
395	struct page *page;
396	pgd_t *pgd;
397	pud_t *pud;
398	pmd_t *pmd;
399
400	current->thread.gmap_addr = address;
401	mm = gmap->mm;
402	/* Walk the gmap address space page table */
403	table = gmap->table + ((address >> 53) & 0x7ff);
404	if (unlikely(*table & _REGION_ENTRY_INV))
405		return -EFAULT;
406	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
407	table = table + ((address >> 42) & 0x7ff);
408	if (unlikely(*table & _REGION_ENTRY_INV))
409		return -EFAULT;
410	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
411	table = table + ((address >> 31) & 0x7ff);
412	if (unlikely(*table & _REGION_ENTRY_INV))
413		return -EFAULT;
414	table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
415	table = table + ((address >> 20) & 0x7ff);
416
417	/* Convert the gmap address to an mm address. */
418	segment = *table;
419	if (likely(!(segment & _SEGMENT_ENTRY_INV))) {
420		page = pfn_to_page(segment >> PAGE_SHIFT);
421		mp = (struct gmap_pgtable *) page->index;
422		return mp->vmaddr | (address & ~PMD_MASK);
423	} else if (segment & _SEGMENT_ENTRY_RO) {
424		vmaddr = segment & _SEGMENT_ENTRY_ORIGIN;
425		vma = find_vma(mm, vmaddr);
426		if (!vma || vma->vm_start > vmaddr)
427			return -EFAULT;
428
429		/* Walk the parent mm page table */
430		pgd = pgd_offset(mm, vmaddr);
431		pud = pud_alloc(mm, pgd, vmaddr);
432		if (!pud)
433			return -ENOMEM;
434		pmd = pmd_alloc(mm, pud, vmaddr);
435		if (!pmd)
436			return -ENOMEM;
437		if (!pmd_present(*pmd) &&
438		    __pte_alloc(mm, vma, pmd, vmaddr))
439			return -ENOMEM;
440		/* pmd now points to a valid segment table entry. */
441		rmap = kmalloc(sizeof(*rmap), GFP_KERNEL|__GFP_REPEAT);
442		if (!rmap)
443			return -ENOMEM;
444		/* Link gmap segment table entry location to page table. */
445		page = pmd_page(*pmd);
446		mp = (struct gmap_pgtable *) page->index;
447		rmap->entry = table;
448		list_add(&rmap->list, &mp->mapper);
449		/* Set gmap segment table entry to page table. */
450		*table = pmd_val(*pmd) & PAGE_MASK;
451		return vmaddr | (address & ~PMD_MASK);
452	}
453	return -EFAULT;
454
455}
456EXPORT_SYMBOL_GPL(gmap_fault);
457
458void gmap_unmap_notifier(struct mm_struct *mm, unsigned long *table)
459{
460	struct gmap_rmap *rmap, *next;
461	struct gmap_pgtable *mp;
462	struct page *page;
463	int flush;
464
465	flush = 0;
466	spin_lock(&mm->page_table_lock);
467	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
468	mp = (struct gmap_pgtable *) page->index;
469	list_for_each_entry_safe(rmap, next, &mp->mapper, list) {
470		*rmap->entry =
471			_SEGMENT_ENTRY_INV | _SEGMENT_ENTRY_RO | mp->vmaddr;
472		list_del(&rmap->list);
473		kfree(rmap);
474		flush = 1;
475	}
476	spin_unlock(&mm->page_table_lock);
477	if (flush)
478		__tlb_flush_global();
479}
480
481static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
482						    unsigned long vmaddr)
483{
484	struct page *page;
485	unsigned long *table;
486	struct gmap_pgtable *mp;
487
488	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
489	if (!page)
490		return NULL;
491	mp = kmalloc(sizeof(*mp), GFP_KERNEL|__GFP_REPEAT);
492	if (!mp) {
493		__free_page(page);
494		return NULL;
495	}
496	pgtable_page_ctor(page);
497	mp->vmaddr = vmaddr & PMD_MASK;
498	INIT_LIST_HEAD(&mp->mapper);
499	page->index = (unsigned long) mp;
500	atomic_set(&page->_mapcount, 3);
501	table = (unsigned long *) page_to_phys(page);
502	clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE/2);
503	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
504	return table;
505}
506
507static inline void page_table_free_pgste(unsigned long *table)
508{
509	struct page *page;
510	struct gmap_pgtable *mp;
511
512	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
513	mp = (struct gmap_pgtable *) page->index;
514	BUG_ON(!list_empty(&mp->mapper));
515	pgtable_page_ctor(page);
516	atomic_set(&page->_mapcount, -1);
517	kfree(mp);
518	__free_page(page);
519}
520
521#else /* CONFIG_PGSTE */
522
523static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm,
524						    unsigned long vmaddr)
525{
526	return NULL;
527}
528
529static inline void page_table_free_pgste(unsigned long *table)
530{
531}
532
533static inline void gmap_unmap_notifier(struct mm_struct *mm,
534					  unsigned long *table)
535{
536}
537
538#endif /* CONFIG_PGSTE */
539
540static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
541{
542	unsigned int old, new;
543
544	do {
545		old = atomic_read(v);
546		new = old ^ bits;
547	} while (atomic_cmpxchg(v, old, new) != old);
548	return new;
549}
550
551/*
552 * page table entry allocation/free routines.
553 */
554unsigned long *page_table_alloc(struct mm_struct *mm, unsigned long vmaddr)
555{
556	struct page *page;
557	unsigned long *table;
558	unsigned int mask, bit;
559
560	if (mm_has_pgste(mm))
561		return page_table_alloc_pgste(mm, vmaddr);
562	/* Allocate fragments of a 4K page as 1K/2K page table */
563	spin_lock_bh(&mm->context.list_lock);
564	mask = FRAG_MASK;
565	if (!list_empty(&mm->context.pgtable_list)) {
566		page = list_first_entry(&mm->context.pgtable_list,
567					struct page, lru);
568		table = (unsigned long *) page_to_phys(page);
569		mask = atomic_read(&page->_mapcount);
570		mask = mask | (mask >> 4);
571	}
572	if ((mask & FRAG_MASK) == FRAG_MASK) {
573		spin_unlock_bh(&mm->context.list_lock);
574		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
575		if (!page)
576			return NULL;
577		pgtable_page_ctor(page);
578		atomic_set(&page->_mapcount, 1);
579		table = (unsigned long *) page_to_phys(page);
580		clear_table(table, _PAGE_TYPE_EMPTY, PAGE_SIZE);
581		spin_lock_bh(&mm->context.list_lock);
582		list_add(&page->lru, &mm->context.pgtable_list);
583	} else {
584		for (bit = 1; mask & bit; bit <<= 1)
585			table += PTRS_PER_PTE;
586		mask = atomic_xor_bits(&page->_mapcount, bit);
587		if ((mask & FRAG_MASK) == FRAG_MASK)
588			list_del(&page->lru);
589	}
590	spin_unlock_bh(&mm->context.list_lock);
591	return table;
592}
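/*
 * Background note (not in the upstream file): without PGSTEs one 4K page
 * holds several page table fragments - four 1K tables on 31-bit, two 2K
 * tables on 64-bit (see FRAG_MASK).  Which fragments are in use is tracked
 * as a bitmask in the otherwise unused page->_mapcount, and pages with free
 * fragments stay on mm->context.pgtable_list for reuse.
 */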
593
594void page_table_free(struct mm_struct *mm, unsigned long *table)
595{
596	struct page *page;
597	unsigned int bit, mask;
598
599	if (mm_has_pgste(mm)) {
600		gmap_unmap_notifier(mm, table);
601		return page_table_free_pgste(table);
602	}
603	/* Free 1K/2K page table fragment of a 4K page */
604	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
605	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
606	spin_lock_bh(&mm->context.list_lock);
607	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
608		list_del(&page->lru);
609	mask = atomic_xor_bits(&page->_mapcount, bit);
610	if (mask & FRAG_MASK)
611		list_add(&page->lru, &mm->context.pgtable_list);
612	spin_unlock_bh(&mm->context.list_lock);
613	if (mask == 0) {
614		pgtable_page_dtor(page);
615		atomic_set(&page->_mapcount, -1);
616		__free_page(page);
617	}
618}
619
620#ifdef CONFIG_HAVE_RCU_TABLE_FREE
621
622static void __page_table_free_rcu(void *table, unsigned bit)
623{
624	struct page *page;
625
626	if (bit == FRAG_MASK)
627		return page_table_free_pgste(table);
628	/* Free 1K/2K page table fragment of a 4K page */
629	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
630	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
631		pgtable_page_dtor(page);
632		atomic_set(&page->_mapcount, -1);
633		__free_page(page);
634	}
635}
636
637void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table)
638{
639	struct mm_struct *mm;
640	struct page *page;
641	unsigned int bit, mask;
642
643	mm = tlb->mm;
644	if (mm_has_pgste(mm)) {
645		gmap_unmap_notifier(mm, table);
646		table = (unsigned long *) (__pa(table) | FRAG_MASK);
647		tlb_remove_table(tlb, table);
648		return;
649	}
650	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
651	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
652	spin_lock_bh(&mm->context.list_lock);
653	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
654		list_del(&page->lru);
655	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
656	if (mask & FRAG_MASK)
657		list_add_tail(&page->lru, &mm->context.pgtable_list);
658	spin_unlock_bh(&mm->context.list_lock);
659	table = (unsigned long *) (__pa(table) | (bit << 4));
660	tlb_remove_table(tlb, table);
661}
662
663void __tlb_remove_table(void *_table)
664{
665	void *table = (void *)((unsigned long) _table & PAGE_MASK);
666	unsigned type = (unsigned long) _table & ~PAGE_MASK;
667
668	if (type)
669		__page_table_free_rcu(table, type);
670	else
671		free_pages((unsigned long) table, ALLOC_ORDER);
672}
673
674#endif
675
676/*
677 * switch on pgstes for its userspace process (for kvm)
678 */
679int s390_enable_sie(void)
680{
681	struct task_struct *tsk = current;
682	struct mm_struct *mm, *old_mm;
683
 684	/* Do we have the switched amode? If not, we cannot do sie */
685	if (user_mode == HOME_SPACE_MODE)
686		return -EINVAL;
687
688	/* Do we have pgstes? if yes, we are done */
689	if (mm_has_pgste(tsk->mm))
690		return 0;
691
 692	/* let's check if we are allowed to replace the mm */
693	task_lock(tsk);
694	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
695#ifdef CONFIG_AIO
696	    !hlist_empty(&tsk->mm->ioctx_list) ||
697#endif
698	    tsk->mm != tsk->active_mm) {
699		task_unlock(tsk);
700		return -EINVAL;
701	}
702	task_unlock(tsk);
703
 704	/* we copy the mm and let dup_mm create the page tables with pgstes */
705	tsk->mm->context.alloc_pgste = 1;
706	mm = dup_mm(tsk);
707	tsk->mm->context.alloc_pgste = 0;
708	if (!mm)
709		return -ENOMEM;
710
 711	/* Now let's check again if something happened */
712	task_lock(tsk);
713	if (!tsk->mm || atomic_read(&tsk->mm->mm_users) > 1 ||
714#ifdef CONFIG_AIO
715	    !hlist_empty(&tsk->mm->ioctx_list) ||
716#endif
717	    tsk->mm != tsk->active_mm) {
718		mmput(mm);
719		task_unlock(tsk);
720		return -EINVAL;
721	}
722
723	/* ok, we are alone. No ptrace, no threads, etc. */
724	old_mm = tsk->mm;
725	tsk->mm = tsk->active_mm = mm;
726	preempt_disable();
727	update_mm(mm, tsk);
728	atomic_inc(&mm->context.attach_count);
729	atomic_dec(&old_mm->context.attach_count);
730	cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
731	preempt_enable();
732	task_unlock(tsk);
733	mmput(old_mm);
734	return 0;
735}
736EXPORT_SYMBOL_GPL(s390_enable_sie);
737
738#if defined(CONFIG_DEBUG_PAGEALLOC) && defined(CONFIG_HIBERNATION)
739bool kernel_page_present(struct page *page)
740{
741	unsigned long addr;
742	int cc;
743
744	addr = page_to_phys(page);
745	asm volatile(
746		"	lra	%1,0(%1)\n"
747		"	ipm	%0\n"
748		"	srl	%0,28"
749		: "=d" (cc), "+a" (addr) : : "cc");
750	return cc == 0;
751}
752#endif /* CONFIG_HIBERNATION && CONFIG_DEBUG_PAGEALLOC */