/*
 * native hashtable management.
 *
 * SMP scalability work:
 *    Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG_LOW

#include <linux/spinlock.h>
#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/threads.h>
#include <linux/smp.h>

#include <asm/machdep.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/tlb.h>
#include <asm/cputable.h>
#include <asm/udbg.h>
#include <asm/kexec.h>
#include <asm/ppc-opcode.h>

#include <misc/cxl-base.h>

#ifdef DEBUG_LOW
#define DBG_LOW(fmt...) udbg_printf(fmt)
#else
#define DBG_LOW(fmt...)
#endif

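/*
 * The HPTE lock bit is a software-use bit in the HPTE's first doubleword.
 * HPTEs are stored big-endian in memory, while test_and_set_bit_lock()
 * numbers bits from the least-significant end of the native word, so on
 * little-endian the same physical bit sits 56 bit positions higher.
 */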
#ifdef __BIG_ENDIAN__
#define HPTE_LOCK_BIT 3
#else
#define HPTE_LOCK_BIT (56+3)
#endif

DEFINE_RAW_SPINLOCK(native_tlbie_lock);

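/*
 * Build the RB operand for a global tlbie from the virtual page number,
 * segment size and actual page size encoding, then issue the instruction.
 * CPUs older than ISA 2.06 (CPU_FTR_ARCH_206 clear) get the short form
 * of tlbie; newer CPUs use the extended PPC_TLBIE encoding.
 */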
static inline void __tlbie(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/*
	 * We need 14 to 65 bits of va for a tlbie of a 4K page.
	 * With vpn we already ignore the lower VPN_SHIFT bits.
	 * And the top two bits are already ignored, because we can
	 * only accommodate 76 bits in a 64 bit vpn with a VPN_SHIFT
	 * of 12.
	 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65), and we need
		 * bits 58..64 of va.
		 */
		va |= (vpn & 0xfe); /* AVAL */
		va |= 1; /* L */
		asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2)
			     : : "r" (va), "r"(0), "i" (CPU_FTR_ARCH_206)
			     : "memory");
		break;
	}
}

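/*
 * Local (per-CPU) form of the above. The tlbiel instruction is
 * hand-encoded as .long 0x7c000224 with the effective address in RB
 * (%0 << 11) and the L field at bit 21, likely to keep older assemblers
 * that lack the extended mnemonic happy.
 */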
static inline void __tlbiel(unsigned long vpn, int psize, int apsize, int ssize)
{
	unsigned long va;
	unsigned int penc;
	unsigned long sllp;

	/* VPN_SHIFT can be at most 12 */
	va = vpn << VPN_SHIFT;
	/*
	 * clear top 16 bits of 64 bit va, non SLS segment
	 * Older versions of the architecture (2.02 and earlier) require the
	 * masking of the top 16 bits.
	 */
	va &= ~(0xffffULL << 48);

	switch (psize) {
	case MMU_PAGE_4K:
		/* clear out bits after (52) [0....52.....63] */
		va &= ~((1ul << (64 - 52)) - 1);
		va |= ssize << 8;
		sllp = ((mmu_psize_defs[apsize].sllp & SLB_VSID_L) >> 6) |
			((mmu_psize_defs[apsize].sllp & SLB_VSID_LP) >> 4);
		va |= sllp << 5;
		asm volatile(".long 0x7c000224 | (%0 << 11) | (0 << 21)"
			     : : "r"(va) : "memory");
		break;
	default:
		/* We need 14 to 14 + i bits of va */
		penc = mmu_psize_defs[psize].penc[apsize];
		va &= ~((1ul << mmu_psize_defs[apsize].shift) - 1);
		va |= penc << 12;
		va |= ssize << 8;
		/*
		 * AVAL bits:
		 * We don't need all the bits, but the rest of the bits
		 * must be ignored by the processor.
		 * vpn covers up to 65 bits of va (0...65), and we need
		 * bits 58..64 of va.
		 */
		va |= (vpn & 0xfe);
		va |= 1; /* L */
		asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
			     : : "r"(va) : "memory");
		break;
	}
}

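/*
 * Invalidate one translation, locally when both the caller and the MMU
 * allow it, otherwise globally. Global tlbie is serialized through
 * native_tlbie_lock on CPUs without MMU_FTR_LOCKLESS_TLBIE, and an
 * active cxl context forces the global form.
 *
 * Illustrative call (hypothetical values) for a 4K kernel translation:
 *
 *	tlbie(vpn, MMU_PAGE_4K, MMU_PAGE_4K, mmu_kernel_ssize, 0);
 */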
static inline void tlbie(unsigned long vpn, int psize, int apsize,
			 int ssize, int local)
{
	unsigned int use_local;
	int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

	use_local = local && mmu_has_feature(MMU_FTR_TLBIEL) && !cxl_ctx_in_use();

	if (use_local)
		use_local = mmu_psize_defs[psize].tlbiel;
	if (lock_tlbie && !use_local)
		raw_spin_lock(&native_tlbie_lock);
	asm volatile("ptesync": : :"memory");
	if (use_local) {
		__tlbiel(vpn, psize, apsize, ssize);
		asm volatile("ptesync": : :"memory");
	} else {
		__tlbie(vpn, psize, apsize, ssize);
		asm volatile("eieio; tlbsync; ptesync": : :"memory");
	}
	if (lock_tlbie && !use_local)
		raw_spin_unlock(&native_tlbie_lock);
}

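/*
 * Lock an HPTE by atomically setting the software lock bit in its first
 * doubleword. On contention, spin with plain reads (cpu_relax) so the
 * cacheline is not bounced by repeated atomic operations.
 */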
static inline void native_lock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	while (1) {
		if (!test_and_set_bit_lock(HPTE_LOCK_BIT, word))
			break;
		while (test_bit(HPTE_LOCK_BIT, word))
			cpu_relax();
	}
}

static inline void native_unlock_hpte(struct hash_pte *hptep)
{
	unsigned long *word = (unsigned long *)&hptep->v;

	clear_bit_unlock(HPTE_LOCK_BIT, word);
}

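/*
 * Insert a new HPTE into the given group. Returns the slot number within
 * the group, with bit 3 set if the entry went in via the secondary hash,
 * or -1 if all eight slots are already valid. The second doubleword is
 * written before the first so that the valid bit is published last.
 */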
static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
			unsigned long pa, unsigned long rflags,
			unsigned long vflags, int psize, int apsize, int ssize)
{
	struct hash_pte *hptep = htab_address + hpte_group;
	unsigned long hpte_v, hpte_r;
	int i;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW("    insert(group=%lx, vpn=%016lx, pa=%016lx,"
			" rflags=%lx, vflags=%lx, psize=%d)\n",
			hpte_group, vpn, pa, rflags, vflags, psize);
	}

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			if (!(be64_to_cpu(hptep->v) & HPTE_V_VALID))
				break;
			native_unlock_hpte(hptep);
		}

		hptep++;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

	if (!(vflags & HPTE_V_BOLTED)) {
		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
			i, hpte_v, hpte_r);
	}

	hptep->r = cpu_to_be64(hpte_r);
	/* Guarantee the second dword is visible before the valid bit */
	eieio();
	/*
	 * Now set the first dword including the valid bit
	 * NOTE: this also unlocks the hpte
	 */
	hptep->v = cpu_to_be64(hpte_v);

	__asm__ __volatile__ ("ptesync" : : : "memory");

	return i | (!!(vflags & HPTE_V_SECONDARY) << 3);
}

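/*
 * Evict a more or less random, non-bolted entry from a full group,
 * starting the search at a pseudo-random slot derived from the timebase.
 * No tlbie is done here: the evicted translation is assumed to still be
 * technically valid, which is why native_hpte_updatepp() and
 * native_hpte_invalidate() always invalidate the TLB themselves.
 */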
static long native_hpte_remove(unsigned long hpte_group)
{
	struct hash_pte *hptep;
	int i;
	int slot_offset;
	unsigned long hpte_v;

	DBG_LOW("    remove(group=%lx)\n", hpte_group);

	/* pick a random entry to start at */
	slot_offset = mftb() & 0x7;

	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + hpte_group + slot_offset;
		hpte_v = be64_to_cpu(hptep->v);

		if ((hpte_v & HPTE_V_VALID) && !(hpte_v & HPTE_V_BOLTED)) {
			/* retry with lock held */
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if ((hpte_v & HPTE_V_VALID)
			    && !(hpte_v & HPTE_V_BOLTED))
				break;
			native_unlock_hpte(hptep);
		}

		slot_offset++;
		slot_offset &= 0x7;
	}

	if (i == HPTES_PER_GROUP)
		return -1;

	/* Invalidate the hpte. NOTE: this also unlocks it */
	hptep->v = 0;

	return i;
}

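/*
 * Update the protection bits of the HPTE at @slot, provided it still
 * matches @vpn. Returns 0 on success and -1 if the entry no longer
 * matches (it may have been evicted in the meantime). Unless the caller
 * passes HPTE_NOHPTE_UPDATE, the TLB entry is invalidated in either
 * case, locally if HPTE_LOCAL_UPDATE is set.
 */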
static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
				 unsigned long vpn, int bpsize,
				 int apsize, int ssize, unsigned long flags)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v, want_v;
	int ret = 0, local = 0;

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);

	DBG_LOW("    update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
		vpn, want_v & HPTE_V_AVPN, slot, newpp);

	hpte_v = be64_to_cpu(hptep->v);
	/*
	 * We always need to invalidate the TLB because hpte_remove doesn't
	 * do a tlb invalidate. If a hash bucket gets full, we "evict" a
	 * more or less random entry from it. When we do that we don't
	 * invalidate the TLB (hpte_remove) because we assume the old
	 * translation is still technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID)) {
		DBG_LOW(" -> miss\n");
		ret = -1;
	} else {
		native_lock_hpte(hptep);
		/* recheck with locks held */
		hpte_v = be64_to_cpu(hptep->v);
		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
			     !(hpte_v & HPTE_V_VALID))) {
			ret = -1;
		} else {
			DBG_LOW(" -> hit\n");
			/* Update the HPTE */
			hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
						~(HPTE_R_PP | HPTE_R_N)) |
					       (newpp & (HPTE_R_PP | HPTE_R_N |
							 HPTE_R_C)));
		}
		native_unlock_hpte(hptep);
	}

	if (flags & HPTE_LOCAL_UPDATE)
		local = 1;
	/*
	 * Ensure it is out of the tlb too if it is not a nohpte fault
	 */
	if (!(flags & HPTE_NOHPTE_UPDATE))
		tlbie(vpn, bpsize, apsize, ssize, local);

	return ret;
}

static long native_hpte_find(unsigned long vpn, int psize, int ssize)
{
	struct hash_pte *hptep;
	unsigned long hash;
	unsigned long i;
	long slot;
	unsigned long want_v, hpte_v;

	hash = hpt_hash(vpn, mmu_psize_defs[psize].shift, ssize);
	want_v = hpte_encode_avpn(vpn, psize, ssize);

	/* Bolted mappings are only ever in the primary group */
	slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	for (i = 0; i < HPTES_PER_GROUP; i++) {
		hptep = htab_address + slot;
		hpte_v = be64_to_cpu(hptep->v);

		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
			/* HPTE matches */
			return slot;
		++slot;
	}

	return -1;
}

/*
 * Update the page protection bits. Intended to be used to create
 * guard pages for kernel data structures on pages which are bolted
 * in the HPT. Assumes pages being operated on will not be stolen.
 *
 * No need to lock here because we should be the only user.
 */
static void native_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
				       int psize, int ssize)
{
	unsigned long vpn;
	unsigned long vsid;
	long slot;
	struct hash_pte *hptep;

	vsid = get_kernel_vsid(ea, ssize);
	vpn = hpt_vpn(ea, vsid, ssize);

	slot = native_hpte_find(vpn, psize, ssize);
	if (slot == -1)
		panic("could not find page to bolt\n");
	hptep = htab_address + slot;

	/* Update the HPTE */
	hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
			~(HPTE_R_PP | HPTE_R_N)) |
		(newpp & (HPTE_R_PP | HPTE_R_N)));
	/*
	 * Ensure it is out of the tlb too. For bolted entries, the base
	 * and actual page size are the same.
	 */
	tlbie(vpn, psize, psize, ssize, 0);
}

static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
				   int bpsize, int apsize, int ssize, int local)
{
	struct hash_pte *hptep = htab_address + slot;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;

	local_irq_save(flags);

	DBG_LOW("    invalidate(vpn=%016lx, hash: %lx)\n", vpn, slot);

	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
	native_lock_hpte(hptep);
	hpte_v = be64_to_cpu(hptep->v);

	/*
	 * We always need to invalidate the TLB because hpte_remove doesn't
	 * do a tlb invalidate. If a hash bucket gets full, we "evict" a
	 * more or less random entry from it. When we do that we don't
	 * invalidate the TLB (hpte_remove) because we assume the old
	 * translation is still technically "valid".
	 */
	if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
		native_unlock_hpte(hptep);
	else
		/* Invalidate the hpte. NOTE: this also unlocks it */
		hptep->v = 0;

	/* Invalidate the TLB */
	tlbie(vpn, bpsize, apsize, ssize, local);

	local_irq_restore(flags);
}

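/*
 * A transparent huge page is mapped by one HPTE per base-page-size chunk;
 * hpte_slot_array records, for each chunk, whether an HPTE is valid and
 * which hash slot it occupies. Walk that array and invalidate every valid
 * entry along with its TLB translation.
 */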
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	int i;
	struct hash_pte *hptep;
	int actual_psize = MMU_PAGE_16M;
	unsigned int max_hpte_count, valid;
	unsigned long flags, s_addr = addr;
	unsigned long hpte_v, want_v, shift;
	unsigned long hidx, vpn = 0, hash, slot;

	shift = mmu_psize_defs[psize].shift;
	max_hpte_count = 1U << (PMD_SHIFT - shift);

	local_irq_save(flags);
	for (i = 0; i < max_hpte_count; i++) {
		valid = hpte_valid(hpte_slot_array, i);
		if (!valid)
			continue;
		hidx = hpte_hash_index(hpte_slot_array, i);

		/* get the vpn */
		addr = s_addr + (i * (1ul << shift));
		vpn = hpt_vpn(addr, vsid, ssize);
		hash = hpt_hash(vpn, shift, ssize);
		if (hidx & _PTEIDX_SECONDARY)
			hash = ~hash;

		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += hidx & _PTEIDX_GROUP_IX;

		hptep = htab_address + slot;
		want_v = hpte_encode_avpn(vpn, psize, ssize);
		native_lock_hpte(hptep);
		hpte_v = be64_to_cpu(hptep->v);

		/* Even if we miss, we need to invalidate the TLB */
		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
			native_unlock_hpte(hptep);
		else
			/* Invalidate the hpte. NOTE: this also unlocks it */
			hptep->v = 0;
		/*
		 * We need to do a tlb invalidate for each address: the
		 * tlbie instruction compares the entry's VA in the TLB
		 * with the VA specified here.
		 */
		tlbie(vpn, psize, actual_psize, ssize, local);
	}
	local_irq_restore(flags);
}
#else
static void native_hugepage_invalidate(unsigned long vsid,
				       unsigned long addr,
				       unsigned char *hpte_slot_array,
				       int psize, int ssize, int local)
{
	WARN(1, "%s called without THP support\n", __func__);
}
#endif


static inline int __hpte_actual_psize(unsigned int lp, int psize)
{
	int i, shift;
	unsigned int mask;

	/* start from 1 ignoring MMU_PAGE_4K */
	for (i = 1; i < MMU_PAGE_COUNT; i++) {

		/* invalid penc */
		if (mmu_psize_defs[psize].penc[i] == -1)
			continue;
		/*
		 * encoding bits per actual page size
		 *        PTE LP     actual page size
		 *    rrrr rrrz		>=8KB
		 *    rrrr rrzz		>=16KB
		 *    rrrr rzzz		>=32KB
		 *    rrrr zzzz		>=64KB
		 * .......
		 */
		shift = mmu_psize_defs[i].shift - LP_SHIFT;
		if (shift > LP_BITS)
			shift = LP_BITS;
		mask = (1 << shift) - 1;
		if ((lp & mask) == mmu_psize_defs[psize].penc[i])
			return i;
	}
	return -1;
}

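/*
 * Reconstruct the base page size, actual page size, segment size and
 * virtual page number from a raw HPTE and its slot number. Used by
 * native_hpte_clear(), which must derive the VA to tlbie from nothing
 * but the hash table contents.
 */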
static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
			int *psize, int *apsize, int *ssize, unsigned long *vpn)
{
	unsigned long avpn, pteg, vpi;
	unsigned long hpte_v = be64_to_cpu(hpte->v);
	unsigned long hpte_r = be64_to_cpu(hpte->r);
	unsigned long vsid, seg_off;
	int size, a_size, shift;
	/* Look at the 8 bit LP value */
	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

	if (!(hpte_v & HPTE_V_LARGE)) {
		size   = MMU_PAGE_4K;
		a_size = MMU_PAGE_4K;
	} else {
		for (size = 0; size < MMU_PAGE_COUNT; size++) {

			/* valid entries have a shift value */
			if (!mmu_psize_defs[size].shift)
				continue;

			a_size = __hpte_actual_psize(lp, size);
			if (a_size != -1)
				break;
		}
	}
	/* This works for all page sizes, and for 256M and 1T segments */
	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
	shift = mmu_psize_defs[size].shift;

	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
	pteg = slot / HPTES_PER_GROUP;
	if (hpte_v & HPTE_V_SECONDARY)
		pteg = ~pteg;

	switch (*ssize) {
	case MMU_SEGSIZE_256M:
		/* We only have 28 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1f) << 23;
		vsid    = avpn >> 5;
		/* We can find more bits from the pteg value */
		if (shift < 23) {
			vpi = (vsid ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	case MMU_SEGSIZE_1T:
		/* We only have 40 - 23 bits of seg_off in avpn */
		seg_off = (avpn & 0x1ffff) << 23;
		vsid    = avpn >> 17;
		if (shift < 23) {
			vpi = (vsid ^ (vsid << 25) ^ pteg) & htab_hash_mask;
			seg_off |= vpi << shift;
		}
		*vpn = vsid << (SID_SHIFT_1T - VPN_SHIFT) | seg_off >> VPN_SHIFT;
		break;
	default:
		*vpn = size = 0;
	}
	*psize  = size;
	*apsize = a_size;
}

/*
 * Clear all mappings on kexec. All cpus are in real mode (or they will
 * be when they isi), and we are the only one left. We rely on our kernel
 * mapping being 0xC0's and the hardware ignoring those two real bits.
 *
 * This must be called with interrupts disabled.
 *
 * Taking the native_tlbie_lock is unsafe here due to the possibility of
 * lockdep being on. On pre-POWER5 hardware, not taking the lock could
 * cause deadlock; on POWER5 and newer, not taking it is fine. This only
 * gets called during boot before secondary CPUs have come up and during
 * crashdump, and all bets are off anyway.
 *
 * TODO: add batching support when enabled. Remember, no dynamic memory
 * here, although there is the control page available...
 */
static void native_hpte_clear(void)
{
	unsigned long vpn = 0;
	unsigned long slot, slots;
	struct hash_pte *hptep = htab_address;
	unsigned long hpte_v;
	unsigned long pteg_count;
	int psize, apsize, ssize;

	pteg_count = htab_hash_mask + 1;

	slots = pteg_count * HPTES_PER_GROUP;

	for (slot = 0; slot < slots; slot++, hptep++) {
		/*
		 * we could lock the pte here, but we are the only cpu
		 * running, right? and for crash dump, we probably
		 * don't want to wait for a maybe bad cpu.
		 */
		hpte_v = be64_to_cpu(hptep->v);

		/*
		 * Call __tlbie() here rather than tlbie() since we can't
		 * take the native_tlbie_lock.
		 */
		if (hpte_v & HPTE_V_VALID) {
			hpte_decode(hptep, slot, &psize, &apsize, &ssize, &vpn);
			hptep->v = 0;
			__tlbie(vpn, psize, apsize, ssize);
		}
	}

	asm volatile("eieio; tlbsync; ptesync":::"memory");
}

/*
 * Batched hash table flush: we batch the tlbies to avoid taking and
 * releasing the lock all the time.
 */
static void native_flush_hash_range(unsigned long number, int local)
{
	unsigned long vpn;
	unsigned long hash, index, hidx, shift, slot;
	struct hash_pte *hptep;
	unsigned long hpte_v;
	unsigned long want_v;
	unsigned long flags;
	real_pte_t pte;
	struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
	unsigned long psize = batch->psize;
	int ssize = batch->ssize;
	int i;

	local_irq_save(flags);

	for (i = 0; i < number; i++) {
		vpn = batch->vpn[i];
		pte = batch->pte[i];

		pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
			hash = hpt_hash(vpn, shift, ssize);
			hidx = __rpte_to_hidx(pte, index);
			if (hidx & _PTEIDX_SECONDARY)
				hash = ~hash;
			slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
			slot += hidx & _PTEIDX_GROUP_IX;
			hptep = htab_address + slot;
			want_v = hpte_encode_avpn(vpn, psize, ssize);
			native_lock_hpte(hptep);
			hpte_v = be64_to_cpu(hptep->v);
			if (!HPTE_V_COMPARE(hpte_v, want_v) ||
			    !(hpte_v & HPTE_V_VALID))
				native_unlock_hpte(hptep);
			else
				hptep->v = 0;
		} pte_iterate_hashed_end();
	}

	if (mmu_has_feature(MMU_FTR_TLBIEL) &&
	    mmu_psize_defs[psize].tlbiel && local) {
		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbiel(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("ptesync":::"memory");
	} else {
		int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);

		if (lock_tlbie)
			raw_spin_lock(&native_tlbie_lock);

		asm volatile("ptesync":::"memory");
		for (i = 0; i < number; i++) {
			vpn = batch->vpn[i];
			pte = batch->pte[i];

			pte_iterate_hashed_subpages(pte, psize,
						    vpn, index, shift) {
				__tlbie(vpn, psize, psize, ssize);
			} pte_iterate_hashed_end();
		}
		asm volatile("eieio; tlbsync; ptesync":::"memory");

		if (lock_tlbie)
			raw_spin_unlock(&native_tlbie_lock);
	}

	local_irq_restore(flags);
}

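/*
 * Install the native (bare-metal) hash MMU callbacks into ppc_md. These
 * are used when the kernel owns the hash table directly, as opposed to
 * going through a hypervisor interface such as the pSeries hcalls.
 */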
void __init hpte_init_native(void)
{
	ppc_md.hpte_invalidate	= native_hpte_invalidate;
	ppc_md.hpte_updatepp	= native_hpte_updatepp;
	ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp;
	ppc_md.hpte_insert	= native_hpte_insert;
	ppc_md.hpte_remove	= native_hpte_remove;
	ppc_md.hpte_clear_all	= native_hpte_clear;
	ppc_md.flush_hash_range = native_flush_hash_range;
	ppc_md.hugepage_invalidate   = native_hugepage_invalidate;
}