Linux v6.8: arch/powerpc/kvm/book3s_64_mmu_host.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 */

#include <linux/kvm_host.h>
#include <linux/pkeys.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"
#include "book3s.h"

#define PTE_SIZE 12

void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
				     pte->pagesize, pte->pagesize,
				     MMU_SEGSIZE_256M, false);
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}

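find_sid_vsid() probes two slots per guest VSID: the XOR-folding hash picks the primary index, and the mirrored index SID_MAP_MASK - hash serves as the fallback, so a lookup only misses after both fail. Below is a minimal standalone sketch of the fold and the two probe indices, assuming SID_MAP_BITS is 9, consistent with the 512-entry table mentioned in the comment above; the sample gvsid is arbitrary.

#include <stdint.h>
#include <stdio.h>

#define SID_MAP_BITS	9			/* assumed: 512 entries -> 9 bits */
#define SID_MAP_MASK	((1 << SID_MAP_BITS) - 1)

/* XOR-fold a 64-bit guest VSID into a 9-bit table index; the loop is
 * equivalent to the unrolled eight-term XOR chain in kvmppc_sid_hash() */
static uint16_t sid_hash(uint64_t gvsid)
{
	uint16_t h = 0;
	int i;

	for (i = 0; i < 8; i++)
		h ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
	return h;
}

int main(void)
{
	uint64_t gvsid = 0x123456789aULL;	/* arbitrary sample */
	uint16_t h = sid_hash(gvsid);

	/* find_sid_vsid() checks the primary slot, then its mirror */
	printf("primary slot:   %u\n", h);
	printf("secondary slot: %u\n", SID_MAP_MASK - h);
	return 0;
}
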
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	unsigned long vpn;
	kvm_pfn_t hpaddr;
	ulong hash, hpteg;
	u64 vsid;
	int ret;
	int rflags = 0x192;	/* HPTE_R_R | HPTE_R_C | HPTE_R_M, PP = 0b10 */
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;
	int hpsize = MMU_PAGE_4K;
	bool writable;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu->kvm;
	struct hpte_cache *cpte;
	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
	unsigned long pfn;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();

	/* Get host physical address for gpa */
	pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
	if (is_error_noslot_pfn(pfn)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
		       orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr = pfn << PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
				vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}

	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

	kvm_set_pfn_accessed(pfn);
	if (!orig_pte->may_write || !writable)
		rflags |= PP_RXRX;
	else {
		mark_page_dirty(vcpu->kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
		kvmppc_mmu_flush_icache(pfn);

	rflags |= pte_to_hpte_pkey_bits(0, HPTE_USE_KERNEL_KEY);
	rflags = (rflags & ~HPTE_R_WIMG) | orig_pte->wimg;

	/*
	 * Use 64K pages if possible; otherwise, on 64K page kernels,
	 * we need to transfer 4 more bits from guest real to host real addr.
	 */
	if (vsid & VSID_64K)
		hpsize = MMU_PAGE_64K;
	else
		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

	cpte = kvmppc_mmu_hpte_cache_next(vcpu);

	spin_lock(&kvm->mmu_lock);
	if (!cpte || mmu_invalidate_retry(kvm, mmu_seq)) {
		r = -EAGAIN;
		goto out_unlock;
	}

map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (mmu_hash_ops.hpte_remove(hpteg) < 0) {
			r = -1;
			goto out_unlock;
		}

	ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
				       hpsize, hpsize, MMU_SEGSIZE_256M);

	if (ret == -1) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else if (ret < 0) {
		r = -EIO;
		goto out_unlock;
	} else {
		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);

		/*
		 * The mmu_hash_ops code may give us a secondary entry even
		 * though we asked for a primary. Fix up.
		 */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		cpte->slot = hpteg + (ret & 7);
		cpte->host_vpn = vpn;
		cpte->pte = *orig_pte;
		cpte->pfn = pfn;
		cpte->pagesize = hpsize;

		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
		cpte = NULL;
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	if (cpte)
		kvmppc_mmu_hpte_cache_free(cpte);

out:
	return r;
}

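The retry loop above implements the hash page table's primary/secondary scheme: if all eight slots of the primary HPTE group are full, the hash is complemented and HPTE_V_SECONDARY is set, which selects a different group. A standalone sketch of how the two group indices are derived; the htab_hash_mask value here is illustrative (the host derives it from the real HPT size), not a fixed constant.

#include <stdio.h>

#define HPTES_PER_GROUP	8

int main(void)
{
	/* illustrative: a 16 MB HPT (128-byte groups) has 2^17 groups */
	unsigned long htab_hash_mask = (1UL << 17) - 1;
	unsigned long hash = 0x2a3b4c5dUL;	/* as returned by hpt_hash() */

	unsigned long primary   = (hash & htab_hash_mask) * HPTES_PER_GROUP;
	unsigned long secondary = (~hash & htab_hash_mask) * HPTES_PER_GROUP;

	/* kvmppc_mmu_map_page() retries with hash = ~hash and
	 * HPTE_V_SECONDARY when all 8 primary slots are taken */
	printf("primary group starts at slot:   %lu\n", primary);
	printf("secondary group starts at slot: %lu\n", secondary);
	return 0;
}
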
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	u64 mask = 0xfffffffffULL;
	u64 vsid;

	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
	if (vsid & VSID_64K)
		mask = 0xffffffff0ULL;
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}

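The two masks passed to kvmppc_mmu_pte_vflush() differ only in the low four bits: when the segment uses 64K pages, one host page backs sixteen 4K-sized guest page numbers, so the flush key has to ignore those bits to catch all sixteen aliases. A small sketch of the effect:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vpage    = 0x123456789ULL;	/* sample guest vpage */
	uint64_t mask_4k  = 0xfffffffffULL;	/* match one 4K page exactly */
	uint64_t mask_64k = 0xffffffff0ULL;	/* ignore low 4 bits: 16 pages */

	/* Under the 64K mask, all 16 vpages sharing bits [35:4] compare
	 * equal, so one flush covers the whole 64K host page. */
	printf("4K  key: 0x%llx\n", (unsigned long long)(vpage & mask_4k));
	printf("64K key: 0x%llx\n", (unsigned long long)(vpage & mask_64k));
	return 0;
}
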
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	unsigned long vsid_bits = VSID_BITS_65_256M;
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/* We might get collisions that trap in preceding order, so let's
	   map them differently */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}

	if (mmu_has_feature(MMU_FTR_68_BIT_VA))
		vsid_bits = VSID_BITS_256M;

	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++,
				       VSID_MULTIPLIER_256M, vsid_bits);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}

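map->host_vsid comes from vsid_scramble(), which, as I read the hash-MMU headers, is a multiplicative hash reduced modulo 2^vsid_bits - 1 by folding the high product bits back in. A rough sketch of that reduction follows; the constants are assumptions (12538073 as the 256M multiplier, and 37 VSID bits for 65-bit VA or 40 for 68-bit VA), not values taken from this file.

#include <stdio.h>

/* A sketch of a multiply-and-fold VSID scramble: multiply the proto-VSID
 * by a large prime, then reduce modulo 2^bits - 1. Assumed to mirror the
 * kernel's vsid_scramble(); constants here are illustrative. */
static unsigned long scramble(unsigned long protovsid,
			      unsigned long multiplier, int bits)
{
	unsigned long modulus = (1UL << bits) - 1;
	unsigned long vsid = protovsid * multiplier;

	vsid = (vsid >> bits) + (vsid & modulus);	/* fold high part in */
	return (vsid + ((vsid + 1) >> bits)) & modulus;	/* final carry */
}

int main(void)
{
	/* 12538073 is the assumed 256M multiplier; 37 bits for 65-bit VA */
	printf("host vsid: 0x%lx\n", scramble(0x1234, 12538073UL, 37));
	return 0;
}
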
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	/* Are we overwriting? */
	for (i = 0; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval >= 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}

int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
	/* Set host segment base page size to 64K if possible */
	if (gvsid & VSID_64K)
		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}

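The shadow SLB entry is assembled by hand here: the ESID word carries the effective segment base, the valid bit, and the entry index, while the VSID word carries the scrambled host VSID shifted into the field that starts at bit 12, plus protection flags. A minimal sketch of that assembly, using field positions I am assuming from the usual Book3S SLB layout rather than taking from this file:

#include <stdint.h>
#include <stdio.h>

/* Assumed field positions for a 256M-segment SLB entry */
#define ESID_MASK_DEMO	0xfffffffff0000000ULL	/* segment base bits */
#define SLB_ESID_V_DEMO	0x0000000008000000ULL	/* valid bit */

int main(void)
{
	uint64_t eaddr = 0x40001234ULL;		/* sample effective address */
	uint64_t host_vsid = 0xabcdeULL;	/* sample scrambled VSID */
	int slb_index = 3;

	uint64_t slb_esid = (eaddr & ESID_MASK_DEMO) | SLB_ESID_V_DEMO
			    | slb_index;	/* index lives in the low bits */
	uint64_t slb_vsid = host_vsid << 12;	/* VSID field starts at bit 12 */

	printf("esid word: 0x%016llx\n", (unsigned long long)slb_esid);
	printf("vsid word: 0x%016llx\n", (unsigned long long)slb_vsid);
	return 0;
}
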
void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong seg_mask = -seg_size;
	int i;

	for (i = 0; i < svcpu->slb_max; i++) {
		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
		    (svcpu->slb[i].esid & seg_mask) == ea) {
			/* Invalidate this entry */
			svcpu->slb[i].esid = 0;
		}
	}

	svcpu_put(svcpu);
}

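The seg_mask = -seg_size idiom relies on two's-complement arithmetic: for a power-of-two segment size, the negation is exactly the alignment mask, so ANDing an ESID with it yields the segment base that is compared against ea. A short check:

#include <stdio.h>

int main(void)
{
	unsigned long seg_size = 0x10000000UL;	/* 256M segment */
	unsigned long seg_mask = -seg_size;	/* all bits above the size set */

	/* -0x10000000 == 0xfffffffff0000000 on 64-bit, so the AND rounds
	 * an address down to its 256M segment base. */
	printf("mask: 0x%lx\n", seg_mask);
	printf("base of 0x41234560: 0x%lx\n", 0x41234560UL & seg_mask);
	return 0;
}
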
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->slb_max = 0;
	svcpu->slb[0].esid = 0;
	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}

int kvmppc_mmu_init_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = hash__alloc_context_id();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;

	vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
				  << ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}
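Each vcpu gets one host MMU context id, and its proto-VSIDs are that id shifted left by ESID_BITS, giving one proto-VSID per shadow segment before create_sid_map() has to flush and recycle. The sketch below computes the range, assuming ESID_BITS is 18, the value I believe the hash-MMU headers use for 256M segments:

#include <stdio.h>

#define ESID_BITS_DEMO 18	/* assumed value from the hash-MMU headers */

int main(void)
{
	unsigned long context_id = 5;	/* as hash__alloc_context_id() might return */

	unsigned long first = context_id << ESID_BITS_DEMO;
	unsigned long max   = ((context_id + 1) << ESID_BITS_DEMO) - 1;

	/* 2^18 proto-VSIDs per context id, matching the proto_vsid_first /
	 * proto_vsid_max window set up in kvmppc_mmu_init_pr() */
	printf("proto VSIDs: 0x%lx .. 0x%lx (%lu entries)\n",
	       first, max, max - first + 1);
	return 0;
}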