/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>

#include <asm/tlbflush.h>
#include <asm/mmu-44x.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_44x.h>
#include "timing.h"

#include "44x_tlb.h"
#include "trace.h"

#ifndef PPC44x_TLBE_SIZE
#define PPC44x_TLBE_SIZE	PPC44x_TLB_4K
#endif

#define PAGE_SIZE_4K (1<<12)
#define PAGE_MASK_4K (~(PAGE_SIZE_4K - 1))

#define PPC44x_TLB_UATTR_MASK \
	(PPC44x_TLB_U0|PPC44x_TLB_U1|PPC44x_TLB_U2|PPC44x_TLB_U3)
#define PPC44x_TLB_USER_PERM_MASK (PPC44x_TLB_UX|PPC44x_TLB_UR|PPC44x_TLB_UW)
#define PPC44x_TLB_SUPER_PERM_MASK (PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW)

#ifdef DEBUG
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	int i;

	printk("vcpu %d TLB dump:\n", vcpu->vcpu_id);
	printk("| %2s | %3s | %8s | %8s | %8s |\n",
			"nr", "tid", "word0", "word1", "word2");

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		tlbe = &vcpu_44x->guest_tlb[i];
		if (tlbe->word0 & PPC44x_TLB_VALID)
			printk(" G%2d |  %02X | %08X | %08X | %08X |\n",
			       i, tlbe->tid, tlbe->word0, tlbe->word1,
			       tlbe->word2);
	}
}
#endif

static inline void kvmppc_44x_tlbie(unsigned int index)
{
	/* 0 <= index < 64, so the V bit is clear and we can use the index as
	 * word0. */
	asm volatile(
		"tlbwe %[index], %[index], 0\n"
	:
	: [index] "r"(index)
	);
}

static inline void kvmppc_44x_tlbre(unsigned int index,
                                    struct kvmppc_44x_tlbe *tlbe)
{
	asm volatile(
		"tlbre %[word0], %[index], 0\n"
		"mfspr %[tid], %[sprn_mmucr]\n"
		"andi. %[tid], %[tid], 0xff\n"
		"tlbre %[word1], %[index], 1\n"
		"tlbre %[word2], %[index], 2\n"
		: [word0] "=r"(tlbe->word0),
		  [word1] "=r"(tlbe->word1),
		  [word2] "=r"(tlbe->word2),
		  [tid]   "=r"(tlbe->tid)
		: [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
		: "cc"
	);
}

static inline void kvmppc_44x_tlbwe(unsigned int index,
                                    struct kvmppc_44x_tlbe *stlbe)
{
	unsigned long tmp;

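	/* rlwimi with a zero shift and mask 0xff splices the low byte of
	 * stlbe->tid into MMUCR[STID], so the entry words below are written
	 * under the intended translation ID. */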
	asm volatile(
		"mfspr %[tmp], %[sprn_mmucr]\n"
		"rlwimi %[tmp], %[tid], 0, 0xff\n"
		"mtspr %[sprn_mmucr], %[tmp]\n"
		"tlbwe %[word0], %[index], 0\n"
		"tlbwe %[word1], %[index], 1\n"
		"tlbwe %[word2], %[index], 2\n"
		: [tmp]   "=&r"(tmp)
		: [word0] "r"(stlbe->word0),
		  [word1] "r"(stlbe->word1),
		  [word2] "r"(stlbe->word2),
		  [tid]   "r"(stlbe->tid),
		  [index] "r"(index),
		  [sprn_mmucr] "i"(SPRN_MMUCR)
	);
}

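/* Illustrative example (not part of the original file): a guest supervisor
 * entry carrying only SR|SW comes back usable as UR|UW by the guest, plus
 * SX|SR|SW so the host can always reach the page, plus M. This assumes the
 * usual 440 word2 layout where each Ux permission bit sits three bits above
 * its Sx counterpart, which is what the << 3 below relies on:
 *
 *   in:  PPC44x_TLB_SR|PPC44x_TLB_SW
 *   out: PPC44x_TLB_UR|PPC44x_TLB_UW|PPC44x_TLB_SX|PPC44x_TLB_SR|
 *        PPC44x_TLB_SW|PPC44x_TLB_M
 */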
static u32 kvmppc_44x_tlb_shadow_attrib(u32 attrib, int usermode)
{
	/* We only care about the guest's permission and user bits. */
	attrib &= PPC44x_TLB_PERM_MASK|PPC44x_TLB_UATTR_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions. */
		attrib &= ~PPC44x_TLB_USER_PERM_MASK;
		attrib |= (attrib & PPC44x_TLB_SUPER_PERM_MASK) << 3;
	}

	/* Make sure host can always access this memory. */
	attrib |= PPC44x_TLB_SX|PPC44x_TLB_SR|PPC44x_TLB_SW;

	/* WIMGE = 0b00100 */
	attrib |= PPC44x_TLB_M;

	return attrib;
}

/* Load shadow TLB back into hardware. */
void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbwe(i, stlbe);
	}
}

static void kvmppc_44x_tlbe_set_modified(struct kvmppc_vcpu_44x *vcpu_44x,
                                         unsigned int i)
{
	vcpu_44x->shadow_tlb_mod[i] = 1;
}

/* Save hardware TLB to the vcpu, and invalidate all guest mappings. */
void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

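	/* Only slots flagged in shadow_tlb_mod need a hardware read-back;
	 * the cached copy is still current for the rest. */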
	for (i = 0; i <= tlb_44x_hwater; i++) {
		struct kvmppc_44x_tlbe *stlbe = &vcpu_44x->shadow_tlb[i];

		if (vcpu_44x->shadow_tlb_mod[i])
			kvmppc_44x_tlbre(i, stlbe);

		if (get_tlb_v(stlbe) && get_tlb_ts(stlbe))
			kvmppc_44x_tlbie(i);
	}
}

/* Search the guest TLB for a matching entry. */
int kvmppc_44x_tlb_index(struct kvm_vcpu *vcpu, gva_t eaddr, unsigned int pid,
                         unsigned int as)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->guest_tlb); i++) {
		struct kvmppc_44x_tlbe *tlbe = &vcpu_44x->guest_tlb[i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

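		/* A TID of 0 marks a global entry: the PID check is skipped
		 * for it, so it matches any PID. */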
		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as)
			continue;

		return i;
	}

	return -1;
}

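/* Worked example (illustrative): for a 4K entry, get_tlb_bytes() returns
 * 0x1000, so pgmask is 0xfff and an eaddr of 0xc0001234 translates to
 * raddr | 0x234. */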
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                       gva_t eaddr)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	unsigned int pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_44x_shadow_release(struct kvmppc_vcpu_44x *vcpu_44x,
                                      unsigned int stlb_index)
{
	struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[stlb_index];

	if (!ref->page)
		return;

	/* Discard from the TLB. */
	/* Note: we could actually invalidate a host mapping, if the host overwrote
	 * this TLB entry since we inserted a guest mapping. */
	kvmppc_44x_tlbie(stlb_index);

	/* Now release the page. */
	if (ref->writeable)
		kvm_release_page_dirty(ref->page);
	else
		kvm_release_page_clean(ref->page);

	ref->page = NULL;

	/* XXX set tlb_44x_index to stlb_index? */

	trace_kvm_stlb_inval(stlb_index);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_44x_shadow_release(vcpu_44x, i);
}

/**
 * kvmppc_mmu_map -- create a host mapping for guest memory
 *
 * If the guest wanted a larger page than the host supports, only the first
 * host page is mapped here and the rest are demand faulted.
 *
 * If the guest wanted a smaller page than the host page size, we map only the
 * guest-size page (i.e. not a full host page mapping).
 *
 * Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB.
 */
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                    unsigned int gtlb_index)
{
	struct kvmppc_44x_tlbe stlbe;
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *gtlbe = &vcpu_44x->guest_tlb[gtlb_index];
	struct kvmppc_44x_shadow_ref *ref;
	struct page *new_page;
	hpa_t hpaddr;
	gfn_t gfn;
	u32 asid = gtlbe->tid;
	u32 flags = gtlbe->word2;
	u32 max_bytes = get_tlb_bytes(gtlbe);
	unsigned int victim;

	/* Select TLB entry to clobber. Indirectly guard against races with the TLB
	 * miss handler by disabling interrupts. */
	local_irq_disable();
	victim = ++tlb_44x_index;
	if (victim > tlb_44x_hwater)
		victim = 0;
	tlb_44x_index = victim;
	local_irq_enable();

	/* Get reference to new page. */
	gfn = gpaddr >> PAGE_SHIFT;
	new_page = gfn_to_page(vcpu->kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %llx!\n",
			(unsigned long long)gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Invalidate any previous shadow mappings. */
	kvmppc_44x_shadow_release(vcpu_44x, victim);

	/* XXX Make sure (va, size) doesn't overlap any other
	 * entries. 440x6 user manual says the result would be
	 * "undefined." */

	/* XXX what about AS? */

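	/* Presumably the host kernel itself runs with TS=0, so pinning shadow
	 * entries to TS=1 keeps guest translations disjoint from the host's
	 * own mappings (inferred rationale; the original comment states only
	 * the forcing). */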
	/* Force TS=1 for all guest mappings. */
	stlbe.word0 = PPC44x_TLB_VALID | PPC44x_TLB_TS;

	if (max_bytes >= PAGE_SIZE) {
		/* Guest mapping is larger than or equal to host page size. We can use
		 * a "native" host mapping. */
		stlbe.word0 |= (gvaddr & PAGE_MASK) | PPC44x_TLBE_SIZE;
	} else {
		/* Guest mapping is smaller than host page size. We must restrict the
		 * size of the mapping to be at most the smaller of the two, but for
		 * simplicity we fall back to a 4K mapping (this is probably what the
		 * guest is using anyways). */
		stlbe.word0 |= (gvaddr & PAGE_MASK_4K) | PPC44x_TLB_4K;

		/* 'hpaddr' is a host page, which is larger than the mapping we're
		 * inserting here. To compensate, we must add the in-page offset to the
		 * sub-page. */
		hpaddr |= gpaddr & (PAGE_MASK ^ PAGE_MASK_4K);
	}

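	/* word1 packs the real page number with the extended RPN (physical
	 * address bits 32-35) in its low nibble; e.g. hpaddr 0x200c00000
	 * would yield 0x00c00000 | 0x2 (illustrative values, assuming the
	 * 440 word1 layout). */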
	stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf);
	stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags,
	                                            vcpu->arch.shared->msr & MSR_PR);
	stlbe.tid = !(asid & 0xff);

	/* Keep track of the reference so we can properly release it later. */
	ref = &vcpu_44x->shadow_refs[victim];
	ref->page = new_page;
	ref->gtlb_index = gtlb_index;
	ref->writeable = !!(stlbe.word2 & PPC44x_TLB_UW);
	ref->tid = stlbe.tid;

	/* Insert shadow mapping into hardware TLB. */
	kvmppc_44x_tlbe_set_modified(vcpu_44x, victim);
	kvmppc_44x_tlbwe(victim, &stlbe);
	trace_kvm_stlb_write(victim, stlbe.tid, stlbe.word0, stlbe.word1,
			     stlbe.word2);
}

/* For a particular guest TLB entry, invalidate the corresponding host TLB
 * mappings and release the host pages. */
static void kvmppc_44x_invalidate(struct kvm_vcpu *vcpu,
                                  unsigned int gtlb_index)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];
		if (ref->gtlb_index == gtlb_index)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}

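/* Presumably this mirrors the TID scheme described in kvmppc_set_pid():
 * guest userspace runs under host PID 0 (matching TID=0 shadow entries),
 * while guest kernel mode runs under host PID 1. */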
void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	int usermode = vcpu->arch.shared->msr & MSR_PR;

	vcpu->arch.shadow_pid = !usermode;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	int i;

	if (unlikely(vcpu->arch.pid == new_pid))
		return;

	vcpu->arch.pid = new_pid;

	/* Guest userspace runs with TID=0 mappings and PID=0, to make sure it
	 * can't access guest kernel mappings (TID=1). When we switch to a new
	 * guest PID, which will also use host PID=0, we must discard the old guest
	 * userspace mappings. */
	for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) {
		struct kvmppc_44x_shadow_ref *ref = &vcpu_44x->shadow_refs[i];

		if (ref->tid == 0)
			kvmppc_44x_shadow_release(vcpu_44x, i);
	}
}

static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
                             const struct kvmppc_44x_tlbe *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}

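/* Illustrative guest sequence (an assumption about typical 440 guest code,
 * not taken from this file): the guest updates one TLB entry word at a time,
 *
 *   tlbwe rS, rA, 0   <- ws == PPC44x_TLB_PAGEID
 *   tlbwe rS, rA, 1   <- ws == PPC44x_TLB_XLAT
 *   tlbwe rS, rA, 2   <- ws == PPC44x_TLB_ATTRIB
 *
 * and each instruction traps into this handler with ra/rs/ws already
 * decoded by the emulation layer. */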
int kvmppc_44x_emul_tlbwe(struct kvm_vcpu *vcpu, u8 ra, u8 rs, u8 ws)
{
	struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu);
	struct kvmppc_44x_tlbe *tlbe;
	unsigned int gtlb_index;

	gtlb_index = kvmppc_get_gpr(vcpu, ra);
	if (gtlb_index >= KVM44x_GUEST_TLB_SIZE) {
		printk("%s: index %d\n", __func__, gtlb_index);
		kvmppc_dump_vcpu(vcpu);
		return EMULATE_FAIL;
	}

	tlbe = &vcpu_44x->guest_tlb[gtlb_index];

	/* Invalidate shadow mappings for the about-to-be-clobbered TLB entry. */
	if (tlbe->word0 & PPC44x_TLB_VALID)
		kvmppc_44x_invalidate(vcpu, gtlb_index);

	switch (ws) {
	case PPC44x_TLB_PAGEID:
		tlbe->tid = get_mmucr_stid(vcpu);
		tlbe->word0 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_XLAT:
		tlbe->word1 = kvmppc_get_gpr(vcpu, rs);
		break;

	case PPC44x_TLB_ATTRIB:
		tlbe->word2 = kvmppc_get_gpr(vcpu, rs);
		break;

	default:
		return EMULATE_FAIL;
	}

	if (tlbe_is_host_safe(vcpu, tlbe)) {
		gva_t eaddr;
		gpa_t gpaddr;
		u32 bytes;

		eaddr = get_tlb_eaddr(tlbe);
		gpaddr = get_tlb_raddr(tlbe);

		/* Use the advertised page size to mask effective and real addrs. */
		bytes = get_tlb_bytes(tlbe);
		eaddr &= ~(bytes - 1);
		gpaddr &= ~(bytes - 1);

		kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
	}

	trace_kvm_gtlb_write(gtlb_index, tlbe->tid, tlbe->word0, tlbe->word1,
			     tlbe->word2);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

int kvmppc_44x_emul_tlbsx(struct kvm_vcpu *vcpu, u8 rt, u8 ra, u8 rb, u8 rc)
{
	u32 ea;
	int gtlb_index;
	unsigned int as = get_mmucr_sts(vcpu);
	unsigned int pid = get_mmucr_stid(vcpu);

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

	gtlb_index = kvmppc_44x_tlb_index(vcpu, ea, pid, as);
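	/* 0x20000000 is CR0[EQ]: with the record bit set, tlbsx. reports a
	 * hit by setting EQ and a miss by clearing it. */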
	if (rc) {
		u32 cr = kvmppc_get_cr(vcpu);

		if (gtlb_index < 0)
			kvmppc_set_cr(vcpu, cr & ~0x20000000);
		else
			kvmppc_set_cr(vcpu, cr | 0x20000000);
	}
	kvmppc_set_gpr(vcpu, rt, gtlb_index);

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}