/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu <yu.liu@freescale.com>
 *         Scott Wood <scottwood@freescale.com>
 *         Ashish Kalra <ashish.kalra@freescale.com>
 *         Varun Sethi <varun.sethi@freescale.com>
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.h and
 * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
 * Copyright IBM Corp. 2007-2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#ifndef KVM_E500_H
#define KVM_E500_H

#include <linux/kvm_host.h>
#include <asm/mmu-book3e.h>
#include <asm/tlb.h>

#define E500_PID_NUM   3
#define E500_TLB_NUM   2

#define E500_TLB_VALID 1
#define E500_TLB_DIRTY 2
#define E500_TLB_BITMAP 4

struct tlbe_ref {
	pfn_t pfn;
	unsigned int flags; /* E500_TLB_* */
};

struct tlbe_priv {
	struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
};

#ifdef CONFIG_KVM_E500V2
struct vcpu_id_table;
#endif

struct kvmppc_e500_tlb_params {
	int entries, ways, sets;
};

struct kvmppc_vcpu_e500 {
	struct kvm_vcpu vcpu;

	/* Unmodified copy of the guest's TLB -- shared with host userspace. */
	struct kvm_book3e_206_tlb_entry *gtlb_arch;

	/* Starting entry number in gtlb_arch[] */
	int gtlb_offset[E500_TLB_NUM];

	/* KVM internal information associated with each guest TLB entry */
	struct tlbe_priv *gtlb_priv[E500_TLB_NUM];

	struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];

	unsigned int gtlb_nv[E500_TLB_NUM];

	/*
	 * information associated with each host TLB entry --
	 * TLB1 only for now.  If/when guest TLB1 entries can be
	 * mapped with host TLB0, this will be used for that too.
	 *
	 * We don't want to use this for guest TLB0 because then we'd
	 * have the overhead of doing the translation again even if
	 * the entry is still in the guest TLB (e.g. we swapped out
	 * and back, and our host TLB entries got evicted).
	 */
	struct tlbe_ref *tlb_refs[E500_TLB_NUM];
	unsigned int host_tlb1_nv;

	u32 svr;
	u32 l1csr0;
	u32 l1csr1;
	u32 hid0;
	u32 hid1;
	u64 mcar;

	struct page **shared_tlb_pages;
	int num_shared_tlb_pages;

	u64 *g2h_tlb1_map;
	unsigned int *h2g_tlb1_rmap;
 91
 92	/* Minimum and maximum address mapped my TLB1 */
 93	unsigned long tlb1_min_eaddr;
 94	unsigned long tlb1_max_eaddr;
 95
 96#ifdef CONFIG_KVM_E500V2
 97	u32 pid[E500_PID_NUM];
 98
 99	/* vcpu id table */
100	struct vcpu_id_table *idt;
101#endif
102};
103
104static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
105{
106	return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
107}
108
109
110/* This geometry is the legacy default -- can be overridden by userspace */
111#define KVM_E500_TLB0_WAY_SIZE		128
112#define KVM_E500_TLB0_WAY_NUM		2
113
114#define KVM_E500_TLB0_SIZE  (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
115#define KVM_E500_TLB1_SIZE  16
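
/*
 * Illustrative example (not part of the original header): with the legacy
 * default geometry above, the guest TLB0 holds
 * KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM = 128 * 2 = 256 entries,
 * and the guest TLB1 holds KVM_E500_TLB1_SIZE = 16 entries.
 */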

#define index_of(tlbsel, esel)	(((tlbsel) << 16) | ((esel) & 0xFFFF))
#define tlbsel_of(index)	((index) >> 16)
#define esel_of(index)		((index) & 0xFFFF)
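
/*
 * Illustrative example (not part of the original header): index_of() packs
 * a TLB selector and an entry selector into one int, e.g.
 * index_of(1, 3) == 0x00010003, so tlbsel_of(0x00010003) == 1 and
 * esel_of(0x00010003) == 3.
 */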

#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
#define MAS2_ATTRIB_MASK \
	  (MAS2_X0 | MAS2_X1)
#define MAS3_ATTRIB_MASK \
	  (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
	   | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
				ulong value);
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb);
int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb);
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb);
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);

void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

#ifdef CONFIG_KVM_E500V2
unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
				 unsigned int as, unsigned int gid,
				 unsigned int pr, int avoid_recursion);
#endif

/* TLB helper functions */
static inline unsigned int
get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 7) & 0x1f;
}

static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas2 & 0xfffff000;
}

static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	unsigned int pgsize = get_tlb_size(tlbe);
	return 1ULL << 10 << pgsize;
}
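
/*
 * Illustrative example (not part of the original header): get_tlb_bytes()
 * computes 1 KiB << TSIZE, where TSIZE is the 5-bit field that
 * get_tlb_size() reads from MAS1 bits 7..11.  For instance, TSIZE = 1
 * yields 2 KiB and TSIZE = 11 yields 2 MiB.
 */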

static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	u64 bytes = get_tlb_bytes(tlbe);
	return get_tlb_eaddr(tlbe) + bytes - 1;
}

static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & ~0xfffULL;
}

static inline unsigned int
get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 16) & 0xff;
}

static inline unsigned int
get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 12) & 0x1;
}

static inline unsigned int
get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 31) & 0x1;
}

static inline unsigned int
get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 >> 30) & 0x1;
}

static inline unsigned int
get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
{
	return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
}
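
/*
 * Summary of the MAS1 bit positions implied by the accessors above (for
 * orientation only): V = bit 31, IPROT = bit 30, TID = bits 16..23,
 * TS = bit 12, TSIZE = bits 7..11.
 */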

static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.pid & 0xff;
}

static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
}

static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.shared->msr & MSR_PR);
}

static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas6 >> 16) & 0xff;
}

static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas6 & 0x1;
}

static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
{
	/*
	 * The manual says that tlbsel is 2 bits wide.
	 * Since we only have two TLBs, only the lower bit is used.
	 */
	return (vcpu->arch.shared->mas0 >> 28) & 0x1;
}

static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.shared->mas0 & 0xfff;
}

static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
}
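
/*
 * Summary of the MAS0/MAS6 bit positions implied by the accessors above
 * (for orientation only): MAS0 TLBSEL = bits 28..29 (only the low bit is
 * used here), ESEL = bits 16..27, NV = bits 0..11; MAS6 SPID = bits 16..23,
 * SAS = bit 0.
 */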

static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
			const struct kvm_book3e_206_tlb_entry *tlbe)
{
	gpa_t gpa;

	if (!get_tlb_v(tlbe))
		return 0;

#ifndef CONFIG_KVM_BOOKE_HV
	/* Does it match current guest AS? */
	/* XXX what about IS != DS? */
	if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
		return 0;
#endif

	gpa = get_tlb_raddr(tlbe);
	if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
		/* Mapping is not for RAM. */
		return 0;

	return 1;
}

static inline struct kvm_book3e_206_tlb_entry *get_entry(
	struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
{
	int offset = vcpu_e500->gtlb_offset[tlbsel];
	return &vcpu_e500->gtlb_arch[offset + entry];
}
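
/*
 * Illustrative example (the offset values are assumptions, not taken from
 * this header): if gtlb_offset[0] == 0 and gtlb_offset[1] == 256 (the legacy
 * default TLB0 size), then get_entry(vcpu_e500, 1, 3) would return
 * &gtlb_arch[256 + 3] == &gtlb_arch[259].
 */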

void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
			   struct kvm_book3e_206_tlb_entry *gtlbe);
void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);

#ifdef CONFIG_KVM_BOOKE_HV
#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe)       get_tlb_tid(gtlbe)
#define get_tlbmiss_tid(vcpu)           get_cur_pid(vcpu)
#define get_tlb_sts(gtlbe)              (gtlbe->mas1 & MAS1_TS)
#else
unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
				      struct kvm_book3e_206_tlb_entry *gtlbe);

static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;

	return vcpu_e500->pid[tidseld];
}

/* Force TS=1 for all guest mappings. */
#define get_tlb_sts(gtlbe)              (MAS1_TS)
#endif /* !BOOKE_HV */

#endif /* KVM_E500_H */