   1/*
   2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
   3 *
   4 * Author: Yu Liu, yu.liu@freescale.com
   5 *
   6 * Description:
   7 * This file is based on arch/powerpc/kvm/44x_tlb.c,
   8 * by Hollis Blanchard <hollisb@us.ibm.com>.
   9 *
  10 * This program is free software; you can redistribute it and/or modify
  11 * it under the terms of the GNU General Public License, version 2, as
  12 * published by the Free Software Foundation.
  13 */
  14
  15#include <linux/types.h>
  16#include <linux/slab.h>
  17#include <linux/string.h>
  18#include <linux/kvm.h>
  19#include <linux/kvm_host.h>
  20#include <linux/highmem.h>
  21#include <asm/kvm_ppc.h>
  22#include <asm/kvm_e500.h>
  23
  24#include "../mm/mmu_decl.h"
  25#include "e500_tlb.h"
  26#include "trace.h"
  27#include "timing.h"
  28
  29#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)
  30
  31struct id {
  32	unsigned long val;
  33	struct id **pentry;
  34};
  35
  36#define NUM_TIDS 256
  37
  38/*
   39 * This table provides mappings from:
   40 * (guestAS,guestTID,guestPR) --> shadow ID used on the physical cpu
  41 * guestAS	[0..1]
  42 * guestTID	[0..255]
  43 * guestPR	[0..1]
  44 * ID		[1..255]
  45 * Each vcpu keeps one vcpu_id_table.
  46 */
  47struct vcpu_id_table {
  48	struct id id[2][NUM_TIDS][2];
  49};
  50
  51/*
   52 * This table provides the reverse mapping of vcpu_id_table:
  53 * ID --> address of vcpu_id_table item.
  54 * Each physical core has one pcpu_id_table.
  55 */
  56struct pcpu_id_table {
  57	struct id *entry[NUM_TIDS];
  58};
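
     /*
      * A shadow ID mapping is a two-way handshake between these tables:
      * a guest (AS, TID, PR) owns one struct id in its vcpu_id_table, and
      * the mapping is live on a core only while pcpu_sids.entry[id.val]
      * still points back at that struct id (and id.pentry points at that
      * slot); see local_sid_lookup().  Either side can drop out
      * independently: the vcpu side via kvmppc_e500_id_table_reset_*(),
      * the pcpu side by reassigning the slot or wiping the per-cpu table.
      */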
  59
  60static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
  61
   62/* This variable keeps the last used shadow ID on the local core.
   63 * The valid range of a shadow ID is [1..255]. */
  64static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
  65
  66static unsigned int tlb1_entry_num;
  67
  68/*
   69 * Allocate a free shadow id and set up a valid sid mapping in the given entry.
   70 * A mapping is valid only while the vcpu_id_table and pcpu_id_table entries match.
  71 *
  72 * The caller must have preemption disabled, and keep it that way until
  73 * it has finished with the returned shadow id (either written into the
  74 * TLB or arch.shadow_pid, or discarded).
  75 */
  76static inline int local_sid_setup_one(struct id *entry)
  77{
  78	unsigned long sid;
  79	int ret = -1;
  80
  81	sid = ++(__get_cpu_var(pcpu_last_used_sid));
  82	if (sid < NUM_TIDS) {
  83		__get_cpu_var(pcpu_sids).entry[sid] = entry;
  84		entry->val = sid;
  85		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
  86		ret = sid;
  87	}
  88
  89	/*
  90	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
  91	 * the caller will invalidate everything and start over.
  92	 *
  93	 * sid > NUM_TIDS indicates a race, which we disable preemption to
  94	 * avoid.
  95	 */
  96	WARN_ON(sid > NUM_TIDS);
  97
  98	return ret;
  99}
 100
 101/*
  102 * Check whether the given entry contains a valid shadow id mapping.
  103 * An ID mapping is considered valid only if
  104 * both the vcpu and the pcpu know about this mapping.
 105 *
 106 * The caller must have preemption disabled, and keep it that way until
 107 * it has finished with the returned shadow id (either written into the
 108 * TLB or arch.shadow_pid, or discarded).
 109 */
 110static inline int local_sid_lookup(struct id *entry)
 111{
 112	if (entry && entry->val != 0 &&
 113	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
 114	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
 115		return entry->val;
 116	return -1;
 117}
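
     /*
      * Typical usage (illustrative sketch only; the real pattern, including
      * the flush-and-retry on sid exhaustion, is kvmppc_e500_get_sid() below):
      *
      *	preempt_disable();
      *	sid = local_sid_lookup(&idt->id[as][gid][pr]);
      *	if (sid <= 0)
      *		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
      *	... write sid into the TLB entry or arch.shadow_pid ...
      *	preempt_enable();
      */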
 118
 119/* Invalidate all id mappings on local core */
 120static inline void local_sid_destroy_all(void)
 121{
 122	preempt_disable();
 123	__get_cpu_var(pcpu_last_used_sid) = 0;
 124	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
 125	preempt_enable();
 126}
 127
 128static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
 129{
 130	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
 131	return vcpu_e500->idt;
 132}
 133
 134static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
 135{
 136	kfree(vcpu_e500->idt);
 137}
 138
 139/* Invalidate all mappings on vcpu */
 140static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
 141{
 142	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
 143
 144	/* Update shadow pid when mappings are changed */
 145	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
 146}
 147
 148/* Invalidate one ID mapping on vcpu */
 149static inline void kvmppc_e500_id_table_reset_one(
 150			       struct kvmppc_vcpu_e500 *vcpu_e500,
 151			       int as, int pid, int pr)
 152{
 153	struct vcpu_id_table *idt = vcpu_e500->idt;
 154
 155	BUG_ON(as >= 2);
 156	BUG_ON(pid >= NUM_TIDS);
 157	BUG_ON(pr >= 2);
 158
 159	idt->id[as][pid][pr].val = 0;
 160	idt->id[as][pid][pr].pentry = NULL;
 161
 162	/* Update shadow pid when mappings are changed */
 163	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
 164}
 165
 166/*
 167 * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
  168 * This function first looks up whether a valid mapping exists;
  169 * if not, it creates a new one.
 170 *
 171 * The caller must have preemption disabled, and keep it that way until
 172 * it has finished with the returned shadow id (either written into the
 173 * TLB or arch.shadow_pid, or discarded).
 174 */
 175static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
 176					unsigned int as, unsigned int gid,
 177					unsigned int pr, int avoid_recursion)
 178{
 179	struct vcpu_id_table *idt = vcpu_e500->idt;
 180	int sid;
 181
 182	BUG_ON(as >= 2);
 183	BUG_ON(gid >= NUM_TIDS);
 184	BUG_ON(pr >= 2);
 185
 186	sid = local_sid_lookup(&idt->id[as][gid][pr]);
 187
 188	while (sid <= 0) {
 189		/* No mapping yet */
 190		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
 191		if (sid <= 0) {
 192			_tlbil_all();
 193			local_sid_destroy_all();
 194		}
 195
 196		/* Update shadow pid when mappings are changed */
 197		if (!avoid_recursion)
 198			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
 199	}
 200
 201	return sid;
 202}
 203
  204/* Map the guest PID to a shadow ID.
  205 * We use PID to hold the shadow of the current guest non-zero PID,
  206 * and PID1 to hold the shadow of guest PID 0,
  207 * so that guest tlbes with TID=0 can be accessed at any time. */
 208void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
 209{
 210	preempt_disable();
 211	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
 212			get_cur_as(&vcpu_e500->vcpu),
 213			get_cur_pid(&vcpu_e500->vcpu),
 214			get_cur_pr(&vcpu_e500->vcpu), 1);
 215	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
 216			get_cur_as(&vcpu_e500->vcpu), 0,
 217			get_cur_pr(&vcpu_e500->vcpu), 1);
 218	preempt_enable();
 219}
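
     /*
      * Note: the recomputed shadow_pid/shadow_pid1 are what the guest-entry
      * path is expected to load into the host PID and PID1 SPRs, so guest
      * TLB entries with TID=0 (shadowed under shadow_pid1) keep matching
      * regardless of the current guest PID.
      */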
 220
 221void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
 222{
 223	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 224	struct tlbe *tlbe;
 225	int i, tlbsel;
 226
 227	printk("| %8s | %8s | %8s | %8s | %8s |\n",
 228			"nr", "mas1", "mas2", "mas3", "mas7");
 229
 230	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
 231		printk("Guest TLB%d:\n", tlbsel);
 232		for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
 233			tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
 234			if (tlbe->mas1 & MAS1_VALID)
 235				printk(" G[%d][%3d] |  %08X | %08X | %08X | %08X |\n",
 236					tlbsel, i, tlbe->mas1, tlbe->mas2,
 237					tlbe->mas3, tlbe->mas7);
 238		}
 239	}
 240}
 241
 242static inline unsigned int tlb0_get_next_victim(
 243		struct kvmppc_vcpu_e500 *vcpu_e500)
 244{
 245	unsigned int victim;
 246
 247	victim = vcpu_e500->gtlb_nv[0]++;
 248	if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
 249		vcpu_e500->gtlb_nv[0] = 0;
 250
 251	return victim;
 252}
 253
 254static inline unsigned int tlb1_max_shadow_size(void)
 255{
 256	/* reserve one entry for magic page */
 257	return tlb1_entry_num - tlbcam_index - 1;
 258}
 259
 260static inline int tlbe_is_writable(struct tlbe *tlbe)
 261{
 262	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
 263}
 264
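     /*
      * Worked example (permission bits only, for illustration): with the
      * guest in supervisor mode (usermode == 0), a guest entry carrying
      * MAS3_SR|MAS3_SW comes back as MAS3_UR|MAS3_UW plus the full
      * E500_TLB_SUPER_PERM_MASK, i.e. the guest's supervisor permissions
      * are mirrored into the user bits and the host kernel always retains
      * supervisor access to the shadow page.
      */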
 265static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
 266{
 267	/* Mask off reserved bits. */
 268	mas3 &= MAS3_ATTRIB_MASK;
 269
 270	if (!usermode) {
 271		/* Guest is in supervisor mode,
 272		 * so we need to translate guest
 273		 * supervisor permissions into user permissions. */
 274		mas3 &= ~E500_TLB_USER_PERM_MASK;
 275		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
 276	}
 277
 278	return mas3 | E500_TLB_SUPER_PERM_MASK;
 279}
 280
 281static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
 282{
 283#ifdef CONFIG_SMP
 284	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
 285#else
 286	return mas2 & MAS2_ATTRIB_MASK;
 287#endif
 288}
 289
 290/*
  291 * Write a shadow TLB entry into the host TLB.
 292 */
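     /*
      * Interrupts are disabled across the sequence below because the MAS
      * registers are shared scratch state; a TLB miss taken inside an
      * interrupt handler could clobber them between the mtspr sequence
      * and the tlbwe.
      */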
 293static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
 294{
 295	unsigned long flags;
 296
 297	local_irq_save(flags);
 298	mtspr(SPRN_MAS0, mas0);
 299	mtspr(SPRN_MAS1, stlbe->mas1);
 300	mtspr(SPRN_MAS2, stlbe->mas2);
 301	mtspr(SPRN_MAS3, stlbe->mas3);
 302	mtspr(SPRN_MAS7, stlbe->mas7);
 303	asm volatile("isync; tlbwe" : : : "memory");
 304	local_irq_restore(flags);
 305}
 306
 307static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 308		int tlbsel, int esel, struct tlbe *stlbe)
 309{
 310	if (tlbsel == 0) {
 311		__write_host_tlbe(stlbe,
 312				  MAS0_TLBSEL(0) |
 313				  MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
 314	} else {
 315		__write_host_tlbe(stlbe,
 316				  MAS0_TLBSEL(1) |
 317				  MAS0_ESEL(to_htlb1_esel(esel)));
 318	}
 319	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
 320			     stlbe->mas3, stlbe->mas7);
 321}
 322
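     /*
      * Map the KVM magic (paravirt shared) page with a dedicated host TLB1
      * entry.  It goes in the slot at tlbcam_index, which
      * tlb1_max_shadow_size() keeps out of the normal shadow-TLB1 victim
      * rotation (the "reserve one entry for magic page" note above).
      */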
 323void kvmppc_map_magic(struct kvm_vcpu *vcpu)
 324{
 325	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 326	struct tlbe magic;
 327	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
 328	unsigned int stid;
 329	pfn_t pfn;
 330
 331	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
 332	get_page(pfn_to_page(pfn));
 333
 334	preempt_disable();
 335	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);
 336
 337	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
 338		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
 339	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
 340	magic.mas3 = (pfn << PAGE_SHIFT) |
 341		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
 342	magic.mas7 = pfn >> (32 - PAGE_SHIFT);
 343
 344	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
 345	preempt_enable();
 346}
 347
 348void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
 349{
 350	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 351
  352	/* The shadow PID may have expired on the local core */
 353	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
 354}
 355
 356void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
 357{
 358}
 359
 360static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
 361					 int tlbsel, int esel)
 362{
 363	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
 364	struct vcpu_id_table *idt = vcpu_e500->idt;
 365	unsigned int pr, tid, ts, pid;
 366	u32 val, eaddr;
 367	unsigned long flags;
 368
 369	ts = get_tlb_ts(gtlbe);
 370	tid = get_tlb_tid(gtlbe);
 371
 372	preempt_disable();
 373
 374	/* One guest ID may be mapped to two shadow IDs */
 375	for (pr = 0; pr < 2; pr++) {
 376		/*
 377		 * The shadow PID can have a valid mapping on at most one
 378		 * host CPU.  In the common case, it will be valid on this
 379		 * CPU, in which case (for TLB0) we do a local invalidation
 380		 * of the specific address.
 381		 *
 382		 * If the shadow PID is not valid on the current host CPU, or
 383		 * if we're invalidating a TLB1 entry, we invalidate the
 384		 * entire shadow PID.
 385		 */
 386		if (tlbsel == 1 ||
 387		    (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
 388			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
 389			continue;
 390		}
 391
 392		/*
 393		 * The guest is invalidating a TLB0 entry which is in a PID
 394		 * that has a valid shadow mapping on this host CPU.  We
  395		 * search host TLB0 to invalidate its shadow TLB entry,
 396		 * similar to __tlbil_va except that we need to look in AS1.
 397		 */
 398		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
 399		eaddr = get_tlb_eaddr(gtlbe);
 400
 401		local_irq_save(flags);
 402
 403		mtspr(SPRN_MAS6, val);
 404		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
 405		val = mfspr(SPRN_MAS1);
 406		if (val & MAS1_VALID) {
 407			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
 408			asm volatile("tlbwe");
 409		}
 410
 411		local_irq_restore(flags);
 412	}
 413
 414	preempt_enable();
 415}
 416
 417/* Search the guest TLB for a matching entry. */
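     /*
      * TLB0 is treated as set-associative: low eaddr bits above PAGE_SHIFT
      * select the set and only that set's KVM_E500_TLB0_WAY_NUM ways are
      * scanned (e.g. with 2 ways, an illustrative value, each lookup
      * touches just 2 entries).  TLB1 is fully associative and is scanned
      * in full.
      */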
 418static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
 419		gva_t eaddr, int tlbsel, unsigned int pid, int as)
 420{
 421	int size = vcpu_e500->gtlb_size[tlbsel];
 422	int set_base;
 423	int i;
 424
 425	if (tlbsel == 0) {
 426		int mask = size / KVM_E500_TLB0_WAY_NUM - 1;
 427		set_base = (eaddr >> PAGE_SHIFT) & mask;
 428		set_base *= KVM_E500_TLB0_WAY_NUM;
 429		size = KVM_E500_TLB0_WAY_NUM;
 430	} else {
 431		set_base = 0;
 432	}
 433
 434	for (i = 0; i < size; i++) {
 435		struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
 436		unsigned int tid;
 437
 438		if (eaddr < get_tlb_eaddr(tlbe))
 439			continue;
 440
 441		if (eaddr > get_tlb_end(tlbe))
 442			continue;
 443
 444		tid = get_tlb_tid(tlbe);
 445		if (tid && (tid != pid))
 446			continue;
 447
 448		if (!get_tlb_v(tlbe))
 449			continue;
 450
 451		if (get_tlb_ts(tlbe) != as && as != -1)
 452			continue;
 453
 454		return set_base + i;
 455	}
 456
 457	return -1;
 458}
 459
 460static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv,
 461					  struct tlbe *gtlbe,
 462					  pfn_t pfn)
 463{
 464	priv->pfn = pfn;
 465	priv->flags = E500_TLB_VALID;
 466
 467	if (tlbe_is_writable(gtlbe))
 468		priv->flags |= E500_TLB_DIRTY;
 469}
 470
 471static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv)
 472{
 473	if (priv->flags & E500_TLB_VALID) {
 474		if (priv->flags & E500_TLB_DIRTY)
 475			kvm_release_pfn_dirty(priv->pfn);
 476		else
 477			kvm_release_pfn_clean(priv->pfn);
 478
 479		priv->flags = 0;
 480	}
 481}
 482
 483static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
 484		unsigned int eaddr, int as)
 485{
 486	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 487	unsigned int victim, pidsel, tsized;
 488	int tlbsel;
 489
 490	/* since we only have two TLBs, only lower bit is used. */
 491	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
 492	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
 493	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
 494	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;
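     	/*
     	 * The shifts above pull the default TLB select, TID select and
     	 * TSIZE out of MAS4 (the TLBSELD/TIDSELD/TSIZED fields); these
     	 * defaults seed the MAS0/MAS1 values delivered to the guest for
     	 * this miss.
     	 */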
 495
 496	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
 497		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
 498	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
 499		| MAS1_TID(vcpu_e500->pid[pidsel])
 500		| MAS1_TSIZE(tsized);
 501	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
 502		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
 503	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
 504	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
 505		| (get_cur_pid(vcpu) << 16)
 506		| (as ? MAS6_SAS : 0);
 507	vcpu_e500->mas7 = 0;
 508}
 509
 510static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
 511					   struct tlbe *gtlbe, int tsize,
 512					   struct tlbe_priv *priv,
 513					   u64 gvaddr, struct tlbe *stlbe)
 514{
 515	pfn_t pfn = priv->pfn;
 516	unsigned int stid;
 517
 518	stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
 519				   get_tlb_tid(gtlbe),
 520				   get_cur_pr(&vcpu_e500->vcpu), 0);
 521
 522	/* Force TS=1 IPROT=0 for all guest mappings. */
 523	stlbe->mas1 = MAS1_TSIZE(tsize)
 524		| MAS1_TID(stid) | MAS1_TS | MAS1_VALID;
 525	stlbe->mas2 = (gvaddr & MAS2_EPN)
 526		| e500_shadow_mas2_attrib(gtlbe->mas2,
 527				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
 528	stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
 529		| e500_shadow_mas3_attrib(gtlbe->mas3,
 530				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
 531	stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
 532}
 533
 534
 535static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 536	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel,
 537	struct tlbe *stlbe)
 538{
 539	struct kvm_memory_slot *slot;
 540	unsigned long pfn, hva;
 541	int pfnmap = 0;
 542	int tsize = BOOK3E_PAGESZ_4K;
 543	struct tlbe_priv *priv;
 544
 545	/*
 546	 * Translate guest physical to true physical, acquiring
 547	 * a page reference if it is normal, non-reserved memory.
 548	 *
 549	 * gfn_to_memslot() must succeed because otherwise we wouldn't
 550	 * have gotten this far.  Eventually we should just pass the slot
 551	 * pointer through from the first lookup.
 552	 */
 553	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
 554	hva = gfn_to_hva_memslot(slot, gfn);
 555
 556	if (tlbsel == 1) {
 557		struct vm_area_struct *vma;
 558		down_read(&current->mm->mmap_sem);
 559
 560		vma = find_vma(current->mm, hva);
 561		if (vma && hva >= vma->vm_start &&
 562		    (vma->vm_flags & VM_PFNMAP)) {
 563			/*
 564			 * This VMA is a physically contiguous region (e.g.
 565			 * /dev/mem) that bypasses normal Linux page
 566			 * management.  Find the overlap between the
 567			 * vma and the memslot.
 568			 */
 569
 570			unsigned long start, end;
 571			unsigned long slot_start, slot_end;
 572
 573			pfnmap = 1;
 574
 575			start = vma->vm_pgoff;
 576			end = start +
 577			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);
 578
 579			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);
 580
 581			slot_start = pfn - (gfn - slot->base_gfn);
 582			slot_end = slot_start + slot->npages;
 583
 584			if (start < slot_start)
 585				start = slot_start;
 586			if (end > slot_end)
 587				end = slot_end;
 588
 589			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
 590				MAS1_TSIZE_SHIFT;
 591
 592			/*
 593			 * e500 doesn't implement the lowest tsize bit,
 594			 * or 1K pages.
 595			 */
 596			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
 597
 598			/*
 599			 * Now find the largest tsize (up to what the guest
 600			 * requested) that will cover gfn, stay within the
 601			 * range, and for which gfn and pfn are mutually
 602			 * aligned.
 603			 */
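     			/*
     			 * With the BOOK3E_PAGESZ_* encoding assumed here,
     			 * tsize_pages = 1 << (tsize - 2) is the number of 4K
     			 * pages covered: e.g. tsize 4 spans 4 pages (16K) and
     			 * tsize 6 spans 16 pages (64K).  Only even tsizes are
     			 * tried, per the "lowest tsize bit" note above.
     			 */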
 604
 605			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
 606				unsigned long gfn_start, gfn_end, tsize_pages;
 607				tsize_pages = 1 << (tsize - 2);
 608
 609				gfn_start = gfn & ~(tsize_pages - 1);
 610				gfn_end = gfn_start + tsize_pages;
 611
 612				if (gfn_start + pfn - gfn < start)
 613					continue;
 614				if (gfn_end + pfn - gfn > end)
 615					continue;
 616				if ((gfn & (tsize_pages - 1)) !=
 617				    (pfn & (tsize_pages - 1)))
 618					continue;
 619
 620				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
 621				pfn &= ~(tsize_pages - 1);
 622				break;
 623			}
 624		}
 625
 626		up_read(&current->mm->mmap_sem);
 627	}
 628
 629	if (likely(!pfnmap)) {
 630		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
 631		if (is_error_pfn(pfn)) {
 632			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
 633					(long)gfn);
 634			kvm_release_pfn_clean(pfn);
 635			return;
 636		}
 637	}
 638
 639	/* Drop old priv and setup new one. */
 640	priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
 641	kvmppc_e500_priv_release(priv);
 642	kvmppc_e500_priv_setup(priv, gtlbe, pfn);
 643
 644	kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe);
 645}
 646
  647/* XXX only map the one-to-one case, for now use TLB0 */
 648static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 649				int esel, struct tlbe *stlbe)
 650{
 651	struct tlbe *gtlbe;
 652
 653	gtlbe = &vcpu_e500->gtlb_arch[0][esel];
 654
 655	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
 656			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
 657			gtlbe, 0, esel, stlbe);
 658
 659	return esel;
 660}
 661
 662/* Caller must ensure that the specified guest TLB entry is safe to insert into
 663 * the shadow TLB. */
  664/* XXX for both one-to-one and one-to-many, for now use TLB1 */
 665static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
 666		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
 667{
 668	unsigned int victim;
 669
 670	victim = vcpu_e500->gtlb_nv[1]++;
 671
 672	if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size()))
 673		vcpu_e500->gtlb_nv[1] = 0;
 674
 675	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe);
 676
 677	return victim;
 678}
 679
 680void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
 681{
 682	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 683
 684	/* Recalc shadow pid since MSR changes */
 685	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
 686}
 687
 688static inline int kvmppc_e500_gtlbe_invalidate(
 689				struct kvmppc_vcpu_e500 *vcpu_e500,
 690				int tlbsel, int esel)
 691{
 692	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
 693
 694	if (unlikely(get_tlb_iprot(gtlbe)))
 695		return -1;
 696
 697	gtlbe->mas1 = 0;
 698
 699	return 0;
 700}
 701
 702int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
 703{
 704	int esel;
 705
 706	if (value & MMUCSR0_TLB0FI)
 707		for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
 708			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
 709	if (value & MMUCSR0_TLB1FI)
 710		for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
 711			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
 712
 713	/* Invalidate all vcpu id mappings */
 714	kvmppc_e500_id_table_reset_all(vcpu_e500);
 715
 716	return EMULATE_DONE;
 717}
 718
 719int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
 720{
 721	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 722	unsigned int ia;
 723	int esel, tlbsel;
 724	gva_t ea;
 725
 726	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);
 727
 728	ia = (ea >> 2) & 0x1;
 729
 730	/* since we only have two TLBs, only lower bit is used. */
 731	tlbsel = (ea >> 3) & 0x1;
 732
 733	if (ia) {
 734		/* invalidate all entries */
 735		for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
 736			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
 737	} else {
 738		ea &= 0xfffff000;
 739		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
 740				get_cur_pid(vcpu), -1);
 741		if (esel >= 0)
 742			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
 743	}
 744
 745	/* Invalidate all vcpu id mappings */
 746	kvmppc_e500_id_table_reset_all(vcpu_e500);
 747
 748	return EMULATE_DONE;
 749}
 750
 751int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
 752{
 753	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 754	int tlbsel, esel;
 755	struct tlbe *gtlbe;
 756
 757	tlbsel = get_tlb_tlbsel(vcpu_e500);
 758	esel = get_tlb_esel(vcpu_e500, tlbsel);
 759
 760	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
 761	vcpu_e500->mas0 &= ~MAS0_NV(~0);
 762	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
 763	vcpu_e500->mas1 = gtlbe->mas1;
 764	vcpu_e500->mas2 = gtlbe->mas2;
 765	vcpu_e500->mas3 = gtlbe->mas3;
 766	vcpu_e500->mas7 = gtlbe->mas7;
 767
 768	return EMULATE_DONE;
 769}
 770
 771int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
 772{
 773	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 774	int as = !!get_cur_sas(vcpu_e500);
 775	unsigned int pid = get_cur_spid(vcpu_e500);
 776	int esel, tlbsel;
 777	struct tlbe *gtlbe = NULL;
 778	gva_t ea;
 779
 780	ea = kvmppc_get_gpr(vcpu, rb);
 781
 782	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
 783		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
 784		if (esel >= 0) {
 785			gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
 786			break;
 787		}
 788	}
 789
 790	if (gtlbe) {
 791		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
 792			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
 793		vcpu_e500->mas1 = gtlbe->mas1;
 794		vcpu_e500->mas2 = gtlbe->mas2;
 795		vcpu_e500->mas3 = gtlbe->mas3;
 796		vcpu_e500->mas7 = gtlbe->mas7;
 797	} else {
 798		int victim;
 799
 800		/* since we only have two TLBs, only lower bit is used. */
 801		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
 802		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
 803
 804		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
 805			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
 806		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
 807			| (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
 808			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
 809		vcpu_e500->mas2 &= MAS2_EPN;
 810		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
 811		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
 812		vcpu_e500->mas7 = 0;
 813	}
 814
 815	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
 816	return EMULATE_DONE;
 817}
 818
 819int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
 820{
 821	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 822	struct tlbe *gtlbe;
 823	int tlbsel, esel;
 824
 825	tlbsel = get_tlb_tlbsel(vcpu_e500);
 826	esel = get_tlb_esel(vcpu_e500, tlbsel);
 827
 828	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
 829
 830	if (get_tlb_v(gtlbe))
 831		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
 832
 833	gtlbe->mas1 = vcpu_e500->mas1;
 834	gtlbe->mas2 = vcpu_e500->mas2;
 835	gtlbe->mas3 = vcpu_e500->mas3;
 836	gtlbe->mas7 = vcpu_e500->mas7;
 837
 838	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
 839			     gtlbe->mas3, gtlbe->mas7);
 840
 841	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
 842	if (tlbe_is_host_safe(vcpu, gtlbe)) {
 843		struct tlbe stlbe;
 844		int stlbsel, sesel;
 845		u64 eaddr;
 846		u64 raddr;
 847
 848		preempt_disable();
 849		switch (tlbsel) {
 850		case 0:
 851			/* TLB0 */
 852			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
 853			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
 854
 855			stlbsel = 0;
 856			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
 857
 858			break;
 859
 860		case 1:
 861			/* TLB1 */
 862			eaddr = get_tlb_eaddr(gtlbe);
 863			raddr = get_tlb_raddr(gtlbe);
 864
 865			/* Create a 4KB mapping on the host.
 866			 * If the guest wanted a large page,
 867			 * only the first 4KB is mapped here and the rest
 868			 * are mapped on the fly. */
 869			stlbsel = 1;
 870			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
 871					raddr >> PAGE_SHIFT, gtlbe, &stlbe);
 872			break;
 873
 874		default:
 875			BUG();
 876		}
 877		write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
 878		preempt_enable();
 879	}
 880
 881	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
 882	return EMULATE_DONE;
 883}
 884
 885int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 886{
 887	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 888
 889	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
 890}
 891
 892int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
 893{
 894	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 895
 896	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
 897}
 898
 899void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
 900{
 901	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
 902
 903	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
 904}
 905
 906void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
 907{
 908	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);
 909
 910	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
 911}
 912
 913gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
 914			gva_t eaddr)
 915{
 916	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 917	struct tlbe *gtlbe =
 918		&vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
 919	u64 pgmask = get_tlb_bytes(gtlbe) - 1;
 920
 921	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
 922}
 923
 924void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
 925{
 926}
 927
 928void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
 929			unsigned int index)
 930{
 931	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 932	struct tlbe_priv *priv;
 933	struct tlbe *gtlbe, stlbe;
 934	int tlbsel = tlbsel_of(index);
 935	int esel = esel_of(index);
 936	int stlbsel, sesel;
 937
 938	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
 939
 940	preempt_disable();
 941	switch (tlbsel) {
 942	case 0:
 943		stlbsel = 0;
 944		sesel = esel;
 945		priv = &vcpu_e500->gtlb_priv[stlbsel][sesel];
 946
 947		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
 948					priv, eaddr, &stlbe);
 949		break;
 950
 951	case 1: {
 952		gfn_t gfn = gpaddr >> PAGE_SHIFT;
 953
 954		stlbsel = 1;
 955		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
 956					     gtlbe, &stlbe);
 957		break;
 958	}
 959
 960	default:
 961		BUG();
 962		break;
 963	}
 964
 965	write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe);
 966	preempt_enable();
 967}
 968
 969int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
 970				gva_t eaddr, unsigned int pid, int as)
 971{
 972	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 973	int esel, tlbsel;
 974
 975	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
 976		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
 977		if (esel >= 0)
 978			return index_of(tlbsel, esel);
 979	}
 980
 981	return -1;
 982}
 983
 984void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
 985{
 986	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 987
 988	if (vcpu->arch.pid != pid) {
 989		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
 990		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
 991	}
 992}
 993
 994void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
 995{
 996	struct tlbe *tlbe;
 997
 998	/* Insert large initial mapping for guest. */
 999	tlbe = &vcpu_e500->gtlb_arch[1][0];
1000	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
1001	tlbe->mas2 = 0;
1002	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
1003	tlbe->mas7 = 0;
1004
1005	/* 4K map for serial output. Used by kernel wrapper. */
1006	tlbe = &vcpu_e500->gtlb_arch[1][1];
1007	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
1008	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
1009	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
1010	tlbe->mas7 = 0;
1011}
1012
1013int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
1014{
1015	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;
1016
1017	vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
1018	vcpu_e500->gtlb_arch[0] =
1019		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
1020	if (vcpu_e500->gtlb_arch[0] == NULL)
1021		goto err_out;
1022
1023	vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
1024	vcpu_e500->gtlb_arch[1] =
1025		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
1026	if (vcpu_e500->gtlb_arch[1] == NULL)
1027		goto err_out_guest0;
1028
1029	vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *)
1030		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
1031	if (vcpu_e500->gtlb_priv[0] == NULL)
1032		goto err_out_guest1;
1033	vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *)
1034		kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
1035
1036	if (vcpu_e500->gtlb_priv[1] == NULL)
1037		goto err_out_priv0;
1038
1039	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
1040		goto err_out_priv1;
1041
1042	/* Init TLB configuration register */
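     	/*
     	 * The low 12 bits of the host TLBnCFG values (the NENTRY field) are
     	 * replaced with the guest-visible TLB sizes set up above.
     	 */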
1043	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
1044	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
1045	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
1046	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];
1047
1048	return 0;
1049
1050err_out_priv1:
1051	kfree(vcpu_e500->gtlb_priv[1]);
1052err_out_priv0:
1053	kfree(vcpu_e500->gtlb_priv[0]);
1054err_out_guest1:
1055	kfree(vcpu_e500->gtlb_arch[1]);
1056err_out_guest0:
1057	kfree(vcpu_e500->gtlb_arch[0]);
1058err_out:
1059	return -1;
1060}
1061
1062void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
1063{
1064	int stlbsel, i;
1065
1066	/* release all privs */
1067	for (stlbsel = 0; stlbsel < 2; stlbsel++)
1068		for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
1069			struct tlbe_priv *priv =
1070				&vcpu_e500->gtlb_priv[stlbsel][i];
1071			kvmppc_e500_priv_release(priv);
1072		}
1073
1074	kvmppc_e500_id_table_free(vcpu_e500);
1075	kfree(vcpu_e500->gtlb_arch[1]);
1076	kfree(vcpu_e500->gtlb_arch[0]);
1077}