v5.9
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/pagemap.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

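/*
 * The host maps a shared "magic page" at the top of the guest effective
 * address space (-4096); magic_var() computes the guest address of one
 * field of struct kvm_vcpu_arch_shared within that page.
 */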
#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

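/*
 * Pre-encoded PowerPC opcodes and field masks: candidate instructions
 * are matched against these templates, and register fields are ORed in
 * when new instructions are synthesized.
 */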
#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4

static bool kvm_patching_worked = true;
extern char kvm_tmp[];
extern char kvm_tmp_end[];
static int kvm_tmp_index;

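/* Replace one instruction in place and flush the icache for it. */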
static void __init kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

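/*
 * Helpers that rewrite *inst into a load or store of a magic-page
 * variable. Only the displacement field is taken from addr: the
 * sign-extended 16 bits are enough to reach the magic page at the top
 * of the address space. ld/std are DS-form, so their two low
 * displacement bits stay clear (the & 0xfffc). On the big-endian 32-bit
 * configurations, a 64-bit shared field is accessed through its low
 * word, at addr + 4.
 */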
static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void __init kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

static void __init kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

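/*
 * Hand out len bytes from the kvm_tmp trampoline area, which is bounded
 * by kvm_tmp_end.
 */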
static u32 * __init kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > (kvm_tmp_end - kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}

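/*
 * The kvm_emulate_* blobs below are assembly templates (defined in
 * kvm_emul.S); the *_offs values give the instruction-word offsets of
 * the template slots that need per-site fixup: the branch back to the
 * caller, the register moves, and a copy of the original instruction.
 */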
extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
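	/*
	 * The emulation template itself clobbers r30/r31; judging from
	 * the scratch1/scratch2 loads below, those registers are saved
	 * to the magic page, so an mtmsrd from r30 or r31 reloads the
	 * value from there instead of copying the register directly.
	 */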
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

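/*
 * On BookE, guests toggle interrupts with wrtee/wrteei instead of
 * mtmsr; those get the same trampoline treatment.
 */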
#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void __init kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

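/*
 * On 32-bit Book3S, segment register updates via mtsrin are rewritten
 * as well; both the RT and RB fields get patched into the template.
 */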
#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

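/*
 * Runs on every CPU: ask the host via an ePAPR hypercall to map the
 * magic page at -4096 and report back the supported paravirt features.
 */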
static void __init kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}

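/*
 * Inspect one candidate instruction and patch it if it is something the
 * magic page lets us handle without trapping: first match with the RT
 * field masked off (mfspr/mtspr/mtmsr and friends), then with RT and RB
 * masked (mtsrin), then against exact opcodes (wrteei).
 */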
static void __init kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

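/*
 * Scan the whole kernel text, word by word, and patch everything
 * kvm_check_ins() knows how to handle.
 */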
static void __init kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

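/*
 * Guest-side init, run as a postcore initcall: if we are running under
 * KVM with ePAPR paravirt support and the host offers the magic page,
 * rewrite our own kernel text for fewer traps.
 */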
static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		return 0;

	if (!epapr_paravirt_enabled)
		return 0;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

	return 0;
}

postcore_initcall(kvm_guest_init);
v3.15
 
/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/kvm_para.h>
#include <linux/slab.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/sections.h>
#include <asm/cacheflush.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include <asm/epapr_hcalls.h>

#define KVM_MAGIC_PAGE		(-4096L)
#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)

#define KVM_INST_LWZ		0x80000000
#define KVM_INST_STW		0x90000000
#define KVM_INST_LD		0xe8000000
#define KVM_INST_STD		0xf8000000
#define KVM_INST_NOP		0x60000000
#define KVM_INST_B		0x48000000
#define KVM_INST_B_MASK		0x03ffffff
#define KVM_INST_B_MAX		0x01ffffff
#define KVM_INST_LI		0x38000000

#define KVM_MASK_RT		0x03e00000
#define KVM_RT_30		0x03c00000
#define KVM_MASK_RB		0x0000f800
#define KVM_INST_MFMSR		0x7c0000a6

#define SPR_FROM		0
#define SPR_TO			0x100

#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
				    (((sprn) & 0x1f) << 16) | \
				    (((sprn) & 0x3e0) << 6) | \
				    (moveto))

#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)

#define KVM_INST_TLBSYNC	0x7c00046c
#define KVM_INST_MTMSRD_L0	0x7c000164
#define KVM_INST_MTMSRD_L1	0x7c010164
#define KVM_INST_MTMSR		0x7c000124

#define KVM_INST_WRTEE		0x7c000106
#define KVM_INST_WRTEEI_0	0x7c000146
#define KVM_INST_WRTEEI_1	0x7c008146

#define KVM_INST_MTSRIN		0x7c0001e4

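/*
 * In this version the trampoline area is a static 1 MiB buffer; the
 * unused tail is given back to the allocator once patching is done
 * (see kvm_free_tmp() below).
 */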
static bool kvm_patching_worked = true;
char kvm_tmp[1024 * 1024];
static int kvm_tmp_index;

static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
{
	*inst = new_inst;
	flush_icache_range((ulong)inst, (ulong)inst + 4);
}

static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
#endif
}

static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
}

static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
{
#ifdef CONFIG_64BIT
	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
#else
	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
#endif
}

static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
{
	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
}

static void kvm_patch_ins_nop(u32 *inst)
{
	kvm_patch_ins(inst, KVM_INST_NOP);
}

static void kvm_patch_ins_b(u32 *inst, int addr)
{
#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
	/* On relocatable kernels interrupt handlers and our code
	   can be in different regions, so we don't patch them */

	if ((ulong)inst < (ulong)&__end_interrupts)
		return;
#endif

	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
}

static u32 *kvm_alloc(int len)
{
	u32 *p;

	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
				kvm_tmp_index, len);
		kvm_patching_worked = false;
		return NULL;
	}

	p = (void*)&kvm_tmp[kvm_tmp_index];
	kvm_tmp_index += len;

	return p;
}

extern u32 kvm_emulate_mtmsrd_branch_offs;
extern u32 kvm_emulate_mtmsrd_reg_offs;
extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
extern u32 kvm_emulate_mtmsrd_len;
extern u32 kvm_emulate_mtmsrd[];

static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_mtmsr_branch_offs;
extern u32 kvm_emulate_mtmsr_reg1_offs;
extern u32 kvm_emulate_mtmsr_reg2_offs;
extern u32 kvm_emulate_mtmsr_orig_ins_offs;
extern u32 kvm_emulate_mtmsr_len;
extern u32 kvm_emulate_mtmsr[];

static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#ifdef CONFIG_BOOKE

extern u32 kvm_emulate_wrtee_branch_offs;
extern u32 kvm_emulate_wrtee_reg_offs;
extern u32 kvm_emulate_wrtee_orig_ins_offs;
extern u32 kvm_emulate_wrtee_len;
extern u32 kvm_emulate_wrtee[];

static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

extern u32 kvm_emulate_wrteei_0_branch_offs;
extern u32 kvm_emulate_wrteei_0_len;
extern u32 kvm_emulate_wrteei_0[];

static void kvm_patch_ins_wrteei_0(u32 *inst)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

#ifdef CONFIG_PPC_BOOK3S_32

extern u32 kvm_emulate_mtsrin_branch_offs;
extern u32 kvm_emulate_mtsrin_reg1_offs;
extern u32 kvm_emulate_mtsrin_reg2_offs;
extern u32 kvm_emulate_mtsrin_orig_ins_offs;
extern u32 kvm_emulate_mtsrin_len;
extern u32 kvm_emulate_mtsrin[];

static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}

#endif

static void kvm_map_magic_page(void *data)
{
	u32 *features = data;

	ulong in[8] = {0};
	ulong out[8];

	in[0] = KVM_MAGIC_PAGE;
	in[1] = KVM_MAGIC_PAGE;

	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));

	*features = out[0];
}

static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;

	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}

extern u32 kvm_template_start[];
extern u32 kvm_template_end[];

static void kvm_use_magic_page(void)
{
	u32 *p;
	u32 *start, *end;
	u32 tmp;
	u32 features;

	/* Tell the host to map the magic page to -4096 on all CPUs */
	on_each_cpu(kvm_map_magic_page, &features, 1);

	/* Quick self-test to see if the mapping works */
	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
		kvm_patching_worked = false;
		return;
	}

	/* Now loop through all code and find instructions */
	start = (void*)_stext;
	end = (void*)_etext;

	/*
	 * Being interrupted in the middle of patching would
	 * be bad for SPRG4-7, which KVM can't keep in sync
	 * with emulated accesses because reads don't trap.
	 */
	local_irq_disable();

	for (p = start; p < end; p++) {
		/* Avoid patching the template code */
		if (p >= kvm_template_start && p < kvm_template_end) {
			p = kvm_template_end - 1;
			continue;
		}
		kvm_check_ins(p, features);
	}

	local_irq_enable();

	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
			 kvm_patching_worked ? "worked" : "failed");
}

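/*
 * Return the unused tail of the kvm_tmp buffer to the page allocator;
 * free_reserved_area() frees everything between the first unused byte
 * and the end of the buffer.
 */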
static __init void kvm_free_tmp(void)
{
	free_reserved_area(&kvm_tmp[kvm_tmp_index],
			   &kvm_tmp[ARRAY_SIZE(kvm_tmp)], -1, NULL);
}

static int __init kvm_guest_init(void)
{
	if (!kvm_para_available())
		goto free_tmp;

	if (!epapr_paravirt_enabled)
		goto free_tmp;

	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
		kvm_use_magic_page();

#ifdef CONFIG_PPC_BOOK3S_64
	/* Enable napping */
	powersave_nap = 1;
#endif

free_tmp:
	kvm_free_tmp();

	return 0;
}

postcore_initcall(kvm_guest_init);