v5.9
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
  4 * Copyright 2010-2011 Freescale Semiconductor, Inc.
  5 *
  6 * Authors:
  7 *     Alexander Graf <agraf@suse.de>
  8 */
  9
 10#include <linux/kvm_host.h>
 11#include <linux/init.h>
 12#include <linux/export.h>
 13#include <linux/kmemleak.h>
 14#include <linux/kvm_para.h>
 15#include <linux/slab.h>
 16#include <linux/of.h>
 17#include <linux/pagemap.h>
 18
 19#include <asm/reg.h>
 20#include <asm/sections.h>
 21#include <asm/cacheflush.h>
 22#include <asm/disassemble.h>
 23#include <asm/ppc-opcode.h>
 24#include <asm/epapr_hcalls.h>
 25
 26#define KVM_MAGIC_PAGE		(-4096L)
 27#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
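/*
 * The host maps a "magic page" at the top of the guest effective address
 * space (-4096) that mirrors struct kvm_vcpu_arch_shared; magic_var()
 * yields the effective address of one field in that page, so patched
 * code can read and write emulated SPRs with ordinary loads and stores
 * instead of trapping.
 */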
 28
 29#define KVM_INST_LWZ		0x80000000
 30#define KVM_INST_STW		0x90000000
 31#define KVM_INST_LD		0xe8000000
 32#define KVM_INST_STD		0xf8000000
 33#define KVM_INST_NOP		0x60000000
 34#define KVM_INST_B		0x48000000
 35#define KVM_INST_B_MASK		0x03ffffff
 36#define KVM_INST_B_MAX		0x01ffffff
 37#define KVM_INST_LI		0x38000000
 38
 39#define KVM_MASK_RT		0x03e00000
 40#define KVM_RT_30		0x03c00000
 41#define KVM_MASK_RB		0x0000f800
 42#define KVM_INST_MFMSR		0x7c0000a6
 43
 44#define SPR_FROM		0
 45#define SPR_TO			0x100
 46
 47#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
 48				    (((sprn) & 0x1f) << 16) | \
 49				    (((sprn) & 0x3e0) << 6) | \
 50				    (moveto))
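/*
 * mfspr and mtspr encode the 10-bit SPR number as two swapped 5-bit
 * halves; ORing in SPR_TO (0x100) turns the mfspr extended opcode
 * (339, i.e. 0x2a6 once shifted) into mtspr (467, 0x3a6).
 */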
 51
 52#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
 53#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
 54
 55#define KVM_INST_TLBSYNC	0x7c00046c
 56#define KVM_INST_MTMSRD_L0	0x7c000164
 57#define KVM_INST_MTMSRD_L1	0x7c010164
 58#define KVM_INST_MTMSR		0x7c000124
 59
 60#define KVM_INST_WRTEE		0x7c000106
 61#define KVM_INST_WRTEEI_0	0x7c000146
 62#define KVM_INST_WRTEEI_1	0x7c008146
 63
 64#define KVM_INST_MTSRIN		0x7c0001e4
 65
 66static bool kvm_patching_worked = true;
 67extern char kvm_tmp[];
 68extern char kvm_tmp_end[];
 69static int kvm_tmp_index;
 70
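/*
 * Rewrite a single instruction in place and flush its cache line so the
 * new instruction becomes visible to instruction fetch.
 */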
 71static void __init kvm_patch_ins(u32 *inst, u32 new_inst)
 72{
 73	*inst = new_inst;
 74	flush_icache_range((ulong)inst, (ulong)inst + 4);
 75}
 76
 77static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
 78{
 79#ifdef CONFIG_64BIT
 80	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
 81#else
 82	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
 83#endif
 84}
 85
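/*
 * Load a 64-bit magic page field.  A 32-bit kernel only loads the low
 * word, which in the big-endian layout lives at addr + 4.
 */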
 86static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
 87{
 88#ifdef CONFIG_64BIT
 89	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
 90#else
 91	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
 92#endif
 93}
 94
 95static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
 96{
 97	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
 98}
 99
100static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
101{
102#ifdef CONFIG_64BIT
103	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
104#else
105	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
106#endif
107}
108
109static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
110{
111	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
112}
113
114static void __init kvm_patch_ins_nop(u32 *inst)
115{
116	kvm_patch_ins(inst, KVM_INST_NOP);
117}
118
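/*
 * The unconditional branch 'b' carries a 26-bit signed displacement, so
 * a stub placed more than KVM_INST_B_MAX bytes away cannot be reached
 * and patching has to be abandoned for that site.
 */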
119static void __init kvm_patch_ins_b(u32 *inst, int addr)
120{
121#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
122	/* On relocatable kernels interrupt handlers and our code
123	   can be in different regions, so we don't patch them */
124
125	if ((ulong)inst < (ulong)&__end_interrupts)
126		return;
127#endif
128
129	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
130}
131
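/*
 * Hand out the next len bytes of the kvm_tmp scratch area, which holds
 * the per-call-site emulation stubs generated below.
 */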
132static u32 * __init kvm_alloc(int len)
133{
134	u32 *p;
135
136	if ((kvm_tmp_index + len) > (kvm_tmp_end - kvm_tmp)) {
137		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
138				kvm_tmp_index, len);
139		kvm_patching_worked = false;
140		return NULL;
141	}
142
143	p = (void*)&kvm_tmp[kvm_tmp_index];
144	kvm_tmp_index += len;
145
146	return p;
147}
148
149extern u32 kvm_emulate_mtmsrd_branch_offs;
150extern u32 kvm_emulate_mtmsrd_reg_offs;
151extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
152extern u32 kvm_emulate_mtmsrd_len;
153extern u32 kvm_emulate_mtmsrd[];
154
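/*
 * The kvm_emulate_* blobs (from kvm_emul.S) are templates: a fresh copy
 * is made in kvm_tmp for every patched call site, its branch-back offset
 * and register operands are fixed up, the original instruction is
 * stashed inside the copy as a fallback for cases the stub cannot
 * handle, and the call site itself is replaced by a branch into the
 * copy.
 */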
155static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
156{
157	u32 *p;
158	int distance_start;
159	int distance_end;
160	ulong next_inst;
161
162	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
163	if (!p)
164		return;
165
166	/* Find out where we are and put everything there */
167	distance_start = (ulong)p - (ulong)inst;
168	next_inst = ((ulong)inst + 4);
169	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];
170
171	/* Make sure we only write valid b instructions */
172	if (distance_start > KVM_INST_B_MAX) {
173		kvm_patching_worked = false;
174		return;
175	}
176
177	/* Modify the chunk to fit the invocation */
178	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
179	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
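	/*
	 * The template clobbers r30/r31 after saving their old values to
	 * the magic page's scratch2/scratch1 slots; if the guest
	 * instruction itself names one of them, reload the saved value
	 * from the corresponding scratch slot instead of using the
	 * register directly.
	 */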
180	switch (get_rt(rt)) {
181	case 30:
182		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
183				 magic_var(scratch2), KVM_RT_30);
184		break;
185	case 31:
186		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
187				 magic_var(scratch1), KVM_RT_30);
188		break;
189	default:
190		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
191		break;
192	}
193
194	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
195	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);
196
197	/* Patch the invocation */
198	kvm_patch_ins_b(inst, distance_start);
199}
200
201extern u32 kvm_emulate_mtmsr_branch_offs;
202extern u32 kvm_emulate_mtmsr_reg1_offs;
203extern u32 kvm_emulate_mtmsr_reg2_offs;
204extern u32 kvm_emulate_mtmsr_orig_ins_offs;
205extern u32 kvm_emulate_mtmsr_len;
206extern u32 kvm_emulate_mtmsr[];
207
208static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
209{
210	u32 *p;
211	int distance_start;
212	int distance_end;
213	ulong next_inst;
214
215	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
216	if (!p)
217		return;
218
219	/* Find out where we are and put everything there */
220	distance_start = (ulong)p - (ulong)inst;
221	next_inst = ((ulong)inst + 4);
222	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];
223
224	/* Make sure we only write valid b instructions */
225	if (distance_start > KVM_INST_B_MAX) {
226		kvm_patching_worked = false;
227		return;
228	}
229
230	/* Modify the chunk to fit the invocation */
231	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
232	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
233
234	/* Make clobbered registers work too */
235	switch (get_rt(rt)) {
236	case 30:
237		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
238				 magic_var(scratch2), KVM_RT_30);
239		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
240				 magic_var(scratch2), KVM_RT_30);
241		break;
242	case 31:
243		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
244				 magic_var(scratch1), KVM_RT_30);
245		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
246				 magic_var(scratch1), KVM_RT_30);
247		break;
248	default:
249		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
250		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
251		break;
252	}
253
254	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
255	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);
256
257	/* Patch the invocation */
258	kvm_patch_ins_b(inst, distance_start);
259}
260
261#ifdef CONFIG_BOOKE
262
263extern u32 kvm_emulate_wrtee_branch_offs;
264extern u32 kvm_emulate_wrtee_reg_offs;
265extern u32 kvm_emulate_wrtee_orig_ins_offs;
266extern u32 kvm_emulate_wrtee_len;
267extern u32 kvm_emulate_wrtee[];
268
269static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
270{
271	u32 *p;
272	int distance_start;
273	int distance_end;
274	ulong next_inst;
275
276	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
277	if (!p)
278		return;
279
280	/* Find out where we are and put everything there */
281	distance_start = (ulong)p - (ulong)inst;
282	next_inst = ((ulong)inst + 4);
283	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];
284
285	/* Make sure we only write valid b instructions */
286	if (distance_start > KVM_INST_B_MAX) {
287		kvm_patching_worked = false;
288		return;
289	}
290
291	/* Modify the chunk to fit the invocation */
292	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
293	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;
294
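	/*
	 * wrteei 1 has no source register, so rewrite the template's
	 * register load into "li r30, MSR_EE" directly.
	 */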
295	if (imm_one) {
296		p[kvm_emulate_wrtee_reg_offs] =
297			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
298	} else {
299		/* Make clobbered registers work too */
300		switch (get_rt(rt)) {
301		case 30:
302			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
303					 magic_var(scratch2), KVM_RT_30);
304			break;
305		case 31:
306			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
307					 magic_var(scratch1), KVM_RT_30);
308			break;
309		default:
310			p[kvm_emulate_wrtee_reg_offs] |= rt;
311			break;
312		}
313	}
314
315	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
316	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);
317
318	/* Patch the invocation */
319	kvm_patch_ins_b(inst, distance_start);
320}
321
322extern u32 kvm_emulate_wrteei_0_branch_offs;
323extern u32 kvm_emulate_wrteei_0_len;
324extern u32 kvm_emulate_wrteei_0[];
325
326static void __init kvm_patch_ins_wrteei_0(u32 *inst)
327{
328	u32 *p;
329	int distance_start;
330	int distance_end;
331	ulong next_inst;
332
333	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
334	if (!p)
335		return;
336
337	/* Find out where we are and put everything there */
338	distance_start = (ulong)p - (ulong)inst;
339	next_inst = ((ulong)inst + 4);
340	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];
341
342	/* Make sure we only write valid b instructions */
343	if (distance_start > KVM_INST_B_MAX) {
344		kvm_patching_worked = false;
345		return;
346	}
347
348	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
349	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
350	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);
351
352	/* Patch the invocation */
353	kvm_patch_ins_b(inst, distance_start);
354}
355
356#endif
357
358#ifdef CONFIG_PPC_BOOK3S_32
359
360extern u32 kvm_emulate_mtsrin_branch_offs;
361extern u32 kvm_emulate_mtsrin_reg1_offs;
362extern u32 kvm_emulate_mtsrin_reg2_offs;
363extern u32 kvm_emulate_mtsrin_orig_ins_offs;
364extern u32 kvm_emulate_mtsrin_len;
365extern u32 kvm_emulate_mtsrin[];
366
367static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
368{
369	u32 *p;
370	int distance_start;
371	int distance_end;
372	ulong next_inst;
373
374	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
375	if (!p)
376		return;
377
378	/* Find out where we are and put everything there */
379	distance_start = (ulong)p - (ulong)inst;
380	next_inst = ((ulong)inst + 4);
381	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];
382
383	/* Make sure we only write valid b instructions */
384	if (distance_start > KVM_INST_B_MAX) {
385		kvm_patching_worked = false;
386		return;
387	}
388
389	/* Modify the chunk to fit the invocation */
390	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
391	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
392	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
393	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
394	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
395	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);
396
397	/* Patch the invocation */
398	kvm_patch_ins_b(inst, distance_start);
399}
400
401#endif
402
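/*
 * Runs on every CPU: ask the host via the ePAPR hypercall to map the
 * magic page at -4096 and report back the supported feature bits.
 */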
403static void __init kvm_map_magic_page(void *data)
404{
405	u32 *features = data;
406
407	ulong in[8] = {0};
408	ulong out[8];
409
410	in[0] = KVM_MAGIC_PAGE;
411	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;
412
413	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
414
415	*features = out[0];
416}
417
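/*
 * Compare one kernel instruction, with its rT (and, for mtsrin, rB)
 * field masked out, against the privileged encodings we know how to
 * replace, and patch any match.
 */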
418static void __init kvm_check_ins(u32 *inst, u32 features)
419{
420	u32 _inst = *inst;
421	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
422	u32 inst_rt = _inst & KVM_MASK_RT;
423
424	switch (inst_no_rt) {
425	/* Loads */
426	case KVM_INST_MFMSR:
427		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
428		break;
429	case KVM_INST_MFSPR(SPRN_SPRG0):
430		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
431		break;
432	case KVM_INST_MFSPR(SPRN_SPRG1):
433		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
434		break;
435	case KVM_INST_MFSPR(SPRN_SPRG2):
436		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
437		break;
438	case KVM_INST_MFSPR(SPRN_SPRG3):
439		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
440		break;
441	case KVM_INST_MFSPR(SPRN_SRR0):
442		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
443		break;
444	case KVM_INST_MFSPR(SPRN_SRR1):
445		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
446		break;
447#ifdef CONFIG_BOOKE
448	case KVM_INST_MFSPR(SPRN_DEAR):
449#else
450	case KVM_INST_MFSPR(SPRN_DAR):
451#endif
452		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
453		break;
454	case KVM_INST_MFSPR(SPRN_DSISR):
455		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
456		break;
457
458#ifdef CONFIG_PPC_BOOK3E_MMU
459	case KVM_INST_MFSPR(SPRN_MAS0):
460		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
461			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
462		break;
463	case KVM_INST_MFSPR(SPRN_MAS1):
464		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
465			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
466		break;
467	case KVM_INST_MFSPR(SPRN_MAS2):
468		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
469			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
470		break;
471	case KVM_INST_MFSPR(SPRN_MAS3):
472		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
473			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
474		break;
475	case KVM_INST_MFSPR(SPRN_MAS4):
476		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
477			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
478		break;
479	case KVM_INST_MFSPR(SPRN_MAS6):
480		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
481			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
482		break;
483	case KVM_INST_MFSPR(SPRN_MAS7):
484		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
485			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
486		break;
487#endif /* CONFIG_PPC_BOOK3E_MMU */
488
489	case KVM_INST_MFSPR(SPRN_SPRG4):
490#ifdef CONFIG_BOOKE
491	case KVM_INST_MFSPR(SPRN_SPRG4R):
492#endif
493		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
494			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
495		break;
496	case KVM_INST_MFSPR(SPRN_SPRG5):
497#ifdef CONFIG_BOOKE
498	case KVM_INST_MFSPR(SPRN_SPRG5R):
499#endif
500		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
501			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
502		break;
503	case KVM_INST_MFSPR(SPRN_SPRG6):
504#ifdef CONFIG_BOOKE
505	case KVM_INST_MFSPR(SPRN_SPRG6R):
506#endif
507		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
508			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
509		break;
510	case KVM_INST_MFSPR(SPRN_SPRG7):
511#ifdef CONFIG_BOOKE
512	case KVM_INST_MFSPR(SPRN_SPRG7R):
513#endif
514		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
515			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
516		break;
517
518#ifdef CONFIG_BOOKE
519	case KVM_INST_MFSPR(SPRN_ESR):
520		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
521			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
522		break;
523#endif
524
525	case KVM_INST_MFSPR(SPRN_PIR):
526		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
527			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
528		break;
529
530
531	/* Stores */
532	case KVM_INST_MTSPR(SPRN_SPRG0):
533		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
534		break;
535	case KVM_INST_MTSPR(SPRN_SPRG1):
536		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
537		break;
538	case KVM_INST_MTSPR(SPRN_SPRG2):
539		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
540		break;
541	case KVM_INST_MTSPR(SPRN_SPRG3):
542		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
543		break;
544	case KVM_INST_MTSPR(SPRN_SRR0):
545		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
546		break;
547	case KVM_INST_MTSPR(SPRN_SRR1):
548		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
549		break;
550#ifdef CONFIG_BOOKE
551	case KVM_INST_MTSPR(SPRN_DEAR):
552#else
553	case KVM_INST_MTSPR(SPRN_DAR):
554#endif
555		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
556		break;
557	case KVM_INST_MTSPR(SPRN_DSISR):
558		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
559		break;
560#ifdef CONFIG_PPC_BOOK3E_MMU
561	case KVM_INST_MTSPR(SPRN_MAS0):
562		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
563			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
564		break;
565	case KVM_INST_MTSPR(SPRN_MAS1):
566		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
567			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
568		break;
569	case KVM_INST_MTSPR(SPRN_MAS2):
570		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
571			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
572		break;
573	case KVM_INST_MTSPR(SPRN_MAS3):
574		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
575			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
576		break;
577	case KVM_INST_MTSPR(SPRN_MAS4):
578		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
579			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
580		break;
581	case KVM_INST_MTSPR(SPRN_MAS6):
582		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
583			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
584		break;
585	case KVM_INST_MTSPR(SPRN_MAS7):
586		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
587			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
588		break;
589#endif /* CONFIG_PPC_BOOK3E_MMU */
590
591	case KVM_INST_MTSPR(SPRN_SPRG4):
592		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
593			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
594		break;
595	case KVM_INST_MTSPR(SPRN_SPRG5):
596		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
597			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
598		break;
599	case KVM_INST_MTSPR(SPRN_SPRG6):
600		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
601			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
602		break;
603	case KVM_INST_MTSPR(SPRN_SPRG7):
604		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
605			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
606		break;
607
608#ifdef CONFIG_BOOKE
609	case KVM_INST_MTSPR(SPRN_ESR):
610		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
611			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
612		break;
613#endif
614
615	/* Nops */
616	case KVM_INST_TLBSYNC:
617		kvm_patch_ins_nop(inst);
618		break;
619
620	/* Rewrites */
621	case KVM_INST_MTMSRD_L1:
622		kvm_patch_ins_mtmsrd(inst, inst_rt);
623		break;
624	case KVM_INST_MTMSR:
625	case KVM_INST_MTMSRD_L0:
626		kvm_patch_ins_mtmsr(inst, inst_rt);
627		break;
628#ifdef CONFIG_BOOKE
629	case KVM_INST_WRTEE:
630		kvm_patch_ins_wrtee(inst, inst_rt, 0);
631		break;
632#endif
633	}
634
635	switch (inst_no_rt & ~KVM_MASK_RB) {
636#ifdef CONFIG_PPC_BOOK3S_32
637	case KVM_INST_MTSRIN:
638		if (features & KVM_MAGIC_FEAT_SR) {
639			u32 inst_rb = _inst & KVM_MASK_RB;
640			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
641		}
642		break;
643#endif
644	}
645
646	switch (_inst) {
647#ifdef CONFIG_BOOKE
648	case KVM_INST_WRTEEI_0:
649		kvm_patch_ins_wrteei_0(inst);
650		break;
651
652	case KVM_INST_WRTEEI_1:
653		kvm_patch_ins_wrtee(inst, 0, 1);
654		break;
655#endif
656	}
657}
658
659extern u32 kvm_template_start[];
660extern u32 kvm_template_end[];
661
662static void __init kvm_use_magic_page(void)
663{
664	u32 *p;
665	u32 *start, *end;
666	u32 features;
667
668	/* Tell the host to map the magic page to -4096 on all CPUs */
669	on_each_cpu(kvm_map_magic_page, &features, 1);
670
671	/* Quick self-test to see if the mapping works */
672	if (fault_in_pages_readable((const char *)KVM_MAGIC_PAGE, sizeof(u32))) {
673		kvm_patching_worked = false;
674		return;
675	}
676
677	/* Now loop through all code and find instructions */
678	start = (void*)_stext;
679	end = (void*)_etext;
680
681	/*
682	 * Being interrupted in the middle of patching would
683	 * be bad for SPRG4-7, which KVM can't keep in sync
684	 * with emulated accesses because reads don't trap.
685	 */
686	local_irq_disable();
687
688	for (p = start; p < end; p++) {
689		/* Avoid patching the template code */
690		if (p >= kvm_template_start && p < kvm_template_end) {
691			p = kvm_template_end - 1;
692			continue;
693		}
694		kvm_check_ins(p, features);
695	}
696
697	local_irq_enable();
698
699	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
700			 kvm_patching_worked ? "worked" : "failed");
701}
702
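/*
 * Guest-side entry point: if we are running under KVM with ePAPR
 * paravirt support and the host offers the magic page, patch the
 * kernel text.
 */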
703static int __init kvm_guest_init(void)
704{
705	if (!kvm_para_available())
706		return 0;
707
708	if (!epapr_paravirt_enabled)
709		return 0;
710
711	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
712		kvm_use_magic_page();
713
714#ifdef CONFIG_PPC_BOOK3S_64
715	/* Enable napping */
716	powersave_nap = 1;
717#endif
718
719	return 0;
720}
721
722postcore_initcall(kvm_guest_init);
v3.5.6
  1/*
  2 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
  3 * Copyright 2010-2011 Freescale Semiconductor, Inc.
  4 *
  5 * Authors:
  6 *     Alexander Graf <agraf@suse.de>
  7 *
  8 * This program is free software; you can redistribute it and/or modify
  9 * it under the terms of the GNU General Public License, version 2, as
 10 * published by the Free Software Foundation.
 11 *
 12 * This program is distributed in the hope that it will be useful,
 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 15 * GNU General Public License for more details.
 16 *
 17 * You should have received a copy of the GNU General Public License
 18 * along with this program; if not, write to the Free Software
 19 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 20 */
 21
 22#include <linux/kvm_host.h>
 23#include <linux/init.h>
 24#include <linux/export.h>
 25#include <linux/kvm_para.h>
 26#include <linux/slab.h>
 27#include <linux/of.h>
 28
 29#include <asm/reg.h>
 30#include <asm/sections.h>
 31#include <asm/cacheflush.h>
 32#include <asm/disassemble.h>
 33#include <asm/ppc-opcode.h>
 34
 35#define KVM_MAGIC_PAGE		(-4096L)
 36#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
 37
 38#define KVM_INST_LWZ		0x80000000
 39#define KVM_INST_STW		0x90000000
 40#define KVM_INST_LD		0xe8000000
 41#define KVM_INST_STD		0xf8000000
 42#define KVM_INST_NOP		0x60000000
 43#define KVM_INST_B		0x48000000
 44#define KVM_INST_B_MASK		0x03ffffff
 45#define KVM_INST_B_MAX		0x01ffffff
 46#define KVM_INST_LI		0x38000000
 47
 48#define KVM_MASK_RT		0x03e00000
 49#define KVM_RT_30		0x03c00000
 50#define KVM_MASK_RB		0x0000f800
 51#define KVM_INST_MFMSR		0x7c0000a6
 52
 53#define SPR_FROM		0
 54#define SPR_TO			0x100
 55
 56#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
 57				    (((sprn) & 0x1f) << 16) | \
 58				    (((sprn) & 0x3e0) << 6) | \
 59				    (moveto))
 60
 61#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
 62#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
 63
 64#define KVM_INST_TLBSYNC	0x7c00046c
 65#define KVM_INST_MTMSRD_L0	0x7c000164
 66#define KVM_INST_MTMSRD_L1	0x7c010164
 67#define KVM_INST_MTMSR		0x7c000124
 68
 69#define KVM_INST_WRTEE		0x7c000106
 70#define KVM_INST_WRTEEI_0	0x7c000146
 71#define KVM_INST_WRTEEI_1	0x7c008146
 72
 73#define KVM_INST_MTSRIN		0x7c0001e4
 74
 75static bool kvm_patching_worked = true;
 76static char kvm_tmp[1024 * 1024];
 77static int kvm_tmp_index;
 78
 79static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
 80{
 81	*inst = new_inst;
 82	flush_icache_range((ulong)inst, (ulong)inst + 4);
 83}
 84
 85static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
 86{
 87#ifdef CONFIG_64BIT
 88	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
 89#else
 90	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
 91#endif
 92}
 93
 94static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
 95{
 96#ifdef CONFIG_64BIT
 97	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
 98#else
 99	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
100#endif
101}
102
103static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
104{
105	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
106}
107
108static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
109{
110#ifdef CONFIG_64BIT
111	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
112#else
113	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
114#endif
115}
116
117static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
118{
119	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
120}
121
122static void kvm_patch_ins_nop(u32 *inst)
123{
124	kvm_patch_ins(inst, KVM_INST_NOP);
125}
126
127static void kvm_patch_ins_b(u32 *inst, int addr)
128{
129#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
130	/* On relocatable kernels interrupt handlers and our code
131	   can be in different regions, so we don't patch them */
132
133	if ((ulong)inst < (ulong)&__end_interrupts)
134		return;
135#endif
136
137	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
138}
139
140static u32 *kvm_alloc(int len)
141{
142	u32 *p;
143
144	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
145		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
146				kvm_tmp_index, len);
147		kvm_patching_worked = false;
148		return NULL;
149	}
150
151	p = (void*)&kvm_tmp[kvm_tmp_index];
152	kvm_tmp_index += len;
153
154	return p;
155}
156
157extern u32 kvm_emulate_mtmsrd_branch_offs;
158extern u32 kvm_emulate_mtmsrd_reg_offs;
159extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
160extern u32 kvm_emulate_mtmsrd_len;
161extern u32 kvm_emulate_mtmsrd[];
162
163static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
164{
165	u32 *p;
166	int distance_start;
167	int distance_end;
168	ulong next_inst;
169
170	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
171	if (!p)
172		return;
173
174	/* Find out where we are and put everything there */
175	distance_start = (ulong)p - (ulong)inst;
176	next_inst = ((ulong)inst + 4);
177	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];
178
179	/* Make sure we only write valid b instructions */
180	if (distance_start > KVM_INST_B_MAX) {
181		kvm_patching_worked = false;
182		return;
183	}
184
185	/* Modify the chunk to fit the invocation */
186	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
187	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
188	switch (get_rt(rt)) {
189	case 30:
190		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
191				 magic_var(scratch2), KVM_RT_30);
192		break;
193	case 31:
194		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
195				 magic_var(scratch1), KVM_RT_30);
196		break;
197	default:
198		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
199		break;
200	}
201
202	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
203	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);
204
205	/* Patch the invocation */
206	kvm_patch_ins_b(inst, distance_start);
207}
208
209extern u32 kvm_emulate_mtmsr_branch_offs;
210extern u32 kvm_emulate_mtmsr_reg1_offs;
211extern u32 kvm_emulate_mtmsr_reg2_offs;
212extern u32 kvm_emulate_mtmsr_orig_ins_offs;
213extern u32 kvm_emulate_mtmsr_len;
214extern u32 kvm_emulate_mtmsr[];
215
216static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
217{
218	u32 *p;
219	int distance_start;
220	int distance_end;
221	ulong next_inst;
222
223	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
224	if (!p)
225		return;
226
227	/* Find out where we are and put everything there */
228	distance_start = (ulong)p - (ulong)inst;
229	next_inst = ((ulong)inst + 4);
230	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];
231
232	/* Make sure we only write valid b instructions */
233	if (distance_start > KVM_INST_B_MAX) {
234		kvm_patching_worked = false;
235		return;
236	}
237
238	/* Modify the chunk to fit the invocation */
239	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
240	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
241
242	/* Make clobbered registers work too */
243	switch (get_rt(rt)) {
244	case 30:
245		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
246				 magic_var(scratch2), KVM_RT_30);
247		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
248				 magic_var(scratch2), KVM_RT_30);
249		break;
250	case 31:
251		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
252				 magic_var(scratch1), KVM_RT_30);
253		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
254				 magic_var(scratch1), KVM_RT_30);
255		break;
256	default:
257		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
258		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
259		break;
260	}
261
262	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
263	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);
264
265	/* Patch the invocation */
266	kvm_patch_ins_b(inst, distance_start);
267}
268
269#ifdef CONFIG_BOOKE
270
271extern u32 kvm_emulate_wrtee_branch_offs;
272extern u32 kvm_emulate_wrtee_reg_offs;
273extern u32 kvm_emulate_wrtee_orig_ins_offs;
274extern u32 kvm_emulate_wrtee_len;
275extern u32 kvm_emulate_wrtee[];
276
277static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
278{
279	u32 *p;
280	int distance_start;
281	int distance_end;
282	ulong next_inst;
283
284	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
285	if (!p)
286		return;
287
288	/* Find out where we are and put everything there */
289	distance_start = (ulong)p - (ulong)inst;
290	next_inst = ((ulong)inst + 4);
291	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];
292
293	/* Make sure we only write valid b instructions */
294	if (distance_start > KVM_INST_B_MAX) {
295		kvm_patching_worked = false;
296		return;
297	}
298
299	/* Modify the chunk to fit the invocation */
300	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
301	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;
302
303	if (imm_one) {
304		p[kvm_emulate_wrtee_reg_offs] =
305			KVM_INST_LI | __PPC_RT(30) | MSR_EE;
306	} else {
307		/* Make clobbered registers work too */
308		switch (get_rt(rt)) {
309		case 30:
310			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
311					 magic_var(scratch2), KVM_RT_30);
312			break;
313		case 31:
314			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
315					 magic_var(scratch1), KVM_RT_30);
316			break;
317		default:
318			p[kvm_emulate_wrtee_reg_offs] |= rt;
319			break;
320		}
321	}
322
323	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
324	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);
325
326	/* Patch the invocation */
327	kvm_patch_ins_b(inst, distance_start);
328}
329
330extern u32 kvm_emulate_wrteei_0_branch_offs;
331extern u32 kvm_emulate_wrteei_0_len;
332extern u32 kvm_emulate_wrteei_0[];
333
334static void kvm_patch_ins_wrteei_0(u32 *inst)
335{
336	u32 *p;
337	int distance_start;
338	int distance_end;
339	ulong next_inst;
340
341	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
342	if (!p)
343		return;
344
345	/* Find out where we are and put everything there */
346	distance_start = (ulong)p - (ulong)inst;
347	next_inst = ((ulong)inst + 4);
348	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];
349
350	/* Make sure we only write valid b instructions */
351	if (distance_start > KVM_INST_B_MAX) {
352		kvm_patching_worked = false;
353		return;
354	}
355
356	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
357	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
358	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);
359
360	/* Patch the invocation */
361	kvm_patch_ins_b(inst, distance_start);
362}
363
364#endif
365
366#ifdef CONFIG_PPC_BOOK3S_32
367
368extern u32 kvm_emulate_mtsrin_branch_offs;
369extern u32 kvm_emulate_mtsrin_reg1_offs;
370extern u32 kvm_emulate_mtsrin_reg2_offs;
371extern u32 kvm_emulate_mtsrin_orig_ins_offs;
372extern u32 kvm_emulate_mtsrin_len;
373extern u32 kvm_emulate_mtsrin[];
374
375static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
376{
377	u32 *p;
378	int distance_start;
379	int distance_end;
380	ulong next_inst;
381
382	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
383	if (!p)
384		return;
385
386	/* Find out where we are and put everything there */
387	distance_start = (ulong)p - (ulong)inst;
388	next_inst = ((ulong)inst + 4);
389	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];
390
391	/* Make sure we only write valid b instructions */
392	if (distance_start > KVM_INST_B_MAX) {
393		kvm_patching_worked = false;
394		return;
395	}
396
397	/* Modify the chunk to fit the invocation */
398	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
399	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
400	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
401	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
402	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
403	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);
404
405	/* Patch the invocation */
406	kvm_patch_ins_b(inst, distance_start);
407}
408
409#endif
410
411static void kvm_map_magic_page(void *data)
412{
413	u32 *features = data;
414
415	ulong in[8];
416	ulong out[8];
417
418	in[0] = KVM_MAGIC_PAGE;
419	in[1] = KVM_MAGIC_PAGE;
420
421	kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
422
423	*features = out[0];
424}
425
426static void kvm_check_ins(u32 *inst, u32 features)
427{
428	u32 _inst = *inst;
429	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
430	u32 inst_rt = _inst & KVM_MASK_RT;
431
432	switch (inst_no_rt) {
433	/* Loads */
434	case KVM_INST_MFMSR:
435		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
436		break;
437	case KVM_INST_MFSPR(SPRN_SPRG0):
438		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
439		break;
440	case KVM_INST_MFSPR(SPRN_SPRG1):
441		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
442		break;
443	case KVM_INST_MFSPR(SPRN_SPRG2):
444		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
445		break;
446	case KVM_INST_MFSPR(SPRN_SPRG3):
447		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
448		break;
449	case KVM_INST_MFSPR(SPRN_SRR0):
450		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
451		break;
452	case KVM_INST_MFSPR(SPRN_SRR1):
453		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
454		break;
455#ifdef CONFIG_BOOKE
456	case KVM_INST_MFSPR(SPRN_DEAR):
457#else
458	case KVM_INST_MFSPR(SPRN_DAR):
459#endif
460		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
461		break;
462	case KVM_INST_MFSPR(SPRN_DSISR):
463		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
464		break;
465
466#ifdef CONFIG_PPC_BOOK3E_MMU
467	case KVM_INST_MFSPR(SPRN_MAS0):
468		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
469			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
470		break;
471	case KVM_INST_MFSPR(SPRN_MAS1):
472		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
473			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
474		break;
475	case KVM_INST_MFSPR(SPRN_MAS2):
476		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
477			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
478		break;
479	case KVM_INST_MFSPR(SPRN_MAS3):
480		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
481			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
482		break;
483	case KVM_INST_MFSPR(SPRN_MAS4):
484		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
485			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
486		break;
487	case KVM_INST_MFSPR(SPRN_MAS6):
488		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
489			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
490		break;
491	case KVM_INST_MFSPR(SPRN_MAS7):
492		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
493			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
494		break;
495#endif /* CONFIG_PPC_BOOK3E_MMU */
496
497	case KVM_INST_MFSPR(SPRN_SPRG4):
498#ifdef CONFIG_BOOKE
499	case KVM_INST_MFSPR(SPRN_SPRG4R):
500#endif
501		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
502			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
503		break;
504	case KVM_INST_MFSPR(SPRN_SPRG5):
505#ifdef CONFIG_BOOKE
506	case KVM_INST_MFSPR(SPRN_SPRG5R):
507#endif
508		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
509			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
510		break;
511	case KVM_INST_MFSPR(SPRN_SPRG6):
512#ifdef CONFIG_BOOKE
513	case KVM_INST_MFSPR(SPRN_SPRG6R):
514#endif
515		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
516			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
517		break;
518	case KVM_INST_MFSPR(SPRN_SPRG7):
519#ifdef CONFIG_BOOKE
520	case KVM_INST_MFSPR(SPRN_SPRG7R):
521#endif
522		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
523			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
524		break;
525
526#ifdef CONFIG_BOOKE
527	case KVM_INST_MFSPR(SPRN_ESR):
528		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
529			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
530		break;
531#endif
532
533	case KVM_INST_MFSPR(SPRN_PIR):
534		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
535			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
536		break;
537
538
539	/* Stores */
540	case KVM_INST_MTSPR(SPRN_SPRG0):
541		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
542		break;
543	case KVM_INST_MTSPR(SPRN_SPRG1):
544		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
545		break;
546	case KVM_INST_MTSPR(SPRN_SPRG2):
547		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
548		break;
549	case KVM_INST_MTSPR(SPRN_SPRG3):
550		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
551		break;
552	case KVM_INST_MTSPR(SPRN_SRR0):
553		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
554		break;
555	case KVM_INST_MTSPR(SPRN_SRR1):
556		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
557		break;
558#ifdef CONFIG_BOOKE
559	case KVM_INST_MTSPR(SPRN_DEAR):
560#else
561	case KVM_INST_MTSPR(SPRN_DAR):
562#endif
563		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
564		break;
565	case KVM_INST_MTSPR(SPRN_DSISR):
566		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
567		break;
568#ifdef CONFIG_PPC_BOOK3E_MMU
569	case KVM_INST_MTSPR(SPRN_MAS0):
570		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
571			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
572		break;
573	case KVM_INST_MTSPR(SPRN_MAS1):
574		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
575			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
576		break;
577	case KVM_INST_MTSPR(SPRN_MAS2):
578		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
579			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
580		break;
581	case KVM_INST_MTSPR(SPRN_MAS3):
582		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
583			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
584		break;
585	case KVM_INST_MTSPR(SPRN_MAS4):
586		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
587			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
588		break;
589	case KVM_INST_MTSPR(SPRN_MAS6):
590		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
591			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
592		break;
593	case KVM_INST_MTSPR(SPRN_MAS7):
594		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
595			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
596		break;
597#endif /* CONFIG_PPC_BOOK3E_MMU */
598
599	case KVM_INST_MTSPR(SPRN_SPRG4):
600		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
601			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
602		break;
603	case KVM_INST_MTSPR(SPRN_SPRG5):
604		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
605			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
606		break;
607	case KVM_INST_MTSPR(SPRN_SPRG6):
608		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
609			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
610		break;
611	case KVM_INST_MTSPR(SPRN_SPRG7):
612		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
613			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
614		break;
615
616#ifdef CONFIG_BOOKE
617	case KVM_INST_MTSPR(SPRN_ESR):
618		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
619			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
620		break;
621#endif
622
623	/* Nops */
624	case KVM_INST_TLBSYNC:
625		kvm_patch_ins_nop(inst);
626		break;
627
628	/* Rewrites */
629	case KVM_INST_MTMSRD_L1:
630		kvm_patch_ins_mtmsrd(inst, inst_rt);
631		break;
632	case KVM_INST_MTMSR:
633	case KVM_INST_MTMSRD_L0:
634		kvm_patch_ins_mtmsr(inst, inst_rt);
635		break;
636#ifdef CONFIG_BOOKE
637	case KVM_INST_WRTEE:
638		kvm_patch_ins_wrtee(inst, inst_rt, 0);
639		break;
640#endif
641	}
642
643	switch (inst_no_rt & ~KVM_MASK_RB) {
644#ifdef CONFIG_PPC_BOOK3S_32
645	case KVM_INST_MTSRIN:
646		if (features & KVM_MAGIC_FEAT_SR) {
647			u32 inst_rb = _inst & KVM_MASK_RB;
648			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
649		}
650		break;
652#endif
653	}
654
655	switch (_inst) {
656#ifdef CONFIG_BOOKE
657	case KVM_INST_WRTEEI_0:
658		kvm_patch_ins_wrteei_0(inst);
659		break;
660
661	case KVM_INST_WRTEEI_1:
662		kvm_patch_ins_wrtee(inst, 0, 1);
663		break;
664#endif
665	}
666}
667
668extern u32 kvm_template_start[];
669extern u32 kvm_template_end[];
670
671static void kvm_use_magic_page(void)
672{
673	u32 *p;
674	u32 *start, *end;
675	u32 tmp;
676	u32 features;
677
678	/* Tell the host to map the magic page to -4096 on all CPUs */
679	on_each_cpu(kvm_map_magic_page, &features, 1);
680
681	/* Quick self-test to see if the mapping works */
682	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
683		kvm_patching_worked = false;
684		return;
685	}
686
687	/* Now loop through all code and find instructions */
688	start = (void*)_stext;
689	end = (void*)_etext;
690
691	/*
692	 * Being interrupted in the middle of patching would
693	 * be bad for SPRG4-7, which KVM can't keep in sync
694	 * with emulated accesses because reads don't trap.
695	 */
696	local_irq_disable();
697
698	for (p = start; p < end; p++) {
699		/* Avoid patching the template code */
700		if (p >= kvm_template_start && p < kvm_template_end) {
701			p = kvm_template_end - 1;
702			continue;
703		}
704		kvm_check_ins(p, features);
705	}
706
707	local_irq_enable();
708
709	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
710			 kvm_patching_worked ? "worked" : "failed");
711}
712
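/*
 * KVM hypercall ABI: arguments in r3-r10, hypercall number in r11;
 * r3 carries the return code and r4-r11 the output values.  The actual
 * hypercall instruction sequence lives at kvm_hypercall_start and is
 * filled in from the device tree by kvm_para_setup().
 */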
713unsigned long kvm_hypercall(unsigned long *in,
714			    unsigned long *out,
715			    unsigned long nr)
716{
717	unsigned long register r0 asm("r0");
718	unsigned long register r3 asm("r3") = in[0];
719	unsigned long register r4 asm("r4") = in[1];
720	unsigned long register r5 asm("r5") = in[2];
721	unsigned long register r6 asm("r6") = in[3];
722	unsigned long register r7 asm("r7") = in[4];
723	unsigned long register r8 asm("r8") = in[5];
724	unsigned long register r9 asm("r9") = in[6];
725	unsigned long register r10 asm("r10") = in[7];
726	unsigned long register r11 asm("r11") = nr;
727	unsigned long register r12 asm("r12");
728
729	asm volatile("bl	kvm_hypercall_start"
730		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
731		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
732		       "=r"(r12)
733		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
734		       "r"(r9), "r"(r10), "r"(r11)
735		     : "memory", "cc", "xer", "ctr", "lr");
736
737	out[0] = r4;
738	out[1] = r5;
739	out[2] = r6;
740	out[3] = r7;
741	out[4] = r8;
742	out[5] = r9;
743	out[6] = r10;
744	out[7] = r11;
745
746	return r3;
747}
748EXPORT_SYMBOL_GPL(kvm_hypercall);
749
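/*
 * Fetch the host-provided hypercall sequence (at most four
 * instructions) from the /hypervisor device tree node and patch it
 * into kvm_hypercall_start.
 */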
750static int kvm_para_setup(void)
751{
752	extern u32 kvm_hypercall_start;
753	struct device_node *hyper_node;
754	u32 *insts;
755	int len, i;
756
757	hyper_node = of_find_node_by_path("/hypervisor");
758	if (!hyper_node)
759		return -1;
760
761	insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
762	if (len % 4)
763		return -1;
764	if (len > (4 * 4))
765		return -1;
766
767	for (i = 0; i < (len / 4); i++)
768		kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);
769
770	return 0;
771}
772
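/*
 * Return the unused, page-aligned tail of the kvm_tmp array to the
 * page allocator once patching is done.
 */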
773static __init void kvm_free_tmp(void)
774{
775	unsigned long start, end;
776
777	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
778	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;
779
780	/* Free the tmp space we don't need */
781	for (; start < end; start += PAGE_SIZE) {
782		ClearPageReserved(virt_to_page(start));
783		init_page_count(virt_to_page(start));
784		free_page(start);
785		totalram_pages++;
786	}
787}
788
789static int __init kvm_guest_init(void)
790{
791	if (!kvm_para_available())
792		goto free_tmp;
793
794	if (kvm_para_setup())
795		goto free_tmp;
796
797	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
798		kvm_use_magic_page();
799
800#ifdef CONFIG_PPC_BOOK3S_64
801	/* Enable napping */
802	powersave_nap = 1;
803#endif
804
805free_tmp:
806	kvm_free_tmp();
807
808	return 0;
809}
810
811postcore_initcall(kvm_guest_init);