v6.2
  1// SPDX-License-Identifier: GPL-2.0-only
  2/*
  3 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
  4 * Copyright 2010-2011 Freescale Semiconductor, Inc.
  5 *
  6 * Authors:
  7 *     Alexander Graf <agraf@suse.de>
  8 */
  9
 10#include <linux/kvm_host.h>
 11#include <linux/init.h>
 12#include <linux/export.h>
 13#include <linux/kmemleak.h>
 14#include <linux/kvm_para.h>
 15#include <linux/slab.h>
 16#include <linux/of.h>
 17#include <linux/pagemap.h>
 18
 19#include <asm/reg.h>
 20#include <asm/sections.h>
 21#include <asm/cacheflush.h>
 22#include <asm/disassemble.h>
 23#include <asm/ppc-opcode.h>
 24#include <asm/epapr_hcalls.h>
 25
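/*
 * The host maps the shared "magic" page at the very top of the guest
 * effective address space (-4096). A D-form load/store with RA=0 uses
 * a literal base of zero plus a sign-extended 16-bit displacement, so
 * every field of struct kvm_vcpu_arch_shared is reachable with a
 * single instruction, e.g. "ld rN, magic_var(msr)(0)".
 */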
 26#define KVM_MAGIC_PAGE		(-4096L)
 27#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
 28
 29#define KVM_INST_LWZ		0x80000000
 30#define KVM_INST_STW		0x90000000
 31#define KVM_INST_LD		0xe8000000
 32#define KVM_INST_STD		0xf8000000
 33#define KVM_INST_NOP		0x60000000
 34#define KVM_INST_B		0x48000000
 35#define KVM_INST_B_MASK		0x03ffffff
 36#define KVM_INST_B_MAX		0x01ffffff
 37#define KVM_INST_LI		0x38000000
 38
 39#define KVM_MASK_RT		0x03e00000
 40#define KVM_RT_30		0x03c00000
 41#define KVM_MASK_RB		0x0000f800
 42#define KVM_INST_MFMSR		0x7c0000a6
 43
 44#define SPR_FROM		0
 45#define SPR_TO			0x100
 46
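/*
 * mfspr/mtspr carry the 10-bit SPR number as two swapped 5-bit
 * halves. KVM_INST_SPR() rebuilds that split field and ORs in the
 * "move to" bit (SPR_TO, 0x100), turning the mfspr pattern
 * 0x7c0002a6 into the mtspr pattern 0x7c0003a6.
 */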
 47#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
 48				    (((sprn) & 0x1f) << 16) | \
 49				    (((sprn) & 0x3e0) << 6) | \
 50				    (moveto))
 51
 52#define KVM_INST_MFSPR(sprn)	KVM_INST_SPR(sprn, SPR_FROM)
 53#define KVM_INST_MTSPR(sprn)	KVM_INST_SPR(sprn, SPR_TO)
 54
 55#define KVM_INST_TLBSYNC	0x7c00046c
 56#define KVM_INST_MTMSRD_L0	0x7c000164
 57#define KVM_INST_MTMSRD_L1	0x7c010164
 58#define KVM_INST_MTMSR		0x7c000124
 59
 60#define KVM_INST_WRTEE		0x7c000106
 61#define KVM_INST_WRTEEI_0	0x7c000146
 62#define KVM_INST_WRTEEI_1	0x7c008146
 63
 64#define KVM_INST_MTSRIN		0x7c0001e4
 65
 66static bool kvm_patching_worked = true;
 67extern char kvm_tmp[];
 68extern char kvm_tmp_end[];
 69static int kvm_tmp_index;
 70
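/*
 * Write one replacement instruction in place and flush the icache
 * range so the CPU fetches the new encoding.
 */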
 71static void __init kvm_patch_ins(u32 *inst, u32 new_inst)
 72{
 73	*inst = new_inst;
 74	flush_icache_range((ulong)inst, (ulong)inst + 4);
 75}
 76
 77static void __init kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
 78{
 79#ifdef CONFIG_64BIT
 80	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
 81#else
 82	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
 83#endif
 84}
 85
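/*
 * The shared-page fields are 64-bit. On 32-bit (big-endian) kernels
 * only the low word is accessed, hence the "+ 4" displacement here
 * and in kvm_patch_ins_std() below.
 */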
 86static void __init kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
 87{
 88#ifdef CONFIG_64BIT
 89	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
 90#else
 91	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
 92#endif
 93}
 94
 95static void __init kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
 96{
 97	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
 98}
 99
100static void __init kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
101{
102#ifdef CONFIG_64BIT
103	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
104#else
105	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
106#endif
107}
108
109static void __init kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
110{
111	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
112}
113
114static void __init kvm_patch_ins_nop(u32 *inst)
115{
116	kvm_patch_ins(inst, KVM_INST_NOP);
117}
118
119static void __init kvm_patch_ins_b(u32 *inst, int addr)
120{
121#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
122	/* On relocatable kernels interrupt handlers and our code
123	   can be in different regions, so we don't patch them */
124
125	if ((ulong)inst < (ulong)&__end_interrupts)
126		return;
127#endif
128
129	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
130}
131
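/*
 * Bump allocator over the kvm_tmp scratch region; trampoline copies
 * are carved out of it and are never freed individually.
 */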
132static u32 * __init kvm_alloc(int len)
133{
134	u32 *p;
135
136	if ((kvm_tmp_index + len) > (kvm_tmp_end - kvm_tmp)) {
137		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
138				kvm_tmp_index, len);
139		kvm_patching_worked = false;
140		return NULL;
141	}
142
143	p = (void*)&kvm_tmp[kvm_tmp_index];
144	kvm_tmp_index += len;
145
146	return p;
147}
148
149extern u32 kvm_emulate_mtmsrd_branch_offs;
150extern u32 kvm_emulate_mtmsrd_reg_offs;
151extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
152extern u32 kvm_emulate_mtmsrd_len;
153extern u32 kvm_emulate_mtmsrd[];
154
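/*
 * Common pattern for the rewrites below: copy the matching assembly
 * template into kvm_tmp, point the template's final branch back at
 * the instruction after the patch site, stash the original
 * instruction inside the template so hard cases can still execute it
 * (and trap), then replace the patch site with a branch into the
 * copy. The r30/r31 special cases rely on the templates using those
 * registers as scratch, saving r30 to the magic page's scratch2 slot
 * and r31 to scratch1, so a clobbered operand is reloaded from its
 * scratch slot.
 */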
155static void __init kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
156{
157	u32 *p;
158	int distance_start;
159	int distance_end;
160	ulong next_inst;
161
162	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
163	if (!p)
164		return;
165
166	/* Find out where we are and put everything there */
167	distance_start = (ulong)p - (ulong)inst;
168	next_inst = ((ulong)inst + 4);
169	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];
170
171	/* Make sure we only write valid b instructions */
172	if (distance_start > KVM_INST_B_MAX) {
173		kvm_patching_worked = false;
174		return;
175	}
176
177	/* Modify the chunk to fit the invocation */
178	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
179	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
180	switch (get_rt(rt)) {
181	case 30:
182		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
183				 magic_var(scratch2), KVM_RT_30);
184		break;
185	case 31:
186		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
187				 magic_var(scratch1), KVM_RT_30);
188		break;
189	default:
190		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
191		break;
192	}
193
194	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
195	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);
196
197	/* Patch the invocation */
198	kvm_patch_ins_b(inst, distance_start);
199}
200
201extern u32 kvm_emulate_mtmsr_branch_offs;
202extern u32 kvm_emulate_mtmsr_reg1_offs;
203extern u32 kvm_emulate_mtmsr_reg2_offs;
204extern u32 kvm_emulate_mtmsr_orig_ins_offs;
205extern u32 kvm_emulate_mtmsr_len;
206extern u32 kvm_emulate_mtmsr[];
207
208static void __init kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
209{
210	u32 *p;
211	int distance_start;
212	int distance_end;
213	ulong next_inst;
214
215	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
216	if (!p)
217		return;
218
219	/* Find out where we are and put everything there */
220	distance_start = (ulong)p - (ulong)inst;
221	next_inst = ((ulong)inst + 4);
222	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];
223
224	/* Make sure we only write valid b instructions */
225	if (distance_start > KVM_INST_B_MAX) {
226		kvm_patching_worked = false;
227		return;
228	}
229
230	/* Modify the chunk to fit the invocation */
231	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
232	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
233
234	/* Make clobbered registers work too */
235	switch (get_rt(rt)) {
236	case 30:
237		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
238				 magic_var(scratch2), KVM_RT_30);
239		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
240				 magic_var(scratch2), KVM_RT_30);
241		break;
242	case 31:
243		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
244				 magic_var(scratch1), KVM_RT_30);
245		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
246				 magic_var(scratch1), KVM_RT_30);
247		break;
248	default:
249		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
250		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
251		break;
252	}
253
254	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
255	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);
256
257	/* Patch the invocation */
258	kvm_patch_ins_b(inst, distance_start);
259}
260
261#ifdef CONFIG_BOOKE
262
263extern u32 kvm_emulate_wrtee_branch_offs;
264extern u32 kvm_emulate_wrtee_reg_offs;
265extern u32 kvm_emulate_wrtee_orig_ins_offs;
266extern u32 kvm_emulate_wrtee_len;
267extern u32 kvm_emulate_wrtee[];
268
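/*
 * wrtee/wrteei only ever change MSR_EE. wrteei 1 has no source
 * register, so the template's register slot becomes "li r30, MSR_EE"
 * instead (the imm_one path below).
 */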
269static void __init kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
270{
271	u32 *p;
272	int distance_start;
273	int distance_end;
274	ulong next_inst;
275
276	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
277	if (!p)
278		return;
279
280	/* Find out where we are and put everything there */
281	distance_start = (ulong)p - (ulong)inst;
282	next_inst = ((ulong)inst + 4);
283	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];
284
285	/* Make sure we only write valid b instructions */
286	if (distance_start > KVM_INST_B_MAX) {
287		kvm_patching_worked = false;
288		return;
289	}
290
291	/* Modify the chunk to fit the invocation */
292	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
293	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;
294
295	if (imm_one) {
296		p[kvm_emulate_wrtee_reg_offs] =
297			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
298	} else {
299		/* Make clobbered registers work too */
300		switch (get_rt(rt)) {
301		case 30:
302			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
303					 magic_var(scratch2), KVM_RT_30);
304			break;
305		case 31:
306			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
307					 magic_var(scratch1), KVM_RT_30);
308			break;
309		default:
310			p[kvm_emulate_wrtee_reg_offs] |= rt;
311			break;
312		}
313	}
314
315	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
316	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);
317
318	/* Patch the invocation */
319	kvm_patch_ins_b(inst, distance_start);
320}
321
322extern u32 kvm_emulate_wrteei_0_branch_offs;
323extern u32 kvm_emulate_wrteei_0_len;
324extern u32 kvm_emulate_wrteei_0[];
325
326static void __init kvm_patch_ins_wrteei_0(u32 *inst)
327{
328	u32 *p;
329	int distance_start;
330	int distance_end;
331	ulong next_inst;
332
333	p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
334	if (!p)
335		return;
336
337	/* Find out where we are and put everything there */
338	distance_start = (ulong)p - (ulong)inst;
339	next_inst = ((ulong)inst + 4);
340	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];
341
342	/* Make sure we only write valid b instructions */
343	if (distance_start > KVM_INST_B_MAX) {
344		kvm_patching_worked = false;
345		return;
346	}
347
348	memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
349	p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
350	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);
351
352	/* Patch the invocation */
353	kvm_patch_ins_b(inst, distance_start);
354}
355
356#endif
357
358#ifdef CONFIG_PPC_BOOK3S_32
359
360extern u32 kvm_emulate_mtsrin_branch_offs;
361extern u32 kvm_emulate_mtsrin_reg1_offs;
362extern u32 kvm_emulate_mtsrin_reg2_offs;
363extern u32 kvm_emulate_mtsrin_orig_ins_offs;
364extern u32 kvm_emulate_mtsrin_len;
365extern u32 kvm_emulate_mtsrin[];
366
367static void __init kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
368{
369	u32 *p;
370	int distance_start;
371	int distance_end;
372	ulong next_inst;
373
374	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
375	if (!p)
376		return;
377
378	/* Find out where we are and put everything there */
379	distance_start = (ulong)p - (ulong)inst;
380	next_inst = ((ulong)inst + 4);
381	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];
382
383	/* Make sure we only write valid b instructions */
384	if (distance_start > KVM_INST_B_MAX) {
385		kvm_patching_worked = false;
386		return;
387	}
388
389	/* Modify the chunk to fit the invocation */
390	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
391	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
392	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
393	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
394	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
395	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);
396
397	/* Patch the invocation */
398	kvm_patch_ins_b(inst, distance_start);
399}
400
401#endif
402
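/*
 * Runs on each CPU: ask the host, via the ePAPR hypercall interface,
 * to map the magic page at effective address -4096; the host's
 * feature bits come back in out[0].
 */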
403static void __init kvm_map_magic_page(void *data)
404{
405	u32 *features = data;
406
407	ulong in[8] = {0};
408	ulong out[8];
409
410	in[0] = KVM_MAGIC_PAGE;
411	in[1] = KVM_MAGIC_PAGE | MAGIC_PAGE_FLAG_NOT_MAPPED_NX;
412
413	epapr_hypercall(in, out, KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE));
414
415	*features = out[0];
416}
417
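/*
 * Inspect one instruction word: mask off the RT (and, for mtsrin,
 * RB) operand fields and compare the rest against the known
 * privileged encodings. Plain SPR reads/writes are rewritten into
 * direct magic-page accesses; for example, on 64-bit
 *
 *     mfmsr   r5                        (0x7ca000a6)
 *
 * becomes
 *
 *     ld      r5, magic_var(msr)(0)
 *
 * reading the host-maintained MSR copy without a trap. The harder
 * cases (mtmsr/mtmsrd, wrtee/wrteei, mtsrin) are redirected into
 * generated trampolines instead, and tlbsync is simply nopped out.
 */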
418static void __init kvm_check_ins(u32 *inst, u32 features)
419{
420	u32 _inst = *inst;
421	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
422	u32 inst_rt = _inst & KVM_MASK_RT;
423
424	switch (inst_no_rt) {
425	/* Loads */
426	case KVM_INST_MFMSR:
427		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
428		break;
429	case KVM_INST_MFSPR(SPRN_SPRG0):
430		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
431		break;
432	case KVM_INST_MFSPR(SPRN_SPRG1):
433		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
434		break;
435	case KVM_INST_MFSPR(SPRN_SPRG2):
436		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
437		break;
438	case KVM_INST_MFSPR(SPRN_SPRG3):
439		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
440		break;
441	case KVM_INST_MFSPR(SPRN_SRR0):
442		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
443		break;
444	case KVM_INST_MFSPR(SPRN_SRR1):
445		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
446		break;
447#ifdef CONFIG_BOOKE
448	case KVM_INST_MFSPR(SPRN_DEAR):
449#else
450	case KVM_INST_MFSPR(SPRN_DAR):
451#endif
452		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
453		break;
454	case KVM_INST_MFSPR(SPRN_DSISR):
455		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
456		break;
457
458#ifdef CONFIG_PPC_E500
459	case KVM_INST_MFSPR(SPRN_MAS0):
460		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
461			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
462		break;
463	case KVM_INST_MFSPR(SPRN_MAS1):
464		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
465			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
466		break;
467	case KVM_INST_MFSPR(SPRN_MAS2):
468		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
469			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
470		break;
471	case KVM_INST_MFSPR(SPRN_MAS3):
472		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
473			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
474		break;
475	case KVM_INST_MFSPR(SPRN_MAS4):
476		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
477			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
478		break;
479	case KVM_INST_MFSPR(SPRN_MAS6):
480		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
481			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
482		break;
483	case KVM_INST_MFSPR(SPRN_MAS7):
484		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
485			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
486		break;
487#endif /* CONFIG_PPC_E500 */
488
489	case KVM_INST_MFSPR(SPRN_SPRG4):
490#ifdef CONFIG_BOOKE
491	case KVM_INST_MFSPR(SPRN_SPRG4R):
492#endif
493		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
494			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
495		break;
496	case KVM_INST_MFSPR(SPRN_SPRG5):
497#ifdef CONFIG_BOOKE
498	case KVM_INST_MFSPR(SPRN_SPRG5R):
499#endif
500		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
501			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
502		break;
503	case KVM_INST_MFSPR(SPRN_SPRG6):
504#ifdef CONFIG_BOOKE
505	case KVM_INST_MFSPR(SPRN_SPRG6R):
506#endif
507		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
508			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
509		break;
510	case KVM_INST_MFSPR(SPRN_SPRG7):
511#ifdef CONFIG_BOOKE
512	case KVM_INST_MFSPR(SPRN_SPRG7R):
513#endif
514		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
515			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
516		break;
517
518#ifdef CONFIG_BOOKE
519	case KVM_INST_MFSPR(SPRN_ESR):
520		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
521			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
522		break;
523#endif
524
525	case KVM_INST_MFSPR(SPRN_PIR):
526		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
527			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
528		break;
529
530
531	/* Stores */
532	case KVM_INST_MTSPR(SPRN_SPRG0):
533		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
534		break;
535	case KVM_INST_MTSPR(SPRN_SPRG1):
536		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
537		break;
538	case KVM_INST_MTSPR(SPRN_SPRG2):
539		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
540		break;
541	case KVM_INST_MTSPR(SPRN_SPRG3):
542		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
543		break;
544	case KVM_INST_MTSPR(SPRN_SRR0):
545		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
546		break;
547	case KVM_INST_MTSPR(SPRN_SRR1):
548		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
549		break;
550#ifdef CONFIG_BOOKE
551	case KVM_INST_MTSPR(SPRN_DEAR):
552#else
553	case KVM_INST_MTSPR(SPRN_DAR):
554#endif
555		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
556		break;
557	case KVM_INST_MTSPR(SPRN_DSISR):
558		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
559		break;
560#ifdef CONFIG_PPC_E500
561	case KVM_INST_MTSPR(SPRN_MAS0):
562		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
563			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
564		break;
565	case KVM_INST_MTSPR(SPRN_MAS1):
566		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
567			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
568		break;
569	case KVM_INST_MTSPR(SPRN_MAS2):
570		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
571			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
572		break;
573	case KVM_INST_MTSPR(SPRN_MAS3):
574		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
575			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
576		break;
577	case KVM_INST_MTSPR(SPRN_MAS4):
578		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
579			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
580		break;
581	case KVM_INST_MTSPR(SPRN_MAS6):
582		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
583			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
584		break;
585	case KVM_INST_MTSPR(SPRN_MAS7):
586		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
587			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
588		break;
589#endif /* CONFIG_PPC_E500 */
590
591	case KVM_INST_MTSPR(SPRN_SPRG4):
592		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
593			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
594		break;
595	case KVM_INST_MTSPR(SPRN_SPRG5):
596		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
597			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
598		break;
599	case KVM_INST_MTSPR(SPRN_SPRG6):
600		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
601			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
602		break;
603	case KVM_INST_MTSPR(SPRN_SPRG7):
604		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
605			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
606		break;
607
608#ifdef CONFIG_BOOKE
609	case KVM_INST_MTSPR(SPRN_ESR):
610		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
611			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
612		break;
613#endif
614
615	/* Nops */
616	case KVM_INST_TLBSYNC:
617		kvm_patch_ins_nop(inst);
618		break;
619
620	/* Rewrites */
621	case KVM_INST_MTMSRD_L1:
622		kvm_patch_ins_mtmsrd(inst, inst_rt);
623		break;
624	case KVM_INST_MTMSR:
625	case KVM_INST_MTMSRD_L0:
626		kvm_patch_ins_mtmsr(inst, inst_rt);
627		break;
628#ifdef CONFIG_BOOKE
629	case KVM_INST_WRTEE:
630		kvm_patch_ins_wrtee(inst, inst_rt, 0);
631		break;
632#endif
633	}
634
635	switch (inst_no_rt & ~KVM_MASK_RB) {
636#ifdef CONFIG_PPC_BOOK3S_32
637	case KVM_INST_MTSRIN:
638		if (features & KVM_MAGIC_FEAT_SR) {
639			u32 inst_rb = _inst & KVM_MASK_RB;
640			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
641		}
642		break;
643#endif
644	}
645
646	switch (_inst) {
647#ifdef CONFIG_BOOKE
648	case KVM_INST_WRTEEI_0:
649		kvm_patch_ins_wrteei_0(inst);
650		break;
651
652	case KVM_INST_WRTEEI_1:
653		kvm_patch_ins_wrtee(inst, 0, 1);
654		break;
655#endif
656	}
657}
658
659extern u32 kvm_template_start[];
660extern u32 kvm_template_end[];
661
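/*
 * kvm_template_start/end bracket the emulation templates in the
 * kernel image; the scan below skips that range so we never rewrite
 * our own trampoline code.
 */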
662static void __init kvm_use_magic_page(void)
663{
664	u32 *p;
665	u32 *start, *end;
666	u32 features;
667
668	/* Tell the host to map the magic page to -4096 on all CPUs */
669	on_each_cpu(kvm_map_magic_page, &features, 1);
670
671	/* Quick self-test to see if the mapping works */
672	if (fault_in_readable((const char __user *)KVM_MAGIC_PAGE,
673			      sizeof(u32))) {
674		kvm_patching_worked = false;
675		return;
676	}
677
678	/* Now loop through all code and find instructions */
679	start = (void*)_stext;
680	end = (void*)_etext;
681
682	/*
683	 * Being interrupted in the middle of patching would
684	 * be bad for SPRG4-7, which KVM can't keep in sync
685	 * with emulated accesses because reads don't trap.
686	 */
687	local_irq_disable();
688
689	for (p = start; p < end; p++) {
690		/* Avoid patching the template code */
691		if (p >= kvm_template_start && p < kvm_template_end) {
692			p = kvm_template_end - 1;
693			continue;
694		}
695		kvm_check_ins(p, features);
696	}
697
698	local_irq_enable();
699
700	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
701			 kvm_patching_worked ? "worked" : "failed");
702}
703
704static int __init kvm_guest_init(void)
705{
706	if (!kvm_para_available())
707		return 0;
708
709	if (!epapr_paravirt_enabled)
710		return 0;
711
712	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
713		kvm_use_magic_page();
714
715#ifdef CONFIG_PPC_BOOK3S_64
716	/* Enable napping */
717	powersave_nap = 1;
718#endif
719
720	return 0;
721}
722
723postcore_initcall(kvm_guest_init);
v3.1
 
  1/*
  2 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
  3 *
  4 * Authors:
  5 *     Alexander Graf <agraf@suse.de>
  6 *
  7 * This program is free software; you can redistribute it and/or modify
  8 * it under the terms of the GNU General Public License, version 2, as
  9 * published by the Free Software Foundation.
 10 *
 11 * This program is distributed in the hope that it will be useful,
 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 14 * GNU General Public License for more details.
 15 *
 16 * You should have received a copy of the GNU General Public License
 17 * along with this program; if not, write to the Free Software
 18 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 19 */
 20
 21#include <linux/kvm_host.h>
 22#include <linux/init.h>
 23#include <linux/kvm_para.h>
 24#include <linux/slab.h>
 25#include <linux/of.h>
 26
 27#include <asm/reg.h>
 28#include <asm/sections.h>
 29#include <asm/cacheflush.h>
 30#include <asm/disassemble.h>
 31
 32#define KVM_MAGIC_PAGE		(-4096L)
 33#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
 34
 35#define KVM_INST_LWZ		0x80000000
 36#define KVM_INST_STW		0x90000000
 37#define KVM_INST_LD		0xe8000000
 38#define KVM_INST_STD		0xf8000000
 39#define KVM_INST_NOP		0x60000000
 40#define KVM_INST_B		0x48000000
 41#define KVM_INST_B_MASK		0x03ffffff
 42#define KVM_INST_B_MAX		0x01ffffff
 43
 44#define KVM_MASK_RT		0x03e00000
 45#define KVM_RT_30		0x03c00000
 46#define KVM_MASK_RB		0x0000f800
 47#define KVM_INST_MFMSR		0x7c0000a6
 48#define KVM_INST_MFSPR_SPRG0	0x7c1042a6
 49#define KVM_INST_MFSPR_SPRG1	0x7c1142a6
 50#define KVM_INST_MFSPR_SPRG2	0x7c1242a6
 51#define KVM_INST_MFSPR_SPRG3	0x7c1342a6
 52#define KVM_INST_MFSPR_SRR0	0x7c1a02a6
 53#define KVM_INST_MFSPR_SRR1	0x7c1b02a6
 54#define KVM_INST_MFSPR_DAR	0x7c1302a6
 55#define KVM_INST_MFSPR_DSISR	0x7c1202a6
 56
 57#define KVM_INST_MTSPR_SPRG0	0x7c1043a6
 58#define KVM_INST_MTSPR_SPRG1	0x7c1143a6
 59#define KVM_INST_MTSPR_SPRG2	0x7c1243a6
 60#define KVM_INST_MTSPR_SPRG3	0x7c1343a6
 61#define KVM_INST_MTSPR_SRR0	0x7c1a03a6
 62#define KVM_INST_MTSPR_SRR1	0x7c1b03a6
 63#define KVM_INST_MTSPR_DAR	0x7c1303a6
 64#define KVM_INST_MTSPR_DSISR	0x7c1203a6
 65
 66#define KVM_INST_TLBSYNC	0x7c00046c
 67#define KVM_INST_MTMSRD_L0	0x7c000164
 68#define KVM_INST_MTMSRD_L1	0x7c010164
 69#define KVM_INST_MTMSR		0x7c000124
 70
 71#define KVM_INST_WRTEEI_0	0x7c000146
 72#define KVM_INST_WRTEEI_1	0x7c008146
 73
 74#define KVM_INST_MTSRIN		0x7c0001e4
 75
 76static bool kvm_patching_worked = true;
 77static char kvm_tmp[1024 * 1024];
 78static int kvm_tmp_index;
 79
 80static inline void kvm_patch_ins(u32 *inst, u32 new_inst)
 81{
 82	*inst = new_inst;
 83	flush_icache_range((ulong)inst, (ulong)inst + 4);
 84}
 85
 86static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt)
 87{
 88#ifdef CONFIG_64BIT
 89	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
 90#else
 91	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc));
 92#endif
 93}
 94
 95static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt)
 96{
 97#ifdef CONFIG_64BIT
 98	kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc));
 99#else
100	kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc));
101#endif
102}
103
104static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt)
105{
106	kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff));
107}
108
109static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt)
110{
111#ifdef CONFIG_64BIT
112	kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc));
113#else
114	kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc));
115#endif
116}
117
118static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt)
119{
120	kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc));
121}
122
123static void kvm_patch_ins_nop(u32 *inst)
124{
125	kvm_patch_ins(inst, KVM_INST_NOP);
126}
127
128static void kvm_patch_ins_b(u32 *inst, int addr)
129{
130#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
131	/* On relocatable kernels interrupt handlers and our code
132	   can be in different regions, so we don't patch them */
133
134	extern u32 __end_interrupts;
135	if ((ulong)inst < (ulong)&__end_interrupts)
136		return;
137#endif
138
139	kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK));
140}
141
142static u32 *kvm_alloc(int len)
143{
144	u32 *p;
145
146	if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) {
147		printk(KERN_ERR "KVM: No more space (%d + %d)\n",
148				kvm_tmp_index, len);
149		kvm_patching_worked = false;
150		return NULL;
151	}
152
153	p = (void*)&kvm_tmp[kvm_tmp_index];
154	kvm_tmp_index += len;
155
156	return p;
157}
158
159extern u32 kvm_emulate_mtmsrd_branch_offs;
160extern u32 kvm_emulate_mtmsrd_reg_offs;
161extern u32 kvm_emulate_mtmsrd_orig_ins_offs;
162extern u32 kvm_emulate_mtmsrd_len;
163extern u32 kvm_emulate_mtmsrd[];
164
165static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt)
166{
167	u32 *p;
168	int distance_start;
169	int distance_end;
170	ulong next_inst;
171
172	p = kvm_alloc(kvm_emulate_mtmsrd_len * 4);
173	if (!p)
174		return;
175
176	/* Find out where we are and put everything there */
177	distance_start = (ulong)p - (ulong)inst;
178	next_inst = ((ulong)inst + 4);
179	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs];
180
181	/* Make sure we only write valid b instructions */
182	if (distance_start > KVM_INST_B_MAX) {
183		kvm_patching_worked = false;
184		return;
185	}
186
187	/* Modify the chunk to fit the invocation */
188	memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4);
189	p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK;
190	switch (get_rt(rt)) {
191	case 30:
192		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
193				 magic_var(scratch2), KVM_RT_30);
194		break;
195	case 31:
196		kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs],
197				 magic_var(scratch1), KVM_RT_30);
198		break;
199	default:
200		p[kvm_emulate_mtmsrd_reg_offs] |= rt;
201		break;
202	}
203
204	p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst;
205	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4);
206
207	/* Patch the invocation */
208	kvm_patch_ins_b(inst, distance_start);
209}
210
211extern u32 kvm_emulate_mtmsr_branch_offs;
212extern u32 kvm_emulate_mtmsr_reg1_offs;
213extern u32 kvm_emulate_mtmsr_reg2_offs;
214extern u32 kvm_emulate_mtmsr_orig_ins_offs;
215extern u32 kvm_emulate_mtmsr_len;
216extern u32 kvm_emulate_mtmsr[];
217
218static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
219{
220	u32 *p;
221	int distance_start;
222	int distance_end;
223	ulong next_inst;
224
225	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
226	if (!p)
227		return;
228
229	/* Find out where we are and put everything there */
230	distance_start = (ulong)p - (ulong)inst;
231	next_inst = ((ulong)inst + 4);
232	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];
233
234	/* Make sure we only write valid b instructions */
235	if (distance_start > KVM_INST_B_MAX) {
236		kvm_patching_worked = false;
237		return;
238	}
239
240	/* Modify the chunk to fit the invocation */
241	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
242	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;
243
244	/* Make clobbered registers work too */
245	switch (get_rt(rt)) {
246	case 30:
247		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
248				 magic_var(scratch2), KVM_RT_30);
249		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
250				 magic_var(scratch2), KVM_RT_30);
251		break;
252	case 31:
253		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
254				 magic_var(scratch1), KVM_RT_30);
255		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
256				 magic_var(scratch1), KVM_RT_30);
257		break;
258	default:
259		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
260		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
261		break;
262	}
263
264	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
265	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);
266
267	/* Patch the invocation */
268	kvm_patch_ins_b(inst, distance_start);
269}
270
271#ifdef CONFIG_BOOKE
272
273extern u32 kvm_emulate_wrteei_branch_offs;
274extern u32 kvm_emulate_wrteei_ee_offs;
275extern u32 kvm_emulate_wrteei_len;
276extern u32 kvm_emulate_wrteei[];
277
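/*
 * In this version a single template handles both wrteei 0 and
 * wrteei 1: the EE bit of the original instruction is copied into
 * the template by the "|= (*inst & MSR_EE)" fixup below.
 */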
278static void kvm_patch_ins_wrteei(u32 *inst)
279{
280	u32 *p;
281	int distance_start;
282	int distance_end;
283	ulong next_inst;
284
285	p = kvm_alloc(kvm_emulate_wrteei_len * 4);
286	if (!p)
287		return;
288
289	/* Find out where we are and put everything there */
290	distance_start = (ulong)p - (ulong)inst;
291	next_inst = ((ulong)inst + 4);
292	distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs];
293
294	/* Make sure we only write valid b instructions */
295	if (distance_start > KVM_INST_B_MAX) {
296		kvm_patching_worked = false;
297		return;
298	}
299
300	/* Modify the chunk to fit the invocation */
301	memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4);
302	p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK;
303	p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE);
304	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4);
305
306	/* Patch the invocation */
307	kvm_patch_ins_b(inst, distance_start);
308}
309
310#endif
311
312#ifdef CONFIG_PPC_BOOK3S_32
313
314extern u32 kvm_emulate_mtsrin_branch_offs;
315extern u32 kvm_emulate_mtsrin_reg1_offs;
316extern u32 kvm_emulate_mtsrin_reg2_offs;
317extern u32 kvm_emulate_mtsrin_orig_ins_offs;
318extern u32 kvm_emulate_mtsrin_len;
319extern u32 kvm_emulate_mtsrin[];
320
321static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb)
322{
323	u32 *p;
324	int distance_start;
325	int distance_end;
326	ulong next_inst;
327
328	p = kvm_alloc(kvm_emulate_mtsrin_len * 4);
329	if (!p)
330		return;
331
332	/* Find out where we are and put everything there */
333	distance_start = (ulong)p - (ulong)inst;
334	next_inst = ((ulong)inst + 4);
335	distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs];
336
337	/* Make sure we only write valid b instructions */
338	if (distance_start > KVM_INST_B_MAX) {
339		kvm_patching_worked = false;
340		return;
341	}
342
343	/* Modify the chunk to fit the invocation */
344	memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4);
345	p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK;
346	p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10);
347	p[kvm_emulate_mtsrin_reg2_offs] |= rt;
348	p[kvm_emulate_mtsrin_orig_ins_offs] = *inst;
349	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4);
350
351	/* Patch the invocation */
352	kvm_patch_ins_b(inst, distance_start);
353}
354
355#endif
356
357static void kvm_map_magic_page(void *data)
358{
359	u32 *features = data;
360
361	ulong in[8];
362	ulong out[8];
363
364	in[0] = KVM_MAGIC_PAGE;
365	in[1] = KVM_MAGIC_PAGE;
366
367	kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE);
368
369	*features = out[0];
370}
371
372static void kvm_check_ins(u32 *inst, u32 features)
373{
374	u32 _inst = *inst;
375	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
376	u32 inst_rt = _inst & KVM_MASK_RT;
377
378	switch (inst_no_rt) {
379	/* Loads */
380	case KVM_INST_MFMSR:
381		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
382		break;
383	case KVM_INST_MFSPR_SPRG0:
384		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
385		break;
386	case KVM_INST_MFSPR_SPRG1:
387		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
388		break;
389	case KVM_INST_MFSPR_SPRG2:
390		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
391		break;
392	case KVM_INST_MFSPR_SPRG3:
393		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
394		break;
395	case KVM_INST_MFSPR_SRR0:
396		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
397		break;
398	case KVM_INST_MFSPR_SRR1:
399		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
400		break;
401	case KVM_INST_MFSPR_DAR:
402		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
403		break;
404	case KVM_INST_MFSPR_DSISR:
405		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
406		break;
407
408	/* Stores */
409	case KVM_INST_MTSPR_SPRG0:
410		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
411		break;
412	case KVM_INST_MTSPR_SPRG1:
413		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
414		break;
415	case KVM_INST_MTSPR_SPRG2:
416		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
417		break;
418	case KVM_INST_MTSPR_SPRG3:
419		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
420		break;
421	case KVM_INST_MTSPR_SRR0:
422		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
423		break;
424	case KVM_INST_MTSPR_SRR1:
425		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
426		break;
427	case KVM_INST_MTSPR_DAR:
428		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
429		break;
430	case KVM_INST_MTSPR_DSISR:
431		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
432		break;
433
434	/* Nops */
435	case KVM_INST_TLBSYNC:
436		kvm_patch_ins_nop(inst);
437		break;
438
439	/* Rewrites */
440	case KVM_INST_MTMSRD_L1:
441		kvm_patch_ins_mtmsrd(inst, inst_rt);
442		break;
443	case KVM_INST_MTMSR:
444	case KVM_INST_MTMSRD_L0:
445		kvm_patch_ins_mtmsr(inst, inst_rt);
446		break;
447	}
448
449	switch (inst_no_rt & ~KVM_MASK_RB) {
450#ifdef CONFIG_PPC_BOOK3S_32
451	case KVM_INST_MTSRIN:
452		if (features & KVM_MAGIC_FEAT_SR) {
453			u32 inst_rb = _inst & KVM_MASK_RB;
454			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
455		}
456		break;
458#endif
459	}
460
461	switch (_inst) {
462#ifdef CONFIG_BOOKE
463	case KVM_INST_WRTEEI_0:
464	case KVM_INST_WRTEEI_1:
465		kvm_patch_ins_wrteei(inst);
466		break;
467#endif
468	}
469}
470
471static void kvm_use_magic_page(void)
472{
473	u32 *p;
474	u32 *start, *end;
475	u32 tmp;
476	u32 features;
477
478	/* Tell the host to map the magic page to -4096 on all CPUs */
479	on_each_cpu(kvm_map_magic_page, &features, 1);
480
481	/* Quick self-test to see if the mapping works */
482	if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) {
483		kvm_patching_worked = false;
484		return;
485	}
486
487	/* Now loop through all code and find instructions */
488	start = (void*)_stext;
489	end = (void*)_etext;
490
491	for (p = start; p < end; p++)
492		kvm_check_ins(p, features);
493
494	printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
495			 kvm_patching_worked ? "worked" : "failed");
496}
497
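/*
 * Generic hypercall wrapper: arguments are passed in r3-r10 and the
 * call number in r11; results come back in r4-r11 with the status in
 * r3. kvm_hypercall_start is a stub of up to four instructions that
 * kvm_para_setup() fills in with the sequence the hypervisor
 * advertises.
 */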
498unsigned long kvm_hypercall(unsigned long *in,
499			    unsigned long *out,
500			    unsigned long nr)
501{
502	unsigned long register r0 asm("r0");
503	unsigned long register r3 asm("r3") = in[0];
504	unsigned long register r4 asm("r4") = in[1];
505	unsigned long register r5 asm("r5") = in[2];
506	unsigned long register r6 asm("r6") = in[3];
507	unsigned long register r7 asm("r7") = in[4];
508	unsigned long register r8 asm("r8") = in[5];
509	unsigned long register r9 asm("r9") = in[6];
510	unsigned long register r10 asm("r10") = in[7];
511	unsigned long register r11 asm("r11") = nr;
512	unsigned long register r12 asm("r12");
513
514	asm volatile("bl	kvm_hypercall_start"
515		     : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6),
516		       "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11),
517		       "=r"(r12)
518		     : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8),
519		       "r"(r9), "r"(r10), "r"(r11)
520		     : "memory", "cc", "xer", "ctr", "lr");
521
522	out[0] = r4;
523	out[1] = r5;
524	out[2] = r6;
525	out[3] = r7;
526	out[4] = r8;
527	out[5] = r9;
528	out[6] = r10;
529	out[7] = r11;
530
531	return r3;
532}
533EXPORT_SYMBOL_GPL(kvm_hypercall);
534
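/*
 * Read the "hcall-instructions" property of the /hypervisor
 * device-tree node (at most four instruction words) and patch it
 * into the kvm_hypercall_start stub.
 */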
535static int kvm_para_setup(void)
536{
537	extern u32 kvm_hypercall_start;
538	struct device_node *hyper_node;
539	u32 *insts;
540	int len, i;
541
542	hyper_node = of_find_node_by_path("/hypervisor");
543	if (!hyper_node)
544		return -1;
545
546	insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len);
547	if (len % 4)
548		return -1;
549	if (len > (4 * 4))
550		return -1;
551
552	for (i = 0; i < (len / 4); i++)
553		kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]);
554
555	return 0;
556}
557
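/*
 * kvm_tmp is a static 1 MiB buffer in this version; give every page
 * we did not use for trampolines back to the page allocator.
 */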
558static __init void kvm_free_tmp(void)
559{
560	unsigned long start, end;
561
562	start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK;
563	end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK;
564
565	/* Free the tmp space we don't need */
566	for (; start < end; start += PAGE_SIZE) {
567		ClearPageReserved(virt_to_page(start));
568		init_page_count(virt_to_page(start));
569		free_page(start);
570		totalram_pages++;
571	}
572}
573
574static int __init kvm_guest_init(void)
575{
576	if (!kvm_para_available())
577		goto free_tmp;
578
579	if (kvm_para_setup())
580		goto free_tmp;
581
582	if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE))
583		kvm_use_magic_page();
584
585#ifdef CONFIG_PPC_BOOK3S_64
586	/* Enable napping */
587	powersave_nap = 1;
588#endif
589
590free_tmp:
591	kvm_free_tmp();
592
593	return 0;
594}
595
596postcore_initcall(kvm_guest_init);