v6.2
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * access guest memory
 *
 * Copyright IBM Corp. 2008, 2014
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/ptrace.h>
#include "kvm-s390.h"

/**
 * _kvm_s390_real_to_abs - convert guest real address to guest absolute address
 * @prefix: guest prefix
 * @gra: guest real address
 *
 * Returns the guest absolute address that corresponds to the passed guest real
 * address @gra by applying the given prefix.
 */
static inline unsigned long _kvm_s390_real_to_abs(u32 prefix, unsigned long gra)
{
	if (gra < 2 * PAGE_SIZE)
		gra += prefix;
	else if (gra >= prefix && gra < prefix + 2 * PAGE_SIZE)
		gra -= prefix;
	return gra;
}
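
The prefix swap above is symmetric: the two guest real pages at 0 and the two pages starting at the prefix address trade places, and every other address maps to itself. A minimal standalone sketch of the same arithmetic (the ex_ names, the 4 KiB page size and the 0x20000 prefix are made up for illustration):

#include <stdio.h>

#define EX_PAGE_SIZE 4096UL

static unsigned long ex_real_to_abs(unsigned long prefix, unsigned long gra)
{
	if (gra < 2 * EX_PAGE_SIZE)
		gra += prefix;		/* lowcore maps to the prefix area */
	else if (gra >= prefix && gra < prefix + 2 * EX_PAGE_SIZE)
		gra -= prefix;		/* prefix area maps back to 0 */
	return gra;			/* everything else is identity */
}

int main(void)
{
	unsigned long prefix = 0x20000;

	printf("real 0x0     -> abs 0x%lx\n", ex_real_to_abs(prefix, 0x0));
	printf("real 0x20000 -> abs 0x%lx\n", ex_real_to_abs(prefix, 0x20000));
	printf("real 0x50000 -> abs 0x%lx\n", ex_real_to_abs(prefix, 0x50000));
	return 0;
}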

/**
 * kvm_s390_real_to_abs - convert guest real address to guest absolute address
 * @vcpu: guest virtual cpu
 * @gra: guest real address
 *
 * Returns the guest absolute address that corresponds to the passed guest real
 * address @gra of a virtual guest cpu by applying its prefix.
 */
static inline unsigned long kvm_s390_real_to_abs(struct kvm_vcpu *vcpu,
						 unsigned long gra)
{
	return _kvm_s390_real_to_abs(kvm_s390_get_prefix(vcpu), gra);
}

/**
 * _kvm_s390_logical_to_effective - convert guest logical to effective address
 * @psw: psw of the guest
 * @ga: guest logical address
 *
 * Convert a guest logical address to an effective address by applying the
 * rules of the addressing mode defined by bits 31 and 32 of the given PSW
 * (extended/basic addressing mode).
 *
 * Depending on the addressing mode, the upper 40 bits (24 bit addressing
 * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing
 * mode) of @ga will be zeroed and the remaining bits will be returned.
 */
static inline unsigned long _kvm_s390_logical_to_effective(psw_t *psw,
							   unsigned long ga)
{
	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_64BIT)
		return ga;
	if (psw_bits(*psw).eaba == PSW_BITS_AMODE_31BIT)
		return ga & ((1UL << 31) - 1);
	return ga & ((1UL << 24) - 1);
}
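
The three addressing modes reduce to simple masks on the logical address. A standalone sketch (the ex_ names and the enum are stand-ins for the PSW EA/BA bit pair; values chosen for illustration):

#include <stdio.h>

enum ex_amode { EX_AMODE_24, EX_AMODE_31, EX_AMODE_64 };

static unsigned long ex_logical_to_effective(enum ex_amode amode,
					     unsigned long ga)
{
	if (amode == EX_AMODE_64)
		return ga;			/* no wrap-around */
	if (amode == EX_AMODE_31)
		return ga & ((1UL << 31) - 1);	/* keep low 31 bits */
	return ga & ((1UL << 24) - 1);		/* keep low 24 bits */
}

int main(void)
{
	unsigned long ga = 0xdeadbeefcafeUL;

	printf("24-bit: 0x%lx\n", ex_logical_to_effective(EX_AMODE_24, ga));
	printf("31-bit: 0x%lx\n", ex_logical_to_effective(EX_AMODE_31, ga));
	printf("64-bit: 0x%lx\n", ex_logical_to_effective(EX_AMODE_64, ga));
	return 0;
}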

/**
 * kvm_s390_logical_to_effective - convert guest logical to effective address
 * @vcpu: guest virtual cpu
 * @ga: guest logical address
 *
 * Convert a guest vcpu logical address to a guest vcpu effective address by
 * applying the rules of the vcpu's addressing mode defined by PSW bits 31
 * and 32 (extended/basic addressing mode).
 *
 * Depending on the vcpu's addressing mode the upper 40 bits (24 bit addressing
 * mode), 33 bits (31 bit addressing mode) or no bits (64 bit addressing mode)
 * of @ga will be zeroed and the remaining bits will be returned.
 */
static inline unsigned long kvm_s390_logical_to_effective(struct kvm_vcpu *vcpu,
							  unsigned long ga)
{
	return _kvm_s390_logical_to_effective(&vcpu->arch.sie_block->gpsw, ga);
}

/*
 * put_guest_lc, read_guest_lc and write_guest_lc are guest access functions
 * which shall only be used to access the lowcore of a vcpu.
 * These functions should be used for e.g. interrupt handlers where no
 * guest memory access protection facilities, like key or low address
 * protection, are applicable.
 * At a later point guest vcpu lowcore access should happen via pinned
 * prefix pages, so that these pages can be accessed directly via the
 * kernel mapping. All of these *_lc functions can be removed then.
 */

/**
 * put_guest_lc - write a simple variable to a guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @x: value to copy to guest
 * @gra: vcpu's destination guest real address
 *
 * Copies a simple value from kernel space to a guest vcpu's lowcore.
 * The size of the variable may be 1, 2, 4 or 8 bytes. The destination
 * must be located in the vcpu's lowcore. Otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
#define put_guest_lc(vcpu, x, gra)				\
({								\
	struct kvm_vcpu *__vcpu = (vcpu);			\
	__typeof__(*(gra)) __x = (x);				\
	unsigned long __gpa;					\
								\
	__gpa = (unsigned long)(gra);				\
	__gpa += kvm_s390_get_prefix(__vcpu);			\
	kvm_write_guest(__vcpu->kvm, __gpa, &__x, sizeof(__x));	\
})
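
The subtle point of put_guest_lc is that @gra is passed as a typed pointer purely so that __typeof__(*(gra)) can pick the store width; the pointer value itself is just the lowcore offset. A standalone mock of that pattern (all ex_ names invented; memcpy stands in for kvm_write_guest):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint8_t ex_lowcore[8192];	/* stand-in for the guest lowcore */

/* same shape as put_guest_lc: the pointer type of gra sets the store width */
#define ex_put_lc(x, gra)					\
({								\
	__typeof__(*(gra)) __x = (x);				\
	memcpy(ex_lowcore + (unsigned long)(gra), &__x,		\
	       sizeof(__x));					\
})

int main(void)
{
	ex_put_lc(0x1234, (uint16_t *)0x86);		/* 2-byte store */
	ex_put_lc(0xdeadbeef, (uint32_t *)0x100);	/* 4-byte store */
	printf("%02x %02x\n", ex_lowcore[0x86], ex_lowcore[0x87]);
	return 0;
}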

/**
 * write_guest_lc - copy data from kernel space to guest vcpu's lowcore
 * @vcpu: virtual cpu
 * @gra: vcpu's destination guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from kernel space to guest vcpu's lowcore. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
static inline __must_check
int write_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		   unsigned long len)
{
	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

	return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_lc - copy data from guest vcpu's lowcore to kernel space
 * @vcpu: virtual cpu
 * @gra: vcpu's source guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy data from guest vcpu's lowcore to kernel space. The entire range must
 * be located within the vcpu's lowcore, otherwise the result is undefined.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * Note: an error indicates that either the kernel is out of memory or
 *	 the guest memory mapping is broken. In any case the best solution
 *	 would be to terminate the guest.
 *	 It is wrong to inject a guest exception.
 */
static inline __must_check
int read_guest_lc(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		  unsigned long len)
{
	unsigned long gpa = gra + kvm_s390_get_prefix(vcpu);

	return kvm_read_guest(vcpu->kvm, gpa, data, len);
}

enum gacc_mode {
	GACC_FETCH,
	GACC_STORE,
	GACC_IFETCH,
};

int guest_translate_address_with_key(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
				     unsigned long *gpa, enum gacc_mode mode,
				     u8 access_key);

int check_gva_range(struct kvm_vcpu *vcpu, unsigned long gva, u8 ar,
		    unsigned long length, enum gacc_mode mode, u8 access_key);

int check_gpa_range(struct kvm *kvm, unsigned long gpa, unsigned long length,
		    enum gacc_mode mode, u8 access_key);

int access_guest_abs_with_key(struct kvm *kvm, gpa_t gpa, void *data,
			      unsigned long len, enum gacc_mode mode, u8 access_key);

int access_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			  void *data, unsigned long len, enum gacc_mode mode,
			  u8 access_key);

int access_guest_real(struct kvm_vcpu *vcpu, unsigned long gra,
		      void *data, unsigned long len, enum gacc_mode mode);

/**
 * write_guest_with_key - copy data from kernel space to guest space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: source address in kernel space
 * @len: number of bytes to copy
 * @access_key: access key the storage key needs to match
 *
 * Copy @len bytes from @data (kernel space) to @ga (guest address).
 * In order to copy data to guest space the PSW of the vcpu is inspected:
 * If DAT is off data will be copied to guest real or absolute memory.
 * If DAT is on data will be copied to the address space as specified by
 * the address space bits of the PSW:
 * Primary, secondary, home space or access register mode.
 * The addressing mode of the PSW is also inspected, so that address wrap
 * around is taken into account for 24-, 31- and 64-bit addressing mode,
 * if the data to be copied crosses page boundaries in guest address space.
 * In addition low address, DAT and key protection checks are performed before
 * copying any data.
 *
 * This function modifies the 'struct kvm_s390_pgm_info pgm' member of @vcpu.
 * In case of an access exception (e.g. protection exception) pgm will contain
 * all data necessary so that a subsequent call to 'kvm_s390_inject_prog_vcpu()'
 * will inject a correct exception into the guest.
 * If no access exception happened, the contents of pgm are undefined when
 * this function returns.
 *
 * Returns:  - zero on success
 *	     - a negative value if e.g. the guest mapping is broken or in
 *	       case of out-of-memory. In this case the contents of pgm are
 *	       undefined. Also parts of @data may have been copied to guest
 *	       space.
 *	     - a positive value if an access exception happened. In this case
 *	       the returned value is the program interruption code and the
 *	       contents of pgm may be used to inject an exception into the
 *	       guest. No data has been copied to guest space.
 *
 * Note: in case an access exception is recognized no data has been copied to
 *	 guest space (this is also true, if the data to be copied would cross
 *	 one or more page boundaries in guest space).
 *	 Therefore this function may be used for nullifying and suppressing
 *	 instruction emulation.
 *	 It may also be used for terminating instructions, if it is undefined
 *	 whether data has been changed in guest space in case of an exception.
 */
static inline __must_check
int write_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			 void *data, unsigned long len, u8 access_key)
{
	return access_guest_with_key(vcpu, ga, ar, data, len, GACC_STORE,
				     access_key);
}
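
The three-way return contract above (negative, zero, positive) leads to a fixed caller idiom. A hedged sketch of such a caller (ex_emulate_store is hypothetical; kvm_s390_inject_prog_cond() is the existing kvm-s390.h helper that passes negative and zero values through and injects the prepared program interruption for positive ones):

/* illustrative only: how callers typically fold the return value */
static int ex_emulate_store(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			    void *buf, unsigned long len, u8 key)
{
	int rc = write_guest_with_key(vcpu, ga, ar, buf, len, key);

	/*
	 * rc < 0:  host-side failure, report to userspace;
	 * rc > 0:  access exception, inject the program interruption
	 *          prepared in vcpu->arch.pgm back into the guest;
	 * rc == 0: success.
	 */
	return kvm_s390_inject_prog_cond(vcpu, rc);
}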

/**
 * write_guest - copy data from kernel space to guest space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * The behaviour of write_guest is identical to write_guest_with_key, except
 * that the PSW access key is used instead of an explicit argument.
 */
static inline __must_check
int write_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
		unsigned long len)
{
	u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;

	return write_guest_with_key(vcpu, ga, ar, data, len, access_key);
}

/**
 * read_guest_with_key - copy data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 * @access_key: access key the storage key needs to match
 *
 * Copy @len bytes from @ga (guest address) to @data (kernel space).
 *
 * The behaviour of read_guest_with_key is identical to write_guest_with_key,
 * except that data will be copied from guest space to kernel space.
 */
static inline __must_check
int read_guest_with_key(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar,
			void *data, unsigned long len, u8 access_key)
{
	return access_guest_with_key(vcpu, ga, ar, data, len, GACC_FETCH,
				     access_key);
}

/**
 * read_guest - copy data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @ar: access register
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @ga (guest address) to @data (kernel space).
 *
 * The behaviour of read_guest is identical to read_guest_with_key, except
 * that the PSW access key is used instead of an explicit argument.
 */
static inline __must_check
int read_guest(struct kvm_vcpu *vcpu, unsigned long ga, u8 ar, void *data,
	       unsigned long len)
{
	u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;

	return read_guest_with_key(vcpu, ga, ar, data, len, access_key);
}

/**
 * read_guest_instr - copy instruction data from guest space to kernel space
 * @vcpu: virtual cpu
 * @ga: guest address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from the given address (guest space) to @data (kernel
 * space).
 *
 * The behaviour of read_guest_instr is identical to read_guest, except that
 * instruction data will be read from primary space when in home-space or
 * address-space mode.
 */
static inline __must_check
int read_guest_instr(struct kvm_vcpu *vcpu, unsigned long ga, void *data,
		     unsigned long len)
{
	u8 access_key = psw_bits(vcpu->arch.sie_block->gpsw).key;

	return access_guest_with_key(vcpu, ga, 0, data, len, GACC_IFETCH,
				     access_key);
}

/**
 * write_guest_abs - copy data from kernel space to guest space absolute
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gpa (guest absolute address).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
		    unsigned long len)
{
	return kvm_write_guest(vcpu->kvm, gpa, data, len);
}

/**
 * read_guest_abs - copy data from guest space absolute to kernel space
 * @vcpu: virtual cpu
 * @gpa: guest physical (absolute) address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gpa (guest absolute address) to @data (kernel space).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest key protection is not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_abs(struct kvm_vcpu *vcpu, unsigned long gpa, void *data,
		   unsigned long len)
{
	return kvm_read_guest(vcpu->kvm, gpa, data, len);
}

/**
 * write_guest_real - copy data from kernel space to guest space real
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: source address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @data (kernel space) to @gra (guest real address).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest low address and key protection are not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to guest memory.
 */
static inline __must_check
int write_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		     unsigned long len)
{
	return access_guest_real(vcpu, gra, data, len, GACC_STORE);
}

/**
 * read_guest_real - copy data from guest space real to kernel space
 * @vcpu: virtual cpu
 * @gra: guest real address
 * @data: destination address in kernel space
 * @len: number of bytes to copy
 *
 * Copy @len bytes from @gra (guest real address) to @data (kernel space).
 * It is up to the caller to ensure that the entire guest memory range is
 * valid memory before calling this function.
 * Guest key protection is not checked.
 *
 * Returns zero on success or -EFAULT on error.
 *
 * If an error occurs data may have been copied partially to kernel space.
 */
static inline __must_check
int read_guest_real(struct kvm_vcpu *vcpu, unsigned long gra, void *data,
		    unsigned long len)
{
	return access_guest_real(vcpu, gra, data, len, GACC_FETCH);
}

void ipte_lock(struct kvm *kvm);
void ipte_unlock(struct kvm *kvm);
int ipte_lock_held(struct kvm *kvm);
int kvm_s390_check_low_addr_prot_real(struct kvm_vcpu *vcpu, unsigned long gra);

/* MVPG PEI indication bits */
#define PEI_DAT_PROT 2
#define PEI_NOT_PTE 4

int kvm_s390_shadow_fault(struct kvm_vcpu *vcpu, struct gmap *shadow,
			  unsigned long saddr, unsigned long *datptr);

#endif /* __KVM_S390_GACCESS_H */

v3.5.6
 
/*
 * access.h - access guest memory
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#ifndef __KVM_S390_GACCESS_H
#define __KVM_S390_GACCESS_H

#include <linux/compiler.h>
#include <linux/kvm_host.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"

static inline void __user *__guestaddr_to_user(struct kvm_vcpu *vcpu,
					       unsigned long guestaddr)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if (guestaddr < 2 * PAGE_SIZE)
		guestaddr += prefix;
	else if ((guestaddr >= prefix) && (guestaddr < prefix + 2 * PAGE_SIZE))
		guestaddr -= prefix;

	return (void __user *) gmap_fault(guestaddr, vcpu->arch.gmap);
}

static inline int get_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (unsigned long __user *) uptr);
}

static inline int get_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u32 __user *) uptr);
}

static inline int get_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u16 __user *) uptr);
}

static inline int get_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 *result)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return get_user(*result, (u8 __user *) uptr);
}

static inline int put_guest_u64(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u64 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 7);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u64 __user *) uptr);
}

static inline int put_guest_u32(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u32 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 3);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u32 __user *) uptr);
}

static inline int put_guest_u16(struct kvm_vcpu *vcpu, unsigned long guestaddr,
				u16 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	BUG_ON(guestaddr & 1);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u16 __user *) uptr);
}

static inline int put_guest_u8(struct kvm_vcpu *vcpu, unsigned long guestaddr,
			       u8 value)
{
	void __user *uptr = __guestaddr_to_user(vcpu, guestaddr);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	return put_user(value, (u8 __user *) uptr);
}

static inline int __copy_to_guest_slow(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = from;

	for (i = 0; i < n; i++) {
		rc = put_guest_u8(vcpu, guestdest++, *(data++));
		if (rc < 0)
			return rc;
	}
	return 0;
}

static inline int __copy_to_guest_fast(struct kvm_vcpu *vcpu,
				       unsigned long guestdest,
				       void *from, unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestdest + n < guestdest)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestdest & PMD_MASK) == ((guestdest+n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment */
	uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestdest & ~PMD_MASK);

	r = copy_to_user(uptr, from, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	from += size;
	n -= size;
	guestdest += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		from += PMD_SIZE;
		n -= PMD_SIZE;
		guestdest += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *)gmap_fault(guestdest, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_to_user(uptr, from, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}
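
The fast path above only re-resolves the host mapping at segment (PMD) boundaries: one partial chunk up to the first boundary, whole segments in the middle, then the tail. A standalone sketch of that chunking arithmetic (the ex_ names and the 1 MiB segment size are made up; printf stands in for the per-chunk gmap_fault() plus copy_to_user()):

#include <stdio.h>

#define EX_SEG_SIZE (1UL << 20)		/* pretend segment size */
#define EX_SEG_MASK (~(EX_SEG_SIZE - 1))

/* walk [dest, dest+n) in segment-sized chunks, as the fast path does */
static void ex_copy_chunked(unsigned long dest, unsigned long n)
{
	unsigned long size;

	/* first, partial chunk up to the next segment boundary */
	size = EX_SEG_SIZE - (dest & ~EX_SEG_MASK);
	if (size > n)
		size = n;
	while (n) {
		printf("chunk at 0x%lx, %lu bytes\n", dest, size);
		dest += size;
		n -= size;
		size = n < EX_SEG_SIZE ? n : EX_SEG_SIZE;
	}
}

int main(void)
{
	/* crosses two segment boundaries -> three chunks */
	ex_copy_chunked(0x180000, 0x280000);
	return 0;
}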

static inline int copy_to_guest_absolute(struct kvm_vcpu *vcpu,
					 unsigned long guestdest,
					 void *from, unsigned long n)
{
	return __copy_to_guest_fast(vcpu, guestdest, from, n);
}

static inline int copy_to_guest(struct kvm_vcpu *vcpu, unsigned long guestdest,
				void *from, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestdest < 2 * PAGE_SIZE) && (guestdest + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestdest < prefix) && (guestdest + n > prefix))
		goto slowpath;

	if ((guestdest < prefix + 2 * PAGE_SIZE)
	    && (guestdest + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestdest < 2 * PAGE_SIZE)
		guestdest += prefix;
	else if ((guestdest >= prefix) && (guestdest < prefix + 2 * PAGE_SIZE))
		guestdest -= prefix;

	return __copy_to_guest_fast(vcpu, guestdest, from, n);
slowpath:
	return __copy_to_guest_slow(vcpu, guestdest, from, n);
}
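
The three straddle checks guarantee that the whole copy lies on one side of each prefix-swap boundary, so a single address adjustment is valid for the entire range; otherwise the byte-wise slow path applies the swap per byte. A standalone sketch of that decision (the ex_ names and values are invented):

#include <stdio.h>
#include <stdbool.h>

#define EX_PAGE_SIZE 4096UL

/* true if [addr, addr+n) crosses the point 'edge' */
static bool ex_straddles(unsigned long addr, unsigned long n,
			 unsigned long edge)
{
	return addr < edge && addr + n > edge;
}

static const char *ex_pick_path(unsigned long prefix, unsigned long dest,
				unsigned long n)
{
	if (ex_straddles(dest, n, 2 * EX_PAGE_SIZE) ||
	    ex_straddles(dest, n, prefix) ||
	    ex_straddles(dest, n, prefix + 2 * EX_PAGE_SIZE))
		return "slowpath";
	return "fastpath";
}

int main(void)
{
	unsigned long prefix = 0x20000;

	printf("%s\n", ex_pick_path(prefix, 0x1000, 0x800));	/* inside lowcore */
	printf("%s\n", ex_pick_path(prefix, 0x1000, 0x2000));	/* crosses 0x2000 */
	return 0;
}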

static inline int __copy_from_guest_slow(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int rc;
	unsigned long i;
	u8 *data = to;

	for (i = 0; i < n; i++) {
		rc = get_guest_u8(vcpu, guestsrc++, data++);
		if (rc < 0)
			return rc;
	}
	return 0;
}

static inline int __copy_from_guest_fast(struct kvm_vcpu *vcpu, void *to,
					 unsigned long guestsrc,
					 unsigned long n)
{
	int r;
	void __user *uptr;
	unsigned long size;

	if (guestsrc + n < guestsrc)
		return -EFAULT;

	/* simple case: all within one segment table entry? */
	if ((guestsrc & PMD_MASK) == ((guestsrc+n) & PMD_MASK)) {
		uptr = (void __user *) gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;

		goto out;
	}

	/* copy first segment */
	uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

	if (IS_ERR((void __force *) uptr))
		return PTR_ERR((void __force *) uptr);

	size = PMD_SIZE - (guestsrc & ~PMD_MASK);

	r = copy_from_user(to, uptr, size);

	if (r) {
		r = -EFAULT;
		goto out;
	}
	to += size;
	n -= size;
	guestsrc += size;

	/* copy full segments */
	while (n >= PMD_SIZE) {
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, PMD_SIZE);

		if (r) {
			r = -EFAULT;
			goto out;
		}
		to += PMD_SIZE;
		n -= PMD_SIZE;
		guestsrc += PMD_SIZE;
	}

	/* copy the tail segment */
	if (n) {
		uptr = (void __user *)gmap_fault(guestsrc, vcpu->arch.gmap);

		if (IS_ERR((void __force *) uptr))
			return PTR_ERR((void __force *) uptr);

		r = copy_from_user(to, uptr, n);

		if (r)
			r = -EFAULT;
	}
out:
	return r;
}

static inline int copy_from_guest_absolute(struct kvm_vcpu *vcpu, void *to,
					   unsigned long guestsrc,
					   unsigned long n)
{
	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
}

static inline int copy_from_guest(struct kvm_vcpu *vcpu, void *to,
				  unsigned long guestsrc, unsigned long n)
{
	unsigned long prefix = vcpu->arch.sie_block->prefix;

	if ((guestsrc < 2 * PAGE_SIZE) && (guestsrc + n > 2 * PAGE_SIZE))
		goto slowpath;

	if ((guestsrc < prefix) && (guestsrc + n > prefix))
		goto slowpath;

	if ((guestsrc < prefix + 2 * PAGE_SIZE)
	    && (guestsrc + n > prefix + 2 * PAGE_SIZE))
		goto slowpath;

	if (guestsrc < 2 * PAGE_SIZE)
		guestsrc += prefix;
	else if ((guestsrc >= prefix) && (guestsrc < prefix + 2 * PAGE_SIZE))
		guestsrc -= prefix;

	return __copy_from_guest_fast(vcpu, to, guestsrc, n);
slowpath:
	return __copy_from_guest_slow(vcpu, to, guestsrc, n);
}
#endif